/************************************************************************
 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
 * Copyright(c) 2002-2005 Neterion Inc.
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 * Jeff Garzik		: For pointing out the improper error condition
 *			  check in the s2io_xmit routine and also some
 *			  issues in the Tx watch dog function. Also for
 *			  patiently answering all those innumerable
 *			  questions regarding the 2.6 porting issues.
 * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
 *			  macros available only in 2.6 kernels.
 * Francois Romieu	: For pointing out all the code parts that were
 *			  deprecated and also styling related comments.
 * Grant Grundler	: For helping me get rid of some architecture
 *			  dependent code.
 * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
 *
 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables:
 *
 * rx_ring_num : This can be used to program the number of receive rings used
 *     in the driver.
 * rx_ring_sz: This defines the number of receive blocks each ring can have.
 *     This is also an array of size 8.
 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
 *     values are 1, 2 and 3.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
 * tx_fifo_len: This too is an array of 8. Each element defines the number of
 *     Tx descriptors that can be associated with each corresponding FIFO.
 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
 *     1(MSI), 2(MSI_X). Default value is '0(INTA)'.
 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
 *     Possible values '1' for enable, '0' for disable. Default is '0'.
 * lro_max_pkts: This parameter defines the maximum number of packets that
 *     can be aggregated as a single large packet.
 * napi: This parameter is used to enable/disable NAPI (polling Rx).
 *     Possible values '1' for enable and '0' for disable. Default is '1'.
 * ufo: This parameter is used to enable/disable UDP Fragmentation Offload (UFO).
 *     Possible values '1' for enable and '0' for disable. Default is '0'.
 * vlan_tag_strip: This can be used to enable or disable VLAN tag stripping.
 *     Possible values '1' for enable, '0' for disable.
 *     Default is '2' - which means disable in promiscuous mode
 *     and enable in non-promiscuous mode.
 ************************************************************************/
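/*
 * Illustrative usage only (parameter names as documented above; the values
 * shown are hypothetical):
 *
 *	modprobe s2io intr_type=2 napi=1 lro=1 lro_max_pkts=32 rx_ring_num=2
 */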
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioctl.h>
#include <linux/timex.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/div64.h>
#include "s2io-regs.h"

#define DRV_VERSION "2.0.17.1"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

static int rxd_size[4] = {32, 48, 48, 64};
static int rxd_count[4] = {127, 85, 85, 63};
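/*
 * These counts follow from a 4 KiB receive block (SIZE_OF_BLOCK), with one
 * descriptor's worth of space reserved for the block-link marker:
 * 4096/32 - 1 = 127, 4096/48 = 85 (rounded down), 4096/64 - 1 = 63.
 */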
static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
{
	int ret;

	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));

	return ret;
}
/*
 * Cards with following subsystem_id have a link state indication
 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
 * The macro below identifies these cards given the subsystem_id.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)	\
	(dev_type == XFRAME_I_DEVICE) ?				\
		((((subid >= 0x600B) && (subid <= 0x600D)) ||	\
		  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0

#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
static inline int rx_buffer_level(struct s2io_nic *sp, int rxb_size, int ring)
{
	struct mac_info *mac_control;

	mac_control = &sp->mac_control;
	if (rxb_size <= rxd_count[sp->rxd_mode])
		return PANIC;
	else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
		return LOW;
	return 0;
}
/* Ethtool related variables and Macros. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};
static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_data_octets"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"new_rd_req_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"rd_rtry_wr_ack_cnt"},
static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
#define S2IO_XENA_STAT_LEN	(sizeof(ethtool_xena_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_ENHANCED_STAT_LEN	(sizeof(ethtool_enhanced_stats_keys) / \
				 ETH_GSTRING_LEN)
#define S2IO_DRIVER_STAT_LEN	(sizeof(ethtool_driver_stats_keys) / ETH_GSTRING_LEN)

#define XFRAME_I_STAT_LEN	(S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN	(XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

#define XFRAME_I_STAT_STRINGS_LEN	(XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN	(XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN		(sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	init_timer(&timer);				\
	timer.function = handle;			\
	timer.data = (unsigned long)arg;		\
	mod_timer(&timer, (jiffies + exp))
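/*
 * Illustrative use only (the handler and argument shown are hypothetical):
 *
 *	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle,
 *			(unsigned long)sp, (HZ / 2));
 *
 * Since the macro expands to multiple statements, it must not be used as
 * the body of an unbraced if/else.
 */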
static void s2io_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	struct s2io_nic *nic = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nic->tx_lock, flags);
	nic->vlgrp = grp;
	spin_unlock_irqrestore(&nic->tx_lock, flags);
}

/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
int vlan_strip_flag;

/* Unregister the vlan */
static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
{
	struct s2io_nic *nic = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nic->tx_lock, flags);
	if (nic->vlgrp)
		nic->vlgrp->vlan_devices[vid] = NULL;
	spin_unlock_irqrestore(&nic->tx_lock, flags);
}
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */
static const u64 herc_act_dtx_cfg[] = {
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,

static const u64 xena_dtx_cfg[] = {
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
/*
 * Constants for fixing the MAC address problem seen mostly on
 * Alpha machines.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, 1);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(bimodal, 0);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 0);
/* Large receive offload feature */
S2IO_PARM_INT(lro, 0);
/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size (64K).
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);
S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
	{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
	{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
	{[0 ...(MAX_RX_RINGS - 1)] = 0};

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
/*
 * This table lists all the devices that this driver supports.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}
};
MODULE_DEVICE_TABLE(pci, s2io_tbl);

static struct pci_driver s2io_driver = {
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = __devexit_p(s2io_rem_nic),
};

/* A simplifier macro used both by init and free shared_mem Fns(). */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
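/*
 * TXD_MEM_PAGE_CNT() is a plain ceiling division. For example (numbers are
 * illustrative only), a FIFO of 5000 descriptor lists with 128 lists per
 * page needs (5000 + 127) / 128 = 40 pages.
 */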
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;

	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;
	/* Allocation and initialization of TXDLs in FIFOs */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		size += config->tx_cfg[i].fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
		return -EINVAL;
	}

	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		int fifo_len = config->tx_cfg[i].fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);
		mac_control->fifos[i].list_info = kmalloc(list_holder_size,
							  GFP_KERNEL);
		if (!mac_control->fifos[i].list_info) {
			DBG_PRINT(INIT_DBG, "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		memset(mac_control->fifos[i].list_info, 0, list_holder_size);
	}
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.fifo_len =
			config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_get_info.fifo_len =
			config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].fifo_no = i;
		mac_control->fifos[i].nic = nic;
		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(ERR_DBG, "pci_alloc_consistent ");
				DBG_PRINT(ERR_DBG, "failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address (can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
					  "%s: Zero DMA address for TxDL. ",
					  dev->name);
				DBG_PRINT(INIT_DBG,
					  "Virtual address %p\n", tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
							     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(ERR_DBG,
						  "pci_alloc_consistent ");
					DBG_PRINT(ERR_DBG,
						  "failed for TxDL\n");
					return -ENOMEM;
				}
			}
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == config->tx_cfg[i].fifo_len)
					break;
				mac_control->fifos[i].list_info[l].list_virt_addr =
					tmp_v + (k * lst_size);
				mac_control->fifos[i].list_info[l].list_phy_addr =
					tmp_p + (k * lst_size);
				k++;
			}
		}
	}
	nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
	if (!nic->ufo_in_band_v)
		return -ENOMEM;
	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		if (config->rx_cfg[i].num_rxd %
		    (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ", i);
			DBG_PRINT(ERR_DBG, "RxDs per Block\n");
			return FAILURE;
		}
		size += config->rx_cfg[i].num_rxd;
		mac_control->rings[i].block_count =
			config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
			mac_control->rings[i].block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.ring_len =
			config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_put_info.ring_len =
			config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].nic = nic;
		mac_control->rings[i].ring_no = i;

		blk_cnt = config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &mac_control->rings[i].rx_blocks[j];
			size = SIZE_OF_BLOCK;	/* size is always page size */
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was allocated till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			memset(tmp_v_addr, 0, size);
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(sizeof(struct rxd_info) *
						  rxd_count[nic->rxd_mode],
						  GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr =
				mac_control->rings[i].rx_blocks[j].block_virt_addr;
			tmp_v_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
					blk_cnt].block_virt_addr;
			tmp_p_addr =
				mac_control->rings[i].rx_blocks[j].block_dma_addr;
			tmp_p_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
					blk_cnt].block_dma_addr;

			pre_rxd_blk = (struct RxD_block *)tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long)tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64)tmp_p_addr_next;
		}
	}
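	/*
	 * The (j + 1) % blk_cnt indexing in the interlinking loop above makes
	 * the last receive block point back to the first, so each ring's
	 * blocks form a circular chain that the hardware can follow
	 * indefinitely.
	 */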
	if (nic->rxd_mode >= RXD_MODE_3A) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			mac_control->rings[i].ba =
				kmalloc((sizeof(struct buffAdd *) * blk_cnt),
					GFP_KERNEL);
			if (!mac_control->rings[i].ba)
				return -ENOMEM;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				mac_control->rings[i].ba[j] =
					kmalloc((sizeof(struct buffAdd) *
						 (rxd_count[nic->rxd_mode] + 1)),
						GFP_KERNEL);
				if (!mac_control->rings[i].ba[j])
					return -ENOMEM;
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &mac_control->rings[i].ba[j][k];

					ba->ba_0_org = (void *)kmalloc
						(BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_0 = (void *)tmp;

					ba->ba_1_org = (void *)kmalloc
						(BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					tmp = (unsigned long)ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_1 = (void *)tmp;
					k++;
				}
			}
		}
	}
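	/*
	 * The ba_0/ba_1 pointers set up above use the usual align-up idiom:
	 * adding ALIGN_SIZE and masking it off rounds an address up to the
	 * next (ALIGN_SIZE + 1)-byte boundary (this assumes ALIGN_SIZE is one
	 * less than a power of two). E.g. with ALIGN_SIZE = 127, address
	 * 0x1008 becomes (0x1008 + 127) & ~127 = 0x1080.
	 */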
	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem = pci_alloc_consistent
		(nic->pdev, size, &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was allocated till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (struct stat_block *)tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n", dev->name,
		  (unsigned long long)tmp_p_addr);

	return SUCCESS;
}
/**
 * free_shared_mem - Free the allocated Memory
 * @nic: Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 */

static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	struct mac_info *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;

	if (!nic)
		return;

	mac_control = &nic->mac_control;
	config = &nic->config;

	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			if (!mac_control->fifos[i].list_info)
				return;
			if (!mac_control->fifos[i].list_info[mem_blks].
			    list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->fifos[i].
					    list_info[mem_blks].list_virt_addr,
					    mac_control->fifos[i].
					    list_info[mem_blks].list_phy_addr);
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now.
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA addr. ",
				  dev->name);
			DBG_PRINT(INIT_DBG, "Virtual address %p\n",
				  mac_control->zerodma_virt_addr);
		}
		kfree(mac_control->fifos[i].list_info);
	}

	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt = mac_control->rings[i].block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
				block_virt_addr;
			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
				block_dma_addr;
			if (tmp_v_addr == NULL)
				break;
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			kfree(mac_control->rings[i].rx_blocks[j].rxds);
		}
	}

	if (nic->rxd_mode >= RXD_MODE_3A) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!mac_control->rings[i].ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba =
						&mac_control->rings[i].ba[j][k];
					kfree(ba->ba_0_org);
					kfree(ba->ba_1_org);
					k++;
				}
				kfree(mac_control->rings[i].ba[j]);
			}
			kfree(mac_control->rings[i].ba);
		}
	}

	if (mac_control->stats_mem) {
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
	if (nic->ufo_in_band_v)
		kfree(nic->ufo_in_band_v);
}
/**
 * s2io_verify_pci_mode -
 */

static int s2io_verify_pci_mode(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */
	return mode;
}
#define NEC_VENID   0x1033
#define NEC_DEVID   0x0125
static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
{
	struct pci_dev *tdev = NULL;
	while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
			if (tdev->bus == s2io_pdev->bus->parent) {
				pci_dev_put(tdev);
				return 1;
			}
		}
	}
	return 0;
}
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
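/*
 * Indexed by the mode value returned by GET_PCI_MODE(); entries are bus
 * clocks in MHz and correspond to the PCI_MODE_* cases decoded in
 * s2io_print_pci_mode() below.
 */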
/**
 * s2io_print_pci_mode -
 */
static int s2io_print_pci_mode(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int mode;
	struct config_param *config = &nic->config;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */

	config->bus_speed = bus_speed[mode];

	if (s2io_on_nec_bridge(nic->pdev)) {
		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
			  nic->dev->name);
		return mode;
	}

	if (val64 & PCI_MODE_32_BITS)
		DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
	else
		DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);

	switch (mode) {
	case PCI_MODE_PCI_33:
		DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
		break;
	case PCI_MODE_PCI_66:
		DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
		break;
	case PCI_MODE_PCIX_M1_66:
		DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M1_100:
		DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M1_133:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
		break;
	case PCI_MODE_PCIX_M2_66:
		DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
		break;
	case PCI_MODE_PCIX_M2_100:
		DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
		break;
	case PCI_MODE_PCIX_M2_133:
		DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
		break;
	default:
		return -1;	/* Unsupported bus speed */
	}

	return mode;
}
/**
 * init_nic - Initialization of hardware
 * @nic: device private variable
 * Description: The function sequentially configures every block
 * of the H/W from their reset values.
 * Return Value: SUCCESS on success and
 * '-1' on failure (endian settings incorrect).
 */

static int init_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	void __iomem *add;
	u32 time;
	int i, j;
	struct mac_info *mac_control;
	struct config_param *config;
	int dtx_cnt = 0;
	unsigned long long mem_share;
	int mem_size;

	mac_control = &nic->mac_control;
	config = &nic->config;
	/* to set the swapper control on the card */
	if (s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
		return -1;
	}

	/*
	 * Herc requires EOI to be removed from reset before XGXS, so..
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = 0xA500000000ULL;
		writeq(val64, &bar0->sw_reset);
		msleep(500);
		val64 = readq(&bar0->sw_reset);
	}

	/* Remove XGXS from reset state */
	val64 = 0;
	writeq(val64, &bar0->sw_reset);
	msleep(500);
	val64 = readq(&bar0->sw_reset);

	/* Enable Receiving broadcasts */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_RMAC_BCAST_ENABLE;
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32)val64, add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32)(val64 >> 32), (add + 4));

	/* Read registers in all blocks */
	val64 = readq(&bar0->mac_int_mask);
	val64 = readq(&bar0->mc_int_mask);
	val64 = readq(&bar0->xgxs_int_mask);

	/* Set MTU */
	val64 = dev->mtu;
	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
	if (nic->device_type & XFRAME_II_DEVICE) {
		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			dtx_cnt++;
			msleep(1);	/* Necessary!! */
		}
	} else {
		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			val64 = readq(&bar0->dtx_control);
			dtx_cnt++;
		}
	}
	/* Tx DMA Initialization */
	val64 = 0;
	writeq(val64, &bar0->tx_fifo_partition_0);
	writeq(val64, &bar0->tx_fifo_partition_1);
	writeq(val64, &bar0->tx_fifo_partition_2);
	writeq(val64, &bar0->tx_fifo_partition_3);

	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
		val64 |=
			vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
			     13) | vBIT(config->tx_cfg[i].fifo_priority,
					((i * 32) + 5), 3);

		if (i == (config->tx_fifo_num - 1)) {
			if (i % 2 == 0)
				i++;
		}

		switch (i) {
		case 1:
			writeq(val64, &bar0->tx_fifo_partition_0);
			val64 = 0;
			break;
		case 3:
			writeq(val64, &bar0->tx_fifo_partition_1);
			val64 = 0;
			break;
		case 5:
			writeq(val64, &bar0->tx_fifo_partition_2);
			val64 = 0;
			break;
		case 7:
			writeq(val64, &bar0->tx_fifo_partition_3);
			break;
		}
	}
	/*
	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
	 */
	if ((nic->device_type == XFRAME_I_DEVICE) &&
	    (get_xena_rev_id(nic->pdev) < 4))
		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

	val64 = readq(&bar0->tx_fifo_partition_0);
	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
		  &bar0->tx_fifo_partition_0, (unsigned long long)val64);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = readq(&bar0->tx_pa_cfg);
	val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
		TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->tx_pa_cfg);
	/* Rx DMA initialization. */
	val64 = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		val64 |=
			vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
			     3);
	}
	writeq(val64, &bar0->rx_queue_priority);

	/*
	 * Allocating equal share of memory to all the
	 * configured Rx rings.
	 */
	val64 = 0;
	if (nic->device_type & XFRAME_II_DEVICE)
		mem_size = 32;
	else
		mem_size = 64;

	for (i = 0; i < config->rx_ring_num; i++) {
		switch (i) {
		case 0:
			mem_share = (mem_size / config->rx_ring_num +
				     mem_size % config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
			continue;
		case 1:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
			continue;
		case 2:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
			continue;
		case 3:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
			continue;
		case 4:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
			continue;
		case 5:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
			continue;
		case 6:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
			continue;
		case 7:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
			continue;
		}
	}
	writeq(val64, &bar0->rx_queue_cfg);
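	/*
	 * The switch above gives every configured ring an equal slice of the
	 * receive memory and folds the remainder into Q0. E.g. mem_size = 64
	 * with three rings yields Q0 = 64/3 + 64%3 = 22 and Q1 = Q2 = 21.
	 */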
	/*
	 * Filling Tx round robin registers
	 * as per the number of FIFOs
	 */
	switch (config->tx_fifo_num) {
	case 1:
		val64 = 0x0000000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 2:
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0100000100000100ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0001000001000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0100000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 3:
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0001020000010001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0200000100010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 4:
		val64 = 0x0001020300010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0100000102030001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0200010000010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203000100000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 5:
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 6:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0304050001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0203000100000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304000102030405ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000200000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 7:
		val64 = 0x0001020001020300ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0405060001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304050000010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0102030000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 8:
		val64 = 0x0001020300040105ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0200030106000204ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0103000502010007ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304010002060500ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0103020400000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	}

	/* Enable all configured Tx FIFO partitions */
	val64 = readq(&bar0->tx_fifo_partition_0);
	val64 |= (TX_FIFO_PARTITION_EN);
	writeq(val64, &bar0->tx_fifo_partition_0);
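	/*
	 * Each tx_w_round_robin_* register holds eight one-byte slots, and
	 * the five registers together form the FIFO service calendar: each
	 * byte names the FIFO to be serviced in that slot. Writing all zeros
	 * (the single-FIFO case) dedicates every slot to FIFO 0.
	 */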
	/* Filling the Rx round robin registers as per the
	 * number of Rings and steering based on QoS.
	 */
	switch (config->rx_ring_num) {
	case 1:
		val64 = 0x8080808080808080ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 2:
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000100000100ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0100000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808040404040ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 3:
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020000010001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200000100010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080804040402020ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 4:
		val64 = 0x0001020300010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000102030001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200010000010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203000100000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201010ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 5:
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201008ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 6:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0304050001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0203000100000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304000102030405ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000200000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020100804ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 7:
		val64 = 0x0001020001020300ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0405060001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304050000010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0102030000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080402010080402ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 8:
		val64 = 0x0001020300040105ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0200030106000204ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0103000502010007ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304010002060500ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0103020400000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8040201008040201ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	}
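	/*
	 * rts_qos_steering holds one byte per priority level, each byte being
	 * a bitmap of eligible rings (bit 7 = ring 0, bit 6 = ring 1, ...).
	 * With one ring every byte is 0x80, i.e. all priorities steer to
	 * ring 0; with eight rings (0x8040201008040201) priority n maps to
	 * ring n.
	 */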
	/* UDP Fix */
	val64 = 0;
	for (i = 0; i < 8; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the default rts frame length for the rings configured */
	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu + 22);
	for (i = 0; i < config->rx_ring_num; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the frame length for the configured rings
	 * desired by the user
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 then it is assumed that the user has
		 * not specified frame length steering.
		 * If the user provides a frame length then program
		 * the rts_frm_len register for those values, or else
		 * leave it as it is.
		 */
		if (rts_frm_len[i] != 0) {
			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
			       &bar0->rts_frm_len_n[i]);
		}
	}

	/* Disable differentiated services steering logic */
	for (i = 0; i < 64; i++) {
		if (rts_ds_steer(nic, i, 0) == FAILURE) {
			DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
				  dev->name);
			DBG_PRINT(ERR_DBG, " set on codepoint %d\n", i);
			return FAILURE;
		}
	}
	/* Program statistics memory */
	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = STAT_BC(0x320);
		writeq(val64, &bar0->stat_byte_cnt);
	}

	/*
	 * Initializing the sampling rate for the device to calculate the
	 * bandwidth utilization.
	 */
	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
		MAC_RX_LINK_UTIL_VAL(rmac_util_period);
	writeq(val64, &bar0->mac_link_util);

	/*
	 * Initializing the Transmit and Receive Traffic Interrupt
	 * Scheme.
	 */
	/*
	 * TTI Initialization. Default Tx timer gets us about
	 * 250 interrupts per sec. Continuous interrupts are enabled
	 * by default.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		int count = (nic->config.bus_speed * 125) / 2;
		val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
	} else {
		val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
	}
	val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
		TTI_DATA1_MEM_TX_URNG_B(0x10) |
		TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
	if (use_continuous_tx_intrs)
		val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
	writeq(val64, &bar0->tti_data1_mem);

	val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
		TTI_DATA2_MEM_TX_UFC_B(0x20) |
		TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
	writeq(val64, &bar0->tti_data2_mem);

	val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
	writeq(val64, &bar0->tti_command_mem);
	/*
	 * Once the operation completes, the Strobe bit of the command
	 * register will be reset. We poll for this particular condition.
	 * We wait for a maximum of 500ms for the operation to complete;
	 * if it's not complete by then we return an error.
	 */
	time = 0;
	while (TRUE) {
		val64 = readq(&bar0->tti_command_mem);
		if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD))
			break;
		if (time > 10) {
			DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
				  dev->name);
			return -1;
		}
		msleep(50);
		time++;
	}

	if (nic->config.bimodal) {
		int k;
		for (k = 0; k < config->rx_ring_num; k++) {
			val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
			val64 |= TTI_CMD_MEM_OFFSET(0x38 + k);
			writeq(val64, &bar0->tti_command_mem);

			/*
			 * Poll for the Strobe bit of the command register to
			 * clear, as above, for a maximum of 500ms.
			 */
			time = 0;
			while (TRUE) {
				val64 = readq(&bar0->tti_command_mem);
				if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD))
					break;
				if (time > 10) {
					DBG_PRINT(ERR_DBG,
						  "%s: TTI init Failed\n",
						  dev->name);
					return -1;
				}
				time++;
				msleep(50);
			}
		}
	}
	/* RTI Initialization */
	if (nic->device_type == XFRAME_II_DEVICE) {
		/*
		 * Programmed to generate approx 500 Intrs per
		 * second
		 */
		int count = (nic->config.bus_speed * 125) / 4;
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
	} else {
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
	}
	val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
		RTI_DATA1_MEM_RX_URNG_B(0x10) |
		RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;

	writeq(val64, &bar0->rti_data1_mem);

	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
		RTI_DATA2_MEM_RX_UFC_B(0x2);
	if (nic->intr_type == MSI_X)
		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
			  RTI_DATA2_MEM_RX_UFC_D(0x40));
	else
		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
			  RTI_DATA2_MEM_RX_UFC_D(0x80));
	writeq(val64, &bar0->rti_data2_mem);

	for (i = 0; i < config->rx_ring_num; i++) {
		val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
			| RTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->rti_command_mem);

		/*
		 * Once the operation completes, the Strobe bit of the
		 * command register will be reset. We poll for this
		 * particular condition. We wait for a maximum of 500ms
		 * for the operation to complete; if it's not complete
		 * by then we return an error.
		 */
		time = 0;
		while (TRUE) {
			val64 = readq(&bar0->rti_command_mem);
			if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
				break;
			if (time > 10) {
				DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
					  dev->name);
				return -1;
			}
			time++;
			msleep(50);
		}
	}
	/*
	 * Initializing proper values as Pause threshold into all
	 * the 8 Queues on Rx side.
	 */
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);

	/* Disable RMAC PAD STRIPPING */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32)(val64), add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32)(val64 >> 32), (add + 4));
	val64 = readq(&bar0->mac_cfg);

	/* Enable FCS stripping by adapter */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_CFG_RMAC_STRIP_FCS;
	if (nic->device_type == XFRAME_II_DEVICE)
		writeq(val64, &bar0->mac_cfg);
	else {
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32)(val64), add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32)(val64 >> 32), (add + 4));
	}
	/*
	 * Set the time value to be inserted in the pause frame
	 * generated by Xena.
	 */
	val64 = readq(&bar0->rmac_pause_cfg);
	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
	writeq(val64, &bar0->rmac_pause_cfg);

	/*
	 * Set the Threshold Limit for Generating the pause frame.
	 * If the amount of data in any Queue exceeds the ratio of
	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
	 * a pause frame is generated.
	 */
	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |=
			(((u64)0xFF00 | nic->mac_control.
			  mc_pause_threshold_q0q3)
			 << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q0q3);

	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |=
			(((u64)0xFF00 | nic->mac_control.
			  mc_pause_threshold_q4q7)
			 << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q4q7);

	/*
	 * TxDMA will stop Read requests if the number of read splits
	 * exceeds the limit pointed to by shared_splits.
	 */
	val64 = readq(&bar0->pic_control);
	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
	writeq(val64, &bar0->pic_control);

	if (nic->config.bus_speed == 266) {
		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
		writeq(0x0, &bar0->read_retry_delay);
		writeq(0x0, &bar0->write_retry_delay);
	}

	/*
	 * Programming the Herc to split every write transaction
	 * that does not start on an ADB to reduce disconnects.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
			MISC_LINK_STABILITY_PRD(3);
		writeq(val64, &bar0->misc_control);
		val64 = readq(&bar0->pic_control2);
		val64 &= ~(BIT(13) | BIT(14) | BIT(15));
		writeq(val64, &bar0->pic_control2);
	}
	if (strstr(nic->product_name, "CX4")) {
		val64 = TMAC_AVG_IPG(0x17);
		writeq(val64, &bar0->tmac_avg_ipg);
	}

	return SUCCESS;
}
#define LINK_UP_DOWN_INTERRUPT		1
#define MAC_RMAC_ERR_TIMER		2

static int s2io_link_fault_indication(struct s2io_nic *nic)
{
	if (nic->intr_type != INTA)
		return MAC_RMAC_ERR_TIMER;
	if (nic->device_type == XFRAME_II_DEVICE)
		return LINK_UP_DOWN_INTERRUPT;
	else
		return MAC_RMAC_ERR_TIMER;
}
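/*
 * In other words: GPIO link-state interrupts are used only for INTA on
 * XFRAME II adapters; every other combination falls back to detecting link
 * changes from the RMAC error timer.
 */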
/**
 * en_dis_able_nic_intrs - Enable or Disable the interrupts
 * @nic: device private variable,
 * @mask: A mask indicating which Intr block must be modified and,
 * @flag: A flag indicating whether to enable or disable the Intrs.
 * Description: This function will either disable or enable the interrupts
 * depending on the flag argument. The mask argument can be used to
 * enable/disable any Intr block.
 * Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, temp64 = 0;
	/* Top level interrupt classification */
	/* PIC Interrupts */
	if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
		/* Enable PIC Intrs in the general intr mask register */
		val64 = TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64)val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 */
			if (s2io_link_fault_indication(nic) ==
			    LINK_UP_DOWN_INTERRUPT) {
				temp64 = readq(&bar0->pic_int_mask);
				temp64 &= ~((u64)PIC_INT_GPIO);
				writeq(temp64, &bar0->pic_int_mask);
				temp64 = readq(&bar0->gpio_int_mask);
				temp64 &= ~((u64)GPIO_INT_MASK_LINK_UP);
				writeq(temp64, &bar0->gpio_int_mask);
			} else {
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			}
			/*
			 * No MSI Support is available presently, so TTI and
			 * RTI interrupts are also disabled.
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}
	/* MAC Interrupts */
	/* Enabling/Disabling MAC interrupts */
	if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
		val64 = TXMAC_INT_M | RXMAC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64)val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All MAC block error interrupts are disabled for now
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MAC Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
			writeq(DISABLE_ALL_INTRS,
			       &bar0->mac_rmac_err_mask);

			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}
	/* Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		val64 = TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64)val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/* Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		val64 = RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64)val64);
			writeq(temp64, &bar0->general_int_mask);
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}
}
/**
 * verify_pcc_quiescent - Checks for PCC quiescent state
 * Return: 1 if PCC is quiescent
 *         0 if PCC is not quiescent
 */
static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
{
	int ret = 0, herc;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = readq(&bar0->adapter_status);

	herc = (sp->device_type == XFRAME_II_DEVICE);

	if (flag == FALSE) {
		if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
				ret = 1;
		} else {
			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
				ret = 1;
		}
	} else {
		if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
			     ADAPTER_STATUS_RMAC_PCC_IDLE))
				ret = 1;
		} else {
			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
				ret = 1;
		}
	}

	return ret;
}
/**
 * verify_xena_quiescence - Checks whether the H/W is ready
 * Description: Returns whether the H/W is ready to go or not. Depending
 * on whether adapter enable bit was written or not the comparison
 * differs and the calling function passes the input argument flag to
 * indicate this.
 * Return: 1 if Xena is quiescent
 *         0 if Xena is not quiescent
 */

static int verify_xena_quiescence(struct s2io_nic *sp)
{
	int mode;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = readq(&bar0->adapter_status);
	mode = s2io_verify_pci_mode(sp);

	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
		DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
		DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
		return 0;
	}

	/*
	 * In PCI 33 mode, the P_PLL is not used, and therefore,
	 * the P_PLL_LOCK bit in the adapter_status register will
	 * not be asserted.
	 */
	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
	    sp->device_type == XFRAME_II_DEVICE && mode !=
	    PCI_MODE_PCI_33) {
		DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
		return 0;
	}
	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
		return 0;
	}
	return 1;
}
/**
 * fix_mac_address - Fix for MAC addr problem on Alpha platforms
 * @sp: Pointer to device specific structure
 * Description:
 * New procedure to clear MAC address reading problems on Alpha platforms.
 */

static void fix_mac_address(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int i = 0;

	while (fix_mac[i] != END_SIGN) {
		writeq(fix_mac[i++], &bar0->gpio_control);
		udelay(10);
	}
	val64 = readq(&bar0->gpio_control);
}
/**
 * start_nic - Turns the device on
 * @nic : device private variable.
 * Description:
 * This function actually turns the device on. Before this function is
 * called, all Registers are configured from their reset states
 * and shared memory is allocated but the NIC is still quiescent. On
 * calling this function, the device interrupts are cleared and the NIC is
 * literally switched on by writing into the adapter control register.
 * Return Value:
 * SUCCESS on success and -1 on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;
	/* PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		writeq((u64)mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->config.bimodal)
			val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		vlan_strip_flag = 0;
	}
	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Clearing any possible Link state change interrupts that
	 * could have popped up just before Enabling the card.
	 */
	val64 = readq(&bar0->mac_rmac_err_reg);
	if (val64)
		writeq(val64, &bar0->mac_rmac_err_reg);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long)val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Don't see link state interrupts initially on some
		 * switches, so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
/**
 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 */
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
					struct TxD *txdlp, int get_off)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct sk_buff *skb;
	struct TxD *txds;
	u16 j, frg_cnt;

	txds = txdlp;
	if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
		pci_unmap_single(nic->pdev, (dma_addr_t)
				 txds->Buffer_Pointer, sizeof(u64),
				 PCI_DMA_TODEVICE);
		txds++;
	}

	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
	if (!skb) {
		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
		return NULL;
	}
	pci_unmap_single(nic->pdev, (dma_addr_t)
			 txds->Buffer_Pointer,
			 skb->len - skb->data_len,
			 PCI_DMA_TODEVICE);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	if (frg_cnt) {
		txds++;
		for (j = 0; j < frg_cnt; j++, txds++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			if (!txds->Buffer_Pointer)
				break;
			pci_unmap_page(nic->pdev, (dma_addr_t)
				       txds->Buffer_Pointer,
				       frag->size, PCI_DMA_TODEVICE);
		}
	}
	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
	return skb;
}
/**
 * free_tx_buffers - Free all queued Tx buffers
 * @nic : device private variable.
 * Description:
 * Free all queued Tx buffers.
 * Return Value: void
 */

static void free_tx_buffers(struct s2io_nic *nic)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	struct TxD *txdp;
	int i, j;
	struct mac_info *mac_control;
	struct config_param *config;
	int cnt = 0;

	mac_control = &nic->mac_control;
	config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++) {
		for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
			txdp = (struct TxD *)mac_control->fifos[i].list_info[j].
				list_virt_addr;
			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
			if (skb) {
				dev_kfree_skb(skb);
				cnt++;
			}
		}
		DBG_PRINT(INTR_DBG,
			  "%s: forcibly freeing %d skbs on FIFO%d\n",
			  dev->name, cnt, i);
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
	}
}
2144 * stop_nic - To stop the nic
2145 * @nic : device private variable.
2147 * This function does exactly the opposite of what the start_nic()
2148 * function does. This function is called to stop the device.
2153 static void stop_nic(struct s2io_nic *nic)
2155 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2156 register u64 val64 = 0;
2158 struct mac_info *mac_control;
2159 struct config_param *config;
2161 mac_control = &nic->mac_control;
2162 config = &nic->config;
2164 /* Disable all interrupts */
2165 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2166 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2167 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2168 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2170 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2171 val64 = readq(&bar0->adapter_control);
2172 val64 &= ~(ADAPTER_CNTL_EN);
2173 writeq(val64, &bar0->adapter_control);
2176 static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \
2179 struct net_device *dev = nic->dev;
2180 struct sk_buff *frag_list;
2183 /* Buffer-1 receives L3/L4 headers */
2184 ((struct RxD3*)rxdp)->Buffer1_ptr = pci_map_single
2185 (nic->pdev, skb->data, l3l4hdr_size + 4,
2186 PCI_DMA_FROMDEVICE);
2188 /* skb_shinfo(skb)->frag_list will have L4 data payload */
2189 skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
2190 if (skb_shinfo(skb)->frag_list == NULL) {
2191 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
2194 frag_list = skb_shinfo(skb)->frag_list;
2195 skb->truesize += frag_list->truesize;
2196 frag_list->next = NULL;
2197 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
2198 frag_list->data = tmp;
2199 frag_list->tail = tmp;
2201 /* Buffer-2 receives L4 data payload */
2202 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
2203 frag_list->data, dev->mtu,
2204 PCI_DMA_FROMDEVICE);
2205 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
2206 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
2212 * fill_rx_buffers - Allocates the Rx side skbs
2213 * @nic: device private variable
2214 * @ring_no: ring number
2216 * The function allocates Rx side skbs and puts the physical
2217 * address of these buffers into the RxD buffer pointers, so that the NIC
2218 * can DMA the received frame into these locations.
2219 * The NIC supports 3 receive modes, viz
2220 * 1. single buffer,
2221 * 2. three buffer and
2222 * 3. five buffer modes.
2223 * Each mode defines how many fragments the received frame will be split
2224 * up into by the NIC. The frame is split into L3 header, L4 Header,
2225 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2226 * is split into 3 fragments. As of now only single buffer mode is
2229 * SUCCESS on success or an appropriate -ve value on failure.
2232 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2234 struct net_device *dev = nic->dev;
2235 struct sk_buff *skb;
2237 int off, off1, size, block_no, block_no1;
2240 struct mac_info *mac_control;
2241 struct config_param *config;
2244 unsigned long flags;
2245 struct RxD_t *first_rxdp = NULL;
2247 mac_control = &nic->mac_control;
2248 config = &nic->config;
2249 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2250 atomic_read(&nic->rx_bufs_left[ring_no]);
2252 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
2253 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2254 while (alloc_tab < alloc_cnt) {
2255 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2257 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2259 rxdp = mac_control->rings[ring_no].
2260 rx_blocks[block_no].rxds[off].virt_addr;
2262 if ((block_no == block_no1) && (off == off1) &&
2263 (rxdp->Host_Control)) {
2264 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2266 DBG_PRINT(INTR_DBG, " info equated\n");
2269 if (off && (off == rxd_count[nic->rxd_mode])) {
2270 mac_control->rings[ring_no].rx_curr_put_info.
2272 if (mac_control->rings[ring_no].rx_curr_put_info.
2273 block_index == mac_control->rings[ring_no].
2275 mac_control->rings[ring_no].rx_curr_put_info.
2277 block_no = mac_control->rings[ring_no].
2278 rx_curr_put_info.block_index;
2279 if (off == rxd_count[nic->rxd_mode])
2281 mac_control->rings[ring_no].rx_curr_put_info.
2283 rxdp = mac_control->rings[ring_no].
2284 rx_blocks[block_no].block_virt_addr;
2285 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2289 spin_lock_irqsave(&nic->put_lock, flags);
2290 mac_control->rings[ring_no].put_pos =
2291 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2292 spin_unlock_irqrestore(&nic->put_lock, flags);
2294 mac_control->rings[ring_no].put_pos =
2295 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2297 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2298 ((nic->rxd_mode >= RXD_MODE_3A) &&
2299 (rxdp->Control_2 & BIT(0)))) {
2300 mac_control->rings[ring_no].rx_curr_put_info.
2304 /* calculate size of skb based on ring mode */
2305 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2306 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2307 if (nic->rxd_mode == RXD_MODE_1)
2308 size += NET_IP_ALIGN;
2309 else if (nic->rxd_mode == RXD_MODE_3B)
2310 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2312 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
2315 skb = dev_alloc_skb(size);
2317 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2318 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2321 first_rxdp->Control_1 |= RXD_OWN_XENA;
2325 if (nic->rxd_mode == RXD_MODE_1) {
2326 /* 1 buffer mode - normal operation mode */
2327 memset(rxdp, 0, sizeof(struct RxD1));
2328 skb_reserve(skb, NET_IP_ALIGN);
2329 ((struct RxD1*)rxdp)->Buffer0_ptr = pci_map_single
2330 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2331 PCI_DMA_FROMDEVICE);
2332 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2334 } else if (nic->rxd_mode >= RXD_MODE_3A) {
2336 * 2 or 3 buffer mode -
2337 * Both 2 buffer mode and 3 buffer mode provides 128
2338 * byte aligned receive buffers.
2340 * 3 buffer mode provides header separation, wherein
2341 * skb->data will have the L3/L4 headers whereas
2342 * skb_shinfo(skb)->frag_list will have the L4 data
2346 memset(rxdp, 0, sizeof(struct RxD3));
2347 ba = &mac_control->rings[ring_no].ba[block_no][off];
2348 skb_reserve(skb, BUF0_LEN);
2349 tmp = (u64)(unsigned long) skb->data;
2352 skb->data = (void *) (unsigned long)tmp;
2353 skb->tail = (void *) (unsigned long)tmp;
2355 if (!(((struct RxD3*)rxdp)->Buffer0_ptr))
2356 ((struct RxD3*)rxdp)->Buffer0_ptr =
2357 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2358 PCI_DMA_FROMDEVICE);
2360 pci_dma_sync_single_for_device(nic->pdev,
2361 (dma_addr_t) ((struct RxD3*)rxdp)->Buffer0_ptr,
2362 BUF0_LEN, PCI_DMA_FROMDEVICE);
2363 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2364 if (nic->rxd_mode == RXD_MODE_3B) {
2365 /* Two buffer mode */
2368 * Buffer2 will have L3/L4 header plus
2371 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single
2372 (nic->pdev, skb->data, dev->mtu + 4,
2373 PCI_DMA_FROMDEVICE);
2375 /* Buffer-1 will be dummy buffer. Not used */
2376 if (!(((struct RxD3*)rxdp)->Buffer1_ptr)) {
2377 ((struct RxD3*)rxdp)->Buffer1_ptr =
2378 pci_map_single(nic->pdev,
2380 PCI_DMA_FROMDEVICE);
2382 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2383 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2387 if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
2388 dev_kfree_skb_irq(skb);
2391 first_rxdp->Control_1 |=
2397 rxdp->Control_2 |= BIT(0);
2399 rxdp->Host_Control = (unsigned long) (skb);
2400 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2401 rxdp->Control_1 |= RXD_OWN_XENA;
2403 if (off == (rxd_count[nic->rxd_mode] + 1))
2405 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2407 rxdp->Control_2 |= SET_RXD_MARKER;
2408 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2411 first_rxdp->Control_1 |= RXD_OWN_XENA;
2415 atomic_inc(&nic->rx_bufs_left[ring_no]);
2420 /* Transfer ownership of first descriptor to adapter just before
2421 * exiting. Before that, use memory barrier so that ownership
2422 * and other fields are seen by adapter correctly.
2426 first_rxdp->Control_1 |= RXD_OWN_XENA;
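/*
 * Illustrative sketch (not part of the driver): the descriptor hand-off
 * pattern fill_rx_buffers() uses above. Every field of the RxD must be
 * globally visible before the ownership bit is flipped, hence the write
 * barrier. The s2io_example_* name is hypothetical.
 */
static inline void s2io_example_give_rxd_to_nic(struct RxD_t *rxdp)
{
	wmb();				/* order field writes before OWN */
	rxdp->Control_1 |= RXD_OWN_XENA;
}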
2432 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2434 struct net_device *dev = sp->dev;
2436 struct sk_buff *skb;
2438 struct mac_info *mac_control;
2441 mac_control = &sp->mac_control;
2442 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2443 rxdp = mac_control->rings[ring_no].
2444 rx_blocks[blk].rxds[j].virt_addr;
2445 skb = (struct sk_buff *)
2446 ((unsigned long) rxdp->Host_Control);
2450 if (sp->rxd_mode == RXD_MODE_1) {
2451 pci_unmap_single(sp->pdev, (dma_addr_t)
2452 ((struct RxD1*)rxdp)->Buffer0_ptr,
2454 HEADER_ETHERNET_II_802_3_SIZE
2455 + HEADER_802_2_SIZE +
2457 PCI_DMA_FROMDEVICE);
2458 memset(rxdp, 0, sizeof(struct RxD1));
2459 } else if(sp->rxd_mode == RXD_MODE_3B) {
2460 ba = &mac_control->rings[ring_no].
2462 pci_unmap_single(sp->pdev, (dma_addr_t)
2463 ((struct RxD3*)rxdp)->Buffer0_ptr,
2465 PCI_DMA_FROMDEVICE);
2466 pci_unmap_single(sp->pdev, (dma_addr_t)
2467 ((struct RxD3*)rxdp)->Buffer1_ptr,
2469 PCI_DMA_FROMDEVICE);
2470 pci_unmap_single(sp->pdev, (dma_addr_t)
2471 ((struct RxD3*)rxdp)->Buffer2_ptr,
2473 PCI_DMA_FROMDEVICE);
2474 memset(rxdp, 0, sizeof(struct RxD3));
2476 pci_unmap_single(sp->pdev, (dma_addr_t)
2477 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2478 PCI_DMA_FROMDEVICE);
2479 pci_unmap_single(sp->pdev, (dma_addr_t)
2480 ((struct RxD3*)rxdp)->Buffer1_ptr,
2482 PCI_DMA_FROMDEVICE);
2483 pci_unmap_single(sp->pdev, (dma_addr_t)
2484 ((struct RxD3*)rxdp)->Buffer2_ptr, dev->mtu,
2485 PCI_DMA_FROMDEVICE);
2486 memset(rxdp, 0, sizeof(struct RxD3));
2489 atomic_dec(&sp->rx_bufs_left[ring_no]);
2494 * free_rx_buffers - Frees all Rx buffers
2495 * @sp: device private variable.
2497 * This function will free all Rx buffers allocated by host.
2502 static void free_rx_buffers(struct s2io_nic *sp)
2504 struct net_device *dev = sp->dev;
2505 int i, blk = 0, buf_cnt = 0;
2506 struct mac_info *mac_control;
2507 struct config_param *config;
2509 mac_control = &sp->mac_control;
2510 config = &sp->config;
2512 for (i = 0; i < config->rx_ring_num; i++) {
2513 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2514 free_rxd_blk(sp, i, blk);
2516 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2517 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2518 mac_control->rings[i].rx_curr_put_info.offset = 0;
2519 mac_control->rings[i].rx_curr_get_info.offset = 0;
2520 atomic_set(&sp->rx_bufs_left[i], 0);
2521 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2522 dev->name, buf_cnt, i);
2527 * s2io_poll - Rx interrupt handler for NAPI support
2528 * @dev : pointer to the device structure.
2529 * @budget : The number of packets that were budgeted to be processed
2530 * during one pass through the "poll" function.
2532 * Comes into picture only if NAPI support has been incorporated. It does
2533 * the same thing that rx_intr_handler does, but not in an interrupt context.
2534 * Also, it will process only a given number of packets.
2536 * 0 on success and 1 if there are no Rx packets to be processed.
2539 static int s2io_poll(struct net_device *dev, int *budget)
2541 struct s2io_nic *nic = dev->priv;
2542 int pkt_cnt = 0, org_pkts_to_process;
2543 struct mac_info *mac_control;
2544 struct config_param *config;
2545 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2548 atomic_inc(&nic->isr_cnt);
2549 mac_control = &nic->mac_control;
2550 config = &nic->config;
2552 nic->pkts_to_process = *budget;
2553 if (nic->pkts_to_process > dev->quota)
2554 nic->pkts_to_process = dev->quota;
2555 org_pkts_to_process = nic->pkts_to_process;
2557 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2558 readl(&bar0->rx_traffic_int);
2560 for (i = 0; i < config->rx_ring_num; i++) {
2561 rx_intr_handler(&mac_control->rings[i]);
2562 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2563 if (!nic->pkts_to_process) {
2564 /* Quota for the current iteration has been met */
2571 dev->quota -= pkt_cnt;
2573 netif_rx_complete(dev);
2575 for (i = 0; i < config->rx_ring_num; i++) {
2576 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2577 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2578 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2582 /* Re enable the Rx interrupts. */
2583 writeq(0x0, &bar0->rx_traffic_mask);
2584 readl(&bar0->rx_traffic_mask);
2585 atomic_dec(&nic->isr_cnt);
2589 dev->quota -= pkt_cnt;
2592 for (i = 0; i < config->rx_ring_num; i++) {
2593 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2594 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2595 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2599 atomic_dec(&nic->isr_cnt);
2603 #ifdef CONFIG_NET_POLL_CONTROLLER
2605 * s2io_netpoll - netpoll event handler entry point
2606 * @dev : pointer to the device structure.
2608 * This function will be called by upper layer to check for events on the
2609 * interface in situations where interrupts are disabled. It is used for
2610 * specific in-kernel networking tasks, such as remote consoles and kernel
2611 * debugging over the network (for example, netdump in Red Hat).
2613 static void s2io_netpoll(struct net_device *dev)
2615 struct s2io_nic *nic = dev->priv;
2616 struct mac_info *mac_control;
2617 struct config_param *config;
2618 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2619 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2622 disable_irq(dev->irq);
2624 atomic_inc(&nic->isr_cnt);
2625 mac_control = &nic->mac_control;
2626 config = &nic->config;
2628 writeq(val64, &bar0->rx_traffic_int);
2629 writeq(val64, &bar0->tx_traffic_int);
2631 /* We need to free the transmitted skbs, or else netpoll will
2632 * run out of skbs and fail, and eventually netpoll applications
2633 * such as netdump will fail.
2635 for (i = 0; i < config->tx_fifo_num; i++)
2636 tx_intr_handler(&mac_control->fifos[i]);
2638 /* check for received packet and indicate up to network */
2639 for (i = 0; i < config->rx_ring_num; i++)
2640 rx_intr_handler(&mac_control->rings[i]);
2642 for (i = 0; i < config->rx_ring_num; i++) {
2643 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2644 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2645 DBG_PRINT(ERR_DBG, " in Rx Netpoll!!\n");
2649 atomic_dec(&nic->isr_cnt);
2650 enable_irq(dev->irq);
2656 * rx_intr_handler - Rx interrupt handler
2657 * @nic: device private variable.
2659 * If the interrupt is because of a received frame or if the
2660 * receive ring contains fresh, as yet unprocessed frames, this function is
2661 * called. It picks out the RxD at which place the last Rx processing had
2662 * stopped and sends the skb to the OSM's Rx handler and then increments
2667 static void rx_intr_handler(struct ring_info *ring_data)
2669 struct s2io_nic *nic = ring_data->nic;
2670 struct net_device *dev = (struct net_device *) nic->dev;
2671 int get_block, put_block, put_offset;
2672 struct rx_curr_get_info get_info, put_info;
2674 struct sk_buff *skb;
2678 spin_lock(&nic->rx_lock);
2679 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2680 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2681 __FUNCTION__, dev->name);
2682 spin_unlock(&nic->rx_lock);
2686 get_info = ring_data->rx_curr_get_info;
2687 get_block = get_info.block_index;
2688 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2689 put_block = put_info.block_index;
2690 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2692 spin_lock(&nic->put_lock);
2693 put_offset = ring_data->put_pos;
2694 spin_unlock(&nic->put_lock);
2696 put_offset = ring_data->put_pos;
2698 while (RXD_IS_UP2DT(rxdp)) {
2700 * If you are next to the put index then it's a
2701 * FIFO full condition
2703 if ((get_block == put_block) &&
2704 (get_info.offset + 1) == put_info.offset) {
2705 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2708 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2710 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2712 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2713 spin_unlock(&nic->rx_lock);
2716 if (nic->rxd_mode == RXD_MODE_1) {
2717 pci_unmap_single(nic->pdev, (dma_addr_t)
2718 ((struct RxD1*)rxdp)->Buffer0_ptr,
2720 HEADER_ETHERNET_II_802_3_SIZE +
2723 PCI_DMA_FROMDEVICE);
2724 } else if (nic->rxd_mode == RXD_MODE_3B) {
2725 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2726 ((struct RxD3*)rxdp)->Buffer0_ptr,
2727 BUF0_LEN, PCI_DMA_FROMDEVICE);
2728 pci_unmap_single(nic->pdev, (dma_addr_t)
2729 ((struct RxD3*)rxdp)->Buffer2_ptr,
2731 PCI_DMA_FROMDEVICE);
2733 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2734 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2735 PCI_DMA_FROMDEVICE);
2736 pci_unmap_single(nic->pdev, (dma_addr_t)
2737 ((struct RxD3*)rxdp)->Buffer1_ptr,
2739 PCI_DMA_FROMDEVICE);
2740 pci_unmap_single(nic->pdev, (dma_addr_t)
2741 ((struct RxD3*)rxdp)->Buffer2_ptr,
2742 dev->mtu, PCI_DMA_FROMDEVICE);
2744 prefetch(skb->data);
2745 rx_osm_handler(ring_data, rxdp);
2747 ring_data->rx_curr_get_info.offset = get_info.offset;
2748 rxdp = ring_data->rx_blocks[get_block].
2749 rxds[get_info.offset].virt_addr;
2750 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2751 get_info.offset = 0;
2752 ring_data->rx_curr_get_info.offset = get_info.offset;
2754 if (get_block == ring_data->block_count)
2756 ring_data->rx_curr_get_info.block_index = get_block;
2757 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2760 nic->pkts_to_process -= 1;
2761 if ((napi) && (!nic->pkts_to_process))
2764 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2768 /* Clear all LRO sessions before exiting */
2769 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2770 struct lro *lro = &nic->lro0_n[i];
2772 update_L3L4_header(nic, lro);
2773 queue_rx_frame(lro->parent);
2774 clear_lro_session(lro);
2779 spin_unlock(&nic->rx_lock);
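/*
 * Illustrative sketch (not part of the driver): the advance-and-wrap
 * arithmetic rx_intr_handler() applies to its (block, offset) cursor
 * above. rxds_per_blk corresponds to rxd_count[nic->rxd_mode]; the
 * helper name is hypothetical.
 */
static void s2io_example_advance_rxd(int rxds_per_blk, int blk_cnt,
				     int *block, int *off)
{
	if (++(*off) == rxds_per_blk) {
		*off = 0;			/* wrap within the block */
		if (++(*block) == blk_cnt)
			*block = 0;		/* wrap around the ring */
	}
}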
2783 * tx_intr_handler - Transmit interrupt handler
2784 * @nic : device private variable
2786 * If an interrupt was raised to indicate DMA complete of the
2787 * Tx packet, this function is called. It identifies the last TxD
2788 * whose buffer was freed and frees all skbs whose data have already
2789 * been DMA'ed into the NIC's internal memory.
2794 static void tx_intr_handler(struct fifo_info *fifo_data)
2796 struct s2io_nic *nic = fifo_data->nic;
2797 struct net_device *dev = (struct net_device *) nic->dev;
2798 struct tx_curr_get_info get_info, put_info;
2799 struct sk_buff *skb;
2802 get_info = fifo_data->tx_curr_get_info;
2803 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
2804 txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
2806 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2807 (get_info.offset != put_info.offset) &&
2808 (txdlp->Host_Control)) {
2809 /* Check for TxD errors */
2810 if (txdlp->Control_1 & TXD_T_CODE) {
2811 unsigned long long err;
2812 err = txdlp->Control_1 & TXD_T_CODE;
2814 nic->mac_control.stats_info->sw_stat.
2817 if ((err >> 48) == 0xA) {
2818 DBG_PRINT(TX_DBG, "TxD returned due "
2819 "to loss of link\n");
2822 DBG_PRINT(ERR_DBG, "***TxD error %llx\n", err);
2826 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
2828 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2830 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2834 /* Updating the statistics block */
2835 nic->stats.tx_bytes += skb->len;
2836 dev_kfree_skb_irq(skb);
2839 if (get_info.offset == get_info.fifo_len + 1)
2840 get_info.offset = 0;
2841 txdlp = (struct TxD *) fifo_data->list_info
2842 [get_info.offset].list_virt_addr;
2843 fifo_data->tx_curr_get_info.offset =
2847 spin_lock(&nic->tx_lock);
2848 if (netif_queue_stopped(dev))
2849 netif_wake_queue(dev);
2850 spin_unlock(&nic->tx_lock);
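/*
 * Illustrative sketch (not part of the driver): pulling the transfer
 * code out of Control_1, mirroring the "(err >> 48) == 0xA" test in
 * tx_intr_handler() above. Treating bits 48-51 as the T_CODE field is
 * an assumption based on that test.
 */
static inline u8 s2io_example_txd_t_code(u64 control_1)
{
	return (u8)((control_1 & TXD_T_CODE) >> 48);
}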
2854 * s2io_mdio_write - Function to write into the MDIO registers
2855 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2856 * @addr : address value
2857 * @value : data value
2858 * @dev : pointer to net_device structure
2860 * This function is used to write values to the MDIO registers
2863 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
2866 struct s2io_nic *sp = dev->priv;
2867 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2869 //address transaction
2870 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2871 | MDIO_MMD_DEV_ADDR(mmd_type)
2872 | MDIO_MMS_PRT_ADDR(0x0);
2873 writeq(val64, &bar0->mdio_control);
2874 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2875 writeq(val64, &bar0->mdio_control);
2880 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2881 | MDIO_MMD_DEV_ADDR(mmd_type)
2882 | MDIO_MMS_PRT_ADDR(0x0)
2883 | MDIO_MDIO_DATA(value)
2884 | MDIO_OP(MDIO_OP_WRITE_TRANS);
2885 writeq(val64, &bar0->mdio_control);
2886 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2887 writeq(val64, &bar0->mdio_control);
2891 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2892 | MDIO_MMD_DEV_ADDR(mmd_type)
2893 | MDIO_MMS_PRT_ADDR(0x0)
2894 | MDIO_OP(MDIO_OP_READ_TRANS);
2895 writeq(val64, &bar0->mdio_control);
2896 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2897 writeq(val64, &bar0->mdio_control);
2903 * s2io_mdio_read - Function to read from the MDIO registers
2904 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2905 * @addr : address value
2906 * @dev : pointer to net_device structure
2908 * This function is used to read values from the MDIO registers
2911 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
2915 struct s2io_nic *sp = dev->priv;
2916 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2918 /* address transaction */
2919 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2920 | MDIO_MMD_DEV_ADDR(mmd_type)
2921 | MDIO_MMS_PRT_ADDR(0x0);
2922 writeq(val64, &bar0->mdio_control);
2923 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2924 writeq(val64, &bar0->mdio_control);
2927 /* Data transaction */
2929 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2930 | MDIO_MMD_DEV_ADDR(mmd_type)
2931 | MDIO_MMS_PRT_ADDR(0x0)
2932 | MDIO_OP(MDIO_OP_READ_TRANS);
2933 writeq(val64, &bar0->mdio_control);
2934 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2935 writeq(val64, &bar0->mdio_control);
2938 /* Read the value from regs */
2939 rval64 = readq(&bar0->mdio_control);
2940 rval64 = rval64 & 0xFFFF0000;
2941 rval64 = rval64 >> 16;
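/*
 * Illustrative sketch (not part of the driver): a read-modify-write
 * cycle through the two MDIO helpers above. The register address and
 * bit used here are hypothetical; the real DOM/alarm addresses appear
 * in s2io_updt_xpak_counter() below.
 */
static void s2io_example_mdio_rmw(struct net_device *dev)
{
	u64 addr = 0x0;		/* hypothetical MMD register address */
	u64 val = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	val |= 0x1;		/* set an arbitrary bit */
	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, (u16)val, dev);
}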
2945 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
2946 * @counter : counter value to be updated
2947 * @flag : flag to indicate the status
2948 * @type : counter type
2950 * This function checks the status of the XPAK counter values.
2954 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
2959 for (i = 0; i < index; i++)
2964 *counter = *counter + 1;
2965 val64 = *regs_stat & mask;
2966 val64 = val64 >> (index * 0x2);
2973 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2974 "service. Excessive temperatures may "
2975 "result in premature transceiver "
2979 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2980 "service Excessive bias currents may "
2981 "indicate imminent laser diode "
2985 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2986 "service Excessive laser output "
2987 "power may saturate far-end "
2991 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
2996 val64 = val64 << (index * 0x2);
2997 *regs_stat = (*regs_stat & (~mask)) | (val64);
3000 *regs_stat = *regs_stat & (~mask);
3005 * s2io_updt_xpak_counter - Function to update the xpak counters
3006 * @dev : pointer to net_device struct
3008 * This function updates the status of the XPAK counter values.
3011 static void s2io_updt_xpak_counter(struct net_device *dev)
3019 struct s2io_nic *sp = dev->priv;
3020 struct stat_block *stat_info = sp->mac_control.stats_info;
3022 /* Check the communication with the MDIO slave */
3025 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3026 if ((val64 == 0xFFFF) || (val64 == 0x0000))
3028 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3029 "Returned %llx\n", (unsigned long long)val64);
3033 /* Check for the expected value of 0x2040 at PMA address 0x0000 */
3036 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3037 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3038 (unsigned long long)val64);
3042 /* Loading the DOM register to MDIO register */
3044 s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3045 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3047 /* Reading the Alarm flags */
3050 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3052 flag = CHECKBIT(val64, 0x7);
3054 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3055 &stat_info->xpak_stat.xpak_regs_stat,
3058 if (CHECKBIT(val64, 0x6))
3059 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3061 flag = CHECKBIT(val64, 0x3);
3063 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3064 &stat_info->xpak_stat.xpak_regs_stat,
3067 if (CHECKBIT(val64, 0x2))
3068 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3070 flag = CHECKBIT(val64, 0x1);
3072 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3073 &stat_info->xpak_stat.xpak_regs_stat,
3076 if (CHECKBIT(val64, 0x0))
3077 stat_info->xpak_stat.alarm_laser_output_power_low++;
3079 /* Reading the Warning flags */
3082 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3084 if (CHECKBIT(val64, 0x7))
3085 stat_info->xpak_stat.warn_transceiver_temp_high++;
3087 if (CHECKBIT(val64, 0x6))
3088 stat_info->xpak_stat.warn_transceiver_temp_low++;
3090 if (CHECKBIT(val64, 0x3))
3091 stat_info->xpak_stat.warn_laser_bias_current_high++;
3093 if (CHECKBIT(val64, 0x2))
3094 stat_info->xpak_stat.warn_laser_bias_current_low++;
3096 if (CHECKBIT(val64, 0x1))
3097 stat_info->xpak_stat.warn_laser_output_power_high++;
3099 if (CHECKBIT(val64, 0x0))
3100 stat_info->xpak_stat.warn_laser_output_power_low++;
3104 * alarm_intr_handler - Alarm Interrupt handler
3105 * @nic: device private variable
3106 * Description: If the interrupt was neither because of an Rx packet nor a
3107 * Tx completion, this function is called. If the interrupt indicates
3108 * a loss of link, the OSM link status handler is invoked; for any other
3109 * alarm interrupt the block that raised the interrupt is displayed
3110 * and a H/W reset is issued.
3115 static void alarm_intr_handler(struct s2io_nic *nic)
3117 struct net_device *dev = (struct net_device *) nic->dev;
3118 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3119 register u64 val64 = 0, err_reg = 0;
3122 if (atomic_read(&nic->card_state) == CARD_DOWN)
3124 nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
3125 /* Handling the XPAK counters update */
3126 if (nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
3127 /* waiting for an hour */
3128 nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
3130 s2io_updt_xpak_counter(dev);
3131 /* reset the count to zero */
3132 nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
3135 /* Handling link status change error Intr */
3136 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
3137 err_reg = readq(&bar0->mac_rmac_err_reg);
3138 writeq(err_reg, &bar0->mac_rmac_err_reg);
3139 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
3140 schedule_work(&nic->set_link_task);
3144 /* Handling Ecc errors */
3145 val64 = readq(&bar0->mc_err_reg);
3146 writeq(val64, &bar0->mc_err_reg);
3147 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
3148 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
3149 nic->mac_control.stats_info->sw_stat.
3151 DBG_PRINT(INIT_DBG, "%s: Device indicates ",
3153 DBG_PRINT(INIT_DBG, "double ECC error!!\n");
3154 if (nic->device_type != XFRAME_II_DEVICE) {
3155 /* Reset XframeI only if critical error */
3156 if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
3157 MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
3158 netif_stop_queue(dev);
3159 schedule_work(&nic->rst_timer_task);
3160 nic->mac_control.stats_info->sw_stat.
3165 nic->mac_control.stats_info->sw_stat.
3170 /* In case of a serious error, the device will be Reset. */
3171 val64 = readq(&bar0->serr_source);
3172 if (val64 & SERR_SOURCE_ANY) {
3173 nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
3174 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
3175 DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
3176 (unsigned long long)val64);
3177 netif_stop_queue(dev);
3178 schedule_work(&nic->rst_timer_task);
3179 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3183 * Also, as mentioned in the latest errata sheets, if a PCC_FB_ECC
3184 * error occurs, the adapter will be recycled by disabling the
3185 * adapter enable bit and re-enabling it after the device
3186 * becomes Quiescent.
3188 val64 = readq(&bar0->pcc_err_reg);
3189 writeq(val64, &bar0->pcc_err_reg);
3190 if (val64 & PCC_FB_ECC_DB_ERR) {
3191 u64 ac = readq(&bar0->adapter_control);
3192 ac &= ~(ADAPTER_CNTL_EN);
3193 writeq(ac, &bar0->adapter_control);
3194 ac = readq(&bar0->adapter_control);
3195 schedule_work(&nic->set_link_task);
3197 /* Check for data parity error */
3198 val64 = readq(&bar0->pic_int_status);
3199 if (val64 & PIC_INT_GPIO) {
3200 val64 = readq(&bar0->gpio_int_reg);
3201 if (val64 & GPIO_INT_REG_DP_ERR_INT) {
3202 nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
3203 schedule_work(&nic->rst_timer_task);
3204 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3208 /* Check for ring full counter */
3209 if (nic->device_type & XFRAME_II_DEVICE) {
3210 val64 = readq(&bar0->ring_bump_counter1);
3211 for (i=0; i<4; i++) {
3212 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3213 cnt >>= 64 - ((i+1)*16);
3214 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3218 val64 = readq(&bar0->ring_bump_counter2);
3219 for (i=0; i<4; i++) {
3220 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3221 cnt >>= 64 - ((i+1)*16);
3222 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3227 /* Other types of interrupts are not handled yet, TODO */
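/*
 * Illustrative sketch (not part of the driver): extracting one 16-bit
 * ring-bump field from a 64-bit counter register, as alarm_intr_handler()
 * does above for ring_bump_counter1/2.
 */
static inline u16 s2io_example_ring_bump(u64 reg, int i)
{
	u64 cnt = reg & vBIT(0xFFFF, (i * 16), 16);

	return (u16)(cnt >> (64 - ((i + 1) * 16)));
}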
3231 * wait_for_cmd_complete - waits for a command to complete.
3232 * @sp : private member of the device structure, which is a pointer to the
3233 * s2io_nic structure.
3234 * Description: Function that waits for a command written to the RMAC
3235 * ADDR/DATA registers to complete and returns either success or
3236 * error depending on whether the command completed or not.
3238 * SUCCESS on success and FAILURE on failure.
3241 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3244 int ret = FAILURE, cnt = 0, delay = 1;
3247 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3251 val64 = readq(addr);
3252 if (bit_state == S2IO_BIT_RESET) {
3253 if (!(val64 & busy_bit)) {
3258 if (!(val64 & busy_bit)) {
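/*
 * Illustrative sketch (not part of the driver): how callers drive
 * wait_for_cmd_complete(), mirroring the RMAC address command issued in
 * s2io_set_multicast() below. The S2IO_BIT_RESET final state is an
 * assumption for this example.
 */
static int s2io_example_wait_rmac(struct XENA_dev_config __iomem *bar0)
{
	return wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
			RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
			S2IO_BIT_RESET);
}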
3275 * check_pci_device_id - Checks if the device id is supported
3277 * Description: Function to check if the pci device id is supported by the driver.
3278 * Return value: Actual device id if supported else PCI_ANY_ID
3280 static u16 check_pci_device_id(u16 id)
3283 case PCI_DEVICE_ID_HERC_WIN:
3284 case PCI_DEVICE_ID_HERC_UNI:
3285 return XFRAME_II_DEVICE;
3286 case PCI_DEVICE_ID_S2IO_UNI:
3287 case PCI_DEVICE_ID_S2IO_WIN:
3288 return XFRAME_I_DEVICE;
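/*
 * Illustrative sketch (not part of the driver): validating the device id
 * read back from config space, as s2io_reset() does below after
 * restoring PCI state.
 */
static int s2io_example_id_ok(struct pci_dev *pdev)
{
	u16 id;

	pci_read_config_word(pdev, PCI_DEVICE_ID, &id);
	return check_pci_device_id(id) != (u16)PCI_ANY_ID;
}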
3295 * s2io_reset - Resets the card.
3296 * @sp : private member of the device structure.
3297 * Description: Function to Reset the card. This function then also
3298 * restores the previously saved PCI configuration space registers as
3299 * the card reset also resets the configuration space.
3304 static void s2io_reset(struct s2io_nic * sp)
3306 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3311 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3312 __FUNCTION__, sp->dev->name);
3314 /* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
3315 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3317 if (sp->device_type == XFRAME_II_DEVICE) {
3319 ret = pci_set_power_state(sp->pdev, 3);
3321 ret = pci_set_power_state(sp->pdev, 0);
3323 DBG_PRINT(ERR_DBG,"%s PME based SW_Reset failed!\n",
3331 val64 = SW_RESET_ALL;
3332 writeq(val64, &bar0->sw_reset);
3334 if (strstr(sp->product_name, "CX4")) {
3338 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3340 /* Restore the PCI state saved during initialization. */
3341 pci_restore_state(sp->pdev);
3342 pci_read_config_word(sp->pdev, 0x2, &val16);
3343 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3348 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3349 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3352 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3356 /* Set swapper to enable I/O register access */
3357 s2io_set_swapper(sp);
3359 /* Restore the MSIX table entries from local variables */
3360 restore_xmsi_data(sp);
3362 /* Clear certain PCI/PCI-X fields after reset */
3363 if (sp->device_type == XFRAME_II_DEVICE) {
3364 /* Clear "detected parity error" bit */
3365 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3367 /* Clearing PCIX Ecc status register */
3368 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3370 /* Clearing PCI_STATUS error reflected here */
3371 writeq(BIT(62), &bar0->txpic_int_reg);
3374 /* Reset device statistics maintained by OS */
3375 memset(&sp->stats, 0, sizeof (struct net_device_stats));
3377 /* SXE-002: Configure link and activity LED to turn it off */
3378 subid = sp->pdev->subsystem_device;
3379 if (((subid & 0xFF) >= 0x07) &&
3380 (sp->device_type == XFRAME_I_DEVICE)) {
3381 val64 = readq(&bar0->gpio_control);
3382 val64 |= 0x0000800000000000ULL;
3383 writeq(val64, &bar0->gpio_control);
3384 val64 = 0x0411040400000000ULL;
3385 writeq(val64, (void __iomem *)bar0 + 0x2700);
3389 * Clear spurious ECC interrupts that would have occurred on
3390 * XFRAME II cards after reset.
3392 if (sp->device_type == XFRAME_II_DEVICE) {
3393 val64 = readq(&bar0->pcc_err_reg);
3394 writeq(val64, &bar0->pcc_err_reg);
3397 /* restore the previously assigned mac address */
3398 s2io_set_mac_addr(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);
3400 sp->device_enabled_once = FALSE;
3404 * s2io_set_swapper - to set the swapper control on the card
3405 * @sp : private member of the device structure,
3406 * pointer to the s2io_nic structure.
3407 * Description: Function to set the swapper control on the card
3408 * correctly depending on the 'endianness' of the system.
3410 * SUCCESS on success and FAILURE on failure.
3413 static int s2io_set_swapper(struct s2io_nic * sp)
3415 struct net_device *dev = sp->dev;
3416 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3417 u64 val64, valt, valr;
3420 * Set proper endian settings and verify the same by reading
3421 * the PIF Feed-back register.
3424 val64 = readq(&bar0->pif_rd_swapper_fb);
3425 if (val64 != 0x0123456789ABCDEFULL) {
3427 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3428 0x8100008181000081ULL, /* FE=1, SE=0 */
3429 0x4200004242000042ULL, /* FE=0, SE=1 */
3430 0}; /* FE=0, SE=0 */
3433 writeq(value[i], &bar0->swapper_ctrl);
3434 val64 = readq(&bar0->pif_rd_swapper_fb);
3435 if (val64 == 0x0123456789ABCDEFULL)
3440 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3442 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3443 (unsigned long long) val64);
3448 valr = readq(&bar0->swapper_ctrl);
3451 valt = 0x0123456789ABCDEFULL;
3452 writeq(valt, &bar0->xmsi_address);
3453 val64 = readq(&bar0->xmsi_address);
3457 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3458 0x0081810000818100ULL, /* FE=1, SE=0 */
3459 0x0042420000424200ULL, /* FE=0, SE=1 */
3460 0}; /* FE=0, SE=0 */
3463 writeq((value[i] | valr), &bar0->swapper_ctrl);
3464 writeq(valt, &bar0->xmsi_address);
3465 val64 = readq(&bar0->xmsi_address);
3471 unsigned long long x = val64;
3472 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3473 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3477 val64 = readq(&bar0->swapper_ctrl);
3478 val64 &= 0xFFFF000000000000ULL;
3482 * The device is by default set to a big-endian format, so a
3483 * big-endian driver need not set anything.
3485 val64 |= (SWAPPER_CTRL_TXP_FE |
3486 SWAPPER_CTRL_TXP_SE |
3487 SWAPPER_CTRL_TXD_R_FE |
3488 SWAPPER_CTRL_TXD_W_FE |
3489 SWAPPER_CTRL_TXF_R_FE |
3490 SWAPPER_CTRL_RXD_R_FE |
3491 SWAPPER_CTRL_RXD_W_FE |
3492 SWAPPER_CTRL_RXF_W_FE |
3493 SWAPPER_CTRL_XMSI_FE |
3494 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3495 if (sp->intr_type == INTA)
3496 val64 |= SWAPPER_CTRL_XMSI_SE;
3497 writeq(val64, &bar0->swapper_ctrl);
3500 * Initially we enable all bits to make it accessible by the
3501 * driver, then we selectively enable only those bits that
3504 val64 |= (SWAPPER_CTRL_TXP_FE |
3505 SWAPPER_CTRL_TXP_SE |
3506 SWAPPER_CTRL_TXD_R_FE |
3507 SWAPPER_CTRL_TXD_R_SE |
3508 SWAPPER_CTRL_TXD_W_FE |
3509 SWAPPER_CTRL_TXD_W_SE |
3510 SWAPPER_CTRL_TXF_R_FE |
3511 SWAPPER_CTRL_RXD_R_FE |
3512 SWAPPER_CTRL_RXD_R_SE |
3513 SWAPPER_CTRL_RXD_W_FE |
3514 SWAPPER_CTRL_RXD_W_SE |
3515 SWAPPER_CTRL_RXF_W_FE |
3516 SWAPPER_CTRL_XMSI_FE |
3517 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3518 if (sp->intr_type == INTA)
3519 val64 |= SWAPPER_CTRL_XMSI_SE;
3520 writeq(val64, &bar0->swapper_ctrl);
3522 val64 = readq(&bar0->swapper_ctrl);
3525 * Verifying if endian settings are accurate by reading a
3526 * feedback register.
3528 val64 = readq(&bar0->pif_rd_swapper_fb);
3529 if (val64 != 0x0123456789ABCDEFULL) {
3530 /* Endian settings are incorrect, calls for another look. */
3531 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3533 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3534 (unsigned long long) val64);
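/*
 * Illustrative sketch (not part of the driver): the endianness probe
 * s2io_set_swapper() relies on above - a known constant is read back
 * through the PIF feed-back register and compared.
 */
static inline int s2io_example_swapper_ok(struct XENA_dev_config __iomem *bar0)
{
	return readq(&bar0->pif_rd_swapper_fb) == 0x0123456789ABCDEFULL;
}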
3541 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3543 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3545 int ret = 0, cnt = 0;
3548 val64 = readq(&bar0->xmsi_access);
3549 if (!(val64 & BIT(15)))
3555 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3562 static void restore_xmsi_data(struct s2io_nic *nic)
3564 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3568 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3569 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3570 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3571 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3572 writeq(val64, &bar0->xmsi_access);
3573 if (wait_for_msix_trans(nic, i)) {
3574 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3580 static void store_xmsi_data(struct s2io_nic *nic)
3582 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3583 u64 val64, addr, data;
3586 /* Store and display */
3587 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3588 val64 = (BIT(15) | vBIT(i, 26, 6));
3589 writeq(val64, &bar0->xmsi_access);
3590 if (wait_for_msix_trans(nic, i)) {
3591 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3594 addr = readq(&bar0->xmsi_address);
3595 data = readq(&bar0->xmsi_data);
3597 nic->msix_info[i].addr = addr;
3598 nic->msix_info[i].data = data;
3603 int s2io_enable_msi(struct s2io_nic *nic)
3605 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3606 u16 msi_ctrl, msg_val;
3607 struct config_param *config = &nic->config;
3608 struct net_device *dev = nic->dev;
3609 u64 val64, tx_mat, rx_mat;
3612 val64 = readq(&bar0->pic_control);
3614 writeq(val64, &bar0->pic_control);
3616 err = pci_enable_msi(nic->pdev);
3618 DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
3624 * Enable MSI and use MSI-1 instead of the standard MSI-0
3625 * for interrupt handling.
3627 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3629 pci_write_config_word(nic->pdev, 0x4c, msg_val);
3630 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3632 pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
3634 pci_write_config_word(nic->pdev, 0x42, msi_ctrl);
3636 /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
3637 tx_mat = readq(&bar0->tx_mat0_n[0]);
3638 for (i=0; i<config->tx_fifo_num; i++) {
3639 tx_mat |= TX_MAT_SET(i, 1);
3641 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3643 rx_mat = readq(&bar0->rx_mat);
3644 for (i=0; i<config->rx_ring_num; i++) {
3645 rx_mat |= RX_MAT_SET(i, 1);
3647 writeq(rx_mat, &bar0->rx_mat);
3649 dev->irq = nic->pdev->irq;
3653 static int s2io_enable_msi_x(struct s2io_nic *nic)
3655 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3657 u16 msi_control; /* Temp variable */
3658 int ret, i, j, msix_indx = 1;
3660 nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3662 if (nic->entries == NULL) {
3663 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3666 memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3669 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3671 if (nic->s2io_entries == NULL) {
3672 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3673 kfree(nic->entries);
3676 memset(nic->s2io_entries, 0,
3677 MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3679 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3680 nic->entries[i].entry = i;
3681 nic->s2io_entries[i].entry = i;
3682 nic->s2io_entries[i].arg = NULL;
3683 nic->s2io_entries[i].in_use = 0;
3686 tx_mat = readq(&bar0->tx_mat0_n[0]);
3687 for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3688 tx_mat |= TX_MAT_SET(i, msix_indx);
3689 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3690 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3691 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3693 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3695 if (!nic->config.bimodal) {
3696 rx_mat = readq(&bar0->rx_mat);
3697 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3698 rx_mat |= RX_MAT_SET(j, msix_indx);
3699 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3700 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3701 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3703 writeq(rx_mat, &bar0->rx_mat);
3705 tx_mat = readq(&bar0->tx_mat0_n[7]);
3706 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3707 tx_mat |= TX_MAT_SET(j, msix_indx); /* index by ring (j), not i */
3708 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3709 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3710 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3712 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3715 nic->avail_msix_vectors = 0;
3716 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3717 /* We fail init if error or we get less vectors than min required */
3718 if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3719 nic->avail_msix_vectors = ret;
3720 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3723 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3724 kfree(nic->entries);
3725 kfree(nic->s2io_entries);
3726 nic->entries = NULL;
3727 nic->s2io_entries = NULL;
3728 nic->avail_msix_vectors = 0;
3731 if (!nic->avail_msix_vectors)
3732 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3735 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3736 * in the herc NIC. (Temp change, needs to be removed later)
3738 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3739 msi_control |= 0x1; /* Enable MSI */
3740 pci_write_config_word(nic->pdev, 0x42, msi_control);
3745 /* ********************************************************* *
3746 * Functions defined below concern the OS part of the driver *
3747 * ********************************************************* */
3750 * s2io_open - open entry point of the driver
3751 * @dev : pointer to the device structure.
3753 * This function is the open entry point of the driver. It mainly calls a
3754 * function to allocate Rx buffers and inserts them into the buffer
3755 * descriptors and then enables the Rx part of the NIC.
3757 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3761 static int s2io_open(struct net_device *dev)
3763 struct s2io_nic *sp = dev->priv;
3767 * Make sure the link is off by default every time the
3768 * NIC is initialized
3770 netif_carrier_off(dev);
3771 sp->last_link_state = 0;
3773 /* Initialize H/W and enable interrupts */
3774 err = s2io_card_up(sp);
3776 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3778 goto hw_init_failed;
3781 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3782 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3785 goto hw_init_failed;
3788 netif_start_queue(dev);
3792 if (sp->intr_type == MSI_X) {
3795 if (sp->s2io_entries)
3796 kfree(sp->s2io_entries);
3802 * s2io_close -close entry point of the driver
3803 * @dev : device pointer.
3805 * This is the stop entry point of the driver. It needs to undo exactly
3806 * whatever was done by the open entry point, thus it's usually referred to
3807 * as the close function. Among other things this function mainly stops the
3808 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3810 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3814 static int s2io_close(struct net_device *dev)
3816 struct s2io_nic *sp = dev->priv;
3818 netif_stop_queue(dev);
3819 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3822 sp->device_close_flag = TRUE; /* Device is shut down. */
3827 * s2io_xmit - Tx entry point of the driver
3828 * @skb : the socket buffer containing the Tx data.
3829 * @dev : device pointer.
3831 * This function is the Tx entry point of the driver. S2IO NIC supports
3832 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
3833 * NOTE: when the device can't queue the pkt, just the trans_start variable will
3836 * 0 on success & 1 on failure.
3839 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3841 struct s2io_nic *sp = dev->priv;
3842 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3845 struct TxFIFO_element __iomem *tx_fifo;
3846 unsigned long flags;
3848 int vlan_priority = 0;
3849 struct mac_info *mac_control;
3850 struct config_param *config;
3853 mac_control = &sp->mac_control;
3854 config = &sp->config;
3856 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
3857 spin_lock_irqsave(&sp->tx_lock, flags);
3858 if (atomic_read(&sp->card_state) == CARD_DOWN) {
3859 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
3861 spin_unlock_irqrestore(&sp->tx_lock, flags);
3868 /* Get Fifo number to Transmit based on vlan priority */
3869 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3870 vlan_tag = vlan_tx_tag_get(skb);
3871 vlan_priority = vlan_tag >> 13;
3872 queue = config->fifo_mapping[vlan_priority];
3875 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3876 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3877 txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
3880 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3881 /* Avoid "put" pointer going beyond "get" pointer */
3882 if (txdp->Host_Control ||
3883 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
3884 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
3885 netif_stop_queue(dev);
3887 spin_unlock_irqrestore(&sp->tx_lock, flags);
3891 /* A buffer with no data will be dropped */
3893 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3895 spin_unlock_irqrestore(&sp->tx_lock, flags);
3899 offload_type = s2io_offload_type(skb);
3900 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3901 txdp->Control_1 |= TXD_TCP_LSO_EN;
3902 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
3904 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3906 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3909 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
3910 txdp->Control_1 |= TXD_LIST_OWN_XENA;
3911 txdp->Control_2 |= config->tx_intr_type;
3913 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3914 txdp->Control_2 |= TXD_VLAN_ENABLE;
3915 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3918 frg_len = skb->len - skb->data_len;
3919 if (offload_type == SKB_GSO_UDP) {
3922 ufo_size = s2io_udp_mss(skb);
3924 txdp->Control_1 |= TXD_UFO_EN;
3925 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
3926 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
3928 sp->ufo_in_band_v[put_off] =
3929 (u64)skb_shinfo(skb)->ip6_frag_id;
3931 sp->ufo_in_band_v[put_off] =
3932 (u64)skb_shinfo(skb)->ip6_frag_id << 32;
3934 txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
3935 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
3937 sizeof(u64), PCI_DMA_TODEVICE);
3941 txdp->Buffer_Pointer = pci_map_single
3942 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3943 txdp->Host_Control = (unsigned long) skb;
3944 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
3945 if (offload_type == SKB_GSO_UDP)
3946 txdp->Control_1 |= TXD_UFO_EN;
3948 frg_cnt = skb_shinfo(skb)->nr_frags;
3949 /* For fragmented SKB. */
3950 for (i = 0; i < frg_cnt; i++) {
3951 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3952 /* A '0' length fragment will be ignored */
3956 txdp->Buffer_Pointer = (u64) pci_map_page
3957 (sp->pdev, frag->page, frag->page_offset,
3958 frag->size, PCI_DMA_TODEVICE);
3959 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
3960 if (offload_type == SKB_GSO_UDP)
3961 txdp->Control_1 |= TXD_UFO_EN;
3963 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
3965 if (offload_type == SKB_GSO_UDP)
3966 frg_cnt++; /* as Txd0 was used for inband header */
3968 tx_fifo = mac_control->tx_FIFO_start[queue];
3969 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
3970 writeq(val64, &tx_fifo->TxDL_Pointer);
3972 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3975 val64 |= TX_FIFO_SPECIAL_FUNC;
3977 writeq(val64, &tx_fifo->List_Control);
3982 if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
3984 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
3986 /* Avoid "put" pointer going beyond "get" pointer */
3987 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
3988 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
3990 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
3992 netif_stop_queue(dev);
3995 dev->trans_start = jiffies;
3996 spin_unlock_irqrestore(&sp->tx_lock, flags);
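/*
 * Illustrative sketch (not part of the driver): the circular "put must
 * not catch up with get" test that s2io_xmit() applies twice above.
 * Returns 1 when one more TxD would collide with the get pointer.
 */
static inline int s2io_example_fifo_full(u16 put_off, u16 get_off,
					 u16 queue_len)
{
	return ((put_off + 1) == queue_len ? 0 : (put_off + 1)) == get_off;
}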
4002 s2io_alarm_handle(unsigned long data)
4004 struct s2io_nic *sp = (struct s2io_nic *)data;
4006 alarm_intr_handler(sp);
4007 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
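/*
 * Illustrative sketch (not part of the driver): arming the alarm timer
 * that s2io_alarm_handle() re-arms above. The real setup lives in the
 * card-up path; this standalone variant is an assumption.
 */
static void s2io_example_arm_alarm(struct s2io_nic *sp)
{
	init_timer(&sp->alarm_timer);
	sp->alarm_timer.function = s2io_alarm_handle;
	sp->alarm_timer.data = (unsigned long)sp;
	sp->alarm_timer.expires = jiffies + HZ / 2;
	add_timer(&sp->alarm_timer);
}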
4010 static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
4012 int rxb_size, level;
4015 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4016 level = rx_buffer_level(sp, rxb_size, rng_n);
4018 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4020 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4021 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4022 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4023 DBG_PRINT(ERR_DBG, "Out of memory in %s",
4025 clear_bit(0, (&sp->tasklet_status));
4028 clear_bit(0, (&sp->tasklet_status));
4029 } else if (level == LOW)
4030 tasklet_schedule(&sp->task);
4032 } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4033 DBG_PRINT(ERR_DBG, "%s:Out of memory", sp->dev->name);
4034 DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
4039 static irqreturn_t s2io_msi_handle(int irq, void *dev_id)
4041 struct net_device *dev = (struct net_device *) dev_id;
4042 struct s2io_nic *sp = dev->priv;
4044 struct mac_info *mac_control;
4045 struct config_param *config;
4047 atomic_inc(&sp->isr_cnt);
4048 mac_control = &sp->mac_control;
4049 config = &sp->config;
4050 DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
4052 /* If Intr is because of Rx Traffic */
4053 for (i = 0; i < config->rx_ring_num; i++)
4054 rx_intr_handler(&mac_control->rings[i]);
4056 /* If Intr is because of Tx Traffic */
4057 for (i = 0; i < config->tx_fifo_num; i++)
4058 tx_intr_handler(&mac_control->fifos[i]);
4061 * If the Rx buffer count is below the panic threshold then
4062 * reallocate the buffers from the interrupt handler itself,
4063 * else schedule a tasklet to reallocate the buffers.
4065 for (i = 0; i < config->rx_ring_num; i++)
4066 s2io_chk_rx_buffers(sp, i);
4068 atomic_dec(&sp->isr_cnt);
4072 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4074 struct ring_info *ring = (struct ring_info *)dev_id;
4075 struct s2io_nic *sp = ring->nic;
4077 atomic_inc(&sp->isr_cnt);
4079 rx_intr_handler(ring);
4080 s2io_chk_rx_buffers(sp, ring->ring_no);
4082 atomic_dec(&sp->isr_cnt);
4086 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4088 struct fifo_info *fifo = (struct fifo_info *)dev_id;
4089 struct s2io_nic *sp = fifo->nic;
4091 atomic_inc(&sp->isr_cnt);
4092 tx_intr_handler(fifo);
4093 atomic_dec(&sp->isr_cnt);
4096 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4098 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4101 val64 = readq(&bar0->pic_int_status);
4102 if (val64 & PIC_INT_GPIO) {
4103 val64 = readq(&bar0->gpio_int_reg);
4104 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4105 (val64 & GPIO_INT_REG_LINK_UP)) {
4107 * This is an unstable state, so clear both up/down
4108 * interrupts and let the adapter re-evaluate the link state.
4110 val64 |= GPIO_INT_REG_LINK_DOWN;
4111 val64 |= GPIO_INT_REG_LINK_UP;
4112 writeq(val64, &bar0->gpio_int_reg);
4113 val64 = readq(&bar0->gpio_int_mask);
4114 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4115 GPIO_INT_MASK_LINK_DOWN);
4116 writeq(val64, &bar0->gpio_int_mask);
4118 else if (val64 & GPIO_INT_REG_LINK_UP) {
4119 val64 = readq(&bar0->adapter_status);
4120 /* Enable Adapter */
4121 val64 = readq(&bar0->adapter_control);
4122 val64 |= ADAPTER_CNTL_EN;
4123 writeq(val64, &bar0->adapter_control);
4124 val64 |= ADAPTER_LED_ON;
4125 writeq(val64, &bar0->adapter_control);
4126 if (!sp->device_enabled_once)
4127 sp->device_enabled_once = 1;
4129 s2io_link(sp, LINK_UP);
4131 * unmask link down interrupt and mask link-up
4134 val64 = readq(&bar0->gpio_int_mask);
4135 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4136 val64 |= GPIO_INT_MASK_LINK_UP;
4137 writeq(val64, &bar0->gpio_int_mask);
4139 } else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4140 val64 = readq(&bar0->adapter_status);
4141 s2io_link(sp, LINK_DOWN);
4142 /* Link is down so unmask the link up interrupt */
4143 val64 = readq(&bar0->gpio_int_mask);
4144 val64 &= ~GPIO_INT_MASK_LINK_UP;
4145 val64 |= GPIO_INT_MASK_LINK_DOWN;
4146 writeq(val64, &bar0->gpio_int_mask);
4149 val64 = readq(&bar0->adapter_control);
4150 val64 = val64 &(~ADAPTER_LED_ON);
4151 writeq(val64, &bar0->adapter_control);
4154 val64 = readq(&bar0->gpio_int_mask);
4158 * s2io_isr - ISR handler of the device.
4159 * @irq: the irq of the device.
4160 * @dev_id: a void pointer to the dev structure of the NIC.
4161 * Description: This function is the ISR handler of the device. It
4162 * identifies the reason for the interrupt and calls the relevant
4163 * service routines. As a contingency measure, this ISR allocates the
4164 * recv buffers, if their numbers are below the panic value which is
4165 * presently set to 25% of the original number of rcv buffers allocated.
4167 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4168 * IRQ_NONE: will be returned if interrupt is not from our device
4170 static irqreturn_t s2io_isr(int irq, void *dev_id)
4172 struct net_device *dev = (struct net_device *) dev_id;
4173 struct s2io_nic *sp = dev->priv;
4174 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4177 struct mac_info *mac_control;
4178 struct config_param *config;
4180 atomic_inc(&sp->isr_cnt);
4181 mac_control = &sp->mac_control;
4182 config = &sp->config;
4185 * Identify the cause for interrupt and call the appropriate
4186 * interrupt handler. Causes for the interrupt could be:
4190 * 4. Error in any functional blocks of the NIC.
4192 reason = readq(&bar0->general_int_status);
4195 /* The interrupt was not raised by us. */
4196 atomic_dec(&sp->isr_cnt);
4199 else if (unlikely(reason == S2IO_MINUS_ONE)) {
4200 /* Disable device and get out */
4201 atomic_dec(&sp->isr_cnt);
4206 if (reason & GEN_INTR_RXTRAFFIC) {
4207 if (likely(netif_rx_schedule_prep(dev))) {
4208 __netif_rx_schedule(dev);
4209 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4212 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4216 * Rx handler is called by default, without checking for the
4217 * cause of interrupt.
4218 * rx_traffic_int reg is an R1 register, writing all 1's
4219 * will ensure that the actual interrupt-causing bit gets
4220 * cleared and hence a read can be avoided.
4222 if (reason & GEN_INTR_RXTRAFFIC)
4223 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4225 for (i = 0; i < config->rx_ring_num; i++) {
4226 rx_intr_handler(&mac_control->rings[i]);
4231 * tx_traffic_int reg is an R1 register, writing all 1's
4232 * will ensure that the actual interrupt-causing bit gets
4233 * cleared and hence a read can be avoided.
4235 if (reason & GEN_INTR_TXTRAFFIC)
4236 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4238 for (i = 0; i < config->tx_fifo_num; i++)
4239 tx_intr_handler(&mac_control->fifos[i]);
4241 if (reason & GEN_INTR_TXPIC)
4242 s2io_txpic_intr_handle(sp);
4244 * If the Rx buffer count is below the panic threshold then
4245 * reallocate the buffers from the interrupt handler itself,
4246 * else schedule a tasklet to reallocate the buffers.
4249 for (i = 0; i < config->rx_ring_num; i++)
4250 s2io_chk_rx_buffers(sp, i);
4253 writeq(0, &bar0->general_int_mask);
4254 readl(&bar0->general_int_status);
4256 atomic_dec(&sp->isr_cnt);
4263 static void s2io_updt_stats(struct s2io_nic *sp)
4265 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4269 if (atomic_read(&sp->card_state) == CARD_UP) {
4270 /* Approx 30us on a 133 MHz bus */
4271 val64 = SET_UPDT_CLICKS(10) |
4272 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4273 writeq(val64, &bar0->stat_cfg);
4276 val64 = readq(&bar0->stat_cfg);
4277 if (!(val64 & BIT(0)))
4281 break; /* Updt failed */
4284 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
4289 * s2io_get_stats - Updates the device statistics structure.
4290 * @dev : pointer to the device structure.
4292 * This function updates the device statistics structure in the s2io_nic
4293 * structure and returns a pointer to the same.
4295 * pointer to the updated net_device_stats structure.
4298 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4300 struct s2io_nic *sp = dev->priv;
4301 struct mac_info *mac_control;
4302 struct config_param *config;
4305 mac_control = &sp->mac_control;
4306 config = &sp->config;
4308 /* Configure Stats for immediate updt */
4309 s2io_updt_stats(sp);
4311 sp->stats.tx_packets =
4312 le32_to_cpu(mac_control->stats_info->tmac_frms);
4313 sp->stats.tx_errors =
4314 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4315 sp->stats.rx_errors =
4316 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4317 sp->stats.multicast =
4318 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4319 sp->stats.rx_length_errors =
4320 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4322 return (&sp->stats);
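/*
 * Usage note (illustrative): the stack calls the get_stats hook above
 * whenever interface statistics are read from user space, e.g. through
 * "ifconfig ethX" or /proc/net/dev, so the counters are refreshed from
 * the hardware statistics block on demand.
 */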
4326 * s2io_set_multicast - entry point for multicast address enable/disable.
4327 * @dev : pointer to the device structure
4329 * This function is a driver entry point which gets called by the kernel
4330 * whenever multicast addresses must be enabled/disabled. This also gets
4331 * called to set/reset promiscuous mode. Depending on the device flags, we
4332 * determine whether multicast addresses must be enabled or promiscuous mode
4333 * is to be disabled, etc.
4338 static void s2io_set_multicast(struct net_device *dev)
4341 struct dev_mc_list *mclist;
4342 struct s2io_nic *sp = dev->priv;
4343 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4344 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4346 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
4349 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4350 /* Enable all Multicast addresses */
4351 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4352 &bar0->rmac_addr_data0_mem);
4353 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4354 &bar0->rmac_addr_data1_mem);
4355 val64 = RMAC_ADDR_CMD_MEM_WE |
4356 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4357 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
4358 writeq(val64, &bar0->rmac_addr_cmd_mem);
4359 /* Wait till command completes */
4360 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4361 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4365 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
4366 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4367 /* Disable all Multicast addresses */
4368 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4369 &bar0->rmac_addr_data0_mem);
4370 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4371 &bar0->rmac_addr_data1_mem);
4372 val64 = RMAC_ADDR_CMD_MEM_WE |
4373 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4374 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4375 writeq(val64, &bar0->rmac_addr_cmd_mem);
4376 /* Wait till command completes */
4377 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4378 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4382 sp->all_multi_pos = 0;
4385 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4386 /* Put the NIC into promiscuous mode */
4387 add = &bar0->mac_cfg;
4388 val64 = readq(&bar0->mac_cfg);
4389 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4391 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4392 writel((u32) val64, add);
4393 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4394 writel((u32) (val64 >> 32), (add + 4));
4396 if (vlan_tag_strip != 1) {
4397 val64 = readq(&bar0->rx_pa_cfg);
4398 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4399 writeq(val64, &bar0->rx_pa_cfg);
4400 vlan_strip_flag = 0;
4403 val64 = readq(&bar0->mac_cfg);
4404 sp->promisc_flg = 1;
4405 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4407 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4408 /* Remove the NIC from promiscuous mode */
4409 add = &bar0->mac_cfg;
4410 val64 = readq(&bar0->mac_cfg);
4411 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4413 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4414 writel((u32) val64, add);
4415 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4416 writel((u32) (val64 >> 32), (add + 4));
4418 if (vlan_tag_strip != 0) {
4419 val64 = readq(&bar0->rx_pa_cfg);
4420 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4421 writeq(val64, &bar0->rx_pa_cfg);
4422 vlan_strip_flag = 1;
4425 val64 = readq(&bar0->mac_cfg);
4426 sp->promisc_flg = 0;
4427 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
4431 /* Update individual M_CAST address list */
4432 if ((!sp->m_cast_flg) && dev->mc_count) {
4434 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4435 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4437 DBG_PRINT(ERR_DBG, "can be added, please enable ");
4438 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4442 prev_cnt = sp->mc_addr_count;
4443 sp->mc_addr_count = dev->mc_count;
4445 /* Clear out the previous list of Mc in the H/W. */
4446 for (i = 0; i < prev_cnt; i++) {
4447 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4448 &bar0->rmac_addr_data0_mem);
4449 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4450 &bar0->rmac_addr_data1_mem);
4451 val64 = RMAC_ADDR_CMD_MEM_WE |
4452 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4453 RMAC_ADDR_CMD_MEM_OFFSET
4454 (MAC_MC_ADDR_START_OFFSET + i);
4455 writeq(val64, &bar0->rmac_addr_cmd_mem);
4457 /* Wait till command completes */
4458 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4459 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4461 DBG_PRINT(ERR_DBG, "%s: Adding ",
4463 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4468 /* Create the new Rx filter list and update the same in H/W. */
4469 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4470 i++, mclist = mclist->next) {
4471 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
4474 for (j = 0; j < ETH_ALEN; j++) {
4475 mac_addr |= mclist->dmi_addr[j];
4479 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4480 &bar0->rmac_addr_data0_mem);
4481 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4482 &bar0->rmac_addr_data1_mem);
4483 val64 = RMAC_ADDR_CMD_MEM_WE |
4484 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4485 RMAC_ADDR_CMD_MEM_OFFSET
4486 (i + MAC_MC_ADDR_START_OFFSET);
4487 writeq(val64, &bar0->rmac_addr_cmd_mem);
4489 /* Wait till command completes */
4490 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4491 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4493 DBG_PRINT(ERR_DBG, "%s: Adding ",
4495 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4503 * s2io_set_mac_addr - Programs the Xframe mac address
4504 * @dev : pointer to the device structure.
4505 * @addr: a uchar pointer to the new mac address which is to be set.
4506 * Description : This procedure will program the Xframe to receive
4507 * frames with new Mac Address
4508 * Return value: SUCCESS on success and an appropriate (-)ve integer
4509 * as defined in errno.h file on failure.
4512 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4514 struct s2io_nic *sp = dev->priv;
4515 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4516 register u64 val64, mac_addr = 0;
4518 u64 old_mac_addr = 0;
4521 * Set the new MAC address as the new unicast filter and reflect this
4522 * change on the device address registered with the OS. It will be
4525 for (i = 0; i < ETH_ALEN; i++) {
4527 mac_addr |= addr[i];
4529 old_mac_addr |= sp->def_mac_addr[0].mac_addr[i];
4535 /* Update the internal structure with this new mac address */
4536 if (mac_addr != old_mac_addr) {
4537 memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
4538 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_addr);
4539 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_addr >> 8);
4540 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_addr >> 16);
4541 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_addr >> 24);
4542 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_addr >> 32);
4543 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_addr >> 40);
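/*
 * Worked example (illustrative): for addr = 00:0c:f1:12:34:56 the loop
 * above packs mac_addr as 0x000cf1123456, placing addr[0] in the most
 * significant of the six used bytes; the byte stores here simply reverse
 * that shift order when refreshing def_mac_addr.
 */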
4546 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4547 &bar0->rmac_addr_data0_mem);
4550 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4551 RMAC_ADDR_CMD_MEM_OFFSET(0);
4552 writeq(val64, &bar0->rmac_addr_cmd_mem);
4553 /* Wait till command completes */
4554 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4555 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
4556 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4564 * s2io_ethtool_sset - Sets different link parameters.
4565 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
4566 * @info: pointer to the structure with parameters given by ethtool to set
4569 * The function sets different link parameters provided by the user onto
4575 static int s2io_ethtool_sset(struct net_device *dev,
4576 struct ethtool_cmd *info)
4578 struct s2io_nic *sp = dev->priv;
4579 if ((info->autoneg == AUTONEG_ENABLE) ||
4580 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4583 s2io_close(sp->dev);
4591 * s2io_ethtool_gset - Return link specific information.
4592 * @sp : private member of the device structure, pointer to the
4593 * s2io_nic structure.
4594 * @info : pointer to the structure with parameters given by ethtool
4595 * to return link information.
4597 * Returns link specific information like speed, duplex etc. to ethtool.
4599 * return 0 on success.
4602 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4604 struct s2io_nic *sp = dev->priv;
4605 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4606 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4607 info->port = PORT_FIBRE;
4608 /* info->transceiver?? TODO */
4610 if (netif_carrier_ok(sp->dev)) {
4611 info->speed = 10000;
4612 info->duplex = DUPLEX_FULL;
4618 info->autoneg = AUTONEG_DISABLE;
4623 * s2io_ethtool_gdrvinfo - Returns driver specific information.
4624 * @sp : private member of the device structure, which is a pointer to the
4625 * s2io_nic structure.
4626 * @info : pointer to the structure with parameters given by ethtool to
4627 * return driver information.
4629 * Returns driver specific information like name, version etc. to ethtool.
4634 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4635 struct ethtool_drvinfo *info)
4637 struct s2io_nic *sp = dev->priv;
4639 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4640 strncpy(info->version, s2io_driver_version, sizeof(info->version));
4641 strncpy(info->fw_version, "", sizeof(info->fw_version));
4642 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
4643 info->regdump_len = XENA_REG_SPACE;
4644 info->eedump_len = XENA_EEPROM_SPACE;
4645 info->testinfo_len = S2IO_TEST_LEN;
4647 if (sp->device_type == XFRAME_I_DEVICE)
4648 info->n_stats = XFRAME_I_STAT_LEN;
4650 info->n_stats = XFRAME_II_STAT_LEN;
4654 * s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
4655 * @sp: private member of the device structure, which is a pointer to the
4656 * s2io_nic structure.
4657 * @regs : pointer to the structure with parameters given by ethtool for
4658 * dumping the registers.
4659 * @reg_space: The input argument into which all the registers are dumped.
4661 * Dumps the entire register space of the Xframe NIC into the user-given
4667 static void s2io_ethtool_gregs(struct net_device *dev,
4668 struct ethtool_regs *regs, void *space)
4672 u8 *reg_space = (u8 *) space;
4673 struct s2io_nic *sp = dev->priv;
4675 regs->len = XENA_REG_SPACE;
4676 regs->version = sp->pdev->subsystem_device;
4678 for (i = 0; i < regs->len; i += 8) {
4679 reg = readq(sp->bar0 + i);
4680 memcpy((reg_space + i), &reg, 8);
4685 * s2io_phy_id - timer function that alternates adapter LED.
4686 * @data : address of the private member of the device structure, which
4687 * is a pointer to the s2io_nic structure, provided as an u32.
4688 * Description: This is actually the timer function that alternates the
4689 * adapter LED bit of the adapter control bit to set/reset every time on
4690 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
4691 * once every second.
4693 static void s2io_phy_id(unsigned long data)
4695 struct s2io_nic *sp = (struct s2io_nic *) data;
4696 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4700 subid = sp->pdev->subsystem_device;
4701 if ((sp->device_type == XFRAME_II_DEVICE) ||
4702 ((subid & 0xFF) >= 0x07)) {
4703 val64 = readq(&bar0->gpio_control);
4704 val64 ^= GPIO_CTRL_GPIO_0;
4705 writeq(val64, &bar0->gpio_control);
4707 val64 = readq(&bar0->adapter_control);
4708 val64 ^= ADAPTER_LED_ON;
4709 writeq(val64, &bar0->adapter_control);
4712 mod_timer(&sp->id_timer, jiffies + HZ / 2);
4716 * s2io_ethtool_idnic - To physically identify the nic on the system.
4717 * @sp : private member of the device structure, which is a pointer to the
4718 * s2io_nic structure.
4719 * @id : pointer to the structure with identification parameters given by
4721 * Description: Used to physically identify the NIC on the system.
4722 * The Link LED will blink for a time specified by the user for
4724 * NOTE: The Link has to be Up to be able to blink the LED. Hence
4725 * identification is possible only if its link is up.
4727 * int , returns 0 on success
4730 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
4732 u64 val64 = 0, last_gpio_ctrl_val;
4733 struct s2io_nic *sp = dev->priv;
4734 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4737 subid = sp->pdev->subsystem_device;
4738 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4739 if ((sp->device_type == XFRAME_I_DEVICE) &&
4740 ((subid & 0xFF) < 0x07)) {
4741 val64 = readq(&bar0->adapter_control);
4742 if (!(val64 & ADAPTER_CNTL_EN)) {
4744 "Adapter Link down, cannot blink LED\n");
4748 if (sp->id_timer.function == NULL) {
4749 init_timer(&sp->id_timer);
4750 sp->id_timer.function = s2io_phy_id;
4751 sp->id_timer.data = (unsigned long) sp;
4753 mod_timer(&sp->id_timer, jiffies);
4755 msleep_interruptible(data * 1000);
4757 msleep_interruptible(MAX_FLICKER_TIME);
4758 del_timer_sync(&sp->id_timer);
4760 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
4761 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
4762 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4769 * s2io_ethtool_getpause_data - Pause frame generation and reception.
4770 * @sp : private member of the device structure, which is a pointer to the
4771 * s2io_nic structure.
4772 * @ep : pointer to the structure with pause parameters given by ethtool.
4774 * Returns the Pause frame generation and reception capability of the NIC.
4778 static void s2io_ethtool_getpause_data(struct net_device *dev,
4779 struct ethtool_pauseparam *ep)
4782 struct s2io_nic *sp = dev->priv;
4783 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4785 val64 = readq(&bar0->rmac_pause_cfg);
4786 if (val64 & RMAC_PAUSE_GEN_ENABLE)
4787 ep->tx_pause = TRUE;
4788 if (val64 & RMAC_PAUSE_RX_ENABLE)
4789 ep->rx_pause = TRUE;
4790 ep->autoneg = FALSE;
4794 * s2io_ethtool_setpause_data - set/reset pause frame generation.
4795 * @sp : private member of the device structure, which is a pointer to the
4796 * s2io_nic structure.
4797 * @ep : pointer to the structure with pause parameters given by ethtool.
4799 * It can be used to set or reset Pause frame generation or reception
4800 * support of the NIC.
4802 * int, returns 0 on Success
4805 static int s2io_ethtool_setpause_data(struct net_device *dev,
4806 struct ethtool_pauseparam *ep)
4809 struct s2io_nic *sp = dev->priv;
4810 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4812 val64 = readq(&bar0->rmac_pause_cfg);
4814 val64 |= RMAC_PAUSE_GEN_ENABLE;
4816 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
4818 val64 |= RMAC_PAUSE_RX_ENABLE;
4820 val64 &= ~RMAC_PAUSE_RX_ENABLE;
4821 writeq(val64, &bar0->rmac_pause_cfg);
4826 * read_eeprom - reads 4 bytes of data from user given offset.
4827 * @sp : private member of the device structure, which is a pointer to the
4828 * s2io_nic structure.
4829 * @off : offset from which the data is to be read
4830 * @data : It's an output parameter where the data read at the given
4833 * Will read 4 bytes of data from the user given offset and return the
4835 * NOTE: Will allow reading only the part of the EEPROM visible through the
4838 * -1 on failure and 0 on success.
4841 #define S2IO_DEV_ID 5
4842 static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
4847 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4849 if (sp->device_type == XFRAME_I_DEVICE) {
4850 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4851 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
4852 I2C_CONTROL_CNTL_START;
4853 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4855 while (exit_cnt < 5) {
4856 val64 = readq(&bar0->i2c_control);
4857 if (I2C_CONTROL_CNTL_END(val64)) {
4858 *data = I2C_CONTROL_GET_DATA(val64);
4867 if (sp->device_type == XFRAME_II_DEVICE) {
4868 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
4869 SPI_CONTROL_BYTECNT(0x3) |
4870 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
4871 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4872 val64 |= SPI_CONTROL_REQ;
4873 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4874 while (exit_cnt < 5) {
4875 val64 = readq(&bar0->spi_control);
4876 if (val64 & SPI_CONTROL_NACK) {
4879 } else if (val64 & SPI_CONTROL_DONE) {
4880 *data = readq(&bar0->spi_data);
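/*
 * Usage sketch (illustrative, not part of the driver): a probe-time
 * sanity read of the first EEPROM word via the helper above.
 *
 *	u64 word;
 *
 *	if (read_eeprom(sp, 0, &word) == 0)
 *		DBG_PRINT(INFO_DBG, "EEPROM[0] = 0x%llx\n",
 *			  (unsigned long long) word);
 */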
4893 * write_eeprom - actually writes the relevant part of the data value.
4894 * @sp : private member of the device structure, which is a pointer to the
4895 * s2io_nic structure.
4896 * @off : offset at which the data must be written
4897 * @data : The data that is to be written
4898 * @cnt : Number of bytes of the data that are actually to be written into
4899 * the Eeprom. (max of 3)
4901 * Actually writes the relevant part of the data value into the Eeprom
4902 * through the I2C bus.
4904 * 0 on success, -1 on failure.
4907 static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
4909 int exit_cnt = 0, ret = -1;
4911 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4913 if (sp->device_type == XFRAME_I_DEVICE) {
4914 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4915 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
4916 I2C_CONTROL_CNTL_START;
4917 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4919 while (exit_cnt < 5) {
4920 val64 = readq(&bar0->i2c_control);
4921 if (I2C_CONTROL_CNTL_END(val64)) {
4922 if (!(val64 & I2C_CONTROL_NACK))
4931 if (sp->device_type == XFRAME_II_DEVICE) {
4932 int write_cnt = (cnt == 8) ? 0 : cnt;
4933 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
4935 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
4936 SPI_CONTROL_BYTECNT(write_cnt) |
4937 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
4938 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4939 val64 |= SPI_CONTROL_REQ;
4940 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4941 while (exit_cnt < 5) {
4942 val64 = readq(&bar0->spi_control);
4943 if (val64 & SPI_CONTROL_NACK) {
4946 } else if (val64 & SPI_CONTROL_DONE) {
4956 static void s2io_vpd_read(struct s2io_nic *nic)
4960 int i = 0, cnt, fail = 0;
4961 int vpd_addr = 0x80;
4963 if (nic->device_type == XFRAME_II_DEVICE) {
4964 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
4968 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
4971 strcpy(nic->serial_num, "NOT AVAILABLE");
4973 vpd_data = kmalloc(256, GFP_KERNEL);
4977 for (i = 0; i < 256; i += 4) {
4978 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
4979 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
4980 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
4981 for (cnt = 0; cnt < 5; cnt++) {
4983 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
4988 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
4992 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
4993 (u32 *)&vpd_data[i]);
4997 /* read serial number of adapter */
4998 for (cnt = 0; cnt < 256; cnt++) {
4999 if ((vpd_data[cnt] == 'S') &&
5000 (vpd_data[cnt+1] == 'N') &&
5001 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5002 memset(nic->serial_num, 0, VPD_STRING_LEN);
5003 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5010 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5011 memset(nic->product_name, 0, vpd_data[1]);
5012 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
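/*
 * VPD layout assumed by the scans above (illustrative): keyword records
 * have the form "SN" <len> <bytes...>, so vpd_data[cnt+2] holds the
 * serial-number length and the string starts at vpd_data[cnt+3];
 * similarly vpd_data[1] carries the product-name length with the string
 * beginning at vpd_data[3].
 */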
5018 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5019 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
5020 * @eeprom : pointer to the user level structure provided by ethtool,
5021 * containing all relevant information.
5022 * @data_buf : user defined value to be written into Eeprom.
5023 * Description: Reads the values stored in the Eeprom at given offset
5024 * for a given length. Stores these values in the input argument data
5025 * buffer 'data_buf' and returns these to the caller (ethtool).
5030 static int s2io_ethtool_geeprom(struct net_device *dev,
5031 struct ethtool_eeprom *eeprom, u8 * data_buf)
5035 struct s2io_nic *sp = dev->priv;
5037 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5039 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5040 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5042 for (i = 0; i < eeprom->len; i += 4) {
5043 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5044 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5048 memcpy((data_buf + i), &valid, 4);
5054 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5055 * @sp : private member of the device structure, which is a pointer to the
5056 * s2io_nic structure.
5057 * @eeprom : pointer to the user level structure provided by ethtool,
5058 * containing all relevant information.
5059 * @data_buf : user-defined value to be written into the Eeprom.
5061 * Tries to write the user provided value in the Eeprom, at the offset
5062 * given by the user.
5064 * 0 on success, -EFAULT on failure.
5067 static int s2io_ethtool_seeprom(struct net_device *dev,
5068 struct ethtool_eeprom *eeprom,
5071 int len = eeprom->len, cnt = 0;
5072 u64 valid = 0, data;
5073 struct s2io_nic *sp = dev->priv;
5075 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5077 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5078 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5084 data = (u32) data_buf[cnt] & 0x000000FF;
5086 valid = (u32) (data << 24);
5090 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5092 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5094 "write into the specified offset\n");
5105 * s2io_register_test - reads and writes into all clock domains.
5106 * @sp : private member of the device structure, which is a pointer to the
5107 * s2io_nic structure.
5108 * @data : variable that returns the result of each of the tests conducted by
5111 * Read and write into all clock domains. The NIC has 3 clock domains;
5112 * the test verifies that registers in all the three regions are accessible.
5117 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5119 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5120 u64 val64 = 0, exp_val;
5123 val64 = readq(&bar0->pif_rd_swapper_fb);
5124 if (val64 != 0x123456789abcdefULL) {
5126 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5129 val64 = readq(&bar0->rmac_pause_cfg);
5130 if (val64 != 0xc000ffff00000000ULL) {
5132 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5135 val64 = readq(&bar0->rx_queue_cfg);
5136 if (sp->device_type == XFRAME_II_DEVICE)
5137 exp_val = 0x0404040404040404ULL;
5139 exp_val = 0x0808080808080808ULL;
5140 if (val64 != exp_val) {
5142 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5145 val64 = readq(&bar0->xgxs_efifo_cfg);
5146 if (val64 != 0x000000001923141EULL) {
5148 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5151 val64 = 0x5A5A5A5A5A5A5A5AULL;
5152 writeq(val64, &bar0->xmsi_data);
5153 val64 = readq(&bar0->xmsi_data);
5154 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5156 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5159 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5160 writeq(val64, &bar0->xmsi_data);
5161 val64 = readq(&bar0->xmsi_data);
5162 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5164 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5172 * s2io_eeprom_test - to verify that the EEPROM in the Xena can be programmed.
5173 * @sp : private member of the device structure, which is a pointer to the
5174 * s2io_nic structure.
5175 * @data: variable that returns the result of each of the tests conducted by
5178 * Verify that the EEPROM in the Xena can be programmed using I2C_CONTROL
5184 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5187 u64 ret_data, org_4F0, org_7F0;
5188 u8 saved_4F0 = 0, saved_7F0 = 0;
5189 struct net_device *dev = sp->dev;
5191 /* Test Write Error at offset 0 */
5192 /* Note that SPI interface allows write access to all areas
5193 * of EEPROM. Hence doing all negative testing only for Xframe I.
5195 if (sp->device_type == XFRAME_I_DEVICE)
5196 if (!write_eeprom(sp, 0, 0, 3))
5199 /* Save current values at offsets 0x4F0 and 0x7F0 */
5200 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5202 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5205 /* Test Write at offset 4f0 */
5206 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5208 if (read_eeprom(sp, 0x4F0, &ret_data))
5211 if (ret_data != 0x012345) {
5212 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5213 "Data written %llx Data read %llx\n",
5214 dev->name, (unsigned long long)0x12345,
5215 (unsigned long long)ret_data);
5219 /* Reset the EEPROM data to 0xFFFFFF */
5220 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5222 /* Test Write Request Error at offset 0x7c */
5223 if (sp->device_type == XFRAME_I_DEVICE)
5224 if (!write_eeprom(sp, 0x07C, 0, 3))
5227 /* Test Write Request at offset 0x7f0 */
5228 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5230 if (read_eeprom(sp, 0x7F0, &ret_data))
5233 if (ret_data != 0x012345) {
5234 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5235 "Data written %llx Data read %llx\n",
5236 dev->name, (unsigned long long)0x12345,
5237 (unsigned long long)ret_data);
5241 /* Reset the EEPROM data to 0xFFFFFF */
5242 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5244 if (sp->device_type == XFRAME_I_DEVICE) {
5245 /* Test Write Error at offset 0x80 */
5246 if (!write_eeprom(sp, 0x080, 0, 3))
5249 /* Test Write Error at offset 0xfc */
5250 if (!write_eeprom(sp, 0x0FC, 0, 3))
5253 /* Test Write Error at offset 0x100 */
5254 if (!write_eeprom(sp, 0x100, 0, 3))
5257 /* Test Write Error at offset 4ec */
5258 if (!write_eeprom(sp, 0x4EC, 0, 3))
5262 /* Restore values at offsets 0x4F0 and 0x7F0 */
5264 write_eeprom(sp, 0x4F0, org_4F0, 3);
5266 write_eeprom(sp, 0x7F0, org_7F0, 3);
5273 * s2io_bist_test - invokes the MemBist test of the card.
5274 * @sp : private member of the device structure, which is a pointer to the
5275 * s2io_nic structure.
5276 * @data: variable that returns the result of each of the tests conducted by
5279 * This invokes the MemBist test of the card. We give around
5280 * 2 secs for the test to complete. If it's still not complete
5281 * within this period, we consider that the test failed.
5283 * 0 on success and -1 on failure.
5286 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5289 int cnt = 0, ret = -1;
5291 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5292 bist |= PCI_BIST_START;
5293 pci_write_config_byte(sp->pdev, PCI_BIST, bist);
5296 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5297 if (!(bist & PCI_BIST_START)) {
5298 *data = (bist & PCI_BIST_CODE_MASK);
5310 * s2io_link_test - verifies the link state of the nic
5311 * @sp : private member of the device structure, which is a pointer to the
5312 * s2io_nic structure.
5313 * @data: variable that returns the result of each of the tests conducted by
5316 * The function verifies the link state of the NIC and updates the input
5317 * argument 'data' appropriately.
5322 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5324 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5327 val64 = readq(&bar0->adapter_status);
5328 if(!(LINK_IS_UP(val64)))
5337 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5338 * @sp : private member of the device structure, which is a pointer to the
5339 * s2io_nic structure.
5340 * @data : variable that returns the result of each of the tests
5341 * conducted by the driver.
5343 * This is one of the offline tests that verifies the read and write
5344 * access to the RldRam chip on the NIC.
5349 static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
5351 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5353 int cnt, iteration = 0, test_fail = 0;
5355 val64 = readq(&bar0->adapter_control);
5356 val64 &= ~ADAPTER_ECC_EN;
5357 writeq(val64, &bar0->adapter_control);
5359 val64 = readq(&bar0->mc_rldram_test_ctrl);
5360 val64 |= MC_RLDRAM_TEST_MODE;
5361 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5363 val64 = readq(&bar0->mc_rldram_mrs);
5364 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
5365 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5367 val64 |= MC_RLDRAM_MRS_ENABLE;
5368 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5370 while (iteration < 2) {
5371 val64 = 0x55555555aaaa0000ULL;
5372 if (iteration == 1) {
5373 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5375 writeq(val64, &bar0->mc_rldram_test_d0);
5377 val64 = 0xaaaa5a5555550000ULL;
5378 if (iteration == 1) {
5379 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5381 writeq(val64, &bar0->mc_rldram_test_d1);
5383 val64 = 0x55aaaaaaaa5a0000ULL;
5384 if (iteration == 1) {
5385 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5387 writeq(val64, &bar0->mc_rldram_test_d2);
5389 val64 = (u64) (0x0000003ffffe0100ULL);
5390 writeq(val64, &bar0->mc_rldram_test_add);
5392 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
5394 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5396 for (cnt = 0; cnt < 5; cnt++) {
5397 val64 = readq(&bar0->mc_rldram_test_ctrl);
5398 if (val64 & MC_RLDRAM_TEST_DONE)
5406 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
5407 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5409 for (cnt = 0; cnt < 5; cnt++) {
5410 val64 = readq(&bar0->mc_rldram_test_ctrl);
5411 if (val64 & MC_RLDRAM_TEST_DONE)
5419 val64 = readq(&bar0->mc_rldram_test_ctrl);
5420 if (!(val64 & MC_RLDRAM_TEST_PASS))
5428 /* Bring the adapter out of test mode */
5429 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
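/*
 * Summary of the loop above (illustrative): each iteration programs a
 * 64-bit pattern into the three test-data registers, triggers a write
 * and then a read-back through the test engine, and checks
 * MC_RLDRAM_TEST_PASS; the second pass repeats with the upper 48 bits
 * of every pattern inverted.
 */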
5435 * s2io_ethtool_test - conducts 6 tests to determine the health of the card.
5436 * @sp : private member of the device structure, which is a pointer to the
5437 * s2io_nic structure.
5438 * @ethtest : pointer to a ethtool command specific structure that will be
5439 * returned to the user.
5440 * @data : variable that returns the result of each of the tests
5441 * conducted by the driver.
5443 * This function conducts 6 tests (4 offline and 2 online) to determine
5444 * the health of the card.
5449 static void s2io_ethtool_test(struct net_device *dev,
5450 struct ethtool_test *ethtest,
5453 struct s2io_nic *sp = dev->priv;
5454 int orig_state = netif_running(sp->dev);
5456 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5457 /* Offline Tests. */
5459 s2io_close(sp->dev);
5461 if (s2io_register_test(sp, &data[0]))
5462 ethtest->flags |= ETH_TEST_FL_FAILED;
5466 if (s2io_rldram_test(sp, &data[3]))
5467 ethtest->flags |= ETH_TEST_FL_FAILED;
5471 if (s2io_eeprom_test(sp, &data[1]))
5472 ethtest->flags |= ETH_TEST_FL_FAILED;
5474 if (s2io_bist_test(sp, &data[4]))
5475 ethtest->flags |= ETH_TEST_FL_FAILED;
5485 "%s: is not up, cannot run test\n",
5494 if (s2io_link_test(sp, &data[2]))
5495 ethtest->flags |= ETH_TEST_FL_FAILED;
5504 static void s2io_get_ethtool_stats(struct net_device *dev,
5505 struct ethtool_stats *estats,
5509 struct s2io_nic *sp = dev->priv;
5510 struct stat_block *stat_info = sp->mac_control.stats_info;
5512 s2io_updt_stats(sp);
5514 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
5515 le32_to_cpu(stat_info->tmac_frms);
5517 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5518 le32_to_cpu(stat_info->tmac_data_octets);
5519 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
5521 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5522 le32_to_cpu(stat_info->tmac_mcst_frms);
5524 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5525 le32_to_cpu(stat_info->tmac_bcst_frms);
5526 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
5528 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
5529 le32_to_cpu(stat_info->tmac_ttl_octets);
5531 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
5532 le32_to_cpu(stat_info->tmac_ucst_frms);
5534 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
5535 le32_to_cpu(stat_info->tmac_nucst_frms);
5537 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5538 le32_to_cpu(stat_info->tmac_any_err_frms);
5539 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
5540 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
5542 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5543 le32_to_cpu(stat_info->tmac_vld_ip);
5545 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5546 le32_to_cpu(stat_info->tmac_drop_ip);
5548 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5549 le32_to_cpu(stat_info->tmac_icmp);
5551 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5552 le32_to_cpu(stat_info->tmac_rst_tcp);
5553 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
5554 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5555 le32_to_cpu(stat_info->tmac_udp);
5557 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5558 le32_to_cpu(stat_info->rmac_vld_frms);
5560 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5561 le32_to_cpu(stat_info->rmac_data_octets);
5562 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5563 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
5565 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5566 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5568 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
5569 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
5570 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
5571 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
5572 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
5573 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
5574 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
5576 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
5577 le32_to_cpu(stat_info->rmac_ttl_octets);
5579 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
5580 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
5582 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
5583 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
5585 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
5586 le32_to_cpu(stat_info->rmac_discarded_frms);
5588 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
5589 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
5590 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
5591 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
5593 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
5594 le32_to_cpu(stat_info->rmac_usized_frms);
5596 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
5597 le32_to_cpu(stat_info->rmac_osized_frms);
5599 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
5600 le32_to_cpu(stat_info->rmac_frag_frms);
5602 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
5603 le32_to_cpu(stat_info->rmac_jabber_frms);
5604 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
5605 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
5606 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
5607 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
5608 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
5609 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
5611 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
5612 le32_to_cpu(stat_info->rmac_ip);
5613 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
5614 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
5616 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
5617 le32_to_cpu(stat_info->rmac_drop_ip);
5619 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
5620 le32_to_cpu(stat_info->rmac_icmp);
5621 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
5623 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
5624 le32_to_cpu(stat_info->rmac_udp);
5626 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
5627 le32_to_cpu(stat_info->rmac_err_drp_udp);
5628 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
5629 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
5630 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
5631 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
5632 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
5633 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
5634 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
5635 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
5636 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
5637 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
5638 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
5639 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
5640 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
5641 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
5642 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
5643 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
5644 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
5646 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
5647 le32_to_cpu(stat_info->rmac_pause_cnt);
5648 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
5649 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
5651 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
5652 le32_to_cpu(stat_info->rmac_accepted_ip);
5653 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
5654 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
5655 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
5656 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
5657 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
5658 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
5659 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
5660 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
5661 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
5662 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
5663 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
5664 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
5665 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
5666 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
5667 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
5668 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
5669 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
5670 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
5671 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
5673 /* Enhanced statistics exist only for Hercules */
5674 if(sp->device_type == XFRAME_II_DEVICE) {
5676 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
5678 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
5680 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
5681 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
5682 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
5683 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
5684 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
5685 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
5686 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
5687 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
5688 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
5689 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
5690 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
5691 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
5692 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
5693 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
5697 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
5698 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
5699 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
5700 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
5701 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
5702 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
5703 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
5704 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
5705 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
5706 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
5707 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
5708 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
5709 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
5710 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
5711 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
5712 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
5713 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
5714 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
5715 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
5716 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
5717 tmp_stats[i++] = stat_info->sw_stat.sending_both;
5718 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
5719 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
5720 if (stat_info->sw_stat.num_aggregations) {
5721 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
5724 * Since 64-bit divide does not work on all platforms,
5725 * do repeated subtraction.
5727 while (tmp >= stat_info->sw_stat.num_aggregations) {
5728 tmp -= stat_info->sw_stat.num_aggregations;
5731 tmp_stats[i++] = count;
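/*
 * Illustrative alternative (not used here): the kernel's do_div() from
 * asm/div64.h performs the same 64-bit division portably, assuming the
 * divisor fits in 32 bits:
 *
 *	u64 avg = stat_info->sw_stat.sum_avg_pkts_aggregated;
 *	do_div(avg, (u32) stat_info->sw_stat.num_aggregations);
 */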
5737 static int s2io_ethtool_get_regs_len(struct net_device *dev)
5739 return (XENA_REG_SPACE);
5743 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5745 struct s2io_nic *sp = dev->priv;
5747 return (sp->rx_csum);
5750 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5752 struct s2io_nic *sp = dev->priv;
5762 static int s2io_get_eeprom_len(struct net_device *dev)
5764 return (XENA_EEPROM_SPACE);
5767 static int s2io_ethtool_self_test_count(struct net_device *dev)
5769 return (S2IO_TEST_LEN);
5772 static void s2io_ethtool_get_strings(struct net_device *dev,
5773 u32 stringset, u8 * data)
5776 struct s2io_nic *sp = dev->priv;
5778 switch (stringset) {
5780 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5783 stat_size = sizeof(ethtool_xena_stats_keys);
5784 memcpy(data, &ethtool_xena_stats_keys, stat_size);
5785 if(sp->device_type == XFRAME_II_DEVICE) {
5786 memcpy(data + stat_size,
5787 &ethtool_enhanced_stats_keys,
5788 sizeof(ethtool_enhanced_stats_keys));
5789 stat_size += sizeof(ethtool_enhanced_stats_keys);
5792 memcpy(data + stat_size, &ethtool_driver_stats_keys,
5793 sizeof(ethtool_driver_stats_keys));
5796 static int s2io_ethtool_get_stats_count(struct net_device *dev)
5798 struct s2io_nic *sp = dev->priv;
5800 switch(sp->device_type) {
5801 case XFRAME_I_DEVICE:
5802 stat_count = XFRAME_I_STAT_LEN;
5805 case XFRAME_II_DEVICE:
5806 stat_count = XFRAME_II_STAT_LEN;
5813 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
5816 dev->features |= NETIF_F_IP_CSUM;
5818 dev->features &= ~NETIF_F_IP_CSUM;
5823 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
5825 return (dev->features & NETIF_F_TSO) != 0;
5827 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
5830 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
5832 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
5837 static const struct ethtool_ops netdev_ethtool_ops = {
5838 .get_settings = s2io_ethtool_gset,
5839 .set_settings = s2io_ethtool_sset,
5840 .get_drvinfo = s2io_ethtool_gdrvinfo,
5841 .get_regs_len = s2io_ethtool_get_regs_len,
5842 .get_regs = s2io_ethtool_gregs,
5843 .get_link = ethtool_op_get_link,
5844 .get_eeprom_len = s2io_get_eeprom_len,
5845 .get_eeprom = s2io_ethtool_geeprom,
5846 .set_eeprom = s2io_ethtool_seeprom,
5847 .get_pauseparam = s2io_ethtool_getpause_data,
5848 .set_pauseparam = s2io_ethtool_setpause_data,
5849 .get_rx_csum = s2io_ethtool_get_rx_csum,
5850 .set_rx_csum = s2io_ethtool_set_rx_csum,
5851 .get_tx_csum = ethtool_op_get_tx_csum,
5852 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
5853 .get_sg = ethtool_op_get_sg,
5854 .set_sg = ethtool_op_set_sg,
5855 .get_tso = s2io_ethtool_op_get_tso,
5856 .set_tso = s2io_ethtool_op_set_tso,
5857 .get_ufo = ethtool_op_get_ufo,
5858 .set_ufo = ethtool_op_set_ufo,
5859 .self_test_count = s2io_ethtool_self_test_count,
5860 .self_test = s2io_ethtool_test,
5861 .get_strings = s2io_ethtool_get_strings,
5862 .phys_id = s2io_ethtool_idnic,
5863 .get_stats_count = s2io_ethtool_get_stats_count,
5864 .get_ethtool_stats = s2io_get_ethtool_stats
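/*
 * Illustrative mapping of common ethtool invocations to the hooks above:
 *
 *	ethtool ethX		-> s2io_ethtool_gset
 *	ethtool -i ethX		-> s2io_ethtool_gdrvinfo
 *	ethtool -d ethX		-> s2io_ethtool_gregs
 *	ethtool -t ethX		-> s2io_ethtool_test
 *	ethtool -S ethX		-> s2io_get_ethtool_stats
 *	ethtool -p ethX <secs>	-> s2io_ethtool_idnic
 */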
5868 * s2io_ioctl - Entry point for the Ioctl
5869 * @dev : Device pointer.
5870 * @rq : An IOCTL specific structure, that can contain a pointer to
5871 * a proprietary structure used to pass information to the driver.
5872 * @cmd : This is used to distinguish between the different commands that
5873 * can be passed to the IOCTL functions.
5875 * Currently there is no special functionality supported in IOCTL, hence
5876 * the function always returns -EOPNOTSUPP
5879 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5885 * s2io_change_mtu - entry point to change MTU size for the device.
5886 * @dev : device pointer.
5887 * @new_mtu : the new MTU size for the device.
5888 * Description: A driver entry point to change MTU size for the device.
5889 * Before changing the MTU the device must be stopped.
5891 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5895 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
5897 struct s2io_nic *sp = dev->priv;
5899 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
5900 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
5906 if (netif_running(dev)) {
5908 netif_stop_queue(dev);
5909 if (s2io_card_up(sp)) {
5910 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
5913 if (netif_queue_stopped(dev))
5914 netif_wake_queue(dev);
5915 } else { /* Device is down */
5916 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5917 u64 val64 = new_mtu;
5919 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
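/*
 * vBIT(val, loc, sz) is assumed to left-align the sz-bit value at bit
 * offset loc counted from the MSB, so only the 14-bit maximum payload
 * length field of the register is updated with the new MTU.
 */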
5926 * s2io_tasklet - Bottom half of the ISR.
5927 * @dev_addr : address of the net_device structure, passed in as an unsigned long.
5929 * This is the tasklet or the bottom half of the ISR. This is
5930 * an extension of the ISR which is scheduled by the scheduler to be run
5931 * when the load on the CPU is low. All low priority tasks of the ISR can
5932 * be pushed into the tasklet. For now the tasklet is used only to
5933 * replenish the Rx buffers in the Rx buffer descriptors.
5938 static void s2io_tasklet(unsigned long dev_addr)
5940 struct net_device *dev = (struct net_device *) dev_addr;
5941 struct s2io_nic *sp = dev->priv;
5943 struct mac_info *mac_control;
5944 struct config_param *config;
5946 mac_control = &sp->mac_control;
5947 config = &sp->config;
5949 if (!TASKLET_IN_USE) {
5950 for (i = 0; i < config->rx_ring_num; i++) {
5951 ret = fill_rx_buffers(sp, i);
5952 if (ret == -ENOMEM) {
5953 DBG_PRINT(ERR_DBG, "%s: Out of ",
5955 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
5957 } else if (ret == -EFILL) {
5959 "%s: Rx Ring %d is full\n",
5964 clear_bit(0, (&sp->tasklet_status));
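/*
 * Wiring sketch (illustrative): the Rx path arms this bottom half with
 * tasklet_schedule(&sp->task) when the free-buffer count runs low, and
 * s2io_card_down() quiesces it with tasklet_kill(&sp->task) before the
 * rings are torn down.
 */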
5969 * s2io_set_link - Set the Link status
5970 * @work: work struct containing a pointer to the device private structure
5971 * Description: Sets the link status for the adapter
5974 static void s2io_set_link(struct work_struct *work)
5976 struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
5977 struct net_device *dev = nic->dev;
5978 struct XENA_dev_config __iomem *bar0 = nic->bar0;
5984 if (!netif_running(dev))
5987 if (test_and_set_bit(0, &(nic->link_state))) {
5988 /* The card is being reset, no point doing anything */
5992 subid = nic->pdev->subsystem_device;
5993 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
5995 * Allow a small delay for the NIC's self-initiated
5996 * cleanup to complete.
6001 val64 = readq(&bar0->adapter_status);
6002 if (LINK_IS_UP(val64)) {
6003 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6004 if (verify_xena_quiescence(nic)) {
6005 val64 = readq(&bar0->adapter_control);
6006 val64 |= ADAPTER_CNTL_EN;
6007 writeq(val64, &bar0->adapter_control);
6008 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6009 nic->device_type, subid)) {
6010 val64 = readq(&bar0->gpio_control);
6011 val64 |= GPIO_CTRL_GPIO_0;
6012 writeq(val64, &bar0->gpio_control);
6013 val64 = readq(&bar0->gpio_control);
6015 val64 |= ADAPTER_LED_ON;
6016 writeq(val64, &bar0->adapter_control);
6018 nic->device_enabled_once = TRUE;
6020 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6021 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6022 netif_stop_queue(dev);
6025 val64 = readq(&bar0->adapter_status);
6026 if (!LINK_IS_UP(val64)) {
6027 DBG_PRINT(ERR_DBG, "%s:", dev->name);
6028 DBG_PRINT(ERR_DBG, " Link down after enabling ");
6029 DBG_PRINT(ERR_DBG, "device \n");
6031 s2io_link(nic, LINK_UP);
6033 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6035 val64 = readq(&bar0->gpio_control);
6036 val64 &= ~GPIO_CTRL_GPIO_0;
6037 writeq(val64, &bar0->gpio_control);
6038 val64 = readq(&bar0->gpio_control);
6040 s2io_link(nic, LINK_DOWN);
6042 clear_bit(0, &(nic->link_state));
6048 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6050 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6051 u64 *temp2, int size)
6053 struct net_device *dev = sp->dev;
6054 struct sk_buff *frag_list;
6056 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6059 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6061 * As Rx frames are not going to be processed,
6062 * reuse the same mapped address for the Rxd
6065 ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0;
6067 *skb = dev_alloc_skb(size);
6069 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
6070 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
6073 /* Store the mapped addr in a temp variable
6074 * such that it can be used for the next rxd whose
6075 * Host_Control is NULL
6077 ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0 =
6078 pci_map_single( sp->pdev, (*skb)->data,
6079 size - NET_IP_ALIGN,
6080 PCI_DMA_FROMDEVICE);
6081 rxdp->Host_Control = (unsigned long) (*skb);
6083 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6084 /* Two buffer Mode */
6086 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
6087 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
6088 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
6090 *skb = dev_alloc_skb(size);
6092 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
6096 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
6097 pci_map_single(sp->pdev, (*skb)->data,
6099 PCI_DMA_FROMDEVICE);
6100 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
6101 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6102 PCI_DMA_FROMDEVICE);
6103 rxdp->Host_Control = (unsigned long) (*skb);
6105 /* Buffer-1 will be a dummy buffer, not used */
6106 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
6107 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6108 PCI_DMA_FROMDEVICE);
6110 } else if (rxdp->Host_Control == 0) {
6111 /* Three buffer mode */
6113 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
6114 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
6115 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
6117 *skb = dev_alloc_skb(size);
6119 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
6123 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
6124 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6125 PCI_DMA_FROMDEVICE);
6126 /* Buffer-1 receives L3/L4 headers */
6127 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
6128 pci_map_single( sp->pdev, (*skb)->data,
6130 PCI_DMA_FROMDEVICE);
6132 * skb_shinfo(skb)->frag_list will have L4
6135 skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu +
6137 if (skb_shinfo(*skb)->frag_list == NULL) {
6138 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb \
6139 failed\n ", dev->name);
6142 frag_list = skb_shinfo(*skb)->frag_list;
6143 frag_list->next = NULL;
6145 * Buffer-2 receives L4 data payload
6147 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
6148 pci_map_single( sp->pdev, frag_list->data,
6149 dev->mtu, PCI_DMA_FROMDEVICE);
6154 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6157 struct net_device *dev = sp->dev;
6158 if (sp->rxd_mode == RXD_MODE_1) {
6159 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6160 } else if (sp->rxd_mode == RXD_MODE_3B) {
6161 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6162 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6163 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6165 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6166 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
6167 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
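/*
 * Buffer sizing recap (illustrative): mode 1 uses a single buffer for the
 * whole frame; mode 3B splits it into an Ethernet header buffer (BUF0_LEN),
 * a one-byte dummy buffer and a payload buffer of mtu + 4; the 3-buffer
 * mode instead pairs BUF0_LEN with an L3/L4 header buffer of
 * l3l4hdr_size + 4 and carries the payload (dev->mtu) in buffer 2.
 */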
6171 static int rxd_owner_bit_reset(struct s2io_nic *sp)
6173 int i, j, k, blk_cnt = 0, size;
6174 struct mac_info * mac_control = &sp->mac_control;
6175 struct config_param *config = &sp->config;
6176 struct net_device *dev = sp->dev;
6177 struct RxD_t *rxdp = NULL;
6178 struct sk_buff *skb = NULL;
6179 struct buffAdd *ba = NULL;
6180 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6182 /* Calculate the size based on ring mode */
6183 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6184 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6185 if (sp->rxd_mode == RXD_MODE_1)
6186 size += NET_IP_ALIGN;
6187 else if (sp->rxd_mode == RXD_MODE_3B)
6188 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6190 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
6192 for (i = 0; i < config->rx_ring_num; i++) {
6193 blk_cnt = config->rx_cfg[i].num_rxd /
6194 (rxd_count[sp->rxd_mode] +1);
6196 for (j = 0; j < blk_cnt; j++) {
6197 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6198 rxdp = mac_control->rings[i].
6199 rx_blocks[j].rxds[k].virt_addr;
6200 if(sp->rxd_mode >= RXD_MODE_3A)
6201 ba = &mac_control->rings[i].ba[j][k];
6202 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6203 &skb,(u64 *)&temp0_64,
6210 set_rxd_buffer_size(sp, rxdp, size);
6212 /* flip the Ownership bit to Hardware */
6213 rxdp->Control_1 |= RXD_OWN_XENA;
6221 static int s2io_add_isr(struct s2io_nic * sp)
6224 struct net_device *dev = sp->dev;
6227 if (sp->intr_type == MSI)
6228 ret = s2io_enable_msi(sp);
6229 else if (sp->intr_type == MSI_X)
6230 ret = s2io_enable_msi_x(sp);
6232 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6233 sp->intr_type = INTA;
6236 /* Store the values of the MSIX table in the struct s2io_nic structure */
6237 store_xmsi_data(sp);
6239 /* After proper initialization of H/W, register ISR */
6240 if (sp->intr_type == MSI) {
6241 err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
6242 IRQF_SHARED, sp->name, dev);
6244 pci_disable_msi(sp->pdev);
6245 DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n",
6250 if (sp->intr_type == MSI_X) {
6251 int i, msix_tx_cnt = 0, msix_rx_cnt = 0;
6253 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
6254 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
6255 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
6257 err = request_irq(sp->entries[i].vector,
6258 s2io_msix_fifo_handle, 0, sp->desc[i],
6259 sp->s2io_entries[i].arg);
6260 /* If either data or addr is zero print it */
6261 if(!(sp->msix_info[i].addr &&
6262 sp->msix_info[i].data)) {
6263 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6264 "Data:0x%lx\n",sp->desc[i],
6265 (unsigned long long)
6266 sp->msix_info[i].addr,
6268 ntohl(sp->msix_info[i].data));
6273 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
6275 err = request_irq(sp->entries[i].vector,
6276 s2io_msix_ring_handle, 0, sp->desc[i],
6277 sp->s2io_entries[i].arg);
6278 /* If either data or addr is zero print it */
6279 if(!(sp->msix_info[i].addr &&
6280 sp->msix_info[i].data)) {
6281 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6282 "Data:0x%lx\n",sp->desc[i],
6283 (unsigned long long)
6284 sp->msix_info[i].addr,
6286 ntohl(sp->msix_info[i].data));
6292 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
6293 "failed\n", dev->name, i);
6294 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
6297 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
6299 printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
6300 printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
6302 if (sp->intr_type == INTA) {
6303 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
6306 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
static void s2io_rem_isr(struct s2io_nic *sp)
{
	int cnt = 0;
	struct net_device *dev = sp->dev;

	if (sp->intr_type == MSI_X) {
		int i;
		u16 msi_control;
		for (i = 1; (sp->s2io_entries[i].in_use ==
			MSIX_REGISTERED_SUCCESS); i++) {
			int vector = sp->entries[i].vector;
			void *arg = sp->s2io_entries[i].arg;
			free_irq(vector, arg);
		}
		pci_read_config_word(sp->pdev, 0x42, &msi_control);
		msi_control &= 0xFFFE; /* Disable MSI */
		pci_write_config_word(sp->pdev, 0x42, msi_control);
		pci_disable_msix(sp->pdev);
	} else {
		free_irq(sp->pdev->irq, dev);
		if (sp->intr_type == MSI) {
			u16 val;
			pci_disable_msi(sp->pdev);
			pci_read_config_word(sp->pdev, 0x4c, &val);
			val ^= 0x1;
			pci_write_config_word(sp->pdev, 0x4c, val);
		}
	}
	/* Waiting till all Interrupt handlers are complete */
	cnt = 0;
	do {
		msleep(10);
		if (!atomic_read(&sp->isr_cnt))
			break;
		cnt++;
	} while (cnt < 5);
}
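
/**
 * s2io_card_down - Shuts down the adapter
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * Description:
 * Stops all Tx/Rx traffic, removes the interrupt handlers, waits for the
 * device to become quiescent (replenishing the Rx rings as the hardware
 * requires), resets the NIC and frees all Tx and Rx buffers.
 */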
static void s2io_card_down(struct s2io_nic *sp)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	unsigned long flags;
	register u64 val64 = 0;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(0, &(sp->link_state)))
		msleep(50);
	atomic_set(&sp->card_state, CARD_DOWN);

	/* disable Tx and Rx traffic on the NIC */
	stop_nic(sp);

	s2io_rem_isr(sp);

	tasklet_kill(&sp->task);

	/* Check if the device is Quiescent and then Reset the NIC */
	do {
		/* As per the HW requirement we need to replenish the
		 * receive buffer to avoid the ring bump. Since there is
		 * no intention of processing the Rx frame at this point
		 * we are just setting the ownership bit of rxd in each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if (verify_pcc_quiescent(sp, sp->device_enabled_once))
				break;
		}

		msleep(50);
		cnt++;
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG,
				  "s2io_close:Device not Quiescent ");
			DBG_PRINT(ERR_DBG, "adapter status reads 0x%llx\n",
				  (unsigned long long) val64);
			break;
		}
	} while (1);
	s2io_reset(sp);

	spin_lock_irqsave(&sp->tx_lock, flags);
	/* Free all Tx buffers */
	free_tx_buffers(sp);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	/* Free all Rx buffers */
	spin_lock_irqsave(&sp->rx_lock, flags);
	free_rx_buffers(sp);
	spin_unlock_irqrestore(&sp->rx_lock, flags);

	clear_bit(0, &(sp->link_state));
}
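
/**
 * s2io_card_up - Brings up the adapter
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * Description:
 * Initializes the H/W registers, fills the receive rings, restores the
 * receive mode, starts the NIC, registers the interrupt handlers and
 * enables the required interrupts.
 * Return value:
 * 0 on success, negative errno value on failure.
 */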
static int s2io_card_up(struct s2io_nic *sp)
{
	int i, ret = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	struct net_device *dev = (struct net_device *) sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	if (init_nic(sp) != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		s2io_reset(sp);
		return -ENODEV;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		if ((ret = fill_rx_buffers(sp, i))) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  atomic_read(&sp->rx_bufs_left[i]));
	}
	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	if (sp->lro) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use (if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		if (sp->intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	/* Enable tasklet for the device */
	tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);

	/* Enable select interrupts */
	if (sp->intr_type != INTA)
		en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
	else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR | RX_PIC_INTR;
		interruptible |= TX_MAC_INTR | RX_MAC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}

	atomic_set(&sp->card_state, CARD_UP);
	return 0;
}

/**
 * s2io_restart_nic - Resets the NIC.
 * @work : work struct embedded in the device private structure
 * Description:
 * This function is scheduled to be run by the s2io_tx_watchdog
 * function after 0.5 secs to reset the NIC. The idea is to reduce
 * the run time of the watch dog routine which is run holding a
 * spin lock.
 */
static void s2io_restart_nic(struct work_struct *work)
{
	struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
	struct net_device *dev = sp->dev;

	if (!netif_running(dev))
		return;

	s2io_card_down(sp);
	if (s2io_card_up(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
			  dev->name);
	}
	netif_wake_queue(dev);
	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
		  dev->name);
}

/**
 * s2io_tx_watchdog - Watchdog for transmit side.
 * @dev : Pointer to net device structure
 * Description:
 * This function is triggered if the Tx Queue is stopped
 * for a pre-defined amount of time when the Interface is still up.
 * If the Interface is jammed in such a situation, the hardware is
 * reset (by s2io_close) and restarted again (by s2io_open) to
 * overcome any problem that might have been caused in the hardware.
 * Return value:
 * void
 */
static void s2io_tx_watchdog(struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;

	if (netif_carrier_ok(dev)) {
		schedule_work(&sp->rst_timer_task);
		sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
	}
}

/**
 * rx_osm_handler - To perform some OS related operations on SKB.
 * @sp: private member of the device structure, pointer to s2io_nic structure.
 * @skb : the socket buffer pointer.
 * @len : length of the packet
 * @cksum : FCS checksum of the frame.
 * @ring_no : the ring from which this RxD was extracted.
 * Description:
 * This function is called by the Rx interrupt service routine to perform
 * some OS related operations on the SKB before passing it to the upper
 * layers. It mainly checks if the checksum is OK, if so adds it to the
 * SKBs cksum variable, increments the Rx packet count and passes the SKB
 * to the upper layer. If the checksum is wrong, it increments the Rx
 * packet error count, frees the SKB and returns error.
 * Return value:
 * SUCCESS on success and -1 on failure.
 */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t *rxdp)
{
	struct s2io_nic *sp = ring_data->nic;
	struct net_device *dev = (struct net_device *) sp->dev;
	struct sk_buff *skb = (struct sk_buff *)
		((unsigned long) rxdp->Host_Control);
	int ring_no = ring_data->ring_no;
	u16 l3_csum, l4_csum;
	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
	struct lro *lro;

	skb->dev = dev;

	/* Check for parity error */
	if (err & 0x1)
		sp->mac_control.stats_info->sw_stat.parity_err_cnt++;

	/*
	 * Drop the packet if bad transfer code. Exception being
	 * 0x5, which could be due to unsupported IPv6 extension header.
	 * In this case, we let stack handle the packet.
	 * Note that in this case, since checksum will be incorrect,
	 * stack will validate the same.
	 */
	if (err && ((err >> 48) != 0x5)) {
		DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
			  dev->name, err);
		sp->stats.rx_crc_errors++;
		dev_kfree_skb(skb);
		atomic_dec(&sp->rx_bufs_left[ring_no]);
		rxdp->Host_Control = 0;
		return 0;
	}

	/* Updating statistics */
	rxdp->Host_Control = 0;
	sp->stats.rx_packets++;
	if (sp->rxd_mode == RXD_MODE_1) {
		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

		sp->stats.rx_bytes += len;
		skb_put(skb, len);
	} else if (sp->rxd_mode >= RXD_MODE_3A) {
		int get_block = ring_data->rx_curr_get_info.block_index;
		int get_off = ring_data->rx_curr_get_info.offset;
		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
		unsigned char *buff = skb_push(skb, buf0_len);

		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
		sp->stats.rx_bytes += buf0_len + buf2_len;
		memcpy(buff, ba->ba_0, buf0_len);

		if (sp->rxd_mode == RXD_MODE_3A) {
			int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);

			skb_put(skb, buf1_len);
			skb->len += buf2_len;
			skb->data_len += buf2_len;
			skb_put(skb_shinfo(skb)->frag_list, buf2_len);
			sp->stats.rx_bytes += buf1_len;
		} else
			skb_put(skb, buf2_len);
	}

	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
	    (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
	    (sp->rx_csum)) {
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (sp->lro) {
				u32 tcp_len;
				u8 *tcp;
				int ret = 0;

				ret = s2io_club_tcp_session(skb->data, &tcp,
						&tcp_len, &lro, rxdp, sp);
				switch (ret) {
				case 3: /* Begin anew */
					lro->parent = skb;
					goto aggregate;
				case 1: /* Aggregate */
					lro_append_pkt(sp, lro,
						skb, tcp_len);
					goto aggregate;
				case 4: /* Flush session */
					lro_append_pkt(sp, lro,
						skb, tcp_len);
					queue_rx_frame(lro->parent);
					clear_lro_session(lro);
					sp->mac_control.stats_info->
						sw_stat.flush_max_pkts++;
					goto aggregate;
				case 2: /* Flush both */
					lro->parent->data_len =
						lro->frags_len;
					sp->mac_control.stats_info->
						sw_stat.sending_both++;
					queue_rx_frame(lro->parent);
					clear_lro_session(lro);
					goto send_up;
				case 0: /* sessions exceeded */
				case -1: /* non-TCP or not
					  * L3/L4 aggregatable
					  */
				case 5: /*
					 * First pkt in session not
					 * L3/L4 aggregatable
					 */
					break;
				default:
					DBG_PRINT(ERR_DBG,
						  "%s: Samadhana!!\n",
						  __FUNCTION__);
					BUG();
				}
			}
		} else {
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	if (!sp->lro) {
		skb->protocol = eth_type_trans(skb, dev);
		if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
		    vlan_strip_flag)) {
			/* Queueing the vlan frame to the upper layer */
			if (napi)
				vlan_hwaccel_receive_skb(skb, sp->vlgrp,
					RXD_GET_VLAN_TAG(rxdp->Control_2));
			else
				vlan_hwaccel_rx(skb, sp->vlgrp,
					RXD_GET_VLAN_TAG(rxdp->Control_2));
		} else {
			if (napi)
				netif_receive_skb(skb);
			else
				netif_rx(skb);
		}
	} else {
send_up:
		queue_rx_frame(skb);
	}
	dev->last_rx = jiffies;
aggregate:
	atomic_dec(&sp->rx_bufs_left[ring_no]);
	return SUCCESS;
}

/**
 * s2io_link - stops/starts the Tx queue.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @link : indicates whether link is UP/DOWN.
 * Description:
 * This function stops/starts the Tx queue depending on whether the link
 * status of the NIC is down or up. This is called by the Alarm
 * interrupt handler whenever a link change interrupt comes up.
 * Return value:
 * void.
 */
static void s2io_link(struct s2io_nic *sp, int link)
{
	struct net_device *dev = (struct net_device *) sp->dev;

	if (link != sp->last_link_state) {
		if (link == LINK_DOWN) {
			DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
			netif_carrier_off(dev);
		} else {
			DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
			netif_carrier_on(dev);
		}
	}
	sp->last_link_state = link;
}

/**
 * get_xena_rev_id - to identify revision ID of xena.
 * @pdev : PCI Dev structure
 * Description:
 * Function to identify the Revision ID of xena.
 * Return value:
 * returns the revision ID of the device.
 */
static int get_xena_rev_id(struct pci_dev *pdev)
{
	u8 id = 0;
	int ret;

	ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *)&id);
	return id;
}

/**
 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * Description:
 * This function initializes a few of the PCI and PCI-X configuration registers
 * with recommended values.
 * Return value:
 * void
 */
static void s2io_init_pci(struct s2io_nic *sp)
{
	u16 pci_cmd = 0, pcix_cmd = 0;

	/* Enable Data Parity Error Recovery in PCI-X command register. */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			     &(pcix_cmd));
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			      (pcix_cmd | 1));
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			     &(pcix_cmd));

	/* Set the PErr Response bit in PCI command register. */
	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(sp->pdev, PCI_COMMAND,
			      (pci_cmd | PCI_COMMAND_PARITY));
	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
}
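
/**
 * s2io_verify_parm - Validates the module loadable parameters
 * @pdev : structure containing the PCI related information of the device.
 * @dev_intr_type : interrupt type requested for this device.
 * Description:
 * Clamps out-of-range parameter values (number of Tx FIFOs, Rx rings and
 * the ring mode) back to their defaults and falls back to INTA whenever
 * the requested interrupt type is not supported by the kernel or by the
 * adapter.
 * Return value:
 * SUCCESS.
 */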
static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
{
	if (tx_fifo_num > 8) {
		DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
			  "supported\n");
		DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
		tx_fifo_num = 8;
	}
	if (rx_ring_num > 8) {
		DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
			  "supported\n");
		DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
		rx_ring_num = 8;
	}
	if (*dev_intr_type != INTA)
		napi = 0;

#ifndef CONFIG_PCI_MSI
	if (*dev_intr_type != INTA) {
		DBG_PRINT(ERR_DBG, "s2io: This kernel does not support"
			  "MSI/MSI-X. Defaulting to INTA\n");
		*dev_intr_type = INTA;
	}
#else
	if (*dev_intr_type > MSI_X) {
		DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
			  "Defaulting to INTA\n");
		*dev_intr_type = INTA;
	}
#endif

	if ((*dev_intr_type == MSI_X) &&
	    ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
	     (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
		DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
			  "Defaulting to INTA\n");
		*dev_intr_type = INTA;
	}

	if (rx_ring_mode > 3) {
		DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
		DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
		rx_ring_mode = 3;
	}
	return SUCCESS;
}

/**
 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
 * or Traffic class respectively.
 * @nic: device private variable
 * Description: The function configures the receive steering to
 * desired receive ring.
 * Return Value: SUCCESS on success and
 * '-1' on failure (endian settings incorrect).
 */
static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;

	if (ds_codepoint > 63)
		return FAILURE;

	val64 = RTS_DS_MEM_DATA(ring);
	writeq(val64, &bar0->rts_ds_mem_data);

	val64 = RTS_DS_MEM_CTRL_WE |
		RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
		RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);

	writeq(val64, &bar0->rts_ds_mem_ctrl);

	return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
				     RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
				     S2IO_BIT_RESET);
}

/**
 * s2io_init_nic - Initialization of the adapter.
 * @pdev : structure containing the PCI related information of the device.
 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
 * Description:
 * The function initializes an adapter identified by the pci_dev structure.
 * All OS related initialization including memory and device structure and
 * initialization of the device private variable is done. Also the swapper
 * control register is initialized to enable read and write into the I/O
 * registers of the device.
 * Return value:
 * returns 0 on success and negative on failure.
 */
static int __devinit
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
	struct s2io_nic *sp;
	struct net_device *dev;
	int i, j, ret;
	int dma_flag = FALSE;
	u32 mac_up, mac_down;
	u64 val64 = 0, tmp64 = 0;
	struct XENA_dev_config __iomem *bar0 = NULL;
	u16 subid;
	struct mac_info *mac_control;
	struct config_param *config;
	int mode;
	u8 dev_intr_type = intr_type;

	if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
		return ret;

	if ((ret = pci_enable_device(pdev))) {
		DBG_PRINT(ERR_DBG,
			  "s2io_init_nic: pci_enable_device failed\n");
		return ret;
	}

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
		dma_flag = TRUE;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
			DBG_PRINT(ERR_DBG,
				  "Unable to obtain 64bit DMA for "
				  "consistent allocations\n");
			pci_disable_device(pdev);
			return -ENOMEM;
		}
	} else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
	} else {
		pci_disable_device(pdev);
		return -ENOMEM;
	}

	if (dev_intr_type != MSI_X) {
		if (pci_request_regions(pdev, s2io_driver_name)) {
			DBG_PRINT(ERR_DBG, "Request Regions failed\n");
			pci_disable_device(pdev);
			return -ENODEV;
		}
	} else {
		if (!(request_mem_region(pci_resource_start(pdev, 0),
			pci_resource_len(pdev, 0), s2io_driver_name))) {
			DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
			pci_disable_device(pdev);
			return -ENODEV;
		}
		if (!(request_mem_region(pci_resource_start(pdev, 2),
			pci_resource_len(pdev, 2), s2io_driver_name))) {
			DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
			release_mem_region(pci_resource_start(pdev, 0),
					   pci_resource_len(pdev, 0));
			pci_disable_device(pdev);
			return -ENODEV;
		}
	}

	dev = alloc_etherdev(sizeof(struct s2io_nic));
	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Device allocation failed\n");
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		return -ENODEV;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, dev);
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* Private member variable initialized to s2io NIC structure */
	sp = dev->priv;
	memset(sp, 0, sizeof(struct s2io_nic));
	sp->dev = dev;
	sp->pdev = pdev;
	sp->high_dma_flag = dma_flag;
	sp->device_enabled_once = FALSE;
	if (rx_ring_mode == 1)
		sp->rxd_mode = RXD_MODE_1;
	if (rx_ring_mode == 2)
		sp->rxd_mode = RXD_MODE_3B;
	if (rx_ring_mode == 3)
		sp->rxd_mode = RXD_MODE_3A;

	sp->intr_type = dev_intr_type;
	sp->lro = lro;

	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
	    (pdev->device == PCI_DEVICE_ID_HERC_UNI))
		sp->device_type = XFRAME_II_DEVICE;
	else
		sp->device_type = XFRAME_I_DEVICE;

	/* Initialize some PCI/PCI-X fields of the NIC. */
	s2io_init_pci(sp);

	/*
	 * Setting the device configuration parameters.
	 * Most of these parameters can be specified by the user during
	 * module insertion as they are module loadable parameters. If
	 * these parameters are not specified during load time, they
	 * are initialized with default values.
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	/* Tx side parameters. */
	config->tx_fifo_num = tx_fifo_num;
	for (i = 0; i < MAX_TX_FIFOS; i++) {
		config->tx_cfg[i].fifo_len = tx_fifo_len[i];
		config->tx_cfg[i].fifo_priority = i;
	}

	/* mapping the QoS priority to the configured fifos */
	for (i = 0; i < MAX_TX_FIFOS; i++)
		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];

	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
	for (i = 0; i < config->tx_fifo_num; i++) {
		config->tx_cfg[i].f_no_snoop =
			(NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
		if (config->tx_cfg[i].fifo_len < 65) {
			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
			break;
		}
	}
	/* + 2 because one Txd for skb->data and one Txd for UFO */
	config->max_txds = MAX_SKB_FRAGS + 2;

	/* Rx side parameters. */
	config->rx_ring_num = rx_ring_num;
	for (i = 0; i < MAX_RX_RINGS; i++) {
		config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
			(rxd_count[sp->rxd_mode] + 1);
		config->rx_cfg[i].ring_priority = i;
	}

	for (i = 0; i < rx_ring_num; i++) {
		config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
		config->rx_cfg[i].f_no_snoop =
			(NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
	}

	/* Setting Mac Control parameters */
	mac_control->rmac_pause_time = rmac_pause_time;
	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;

	/* Initialize Ring buffer parameters. */
	for (i = 0; i < config->rx_ring_num; i++)
		atomic_set(&sp->rx_bufs_left[i], 0);

	/* Initialize the number of ISRs currently running */
	atomic_set(&sp->isr_cnt, 0);

	/* initialize the shared memory used by the NIC and the host */
	if (init_shared_mem(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
			  dev->name);
		ret = -ENOMEM;
		goto mem_alloc_failed;
	}

	sp->bar0 = ioremap(pci_resource_start(pdev, 0),
			   pci_resource_len(pdev, 0));
	if (!sp->bar0) {
		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar0_remap_failed;
	}

	sp->bar1 = ioremap(pci_resource_start(pdev, 2),
			   pci_resource_len(pdev, 2));
	if (!sp->bar1) {
		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar1_remap_failed;
	}

	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) sp->bar0;

	/* Initializing the BAR1 address as the start of the FIFO pointer. */
	for (j = 0; j < MAX_TX_FIFOS; j++) {
		mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
			(sp->bar1 + (j * 0x00020000));
	}

	/* Driver entry points */
	dev->open = &s2io_open;
	dev->stop = &s2io_close;
	dev->hard_start_xmit = &s2io_xmit;
	dev->get_stats = &s2io_get_stats;
	dev->set_multicast_list = &s2io_set_multicast;
	dev->do_ioctl = &s2io_ioctl;
	dev->change_mtu = &s2io_change_mtu;
	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = s2io_vlan_rx_register;
	dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;

	/*
	 * will use eth_mac_addr() for dev->set_mac_address
	 * mac address will be set every time dev->open() is called
	 */
	dev->poll = s2io_poll;
	dev->weight = 32;

#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = s2io_netpoll;
#endif

	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	if (sp->high_dma_flag == TRUE)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_TSO;
	dev->features |= NETIF_F_TSO6;
	if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
		dev->features |= NETIF_F_UFO;
		dev->features |= NETIF_F_HW_CSUM;
	}

	dev->tx_timeout = &s2io_tx_watchdog;
	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
	INIT_WORK(&sp->set_link_task, s2io_set_link);

	pci_save_state(sp->pdev);

	/* Setting swapper control on the NIC, for proper reset operation */
	if (s2io_set_swapper(sp)) {
		DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
			  dev->name);
		ret = -EAGAIN;
		goto set_swap_failed;
	}

	/* Verify if the Herc works on the slot its placed into */
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_verify_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
			ret = -EBADSLT;
			goto set_swap_failed;
		}
	}

	/* Not needed for Herc */
	if (sp->device_type & XFRAME_I_DEVICE) {
		/*
		 * Fix for all "FFs" MAC address problems observed on
		 * Alpha platforms
		 */
		fix_mac_address(sp);
		s2io_reset(sp);
	}

	/*
	 * MAC address initialization.
	 * For now only one mac address will be read and used.
	 */
	bar0 = sp->bar0;
	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
	writeq(val64, &bar0->rmac_addr_cmd_mem);
	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
		RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
	tmp64 = readq(&bar0->rmac_addr_data0_mem);
	mac_down = (u32) tmp64;
	mac_up = (u32) (tmp64 >> 32);

	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);

	/* Set the factory defined MAC address initially */
	dev->addr_len = ETH_ALEN;
	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);

	/* reset Nic and bring it to known state */
	s2io_reset(sp);

	/*
	 * Initialize the tasklet status and link state flags
	 * and the card state parameter
	 */
	atomic_set(&(sp->card_state), 0);
	sp->tasklet_status = 0;
	sp->link_state = 0;

	/* Initialize spinlocks */
	spin_lock_init(&sp->tx_lock);
	if (!napi)
		spin_lock_init(&sp->put_lock);
	spin_lock_init(&sp->rx_lock);

	/*
	 * SXE-002: Configure link and activity LED to init state
	 * on driver load.
	 */
	subid = sp->pdev->subsystem_device;
	if ((subid & 0xFF) >= 0x07) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *) bar0 + 0x2700);
		val64 = readq(&bar0->gpio_control);
	}

	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */

	if (register_netdev(dev)) {
		DBG_PRINT(ERR_DBG, "Device registration failed\n");
		ret = -ENODEV;
		goto register_failed;
	}
7256 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n");
7257 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7258 sp->product_name, get_xena_rev_id(sp->pdev));
7259 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7260 s2io_driver_version);
7261 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
7262 "%02x:%02x:%02x:%02x:%02x:%02x", dev->name,
7263 sp->def_mac_addr[0].mac_addr[0],
7264 sp->def_mac_addr[0].mac_addr[1],
7265 sp->def_mac_addr[0].mac_addr[2],
7266 sp->def_mac_addr[0].mac_addr[3],
7267 sp->def_mac_addr[0].mac_addr[4],
7268 sp->def_mac_addr[0].mac_addr[5]);
7269 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
7270 if (sp->device_type & XFRAME_II_DEVICE) {
7271 mode = s2io_print_pci_mode(sp);
7273 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7275 unregister_netdev(dev);
7276 goto set_swap_failed;
7279 switch(sp->rxd_mode) {
7281 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
7285 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7289 DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n",
7295 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
7296 switch(sp->intr_type) {
7298 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7301 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name);
7304 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7308 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
7311 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
7312 " enabled\n", dev->name);
7313 /* Initialize device name */
7314 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7316 /* Initialize bimodal Interrupts */
7317 sp->config.bimodal = bimodal;
7318 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
7319 sp->config.bimodal = 0;
7320 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
7325 * Make Link state as off at this point, when the Link change
7326 * interrupt comes the state will be automatically changed to
7329 netif_carrier_off(dev);
7340 free_shared_mem(sp);
7341 pci_disable_device(pdev);
7342 if (dev_intr_type != MSI_X)
7343 pci_release_regions(pdev);
7345 release_mem_region(pci_resource_start(pdev, 0),
7346 pci_resource_len(pdev, 0));
7347 release_mem_region(pci_resource_start(pdev, 2),
7348 pci_resource_len(pdev, 2));
7350 pci_set_drvdata(pdev, NULL);

/**
 * s2io_rem_nic - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resource held up by the device. This could
 * be in response to a Hot plug event or when the driver is to be removed
 * from memory.
 */
static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
	struct net_device *dev =
		(struct net_device *) pci_get_drvdata(pdev);
	struct s2io_nic *sp;

	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
		return;
	}

	sp = dev->priv;

	flush_scheduled_work();

	unregister_netdev(dev);

	free_shared_mem(sp);
	iounmap(sp->bar0);
	iounmap(sp->bar1);
	if (sp->intr_type != MSI_X)
		pci_release_regions(pdev);
	else {
		release_mem_region(pci_resource_start(pdev, 0),
				   pci_resource_len(pdev, 0));
		release_mem_region(pci_resource_start(pdev, 2),
				   pci_resource_len(pdev, 2));
	}
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	pci_disable_device(pdev);
}

/**
 * s2io_starter - Entry point for the driver
 * Description: This function is the entry point for the driver. It verifies
 * the module loadable parameters and initializes PCI configuration space.
 */
int __init s2io_starter(void)
{
	return pci_register_driver(&s2io_driver);
}

/**
 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
 */
static __exit void s2io_closer(void)
{
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}

module_init(s2io_starter);
module_exit(s2io_closer);
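
/*
 * check_L2_lro_capable - L2 level LRO eligibility check.
 * Only TCP frames with a DIX (optionally VLAN tagged) L2 header qualify
 * for aggregation; on success the IP and TCP header pointers are
 * extracted from the receive buffer.
 * Returns 0 if the frame may be considered for LRO, -1 otherwise.
 */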
static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
				struct tcphdr **tcp, struct RxD_t *rxdp)
{
	int ip_off;
	u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
	if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
		DBG_PRINT(INIT_DBG, "%s: Non-TCP frames not supported for LRO\n",
			  __FUNCTION__);
		return -1;
	}
	/*
	 * By default the VLAN field in the MAC is stripped by the card, if this
	 * feature is turned off in rx_pa_cfg register, then the ip_off field
	 * has to be shifted by a further 2 bytes
	 */
	switch (l2_type) {
	case 0:	/* DIX type */
	case 4:	/* DIX type with VLAN */
		ip_off = HEADER_ETHERNET_II_802_3_SIZE;
		break;
	/* LLC, SNAP etc are considered non-mergeable */
	default:
		return -1;
	}
	*ip = (struct iphdr *)((u8 *)buffer + ip_off);
	ip_len = (u8)((*ip)->ihl);
	ip_len <<= 2;	/* ihl is in 32-bit words; convert to bytes */
	*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
	return 0;
}
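
/*
 * check_for_socket_match - Matches a segment against a tracked LRO session.
 * Returns 0 when the IP addresses and TCP ports of the packet equal those
 * of the session, -1 otherwise.
 */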
static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
				  struct tcphdr *tcp)
{
	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __FUNCTION__);
	if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
	    (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
		return -1;
	return 0;
}

static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
{
	return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
}
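
/*
 * initiate_new_session - Seeds a free LRO object with the first packet of
 * a flow: records the header pointers, the next expected TCP sequence
 * number and, when present, the TCP timestamp values.
 */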
static void initiate_new_session(struct lro *lro, u8 *l2h,
	struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
{
	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __FUNCTION__);
	lro->l2h = l2h;
	lro->iph = ip;
	lro->tcph = tcp;
	lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
	lro->tcp_ack = ntohl(tcp->ack_seq);
	lro->sg_num = 1;
	lro->total_len = ntohs(ip->tot_len);
	lro->frags_len = 0;
	/*
	 * check if we saw TCP timestamp. Other consistency checks have
	 * already been done.
	 */
	if (tcp->doff == 8) {
		u32 *ptr = (u32 *)(tcp+1);
		lro->saw_ts = 1;
		lro->cur_tsval = *(ptr+1);
		lro->cur_tsecr = *(ptr+2);
	}
	lro->in_use = 1;
}
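
/*
 * update_L3L4_header - Finalizes the headers of an aggregated super-packet.
 * Rewrites the IP total length and checksum and the TCP ack/window (and
 * tsecr, when timestamps are in use) before the frame is handed to the
 * stack, and updates the aggregation statistics.
 */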
static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
{
	struct iphdr *ip = lro->iph;
	struct tcphdr *tcp = lro->tcph;
	u16 nchk;
	struct stat_block *statinfo = sp->mac_control.stats_info;
	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __FUNCTION__);

	/* Update L3 header */
	ip->tot_len = htons(lro->total_len);
	ip->check = 0;
	nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
	ip->check = nchk;

	/* Update L4 header */
	tcp->ack_seq = lro->tcp_ack;
	tcp->window = lro->window;

	/* Update tsecr field if this session has timestamps enabled */
	if (lro->saw_ts) {
		u32 *ptr = (u32 *)(tcp + 1);
		*(ptr+2) = lro->cur_tsecr;
	}

	/* Update counters required for calculation of
	 * average no. of packets aggregated.
	 */
	statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
	statinfo->sw_stat.num_aggregations++;
}
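
/*
 * aggregate_new_rx - Accounts an in-order TCP segment into an LRO session:
 * extends the aggregate lengths, advances the expected sequence number
 * and refreshes the ack/window (and timestamp) fields from the segment.
 */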
static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
			     struct tcphdr *tcp, u32 l4_pyld)
{
	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __FUNCTION__);
	lro->total_len += l4_pyld;
	lro->frags_len += l4_pyld;
	lro->tcp_next_seq += l4_pyld;
	lro->sg_num++;

	/* Update ack seq no. and window advertisement (from this pkt) */
	lro->tcp_ack = tcp->ack_seq;
	lro->window = tcp->window;

	if (lro->saw_ts) {
		u32 *ptr;
		/* Update tsecr and tsval from this packet */
		ptr = (u32 *) (tcp + 1);
		lro->cur_tsval = *(ptr + 1);
		lro->cur_tsecr = *(ptr + 2);
	}
}
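
/*
 * verify_l3_l4_lro_capable - Enforces the L3/L4 aggregation rules: no pure
 * acks or runts, no IP options or CE codepoint, only the ack control bit
 * set, and at most one well-formed TCP timestamp option whose value
 * increases monotonically within the session.
 * Returns 0 if the segment is mergeable, -1 otherwise.
 */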
static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
				    struct tcphdr *tcp, u32 tcp_pyld_len)
{
	u8 *ptr;
	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __FUNCTION__);
	if (!tcp_pyld_len) {
		/* Runt frame or a pure ack */
		return -1;
	}
	if (ip->ihl != 5) /* IP has options */
		return -1;
	/* If we see CE codepoint in IP header, packet is not mergeable */
	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
		return -1;
	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
	if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
	    tcp->ece || tcp->cwr || !tcp->ack) {
		/*
		 * Currently recognize only the ack control word and
		 * any other control field being set would result in
		 * flushing the LRO session
		 */
		return -1;
	}
	/*
	 * Allow only one TCP timestamp option. Don't aggregate if
	 * any other options are detected.
	 */
	if (tcp->doff != 5 && tcp->doff != 8)
		return -1;
	if (tcp->doff == 8) {
		ptr = (u8 *)(tcp + 1);
		while (*ptr == TCPOPT_NOP)
			ptr++;
		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
			return -1;
		/* Ensure timestamp value increases monotonically */
		if (l_lro)
			if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
				return -1;
		/* timestamp echo reply should be non-zero */
		if (*((u32 *)(ptr+6)) == 0)
			return -1;
	}
	return 0;
}
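
/*
 * s2io_club_tcp_session - Decides the LRO fate of a received TCP segment.
 * Matches the segment against the tracked LRO sessions and returns:
 *  -1 - frame is not LRO capable at L2 (e.g. non-TCP),
 *   0 - all sessions are in use, send the packet up normally,
 *   1 - aggregate into the matched session,
 *   2 - flush the matched session and send this packet separately,
 *   3 - begin a new session with this packet,
 *   4 - aggregated, and the per-session packet limit was reached: flush,
 *   5 - not L3/L4 aggregatable, send the packet up as is.
 */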
static int
s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
		      struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;
	if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
					 rxdp)))
		DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n",
			  ip->saddr, ip->daddr);
	else
		return ret;
	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &sp->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;
			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
					  "0x%x, actual 0x%x\n", __FUNCTION__,
					  (*lro)->tcp_next_seq, ntohl(tcph->seq));
				sp->mac_control.stats_info->sw_stat.outof_sequence_pkts++;
				ret = 2;
				break;
			}
			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph, *tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}
	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
			return 5;
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &sp->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}
	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG, "%s:All LRO sessions already in use\n",
			  __FUNCTION__);
		return ret;
	}
	switch (ret) {
	case 3:
		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
		break;
	case 2:
		update_L3L4_header(sp, *lro);
		break;
	case 1:
		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
			update_L3L4_header(sp, *lro);
			ret = 4; /* Flush the LRO */
		}
		break;
	default:
		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __FUNCTION__);
	}
	return ret;
}

static void clear_lro_session(struct lro *lro)
{
	static u16 lro_struct_size = sizeof(struct lro);

	memset(lro, 0, lro_struct_size);
}
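
/*
 * queue_rx_frame - Hands a received (possibly LRO aggregated) frame to the
 * network stack.
 */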
static void queue_rx_frame(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	skb->protocol = eth_type_trans(skb, dev);
	netif_receive_skb(skb);
}
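
/*
 * lro_append_pkt - Chains the TCP payload of @skb onto the session's parent
 * skb via its fragment list and updates the length/truesize accounting.
 */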
static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
			   struct sk_buff *skb, u32 tcp_len)
{
	struct sk_buff *first = lro->parent;

	first->len += tcp_len;
	first->data_len = lro->frags_len;
	skb_pull(skb, (skb->len - tcp_len));
	if (skb_shinfo(first)->frag_list)
		lro->last_frag->next = skb;
	else
		skb_shinfo(first)->frag_list = skb;
	first->truesize += skb->truesize;
	lro->last_frag = skb;
	sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
}