1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watchdog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used
31 * rx_ring_len: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO.
36 * in PCI Configuration space.
37 ************************************************************************/
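/*
 * Illustrative usage sketch (not part of the original sources): assuming the
 * variables described above are registered as module parameters elsewhere in
 * this file, the driver could be loaded with, for example:
 *
 *	modprobe s2io tx_fifo_num=2 rx_ring_num=2
 *
 * which would request two Tx FIFOs and two Rx rings with default lengths.
 */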
39 #include <linux/config.h>
40 #include <linux/module.h>
41 #include <linux/types.h>
42 #include <linux/errno.h>
43 #include <linux/ioport.h>
44 #include <linux/pci.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/kernel.h>
47 #include <linux/netdevice.h>
48 #include <linux/etherdevice.h>
49 #include <linux/skbuff.h>
50 #include <linux/init.h>
51 #include <linux/delay.h>
52 #include <linux/stddef.h>
53 #include <linux/ioctl.h>
54 #include <linux/timex.h>
55 #include <linux/sched.h>
56 #include <linux/ethtool.h>
57 #include <linux/version.h>
58 #include <linux/workqueue.h>
61 #include <asm/system.h>
62 #include <asm/uaccess.h>
66 #include "s2io-regs.h"
68 /* S2io Driver name & version. */
69 static char s2io_driver_name[] = "s2io";
70 static char s2io_driver_version[] = "Version 1.7.7.1";
73 * Cards with the following subsystem_ids have a link state indication
74 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
75 * The macro below identifies these cards given the subsystem_id.
77 #define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
78 (((subid >= 0x600B) && (subid <= 0x600D)) || \
79 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0
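/*
 * Usage sketch for the macro above (illustrative only; the helper name is
 * hypothetical). The subsystem ID is read from the PCI device, i.e.
 * pdev->subsystem_device, exactly as start_nic() does further below.
 */
#if 0
static int link_led_workaround_needed(struct pci_dev *pdev)
{
	u16 subid = pdev->subsystem_device;

	return CARDS_WITH_FAULTY_LINK_INDICATORS(subid);
}
#endif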
81 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
82 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
83 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
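/*
 * Usage sketch for TASKLET_IN_USE (illustrative; the real tasklet body lives
 * elsewhere in the driver). test_and_set_bit() makes the check-and-claim a
 * single atomic step; the owner must clear the bit when it is done.
 */
#if 0
	if (!TASKLET_IN_USE) {
		/* ... replenish Rx buffers, etc. ... */
		clear_bit(0, &sp->tasklet_status);
	}
#endif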
86 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
89 if ((sp->pkt_cnt[ring] - rxb_size) > 16) {
91 if ((sp->pkt_cnt[ring] - rxb_size) < MAX_RXDS_PER_BLOCK) {
99 /* Ethtool related variables and Macros. */
100 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
101 "Register test\t(offline)",
102 "Eeprom test\t(offline)",
103 "Link test\t(online)",
104 "RLDRAM test\t(offline)",
105 "BIST Test\t(offline)"
108 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
110 {"tmac_data_octets"},
114 {"tmac_pause_ctrl_frms"},
115 {"tmac_any_err_frms"},
116 {"tmac_vld_ip_octets"},
124 {"rmac_data_octets"},
125 {"rmac_fcs_err_frms"},
127 {"rmac_vld_mcst_frms"},
128 {"rmac_vld_bcst_frms"},
129 {"rmac_in_rng_len_err_frms"},
131 {"rmac_pause_ctrl_frms"},
132 {"rmac_discarded_frms"},
133 {"rmac_usized_frms"},
134 {"rmac_osized_frms"},
136 {"rmac_jabber_frms"},
144 {"rmac_err_drp_udp"},
146 {"rmac_accepted_ip"},
150 #define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
151 #define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
153 #define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
154 #define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
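/*
 * For reference: s2io_gstrings above holds five ETH_GSTRING_LEN-sized
 * entries, so S2IO_TEST_LEN evaluates to 5 and S2IO_STRINGS_LEN to
 * 5 * ETH_GSTRING_LEN bytes. S2IO_STAT_LEN and S2IO_STAT_STRINGS_LEN are
 * derived from ethtool_stats_keys in the same way.
 */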
158 * Constants to be programmed into the Xena's registers, to configure
162 #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
165 static u64 default_mdio_cfg[] = {
167 0xC001010000000000ULL, 0xC0010100000000E0ULL,
168 0xC0010100008000E4ULL,
169 /* Remove Reset from PMA PLL */
170 0xC001010000000000ULL, 0xC0010100000000E0ULL,
171 0xC0010100000000E4ULL,
175 static u64 default_dtx_cfg[] = {
176 0x8000051500000000ULL, 0x80000515000000E0ULL,
177 0x80000515D93500E4ULL, 0x8001051500000000ULL,
178 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
179 0x8002051500000000ULL, 0x80020515000000E0ULL,
180 0x80020515F21000E4ULL,
181 /* Set PADLOOPBACKN */
182 0x8002051500000000ULL, 0x80020515000000E0ULL,
183 0x80020515B20000E4ULL, 0x8003051500000000ULL,
184 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
185 0x8004051500000000ULL, 0x80040515000000E0ULL,
186 0x80040515B20000E4ULL, 0x8005051500000000ULL,
187 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
189 /* Remove PADLOOPBACKN */
190 0x8002051500000000ULL, 0x80020515000000E0ULL,
191 0x80020515F20000E4ULL, 0x8003051500000000ULL,
192 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
193 0x8004051500000000ULL, 0x80040515000000E0ULL,
194 0x80040515F20000E4ULL, 0x8005051500000000ULL,
195 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
201 * Constants for Fixing the MacAddress problem seen mostly on
204 static u64 fix_mac[] = {
205 0x0060000000000000ULL, 0x0060600000000000ULL,
206 0x0040600000000000ULL, 0x0000600000000000ULL,
207 0x0020600000000000ULL, 0x0060600000000000ULL,
208 0x0020600000000000ULL, 0x0060600000000000ULL,
209 0x0020600000000000ULL, 0x0060600000000000ULL,
210 0x0020600000000000ULL, 0x0060600000000000ULL,
211 0x0020600000000000ULL, 0x0060600000000000ULL,
212 0x0020600000000000ULL, 0x0060600000000000ULL,
213 0x0020600000000000ULL, 0x0060600000000000ULL,
214 0x0020600000000000ULL, 0x0060600000000000ULL,
215 0x0020600000000000ULL, 0x0060600000000000ULL,
216 0x0020600000000000ULL, 0x0060600000000000ULL,
217 0x0020600000000000ULL, 0x0000600000000000ULL,
218 0x0040600000000000ULL, 0x0060600000000000ULL,
222 /* Module Loadable parameters. */
223 static unsigned int tx_fifo_num = 1;
224 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
225 {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
226 static unsigned int rx_ring_num = 1;
227 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
228 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
229 static unsigned int Stats_refresh_time = 4;
230 static unsigned int rmac_pause_time = 65535;
231 static unsigned int mc_pause_threshold_q0q3 = 187;
232 static unsigned int mc_pause_threshold_q4q7 = 187;
233 static unsigned int shared_splits;
234 static unsigned int tmac_util_period = 5;
235 static unsigned int rmac_util_period = 5;
236 #ifndef CONFIG_S2IO_NAPI
237 static unsigned int indicate_max_pkts;
242 * This table lists all the devices that this driver supports.
244 static struct pci_device_id s2io_tbl[] __devinitdata = {
245 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
246 PCI_ANY_ID, PCI_ANY_ID},
247 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
248 PCI_ANY_ID, PCI_ANY_ID},
249 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
250 PCI_ANY_ID, PCI_ANY_ID},
251 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
252 PCI_ANY_ID, PCI_ANY_ID},
256 MODULE_DEVICE_TABLE(pci, s2io_tbl);
258 static struct pci_driver s2io_driver = {
260 .id_table = s2io_tbl,
261 .probe = s2io_init_nic,
262 .remove = __devexit_p(s2io_rem_nic),
265 /* A simplifier macro used both by init and free shared_mem Fns(). */
266 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
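/*
 * Worked example of the macro above (values are illustrative): with a FIFO
 * of 1000 TxDLs and 64 lists fitting in one page, TXD_MEM_PAGE_CNT(1000, 64)
 * == (1000 + 64 - 1) / 64 == 16, i.e. a ceiling division, so the last page
 * may be only partially used.
 */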
269 * init_shared_mem - Allocation and Initialization of Memory
270 * @nic: Device private variable.
271 * Description: The function allocates all the memory areas shared
272 * between the NIC and the driver. This includes Tx descriptors,
273 * Rx descriptors and the statistics block.
276 static int init_shared_mem(struct s2io_nic *nic)
279 void *tmp_v_addr, *tmp_v_addr_next;
280 dma_addr_t tmp_p_addr, tmp_p_addr_next;
281 RxD_block_t *pre_rxd_blk = NULL;
283 int lst_size, lst_per_page;
284 struct net_device *dev = nic->dev;
285 #ifdef CONFIG_2BUFF_MODE
290 mac_info_t *mac_control;
291 struct config_param *config;
293 mac_control = &nic->mac_control;
294 config = &nic->config;
297 /* Allocation and initialization of TXDLs in FIFOs */
299 for (i = 0; i < config->tx_fifo_num; i++) {
300 size += config->tx_cfg[i].fifo_len;
302 if (size > MAX_AVAILABLE_TXDS) {
303 DBG_PRINT(ERR_DBG, "%s: Total number of Tx FIFOs ",
305 DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
306 DBG_PRINT(ERR_DBG, "that can be used\n");
310 lst_size = (sizeof(TxD_t) * config->max_txds);
311 lst_per_page = PAGE_SIZE / lst_size;
313 for (i = 0; i < config->tx_fifo_num; i++) {
314 int fifo_len = config->tx_cfg[i].fifo_len;
315 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
316 nic->list_info[i] = kmalloc(list_holder_size, GFP_KERNEL);
317 if (!nic->list_info[i]) {
319 "Malloc failed for list_info\n");
322 memset(nic->list_info[i], 0, list_holder_size);
324 for (i = 0; i < config->tx_fifo_num; i++) {
325 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
327 mac_control->tx_curr_put_info[i].offset = 0;
328 mac_control->tx_curr_put_info[i].fifo_len =
329 config->tx_cfg[i].fifo_len - 1;
330 mac_control->tx_curr_get_info[i].offset = 0;
331 mac_control->tx_curr_get_info[i].fifo_len =
332 config->tx_cfg[i].fifo_len - 1;
333 for (j = 0; j < page_num; j++) {
337 tmp_v = pci_alloc_consistent(nic->pdev,
341 "pci_alloc_consistent ");
342 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
345 while (k < lst_per_page) {
346 int l = (j * lst_per_page) + k;
347 if (l == config->tx_cfg[i].fifo_len)
349 nic->list_info[i][l].list_virt_addr =
350 tmp_v + (k * lst_size);
351 nic->list_info[i][l].list_phy_addr =
352 tmp_p + (k * lst_size);
359 /* Allocation and initialization of RXDs in Rings */
361 for (i = 0; i < config->rx_ring_num; i++) {
362 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
363 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
364 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
366 DBG_PRINT(ERR_DBG, "RxDs per Block");
369 size += config->rx_cfg[i].num_rxd;
370 nic->block_count[i] =
371 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
373 config->rx_cfg[i].num_rxd - nic->block_count[i];
376 for (i = 0; i < config->rx_ring_num; i++) {
377 mac_control->rx_curr_get_info[i].block_index = 0;
378 mac_control->rx_curr_get_info[i].offset = 0;
379 mac_control->rx_curr_get_info[i].ring_len =
380 config->rx_cfg[i].num_rxd - 1;
381 mac_control->rx_curr_put_info[i].block_index = 0;
382 mac_control->rx_curr_put_info[i].offset = 0;
383 mac_control->rx_curr_put_info[i].ring_len =
384 config->rx_cfg[i].num_rxd - 1;
386 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
387 /* Allocating all the Rx blocks */
388 for (j = 0; j < blk_cnt; j++) {
389 #ifndef CONFIG_2BUFF_MODE
390 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
392 size = SIZE_OF_BLOCK;
394 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
396 if (tmp_v_addr == NULL) {
398 * In case of failure, free_shared_mem()
399 * is called, which should free any
400 * memory that was alloced till the
403 nic->rx_blocks[i][j].block_virt_addr =
407 memset(tmp_v_addr, 0, size);
408 nic->rx_blocks[i][j].block_virt_addr = tmp_v_addr;
409 nic->rx_blocks[i][j].block_dma_addr = tmp_p_addr;
411 /* Interlinking all Rx Blocks */
412 for (j = 0; j < blk_cnt; j++) {
413 tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr;
415 nic->rx_blocks[i][(j + 1) %
416 blk_cnt].block_virt_addr;
417 tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr;
419 nic->rx_blocks[i][(j + 1) %
420 blk_cnt].block_dma_addr;
422 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
423 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
426 #ifndef CONFIG_2BUFF_MODE
427 pre_rxd_blk->reserved_2_pNext_RxD_block =
428 (unsigned long) tmp_v_addr_next;
430 pre_rxd_blk->pNext_RxD_Blk_physical =
431 (u64) tmp_p_addr_next;
435 #ifdef CONFIG_2BUFF_MODE
437 * Allocation of Storages for buffer addresses in 2BUFF mode
438 * and the buffers as well.
440 for (i = 0; i < config->rx_ring_num; i++) {
442 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
443 nic->ba[i] = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
447 for (j = 0; j < blk_cnt; j++) {
449 nic->ba[i][j] = kmalloc((sizeof(buffAdd_t) *
450 (MAX_RXDS_PER_BLOCK + 1)),
454 while (k != MAX_RXDS_PER_BLOCK) {
455 ba = &nic->ba[i][j][k];
457 ba->ba_0_org = kmalloc
458 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
461 tmp = (unsigned long) ba->ba_0_org;
463 tmp &= ~((unsigned long) ALIGN_SIZE);
464 ba->ba_0 = (void *) tmp;
466 ba->ba_1_org = kmalloc
467 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
470 tmp = (unsigned long) ba->ba_1_org;
472 tmp &= ~((unsigned long) ALIGN_SIZE);
473 ba->ba_1 = (void *) tmp;
480 /* Allocation and initialization of Statistics block */
481 size = sizeof(StatInfo_t);
482 mac_control->stats_mem = pci_alloc_consistent
483 (nic->pdev, size, &mac_control->stats_mem_phy);
485 if (!mac_control->stats_mem) {
487 * In case of failure, free_shared_mem() is called, which
488 * should free any memory that was alloced till the
493 mac_control->stats_mem_sz = size;
495 tmp_v_addr = mac_control->stats_mem;
496 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
497 memset(tmp_v_addr, 0, size);
499 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
500 (unsigned long long) tmp_p_addr);
506 * free_shared_mem - Free the allocated Memory
507 * @nic: Device private variable.
508 * Description: This function frees all memory locations allocated by
509 * the init_shared_mem() function and return it to the kernel.
512 static void free_shared_mem(struct s2io_nic *nic)
514 int i, j, blk_cnt, size;
516 dma_addr_t tmp_p_addr;
517 mac_info_t *mac_control;
518 struct config_param *config;
519 int lst_size, lst_per_page;
525 mac_control = &nic->mac_control;
526 config = &nic->config;
528 lst_size = (sizeof(TxD_t) * config->max_txds);
529 lst_per_page = PAGE_SIZE / lst_size;
531 for (i = 0; i < config->tx_fifo_num; i++) {
532 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
534 for (j = 0; j < page_num; j++) {
535 int mem_blks = (j * lst_per_page);
536 if (!nic->list_info[i][mem_blks].list_virt_addr)
538 pci_free_consistent(nic->pdev, PAGE_SIZE,
539 nic->list_info[i][mem_blks].
541 nic->list_info[i][mem_blks].
544 kfree(nic->list_info[i]);
547 #ifndef CONFIG_2BUFF_MODE
548 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
550 size = SIZE_OF_BLOCK;
552 for (i = 0; i < config->rx_ring_num; i++) {
553 blk_cnt = nic->block_count[i];
554 for (j = 0; j < blk_cnt; j++) {
555 tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr;
556 tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr;
557 if (tmp_v_addr == NULL)
559 pci_free_consistent(nic->pdev, size,
560 tmp_v_addr, tmp_p_addr);
564 #ifdef CONFIG_2BUFF_MODE
565 /* Freeing buffer storage addresses in 2BUFF mode. */
566 for (i = 0; i < config->rx_ring_num; i++) {
568 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
571 for (j = 0; j < blk_cnt; j++) {
573 if (!nic->ba[i][j]) {
577 while (k != MAX_RXDS_PER_BLOCK) {
578 buffAdd_t *ba = &nic->ba[i][j][k];
579 if (!ba || !ba->ba_0_org || !ba->ba_1_org)
582 kfree(nic->ba[i][j]);
593 kfree(nic->ba[i][j]);
600 if (mac_control->stats_mem) {
601 pci_free_consistent(nic->pdev,
602 mac_control->stats_mem_sz,
603 mac_control->stats_mem,
604 mac_control->stats_mem_phy);
609 * init_nic - Initialization of hardware
610 * @nic: device private variable
611 * Description: The function sequentially configures every block
612 * of the H/W from their reset values.
613 * Return Value: SUCCESS on success and
614 * '-1' on failure (endian settings incorrect).
617 static int init_nic(struct s2io_nic *nic)
619 XENA_dev_config_t __iomem *bar0 = nic->bar0;
620 struct net_device *dev = nic->dev;
621 register u64 val64 = 0;
625 mac_info_t *mac_control;
626 struct config_param *config;
627 int mdio_cnt = 0, dtx_cnt = 0;
628 unsigned long long mem_share;
630 mac_control = &nic->mac_control;
631 config = &nic->config;
633 /* Initialize swapper control register */
634 if (s2io_set_swapper(nic)) {
635 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
639 /* Remove XGXS from reset state */
641 writeq(val64, &bar0->sw_reset);
642 val64 = readq(&bar0->sw_reset);
645 /* Enable Receiving broadcasts */
646 add = &bar0->mac_cfg;
647 val64 = readq(&bar0->mac_cfg);
648 val64 |= MAC_RMAC_BCAST_ENABLE;
649 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
650 writel((u32) val64, add);
651 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
652 writel((u32) (val64 >> 32), (add + 4));
654 /* Read registers in all blocks */
655 val64 = readq(&bar0->mac_int_mask);
656 val64 = readq(&bar0->mc_int_mask);
657 val64 = readq(&bar0->xgxs_int_mask);
661 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
664 * Configuring the XAUI Interface of Xena.
665 * ***************************************
666 * To Configure the Xena's XAUI, one has to write a series
667 * of 64 bit values into two registers in a particular
668 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
669 * which will be defined in the array of configuration values
670 * (default_dtx_cfg & default_mdio_cfg) at appropriate places
671 * to switch writing from one register to another. We continue
672 * writing these values until we encounter the 'END_SIGN' macro.
673 * For example, after making a series of 21 writes into
674 * dtx_control register the 'SWITCH_SIGN' appears and hence we
675 * start writing into mdio_control until we encounter END_SIGN.
679 while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
680 if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
684 SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
685 &bar0->dtx_control, UF);
686 val64 = readq(&bar0->dtx_control);
690 while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
691 if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
695 SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
696 &bar0->mdio_control, UF);
697 val64 = readq(&bar0->mdio_control);
700 if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
701 (default_mdio_cfg[mdio_cnt] == END_SIGN)) {
708 /* Tx DMA Initialization */
710 writeq(val64, &bar0->tx_fifo_partition_0);
711 writeq(val64, &bar0->tx_fifo_partition_1);
712 writeq(val64, &bar0->tx_fifo_partition_2);
713 writeq(val64, &bar0->tx_fifo_partition_3);
716 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
718 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
719 13) | vBIT(config->tx_cfg[i].fifo_priority,
722 if (i == (config->tx_fifo_num - 1)) {
729 writeq(val64, &bar0->tx_fifo_partition_0);
733 writeq(val64, &bar0->tx_fifo_partition_1);
737 writeq(val64, &bar0->tx_fifo_partition_2);
741 writeq(val64, &bar0->tx_fifo_partition_3);
746 /* Enable Tx FIFO partition 0. */
747 val64 = readq(&bar0->tx_fifo_partition_0);
748 val64 |= BIT(0); /* To enable the FIFO partition. */
749 writeq(val64, &bar0->tx_fifo_partition_0);
751 val64 = readq(&bar0->tx_fifo_partition_0);
752 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
753 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
756 * Initialization of Tx_PA_CONFIG register to ignore packet
757 * integrity checking.
759 val64 = readq(&bar0->tx_pa_cfg);
760 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
761 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
762 writeq(val64, &bar0->tx_pa_cfg);
764 /* Rx DMA initialization. */
766 for (i = 0; i < config->rx_ring_num; i++) {
768 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
771 writeq(val64, &bar0->rx_queue_priority);
774 * Allocating equal share of memory to all the
778 for (i = 0; i < config->rx_ring_num; i++) {
781 mem_share = (64 / config->rx_ring_num +
782 64 % config->rx_ring_num);
783 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
786 mem_share = (64 / config->rx_ring_num);
787 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
790 mem_share = (64 / config->rx_ring_num);
791 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
794 mem_share = (64 / config->rx_ring_num);
795 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
798 mem_share = (64 / config->rx_ring_num);
799 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
802 mem_share = (64 / config->rx_ring_num);
803 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
806 mem_share = (64 / config->rx_ring_num);
807 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
810 mem_share = (64 / config->rx_ring_num);
811 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
815 writeq(val64, &bar0->rx_queue_cfg);
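/*
 * Worked example of the share calculation above (illustrative value for
 * rx_ring_num): with rx_ring_num == 3, queue 0 gets 64/3 + 64%3 == 22 of the
 * 64 memory sections while queues 1 and 2 get 64/3 == 21 each, so the
 * remainder always lands on queue 0 and all 64 sections stay accounted for.
 */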
818 * Initializing the Tx round robin registers to 0.
819 * Filling Tx and Rx round robin registers as per the
820 * number of FIFOs and Rings is still TODO.
822 writeq(0, &bar0->tx_w_round_robin_0);
823 writeq(0, &bar0->tx_w_round_robin_1);
824 writeq(0, &bar0->tx_w_round_robin_2);
825 writeq(0, &bar0->tx_w_round_robin_3);
826 writeq(0, &bar0->tx_w_round_robin_4);
830 * Disable Rx steering. Hard coding all packets to be steered to
833 val64 = 0x8080808080808080ULL;
834 writeq(val64, &bar0->rts_qos_steering);
838 for (i = 1; i < 8; i++)
839 writeq(val64, &bar0->rts_frm_len_n[i]);
841 /* Set rts_frm_len register for fifo 0 */
842 writeq(MAC_RTS_FRM_LEN_SET(dev->mtu + 22),
843 &bar0->rts_frm_len_n[0]);
845 /* Enable statistics */
846 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
847 val64 = SET_UPDT_PERIOD(Stats_refresh_time) |
848 STAT_CFG_STAT_RO | STAT_CFG_STAT_EN;
849 writeq(val64, &bar0->stat_cfg);
852 * Initializing the sampling rate for the device to calculate the
853 * bandwidth utilization.
855 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
856 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
857 writeq(val64, &bar0->mac_link_util);
861 * Initializing the Transmit and Receive Traffic Interrupt
864 /* TTI Initialization. Default Tx timer gets us about
865 * 250 interrupts per sec. Continuous interrupts are enabled
868 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) |
869 TTI_DATA1_MEM_TX_URNG_A(0xA) |
870 TTI_DATA1_MEM_TX_URNG_B(0x10) |
871 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN |
872 TTI_DATA1_MEM_TX_TIMER_CI_EN;
873 writeq(val64, &bar0->tti_data1_mem);
875 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
876 TTI_DATA2_MEM_TX_UFC_B(0x20) |
877 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
878 writeq(val64, &bar0->tti_data2_mem);
880 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
881 writeq(val64, &bar0->tti_command_mem);
884 * Once the operation completes, the Strobe bit of the command
885 * register will be reset. We poll for this particular condition.
886 * We wait for a maximum of 500ms for the operation to complete;
887 * if it's not complete by then we return an error.
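/*
 * The polling pattern described above, as a stand-alone sketch; the
 * iteration count and sleep interval are assumptions, not necessarily the
 * driver's exact values.
 */
#if 0
	{
		int cnt = 0;

		while (cnt < 10) {	/* up to ~500 ms in 50 ms steps */
			val64 = readq(&bar0->tti_command_mem);
			if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD))
				break;	/* strobe cleared, command accepted */
			msleep(50);
			cnt++;
		}
		if (cnt == 10)
			return -1;	/* TTI init timed out */
	}
#endif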
891 val64 = readq(&bar0->tti_command_mem);
892 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
896 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
904 /* RTI Initialization */
905 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) |
906 RTI_DATA1_MEM_RX_URNG_A(0xA) |
907 RTI_DATA1_MEM_RX_URNG_B(0x10) |
908 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
910 writeq(val64, &bar0->rti_data1_mem);
912 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
913 RTI_DATA2_MEM_RX_UFC_B(0x2) |
914 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
915 writeq(val64, &bar0->rti_data2_mem);
917 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
918 writeq(val64, &bar0->rti_command_mem);
921 * Once the operation completes, the Strobe bit of the command
922 * register will be reset. We poll for this particular condition.
923 * We wait for a maximum of 500ms for the operation to complete;
924 * if it's not complete by then we return an error.
928 val64 = readq(&bar0->rti_command_mem);
929 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
933 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
942 * Initializing proper values as Pause threshold into all
943 * the 8 Queues on Rx side.
945 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
946 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
948 /* Disable RMAC PAD STRIPPING */
949 add = &bar0->mac_cfg;
950 val64 = readq(&bar0->mac_cfg);
951 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
952 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
953 writel((u32) (val64), add);
954 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
955 writel((u32) (val64 >> 32), (add + 4));
956 val64 = readq(&bar0->mac_cfg);
959 * Set the time value to be inserted in the pause frame
962 val64 = readq(&bar0->rmac_pause_cfg);
963 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
964 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
965 writeq(val64, &bar0->rmac_pause_cfg);
968 * Set the threshold limit for generating pause frames.
969 * If the amount of data in any queue exceeds the ratio
970 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
971 * a pause frame is generated.
974 for (i = 0; i < 4; i++) {
976 (((u64) 0xFF00 | nic->mac_control.
977 mc_pause_threshold_q0q3)
980 writeq(val64, &bar0->mc_pause_thresh_q0q3);
983 for (i = 0; i < 4; i++) {
985 (((u64) 0xFF00 | nic->mac_control.
986 mc_pause_threshold_q4q7)
989 writeq(val64, &bar0->mc_pause_thresh_q4q7);
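/*
 * Example of the threshold described above (illustrative): with the default
 * mc_pause_threshold value of 187, a pause frame is generated once the data
 * queued in an Rx queue exceeds 187/256 (roughly 73%) of that queue's space.
 */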
992 * TxDMA will stop issuing read requests if the number of read splits
993 * exceeds the limit set by shared_splits.
995 val64 = readq(&bar0->pic_control);
996 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
997 writeq(val64, &bar0->pic_control);
1003 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1004 * @nic: device private variable,
1005 * @mask: A mask indicating which Intr block must be modified and,
1006 * @flag: A flag indicating whether to enable or disable the Intrs.
1007 * Description: This function will either disable or enable the interrupts
1008 * depending on the flag argument. The mask argument can be used to
1009 * enable/disable any Intr block.
1010 * Return Value: NONE.
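/*
 * Typical call (a usage sketch; start_nic() and stop_nic() below contain the
 * driver's own invocations):
 *
 *	en_dis_able_nic_intrs(nic, TX_TRAFFIC_INTR | RX_TRAFFIC_INTR,
 *			      ENABLE_INTRS);
 */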
1013 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1015 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1016 register u64 val64 = 0, temp64 = 0;
1018 /* Top level interrupt classification */
1019 /* PIC Interrupts */
1020 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1021 /* Enable PIC Intrs in the general intr mask register */
1022 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1023 if (flag == ENABLE_INTRS) {
1024 temp64 = readq(&bar0->general_int_mask);
1025 temp64 &= ~((u64) val64);
1026 writeq(temp64, &bar0->general_int_mask);
1028 * Disabled all PCIX, Flash, MDIO, IIC and GPIO
1029 * interrupts for now.
1032 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1034 * No MSI Support is available presently, so TTI and
1035 * RTI interrupts are also disabled.
1037 } else if (flag == DISABLE_INTRS) {
1039 * Disable PIC Intrs in the general
1040 * intr mask register
1042 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1043 temp64 = readq(&bar0->general_int_mask);
1045 writeq(val64, &bar0->general_int_mask);
1049 /* DMA Interrupts */
1050 /* Enabling/Disabling Tx DMA interrupts */
1051 if (mask & TX_DMA_INTR) {
1052 /* Enable TxDMA Intrs in the general intr mask register */
1053 val64 = TXDMA_INT_M;
1054 if (flag == ENABLE_INTRS) {
1055 temp64 = readq(&bar0->general_int_mask);
1056 temp64 &= ~((u64) val64);
1057 writeq(temp64, &bar0->general_int_mask);
1059 * Keep all interrupts other than PFC interrupt
1060 * and PCC interrupt disabled in DMA level.
1062 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1064 writeq(val64, &bar0->txdma_int_mask);
1066 * Enable only the MISC error 1 interrupt in PFC block
1068 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1069 writeq(val64, &bar0->pfc_err_mask);
1071 * Enable only the FB_ECC error interrupt in PCC block
1073 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1074 writeq(val64, &bar0->pcc_err_mask);
1075 } else if (flag == DISABLE_INTRS) {
1077 * Disable TxDMA Intrs in the general intr mask
1080 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1081 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1082 temp64 = readq(&bar0->general_int_mask);
1084 writeq(val64, &bar0->general_int_mask);
1088 /* Enabling/Disabling Rx DMA interrupts */
1089 if (mask & RX_DMA_INTR) {
1090 /* Enable RxDMA Intrs in the general intr mask register */
1091 val64 = RXDMA_INT_M;
1092 if (flag == ENABLE_INTRS) {
1093 temp64 = readq(&bar0->general_int_mask);
1094 temp64 &= ~((u64) val64);
1095 writeq(temp64, &bar0->general_int_mask);
1097 * All RxDMA block interrupts are disabled for now
1100 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1101 } else if (flag == DISABLE_INTRS) {
1103 * Disable RxDMA Intrs in the general intr mask
1106 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1107 temp64 = readq(&bar0->general_int_mask);
1109 writeq(val64, &bar0->general_int_mask);
1113 /* MAC Interrupts */
1114 /* Enabling/Disabling MAC interrupts */
1115 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1116 val64 = TXMAC_INT_M | RXMAC_INT_M;
1117 if (flag == ENABLE_INTRS) {
1118 temp64 = readq(&bar0->general_int_mask);
1119 temp64 &= ~((u64) val64);
1120 writeq(temp64, &bar0->general_int_mask);
1122 * All MAC block error interrupts are disabled for now
1123 * except the link status change interrupt.
1126 val64 = MAC_INT_STATUS_RMAC_INT;
1127 temp64 = readq(&bar0->mac_int_mask);
1128 temp64 &= ~((u64) val64);
1129 writeq(temp64, &bar0->mac_int_mask);
1131 val64 = readq(&bar0->mac_rmac_err_mask);
1132 val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1133 writeq(val64, &bar0->mac_rmac_err_mask);
1134 } else if (flag == DISABLE_INTRS) {
1136 * Disable MAC Intrs in the general intr mask register
1138 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1139 writeq(DISABLE_ALL_INTRS,
1140 &bar0->mac_rmac_err_mask);
1142 temp64 = readq(&bar0->general_int_mask);
1144 writeq(val64, &bar0->general_int_mask);
1148 /* XGXS Interrupts */
1149 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1150 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1151 if (flag == ENABLE_INTRS) {
1152 temp64 = readq(&bar0->general_int_mask);
1153 temp64 &= ~((u64) val64);
1154 writeq(temp64, &bar0->general_int_mask);
1156 * All XGXS block error interrupts are disabled for now
1159 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1160 } else if (flag == DISABLE_INTRS) {
1162 * Disable XGXS Intrs in the general intr mask register
1164 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1165 temp64 = readq(&bar0->general_int_mask);
1167 writeq(val64, &bar0->general_int_mask);
1171 /* Memory Controller(MC) interrupts */
1172 if (mask & MC_INTR) {
1174 if (flag == ENABLE_INTRS) {
1175 temp64 = readq(&bar0->general_int_mask);
1176 temp64 &= ~((u64) val64);
1177 writeq(temp64, &bar0->general_int_mask);
1179 * All MC block error interrupts are disabled for now
1182 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1183 } else if (flag == DISABLE_INTRS) {
1185 * Disable MC Intrs in the general intr mask register
1187 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1188 temp64 = readq(&bar0->general_int_mask);
1190 writeq(val64, &bar0->general_int_mask);
1195 /* Tx traffic interrupts */
1196 if (mask & TX_TRAFFIC_INTR) {
1197 val64 = TXTRAFFIC_INT_M;
1198 if (flag == ENABLE_INTRS) {
1199 temp64 = readq(&bar0->general_int_mask);
1200 temp64 &= ~((u64) val64);
1201 writeq(temp64, &bar0->general_int_mask);
1203 * Enable all the Tx side interrupts
1204 * writing 0 Enables all 64 TX interrupt levels
1206 writeq(0x0, &bar0->tx_traffic_mask);
1207 } else if (flag == DISABLE_INTRS) {
1209 * Disable Tx Traffic Intrs in the general intr mask
1212 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1213 temp64 = readq(&bar0->general_int_mask);
1215 writeq(val64, &bar0->general_int_mask);
1219 /* Rx traffic interrupts */
1220 if (mask & RX_TRAFFIC_INTR) {
1221 val64 = RXTRAFFIC_INT_M;
1222 if (flag == ENABLE_INTRS) {
1223 temp64 = readq(&bar0->general_int_mask);
1224 temp64 &= ~((u64) val64);
1225 writeq(temp64, &bar0->general_int_mask);
1226 /* writing 0 Enables all 8 RX interrupt levels */
1227 writeq(0x0, &bar0->rx_traffic_mask);
1228 } else if (flag == DISABLE_INTRS) {
1230 * Disable Rx Traffic Intrs in the general intr mask
1233 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1234 temp64 = readq(&bar0->general_int_mask);
1236 writeq(val64, &bar0->general_int_mask);
1242 * verify_xena_quiescence - Checks whether the H/W is ready
1243 * @val64 : Value read from adapter status register.
1244 * @flag : indicates if the adapter enable bit was ever written once
1246 * Description: Returns whether the H/W is ready to go or not. Depending
1247 * on whether the adapter enable bit was written or not, the comparison
1248 * differs and the calling function passes the input argument flag to
1250 * Return: 1 if Xena is quiescent,
1251 *         0 if Xena is not quiescent.
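/*
 * Typical call (a usage sketch; start_nic() below performs this same check
 * before switching the adapter on):
 *
 *	val64 = readq(&bar0->adapter_status);
 *	if (!verify_xena_quiescence(val64, nic->device_enabled_once))
 *		... bail out, the H/W is not ready ...
 */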
1254 static int verify_xena_quiescence(u64 val64, int flag)
1257 u64 tmp64 = ~((u64) val64);
1261 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1262 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1263 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1264 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1265 ADAPTER_STATUS_P_PLL_LOCK))) {
1266 if (flag == FALSE) {
1267 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1268 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1269 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1275 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1276 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1277 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1278 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1279 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1291 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1292 * @sp: Pointer to device specific structure
1294 * New procedure to clear mac address reading problems on Alpha platforms
1298 static void fix_mac_address(nic_t * sp)
1300 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1304 while (fix_mac[i] != END_SIGN) {
1305 writeq(fix_mac[i++], &bar0->gpio_control);
1306 val64 = readq(&bar0->gpio_control);
1311 * start_nic - Turns the device on
1312 * @nic : device private variable.
1314 * This function actually turns the device on. Before this function is
1315 * called, all registers are configured from their reset states
1316 * and shared memory is allocated but the NIC is still quiescent. On
1317 * calling this function, the device interrupts are cleared and the NIC is
1318 * literally switched on by writing into the adapter control register.
1320 * SUCCESS on success and -1 on failure.
1323 static int start_nic(struct s2io_nic *nic)
1325 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1326 struct net_device *dev = nic->dev;
1327 register u64 val64 = 0;
1328 u16 interruptible, i;
1330 mac_info_t *mac_control;
1331 struct config_param *config;
1333 mac_control = &nic->mac_control;
1334 config = &nic->config;
1336 /* PRC Initialization and configuration */
1337 for (i = 0; i < config->rx_ring_num; i++) {
1338 writeq((u64) nic->rx_blocks[i][0].block_dma_addr,
1339 &bar0->prc_rxd0_n[i]);
1341 val64 = readq(&bar0->prc_ctrl_n[i]);
1342 #ifndef CONFIG_2BUFF_MODE
1343 val64 |= PRC_CTRL_RC_ENABLED;
1345 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1347 writeq(val64, &bar0->prc_ctrl_n[i]);
1350 #ifdef CONFIG_2BUFF_MODE
1351 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1352 val64 = readq(&bar0->rx_pa_cfg);
1353 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1354 writeq(val64, &bar0->rx_pa_cfg);
1358 * Enabling MC-RLDRAM. After enabling the device, we wait
1359 * for around 100ms, which is approximately the time required
1360 * for the device to be ready for operation.
1362 val64 = readq(&bar0->mc_rldram_mrs);
1363 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1364 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1365 val64 = readq(&bar0->mc_rldram_mrs);
1367 msleep(100); /* Delay by around 100 ms. */
1369 /* Enabling ECC Protection. */
1370 val64 = readq(&bar0->adapter_control);
1371 val64 &= ~ADAPTER_ECC_EN;
1372 writeq(val64, &bar0->adapter_control);
1375 * Clearing any possible Link state change interrupts that
1376 * could have popped up just before Enabling the card.
1378 val64 = readq(&bar0->mac_rmac_err_reg);
1380 writeq(val64, &bar0->mac_rmac_err_reg);
1383 * Verify if the device is ready to be enabled, if so enable
1386 val64 = readq(&bar0->adapter_status);
1387 if (!verify_xena_quiescence(val64, nic->device_enabled_once)) {
1388 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1389 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1390 (unsigned long long) val64);
1394 /* Enable select interrupts */
1395 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1397 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1400 * With some switches, link might be already up at this point.
1401 * Because of this weird behavior, when we enable laser,
1402 * we may not get link. We need to handle this. We cannot
1403 * figure out which switch is misbehaving. So we are forced to
1404 * make a global change.
1407 /* Enabling Laser. */
1408 val64 = readq(&bar0->adapter_control);
1409 val64 |= ADAPTER_EOI_TX_ON;
1410 writeq(val64, &bar0->adapter_control);
1412 /* SXE-002: Initialize link and activity LED */
1413 subid = nic->pdev->subsystem_device;
1414 if ((subid & 0xFF) >= 0x07) {
1415 val64 = readq(&bar0->gpio_control);
1416 val64 |= 0x0000800000000000ULL;
1417 writeq(val64, &bar0->gpio_control);
1418 val64 = 0x0411040400000000ULL;
1419 writeq(val64, (void __iomem *) bar0 + 0x2700);
1423 * Don't see link state interrupts on certain switches, so
1424 * directly scheduling a link state task from here.
1426 schedule_work(&nic->set_link_task);
1429 * Here we are performing soft reset on XGXS to
1430 * force link down. Since link is already up, we will get
1431 * link state change interrupt after this reset
1433 SPECIAL_REG_WRITE(0x80010515001E0000ULL, &bar0->dtx_control, UF);
1434 val64 = readq(&bar0->dtx_control);
1436 SPECIAL_REG_WRITE(0x80010515001E00E0ULL, &bar0->dtx_control, UF);
1437 val64 = readq(&bar0->dtx_control);
1439 SPECIAL_REG_WRITE(0x80070515001F00E4ULL, &bar0->dtx_control, UF);
1440 val64 = readq(&bar0->dtx_control);
1447 * free_tx_buffers - Free all queued Tx buffers
1448 * @nic : device private variable.
1450 * Free all queued Tx buffers.
1451 * Return Value: void
1454 static void free_tx_buffers(struct s2io_nic *nic)
1456 struct net_device *dev = nic->dev;
1457 struct sk_buff *skb;
1460 mac_info_t *mac_control;
1461 struct config_param *config;
1464 mac_control = &nic->mac_control;
1465 config = &nic->config;
1467 for (i = 0; i < config->tx_fifo_num; i++) {
1468 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1469 txdp = (TxD_t *) nic->list_info[i][j].
1472 (struct sk_buff *) ((unsigned long) txdp->
1475 memset(txdp, 0, sizeof(TxD_t));
1479 memset(txdp, 0, sizeof(TxD_t));
1483 "%s:forcibly freeing %d skbs on FIFO%d\n",
1485 mac_control->tx_curr_get_info[i].offset = 0;
1486 mac_control->tx_curr_put_info[i].offset = 0;
1491 * stop_nic - To stop the nic
1492 * @nic : device private variable.
1494 * This function does exactly the opposite of what the start_nic()
1495 * function does. This function is called to stop the device.
1500 static void stop_nic(struct s2io_nic *nic)
1502 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1503 register u64 val64 = 0;
1504 u16 interruptible, i;
1505 mac_info_t *mac_control;
1506 struct config_param *config;
1508 mac_control = &nic->mac_control;
1509 config = &nic->config;
1511 /* Disable all interrupts */
1512 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1514 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
1517 for (i = 0; i < config->rx_ring_num; i++) {
1518 val64 = readq(&bar0->prc_ctrl_n[i]);
1519 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
1520 writeq(val64, &bar0->prc_ctrl_n[i]);
1525 * fill_rx_buffers - Allocates the Rx side skbs
1526 * @nic: device private variable
1527 * @ring_no: ring number
1529 * The function allocates Rx side skbs and puts the physical
1530 * address of these buffers into the RxD buffer pointers, so that the NIC
1531 * can DMA the received frame into these locations.
1532 * The NIC supports 3 receive modes, viz.,
1534 * 1. single buffer, 2. three buffer and
1535 * 3. five buffer modes.
1536 * Each mode defines how many fragments the received frame will be split
1537 * up into by the NIC. The frame is split into L3 header, L4 Header,
1538 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
1539 * is split into 3 fragments. As of now only single buffer mode is
1542 * SUCCESS on success or an appropriate -ve value on failure.
1545 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1547 struct net_device *dev = nic->dev;
1548 struct sk_buff *skb;
1550 int off, off1, size, block_no, block_no1;
1551 int offset, offset1;
1553 u32 alloc_cnt = nic->pkt_cnt[ring_no] -
1554 atomic_read(&nic->rx_bufs_left[ring_no]);
1555 mac_info_t *mac_control;
1556 struct config_param *config;
1557 #ifdef CONFIG_2BUFF_MODE
1562 dma_addr_t rxdpphys;
1564 #ifndef CONFIG_S2IO_NAPI
1565 unsigned long flags;
1568 mac_control = &nic->mac_control;
1569 config = &nic->config;
1571 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
1572 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
1574 while (alloc_tab < alloc_cnt) {
1575 block_no = mac_control->rx_curr_put_info[ring_no].
1577 block_no1 = mac_control->rx_curr_get_info[ring_no].
1579 off = mac_control->rx_curr_put_info[ring_no].offset;
1580 off1 = mac_control->rx_curr_get_info[ring_no].offset;
1581 #ifndef CONFIG_2BUFF_MODE
1582 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
1583 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
1585 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
1586 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
1589 rxdp = nic->rx_blocks[ring_no][block_no].
1590 block_virt_addr + off;
1591 if ((offset == offset1) && (rxdp->Host_Control)) {
1592 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
1593 DBG_PRINT(INTR_DBG, " info equated\n");
1596 #ifndef CONFIG_2BUFF_MODE
1597 if (rxdp->Control_1 == END_OF_BLOCK) {
1598 mac_control->rx_curr_put_info[ring_no].
1600 mac_control->rx_curr_put_info[ring_no].
1601 block_index %= nic->block_count[ring_no];
1602 block_no = mac_control->rx_curr_put_info
1603 [ring_no].block_index;
1605 off %= (MAX_RXDS_PER_BLOCK + 1);
1606 mac_control->rx_curr_put_info[ring_no].offset =
1608 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
1609 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
1612 #ifndef CONFIG_S2IO_NAPI
1613 spin_lock_irqsave(&nic->put_lock, flags);
1614 nic->put_pos[ring_no] =
1615 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
1616 spin_unlock_irqrestore(&nic->put_lock, flags);
1619 if (rxdp->Host_Control == END_OF_BLOCK) {
1620 mac_control->rx_curr_put_info[ring_no].
1622 mac_control->rx_curr_put_info[ring_no].
1623 block_index %= nic->block_count[ring_no];
1624 block_no = mac_control->rx_curr_put_info
1625 [ring_no].block_index;
1627 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
1628 dev->name, block_no,
1629 (unsigned long long) rxdp->Control_1);
1630 mac_control->rx_curr_put_info[ring_no].offset =
1632 rxdp = nic->rx_blocks[ring_no][block_no].
1635 #ifndef CONFIG_S2IO_NAPI
1636 spin_lock_irqsave(&nic->put_lock, flags);
1637 nic->put_pos[ring_no] = (block_no *
1638 (MAX_RXDS_PER_BLOCK + 1)) + off;
1639 spin_unlock_irqrestore(&nic->put_lock, flags);
1643 #ifndef CONFIG_2BUFF_MODE
1644 if (rxdp->Control_1 & RXD_OWN_XENA)
1646 if (rxdp->Control_2 & BIT(0))
1649 mac_control->rx_curr_put_info[ring_no].
1653 #ifdef CONFIG_2BUFF_MODE
1655 * RxDs Spanning cache lines will be replenished only
1656 * if the succeeding RxD is also owned by Host. It
1657 * will always be the ((8*i)+3) and ((8*i)+6)
1658 * descriptors for the 48 byte descriptor. The offending
1659 * descriptor is of course the 3rd descriptor.
1661 rxdpphys = nic->rx_blocks[ring_no][block_no].
1662 block_dma_addr + (off * sizeof(RxD_t));
1663 if (((u64) (rxdpphys)) % 128 > 80) {
1664 rxdpnext = nic->rx_blocks[ring_no][block_no].
1665 block_virt_addr + (off + 1);
1666 if (rxdpnext->Host_Control == END_OF_BLOCK) {
1667 nextblk = (block_no + 1) %
1668 (nic->block_count[ring_no]);
1669 rxdpnext = nic->rx_blocks[ring_no]
1670 [nextblk].block_virt_addr;
1672 if (rxdpnext->Control_2 & BIT(0))
1677 #ifndef CONFIG_2BUFF_MODE
1678 skb = dev_alloc_skb(size + NET_IP_ALIGN);
1680 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
1683 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
1684 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
1687 #ifndef CONFIG_2BUFF_MODE
1688 skb_reserve(skb, NET_IP_ALIGN);
1689 memset(rxdp, 0, sizeof(RxD_t));
1690 rxdp->Buffer0_ptr = pci_map_single
1691 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
1692 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
1693 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
1694 rxdp->Host_Control = (unsigned long) (skb);
1695 rxdp->Control_1 |= RXD_OWN_XENA;
1697 off %= (MAX_RXDS_PER_BLOCK + 1);
1698 mac_control->rx_curr_put_info[ring_no].offset = off;
1700 ba = &nic->ba[ring_no][block_no][off];
1701 skb_reserve(skb, BUF0_LEN);
1702 tmp = (unsigned long) skb->data;
1705 skb->data = (void *) tmp;
1706 skb->tail = (void *) tmp;
1708 memset(rxdp, 0, sizeof(RxD_t));
1709 rxdp->Buffer2_ptr = pci_map_single
1710 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
1711 PCI_DMA_FROMDEVICE);
1713 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
1714 PCI_DMA_FROMDEVICE);
1716 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
1717 PCI_DMA_FROMDEVICE);
1719 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
1720 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
1721 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
1722 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
1723 rxdp->Host_Control = (u64) ((unsigned long) (skb));
1724 rxdp->Control_1 |= RXD_OWN_XENA;
1726 mac_control->rx_curr_put_info[ring_no].offset = off;
1728 atomic_inc(&nic->rx_bufs_left[ring_no]);
1737 * free_rx_buffers - Frees all Rx buffers
1738 * @sp: device private variable.
1740 * This function will free all Rx buffers allocated by host.
1745 static void free_rx_buffers(struct s2io_nic *sp)
1747 struct net_device *dev = sp->dev;
1748 int i, j, blk = 0, off, buf_cnt = 0;
1750 struct sk_buff *skb;
1751 mac_info_t *mac_control;
1752 struct config_param *config;
1753 #ifdef CONFIG_2BUFF_MODE
1757 mac_control = &sp->mac_control;
1758 config = &sp->config;
1760 for (i = 0; i < config->rx_ring_num; i++) {
1761 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
1762 off = j % (MAX_RXDS_PER_BLOCK + 1);
1763 rxdp = sp->rx_blocks[i][blk].block_virt_addr + off;
1765 #ifndef CONFIG_2BUFF_MODE
1766 if (rxdp->Control_1 == END_OF_BLOCK) {
1768 (RxD_t *) ((unsigned long) rxdp->
1774 if (rxdp->Host_Control == END_OF_BLOCK) {
1780 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
1781 memset(rxdp, 0, sizeof(RxD_t));
1786 (struct sk_buff *) ((unsigned long) rxdp->
1789 #ifndef CONFIG_2BUFF_MODE
1790 pci_unmap_single(sp->pdev, (dma_addr_t)
1793 HEADER_ETHERNET_II_802_3_SIZE
1794 + HEADER_802_2_SIZE +
1796 PCI_DMA_FROMDEVICE);
1798 ba = &sp->ba[i][blk][off];
1799 pci_unmap_single(sp->pdev, (dma_addr_t)
1802 PCI_DMA_FROMDEVICE);
1803 pci_unmap_single(sp->pdev, (dma_addr_t)
1806 PCI_DMA_FROMDEVICE);
1807 pci_unmap_single(sp->pdev, (dma_addr_t)
1809 dev->mtu + BUF0_LEN + 4,
1810 PCI_DMA_FROMDEVICE);
1813 atomic_dec(&sp->rx_bufs_left[i]);
1816 memset(rxdp, 0, sizeof(RxD_t));
1818 mac_control->rx_curr_put_info[i].block_index = 0;
1819 mac_control->rx_curr_get_info[i].block_index = 0;
1820 mac_control->rx_curr_put_info[i].offset = 0;
1821 mac_control->rx_curr_get_info[i].offset = 0;
1822 atomic_set(&sp->rx_bufs_left[i], 0);
1823 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
1824 dev->name, buf_cnt, i);
1829 * s2io_poll - Rx interrupt handler for NAPI support
1830 * @dev : pointer to the device structure.
1831 * @budget : The number of packets that were budgeted to be processed
1832 * during one pass through the "Poll" function.
1834 * Comes into picture only if NAPI support has been incorporated. It does
1835 * the same thing that rx_intr_handler does, but not in an interrupt context.
1836 * Also, it will process only a given number of packets.
1838 * 0 on success and 1 if there are no Rx packets to be processed.
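/*
 * How this poll routine is expected to be hooked up (a sketch of the 2.6-era
 * NAPI interface; the weight value and the exact interrupt-path call site are
 * assumptions, the wiring itself is done in the probe and interrupt code
 * elsewhere in this file):
 */
#if 0
	dev->poll = s2io_poll;
	dev->weight = 32;		/* per-poll packet budget */

	/* and, from the Rx interrupt path: */
	netif_rx_schedule(dev);		/* schedules s2io_poll() */
#endif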
1841 #ifdef CONFIG_S2IO_NAPI
1842 static int s2io_poll(struct net_device *dev, int *budget)
1844 nic_t *nic = dev->priv;
1845 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1846 int pkts_to_process = *budget, pkt_cnt = 0;
1847 register u64 val64 = 0;
1848 rx_curr_get_info_t get_info, put_info;
1849 int i, get_block, put_block, get_offset, put_offset, ring_bufs;
1850 #ifndef CONFIG_2BUFF_MODE
1853 struct sk_buff *skb;
1855 mac_info_t *mac_control;
1856 struct config_param *config;
1857 #ifdef CONFIG_2BUFF_MODE
1861 mac_control = &nic->mac_control;
1862 config = &nic->config;
1864 if (pkts_to_process > dev->quota)
1865 pkts_to_process = dev->quota;
1867 val64 = readq(&bar0->rx_traffic_int);
1868 writeq(val64, &bar0->rx_traffic_int);
1870 for (i = 0; i < config->rx_ring_num; i++) {
1871 get_info = mac_control->rx_curr_get_info[i];
1872 get_block = get_info.block_index;
1873 put_info = mac_control->rx_curr_put_info[i];
1874 put_block = put_info.block_index;
1875 ring_bufs = config->rx_cfg[i].num_rxd;
1876 rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
1878 #ifndef CONFIG_2BUFF_MODE
1879 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1881 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
1883 while ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
1884 (((get_offset + 1) % ring_bufs) != put_offset)) {
1885 if (--pkts_to_process < 0) {
1888 if (rxdp->Control_1 == END_OF_BLOCK) {
1890 (RxD_t *) ((unsigned long) rxdp->
1894 (MAX_RXDS_PER_BLOCK + 1);
1896 get_block %= nic->block_count[i];
1897 mac_control->rx_curr_get_info[i].
1898 offset = get_info.offset;
1899 mac_control->rx_curr_get_info[i].
1900 block_index = get_block;
1904 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1907 (struct sk_buff *) ((unsigned long) rxdp->
1910 DBG_PRINT(ERR_DBG, "%s: The skb is ",
1912 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
1915 val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
1916 val16 = (u16) (val64 >> 48);
1917 cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
1918 pci_unmap_single(nic->pdev, (dma_addr_t)
1921 HEADER_ETHERNET_II_802_3_SIZE +
1924 PCI_DMA_FROMDEVICE);
1925 rx_osm_handler(nic, val16, rxdp, i);
1928 get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
1930 nic->rx_blocks[i][get_block].block_virt_addr +
1932 mac_control->rx_curr_get_info[i].offset =
1936 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1938 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
1940 while (((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
1941 !(rxdp->Control_2 & BIT(0))) &&
1942 (((get_offset + 1) % ring_bufs) != put_offset)) {
1943 if (--pkts_to_process < 0) {
1946 skb = (struct sk_buff *) ((unsigned long)
1947 rxdp->Host_Control);
1949 DBG_PRINT(ERR_DBG, "%s: The skb is ",
1951 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
1955 pci_unmap_single(nic->pdev, (dma_addr_t)
1957 BUF0_LEN, PCI_DMA_FROMDEVICE);
1958 pci_unmap_single(nic->pdev, (dma_addr_t)
1960 BUF1_LEN, PCI_DMA_FROMDEVICE);
1961 pci_unmap_single(nic->pdev, (dma_addr_t)
1963 dev->mtu + BUF0_LEN + 4,
1964 PCI_DMA_FROMDEVICE);
1965 ba = &nic->ba[i][get_block][get_info.offset];
1967 rx_osm_handler(nic, rxdp, i, ba);
1970 mac_control->rx_curr_get_info[i].offset =
1973 nic->rx_blocks[i][get_block].block_virt_addr +
1976 if (get_info.offset &&
1977 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
1978 get_info.offset = 0;
1979 mac_control->rx_curr_get_info[i].
1980 offset = get_info.offset;
1982 get_block %= nic->block_count[i];
1983 mac_control->rx_curr_get_info[i].
1984 block_index = get_block;
1986 nic->rx_blocks[i][get_block].
1990 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1999 dev->quota -= pkt_cnt;
2001 netif_rx_complete(dev);
2003 for (i = 0; i < config->rx_ring_num; i++) {
2004 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2005 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2006 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2010 /* Re enable the Rx interrupts. */
2011 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2015 dev->quota -= pkt_cnt;
2018 for (i = 0; i < config->rx_ring_num; i++) {
2019 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2020 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2021 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2029 * rx_intr_handler - Rx interrupt handler
2030 * @nic: device private variable.
2032 * If the interrupt is because of a received frame or if the
2033 * receive ring contains fresh, as yet unprocessed frames, this function is
2034 * called. It picks out the RxD at which the last Rx processing had
2035 * stopped and sends the skb to the OSM's Rx handler and then increments
2041 static void rx_intr_handler(struct s2io_nic *nic)
2043 struct net_device *dev = (struct net_device *) nic->dev;
2044 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
2045 rx_curr_get_info_t get_info, put_info;
2047 struct sk_buff *skb;
2048 #ifndef CONFIG_2BUFF_MODE
2051 register u64 val64 = 0;
2052 int get_block, get_offset, put_block, put_offset, ring_bufs;
2054 mac_info_t *mac_control;
2055 struct config_param *config;
2056 #ifdef CONFIG_2BUFF_MODE
2060 mac_control = &nic->mac_control;
2061 config = &nic->config;
2064 * rx_traffic_int reg is an R1 register, hence we read and write back
2065 * the same value in the register to clear it.
2067 val64 = readq(&bar0->rx_traffic_int);
2068 writeq(val64, &bar0->rx_traffic_int);
2070 for (i = 0; i < config->rx_ring_num; i++) {
2071 get_info = mac_control->rx_curr_get_info[i];
2072 get_block = get_info.block_index;
2073 put_info = mac_control->rx_curr_put_info[i];
2074 put_block = put_info.block_index;
2075 ring_bufs = config->rx_cfg[i].num_rxd;
2076 rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
2078 #ifndef CONFIG_2BUFF_MODE
2079 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2081 spin_lock(&nic->put_lock);
2082 put_offset = nic->put_pos[i];
2083 spin_unlock(&nic->put_lock);
2084 while ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
2085 (((get_offset + 1) % ring_bufs) != put_offset)) {
2086 if (rxdp->Control_1 == END_OF_BLOCK) {
2087 rxdp = (RxD_t *) ((unsigned long)
2091 (MAX_RXDS_PER_BLOCK + 1);
2093 get_block %= nic->block_count[i];
2094 mac_control->rx_curr_get_info[i].
2095 offset = get_info.offset;
2096 mac_control->rx_curr_get_info[i].
2097 block_index = get_block;
2101 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2103 skb = (struct sk_buff *) ((unsigned long)
2104 rxdp->Host_Control);
2106 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2108 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2111 val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
2112 val16 = (u16) (val64 >> 48);
2113 cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
2114 pci_unmap_single(nic->pdev, (dma_addr_t)
2117 HEADER_ETHERNET_II_802_3_SIZE +
2120 PCI_DMA_FROMDEVICE);
2121 rx_osm_handler(nic, val16, rxdp, i);
2123 get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
2125 nic->rx_blocks[i][get_block].block_virt_addr +
2127 mac_control->rx_curr_get_info[i].offset =
2130 if ((indicate_max_pkts)
2131 && (pkt_cnt > indicate_max_pkts))
2135 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2137 spin_lock(&nic->put_lock);
2138 put_offset = nic->put_pos[i];
2139 spin_unlock(&nic->put_lock);
2140 while (((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
2141 !(rxdp->Control_2 & BIT(0))) &&
2142 (((get_offset + 1) % ring_bufs) != put_offset)) {
2143 skb = (struct sk_buff *) ((unsigned long)
2144 rxdp->Host_Control);
2146 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2148 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2152 pci_unmap_single(nic->pdev, (dma_addr_t)
2154 BUF0_LEN, PCI_DMA_FROMDEVICE);
2155 pci_unmap_single(nic->pdev, (dma_addr_t)
2157 BUF1_LEN, PCI_DMA_FROMDEVICE);
2158 pci_unmap_single(nic->pdev, (dma_addr_t)
2160 dev->mtu + BUF0_LEN + 4,
2161 PCI_DMA_FROMDEVICE);
2162 ba = &nic->ba[i][get_block][get_info.offset];
2164 rx_osm_handler(nic, rxdp, i, ba);
2167 mac_control->rx_curr_get_info[i].offset =
2170 nic->rx_blocks[i][get_block].block_virt_addr +
2173 if (get_info.offset &&
2174 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2175 get_info.offset = 0;
2176 mac_control->rx_curr_get_info[i].
2177 offset = get_info.offset;
2179 get_block %= nic->block_count[i];
2180 mac_control->rx_curr_get_info[i].
2181 block_index = get_block;
2183 nic->rx_blocks[i][get_block].
2187 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2190 if ((indicate_max_pkts)
2191 && (pkt_cnt > indicate_max_pkts))
2195 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2201 * tx_intr_handler - Transmit interrupt handler
2202 * @nic : device private variable
2204 * If an interrupt was raised to indicate DMA complete of the
2205 * Tx packet, this function is called. It identifies the last TxD
2206 * whose buffer was freed and frees all skbs whose data have already
2207 * been DMA'ed into the NIC's internal memory.
2212 static void tx_intr_handler(struct s2io_nic *nic)
2214 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2215 struct net_device *dev = (struct net_device *) nic->dev;
2216 tx_curr_get_info_t get_info, put_info;
2217 struct sk_buff *skb;
2219 register u64 val64 = 0;
2222 mac_info_t *mac_control;
2223 struct config_param *config;
2225 mac_control = &nic->mac_control;
2226 config = &nic->config;
2229 * tx_traffic_int reg is an R1 register, hence we read and write
2230 * back the same value in the register to clear it.
2232 val64 = readq(&bar0->tx_traffic_int);
2233 writeq(val64, &bar0->tx_traffic_int);
2235 for (i = 0; i < config->tx_fifo_num; i++) {
2236 get_info = mac_control->tx_curr_get_info[i];
2237 put_info = mac_control->tx_curr_put_info[i];
2238 txdlp = (TxD_t *) nic->list_info[i][get_info.offset].
2240 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2241 (get_info.offset != put_info.offset) &&
2242 (txdlp->Host_Control)) {
2243 /* Check for TxD errors */
2244 if (txdlp->Control_1 & TXD_T_CODE) {
2245 unsigned long long err;
2246 err = txdlp->Control_1 & TXD_T_CODE;
2247 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2251 skb = (struct sk_buff *) ((unsigned long)
2252 txdlp->Host_Control);
2254 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2256 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2259 nic->tx_pkt_count++;
2261 frg_cnt = skb_shinfo(skb)->nr_frags;
2263 /* For unfragmented skb */
2264 pci_unmap_single(nic->pdev, (dma_addr_t)
2265 txdlp->Buffer_Pointer,
2266 skb->len - skb->data_len,
2269 TxD_t *temp = txdlp;
2271 for (j = 0; j < frg_cnt; j++, txdlp++) {
2273 &skb_shinfo(skb)->frags[j];
2274 pci_unmap_page(nic->pdev,
2284 (sizeof(TxD_t) * config->max_txds));
2286 /* Updating the statistics block */
2287 nic->stats.tx_packets++;
2288 nic->stats.tx_bytes += skb->len;
2289 dev_kfree_skb_irq(skb);
2292 get_info.offset %= get_info.fifo_len + 1;
2293 txdlp = (TxD_t *) nic->list_info[i]
2294 [get_info.offset].list_virt_addr;
2295 mac_control->tx_curr_get_info[i].offset =
2300 spin_lock(&nic->tx_lock);
2301 if (netif_queue_stopped(dev))
2302 netif_wake_queue(dev);
2303 spin_unlock(&nic->tx_lock);
2307 * alarm_intr_handler - Alarm Interrupt handler
2308 * @nic: device private variable
2309 * Description: If the interrupt was caused by neither an Rx packet nor a Tx
2310 * completion, this function is called. If the interrupt indicates a loss of
2311 * link, the OSM link status handler is invoked; for any other alarm
2312 * interrupt, the block that raised the interrupt is reported
2313 * and a H/W reset is issued.
2318 static void alarm_intr_handler(struct s2io_nic *nic)
2320 struct net_device *dev = (struct net_device *) nic->dev;
2321 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2322 register u64 val64 = 0, err_reg = 0;
2324 /* Handling link status change error Intr */
2325 err_reg = readq(&bar0->mac_rmac_err_reg);
2326 writeq(err_reg, &bar0->mac_rmac_err_reg);
2327 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2328 schedule_work(&nic->set_link_task);
2331 /* In case of a serious error, the device will be Reset. */
2332 val64 = readq(&bar0->serr_source);
2333 if (val64 & SERR_SOURCE_ANY) {
2334 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2335 DBG_PRINT(ERR_DBG, "serious error!!\n");
2336 netif_stop_queue(dev);
2337 schedule_work(&nic->rst_timer_task);
2341 * Also, as mentioned in the latest errata sheets, if a PCC_FB_ECC
2342 * error occurs, the adapter will be recycled by disabling the
2343 * adapter enable bit and enabling it again after the device
2344 * becomes Quiescent.
2346 val64 = readq(&bar0->pcc_err_reg);
2347 writeq(val64, &bar0->pcc_err_reg);
2348 if (val64 & PCC_FB_ECC_DB_ERR) {
2349 u64 ac = readq(&bar0->adapter_control);
2350 ac &= ~(ADAPTER_CNTL_EN);
2351 writeq(ac, &bar0->adapter_control);
2352 ac = readq(&bar0->adapter_control);
2353 schedule_work(&nic->set_link_task);
2356 /* Other types of interrupts are not being handled now, TODO */
2360 * wait_for_cmd_complete - waits for a command to complete.
2361 * @sp : private member of the device structure, which is a pointer to the
2362 * s2io_nic structure.
2363 * Description: Waits for a command written to the RMAC
2364 * ADDR/DATA registers to complete and returns either success or
2365 * error depending on whether the command completed.
2367 * SUCCESS on success and FAILURE on failure.
2370 static int wait_for_cmd_complete(nic_t * sp)
2372 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2373 int ret = FAILURE, cnt = 0;
2377 val64 = readq(&bar0->rmac_addr_cmd_mem);
2378 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
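/*
 * The retry loop around the strobe check above is elided in this listing.
 * A minimal sketch of the polling pattern is given below; the helper name,
 * the 10 ms sleep and the retry count of five are illustrative assumptions,
 * not necessarily the values used by the driver.
 */
static int s2io_wait_cmd_sketch(nic_t * sp)
{
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt;

	for (cnt = 0; cnt < 5; cnt++) {
		val64 = readq(&bar0->rmac_addr_cmd_mem);
		if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING))
			return SUCCESS;	/* command has completed */
		msleep(10);		/* assumed poll interval */
	}
	return FAILURE;			/* timed out waiting for the RMAC */
}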
2391 * s2io_reset - Resets the card.
2392 * @sp : private member of the device structure.
2393 * Description: Function to Reset the card. This function then also
2394 * restores the previously saved PCI configuration space registers as
2395 * the card reset also resets the configuration space.
2400 static void s2io_reset(nic_t * sp)
2402 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2406 val64 = SW_RESET_ALL;
2407 writeq(val64, &bar0->sw_reset);
2410 * At this stage, if the PCI write is indeed completed, the
2411 * card is reset and so is the PCI Config space of the device.
2412 * So a read cannot be issued at this stage on any of the
2413 * registers to ensure the write into "sw_reset" register
2415 * Question: Is there any system call that will explicitly force
2416 * all the write commands still pending on the bus to be pushed
2418 * As of now I am just giving a 250ms delay and hoping that the
2419 * PCI write to sw_reset register is done by this time.
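	/*
	 * The delay itself is elided in this listing; per the comment above
	 * it amounts to a flat 250 ms wait (sketch only, the exact call used
	 * by the driver may differ):
	 */
	msleep(250);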
2423 /* Restore the PCI state saved during initialization. */
2424 pci_restore_state(sp->pdev);
2429 /* SXE-002: Configure link and activity LED to turn it off */
2430 subid = sp->pdev->subsystem_device;
2431 if ((subid & 0xFF) >= 0x07) {
2432 val64 = readq(&bar0->gpio_control);
2433 val64 |= 0x0000800000000000ULL;
2434 writeq(val64, &bar0->gpio_control);
2435 val64 = 0x0411040400000000ULL;
2436 writeq(val64, (void __iomem *) bar0 + 0x2700);
2439 sp->device_enabled_once = FALSE;
2443 * s2io_set_swapper - to set the swapper control on the card
2444 * @sp : private member of the device structure,
2445 * pointer to the s2io_nic structure.
2446 * Description: Function to set the swapper control on the card
2447 * correctly depending on the 'endianness' of the system.
2449 * SUCCESS on success and FAILURE on failure.
2452 static int s2io_set_swapper(nic_t * sp)
2454 struct net_device *dev = sp->dev;
2455 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2456 u64 val64, valt, valr;
2459 * Set proper endian settings and verify the same by reading
2460 * the PIF Feed-back register.
2463 val64 = readq(&bar0->pif_rd_swapper_fb);
2464 if (val64 != 0x0123456789ABCDEFULL) {
2466 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2467 0x8100008181000081ULL, /* FE=1, SE=0 */
2468 0x4200004242000042ULL, /* FE=0, SE=1 */
2469 0}; /* FE=0, SE=0 */
2472 writeq(value[i], &bar0->swapper_ctrl);
2473 val64 = readq(&bar0->pif_rd_swapper_fb);
2474 if (val64 == 0x0123456789ABCDEFULL)
2479 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2481 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2482 (unsigned long long) val64);
2487 valr = readq(&bar0->swapper_ctrl);
2490 valt = 0x0123456789ABCDEFULL;
2491 writeq(valt, &bar0->xmsi_address);
2492 val64 = readq(&bar0->xmsi_address);
2496 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2497 0x0081810000818100ULL, /* FE=1, SE=0 */
2498 0x0042420000424200ULL, /* FE=0, SE=1 */
2499 0}; /* FE=0, SE=0 */
2502 writeq((value[i] | valr), &bar0->swapper_ctrl);
2503 writeq(valt, &bar0->xmsi_address);
2504 val64 = readq(&bar0->xmsi_address);
2510 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2511 DBG_PRINT(ERR_DBG, "reads:0x%llx\n",val64);
2515 val64 = readq(&bar0->swapper_ctrl);
2516 val64 &= 0xFFFF000000000000ULL;
2520 * The device is by default set to a big endian format, so a
2521 * big endian driver need not set anything.
2523 val64 |= (SWAPPER_CTRL_TXP_FE |
2524 SWAPPER_CTRL_TXP_SE |
2525 SWAPPER_CTRL_TXD_R_FE |
2526 SWAPPER_CTRL_TXD_W_FE |
2527 SWAPPER_CTRL_TXF_R_FE |
2528 SWAPPER_CTRL_RXD_R_FE |
2529 SWAPPER_CTRL_RXD_W_FE |
2530 SWAPPER_CTRL_RXF_W_FE |
2531 SWAPPER_CTRL_XMSI_FE |
2532 SWAPPER_CTRL_XMSI_SE |
2533 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2534 writeq(val64, &bar0->swapper_ctrl);
2537 * Initially we enable all bits to make it accessible by the
2538 * driver, then we selectively enable only those bits that
2541 val64 |= (SWAPPER_CTRL_TXP_FE |
2542 SWAPPER_CTRL_TXP_SE |
2543 SWAPPER_CTRL_TXD_R_FE |
2544 SWAPPER_CTRL_TXD_R_SE |
2545 SWAPPER_CTRL_TXD_W_FE |
2546 SWAPPER_CTRL_TXD_W_SE |
2547 SWAPPER_CTRL_TXF_R_FE |
2548 SWAPPER_CTRL_RXD_R_FE |
2549 SWAPPER_CTRL_RXD_R_SE |
2550 SWAPPER_CTRL_RXD_W_FE |
2551 SWAPPER_CTRL_RXD_W_SE |
2552 SWAPPER_CTRL_RXF_W_FE |
2553 SWAPPER_CTRL_XMSI_FE |
2554 SWAPPER_CTRL_XMSI_SE |
2555 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2556 writeq(val64, &bar0->swapper_ctrl);
2558 val64 = readq(&bar0->swapper_ctrl);
2561 * Verifying if endian settings are accurate by reading a
2562 * feedback register.
2564 val64 = readq(&bar0->pif_rd_swapper_fb);
2565 if (val64 != 0x0123456789ABCDEFULL) {
2566 /* Endian settings are incorrect, calls for another look. */
2567 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2569 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2570 (unsigned long long) val64);
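/*
 * For reference, a minimal sketch of the feed-back probe used above:
 * candidate swapper settings are written one after another until the PIF
 * feed-back register reads back the expected signature. The helper name
 * and loop bound are illustrative assumptions.
 */
static int s2io_probe_swapper_sketch(XENA_dev_config_t __iomem *bar0)
{
	u64 value[] = { 0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
			0x8100008181000081ULL,	/* FE=1, SE=0 */
			0x4200004242000042ULL,	/* FE=0, SE=1 */
			0 };			/* FE=0, SE=0 */
	int i;

	for (i = 0; i < 4; i++) {
		writeq(value[i], &bar0->swapper_ctrl);
		if (readq(&bar0->pif_rd_swapper_fb) == 0x0123456789ABCDEFULL)
			return SUCCESS;		/* settings verified */
	}
	return FAILURE;				/* no candidate matched */
}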
2577 /* ********************************************************* *
2578 * Functions defined below concern the OS part of the driver *
2579 * ********************************************************* */
2582 * s2io_open - open entry point of the driver
2583 * @dev : pointer to the device structure.
2585 * This function is the open entry point of the driver. It mainly calls a
2586 * function to allocate Rx buffers and inserts them into the buffer
2587 * descriptors and then enables the Rx part of the NIC.
2589 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2593 static int s2io_open(struct net_device *dev)
2595 nic_t *sp = dev->priv;
2599 * Make sure you have link off by default every time
2600 * Nic is initialized
2602 netif_carrier_off(dev);
2603 sp->last_link_state = LINK_DOWN;
2605 /* Initialize H/W and enable interrupts */
2606 if (s2io_card_up(sp)) {
2607 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2612 /* After proper initialization of H/W, register ISR */
2613 err = request_irq((int) sp->irq, s2io_isr, SA_SHIRQ,
2617 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2622 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2623 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2628 netif_start_queue(dev);
2633 * s2io_close - close entry point of the driver
2634 * @dev : device pointer.
2636 * This is the stop entry point of the driver. It needs to undo exactly
2637 * whatever was done by the open entry point, thus it's usually referred to
2638 * as the close function. Among other things, this function mainly stops the
2639 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2641 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2645 static int s2io_close(struct net_device *dev)
2647 nic_t *sp = dev->priv;
2649 flush_scheduled_work();
2650 netif_stop_queue(dev);
2651 /* Reset card, kill tasklet and free Tx and Rx buffers. */
2654 free_irq(dev->irq, dev);
2655 sp->device_close_flag = TRUE; /* Device is shut down. */
2660 * s2io_xmit - Tx entry point of the driver
2661 * @skb : the socket buffer containing the Tx data.
2662 * @dev : device pointer.
2664 * This function is the Tx entry point of the driver. S2IO NIC supports
2665 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
2666 * NOTE: when the device can't queue the pkt, just the trans_start variable will
2669 * 0 on success & 1 on failure.
2672 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2674 nic_t *sp = dev->priv;
2675 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
2678 TxFIFO_element_t __iomem *tx_fifo;
2679 unsigned long flags;
2683 mac_info_t *mac_control;
2684 struct config_param *config;
2685 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2687 mac_control = &sp->mac_control;
2688 config = &sp->config;
2690 DBG_PRINT(TX_DBG, "%s: In S2IO Tx routine\n", dev->name);
2691 spin_lock_irqsave(&sp->tx_lock, flags);
2693 if (atomic_read(&sp->card_state) == CARD_DOWN) {
2694 DBG_PRINT(ERR_DBG, "%s: Card going down for reset\n",
2696 spin_unlock_irqrestore(&sp->tx_lock, flags);
2701 put_off = (u16) mac_control->tx_curr_put_info[queue].offset;
2702 get_off = (u16) mac_control->tx_curr_get_info[queue].offset;
2703 txdp = (TxD_t *) sp->list_info[queue][put_off].list_virt_addr;
2705 queue_len = mac_control->tx_curr_put_info[queue].fifo_len + 1;
2706 /* Avoid "put" pointer going beyond "get" pointer */
2707 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
2708 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
2709 netif_stop_queue(dev);
2711 spin_unlock_irqrestore(&sp->tx_lock, flags);
2715 mss = skb_shinfo(skb)->tso_size;
2717 txdp->Control_1 |= TXD_TCP_LSO_EN;
2718 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
2722 frg_cnt = skb_shinfo(skb)->nr_frags;
2723 frg_len = skb->len - skb->data_len;
2725 txdp->Host_Control = (unsigned long) skb;
2726 txdp->Buffer_Pointer = pci_map_single
2727 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
2728 if (skb->ip_summed == CHECKSUM_HW) {
2730 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
2734 txdp->Control_2 |= config->tx_intr_type;
2736 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
2737 TXD_GATHER_CODE_FIRST);
2738 txdp->Control_1 |= TXD_LIST_OWN_XENA;
2740 /* For fragmented SKB. */
2741 for (i = 0; i < frg_cnt; i++) {
2742 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2744 txdp->Buffer_Pointer = (u64) pci_map_page
2745 (sp->pdev, frag->page, frag->page_offset,
2746 frag->size, PCI_DMA_TODEVICE);
2747 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
2749 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
2751 tx_fifo = mac_control->tx_FIFO_start[queue];
2752 val64 = sp->list_info[queue][put_off].list_phy_addr;
2753 writeq(val64, &tx_fifo->TxDL_Pointer);
2755 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
2759 val64 |= TX_FIFO_SPECIAL_FUNC;
2761 writeq(val64, &tx_fifo->List_Control);
2763 /* Perform a PCI read to flush previous writes */
2764 val64 = readq(&bar0->general_int_status);
2767 put_off %= mac_control->tx_curr_put_info[queue].fifo_len + 1;
2768 mac_control->tx_curr_put_info[queue].offset = put_off;
2770 /* Avoid "put" pointer going beyond "get" pointer */
2771 if (((put_off + 1) % queue_len) == get_off) {
2773 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
2775 netif_stop_queue(dev);
2778 dev->trans_start = jiffies;
2779 spin_unlock_irqrestore(&sp->tx_lock, flags);
2785 * s2io_isr - ISR handler of the device .
2786 * @irq: the irq of the device.
2787 * @dev_id: a void pointer to the dev structure of the NIC.
2788 * @pt_regs: pointer to the registers pushed on the stack.
2789 * Description: This function is the ISR handler of the device. It
2790 * identifies the reason for the interrupt and calls the relevant
2791 * service routines. As a contingency measure, this ISR allocates the
2792 * recv buffers, if their numbers are below the panic value which is
2793 * presently set to 25% of the original number of rcv buffers allocated.
2795 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
2796 * IRQ_NONE: will be returned if interrupt is not from our device
2798 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2800 struct net_device *dev = (struct net_device *) dev_id;
2801 nic_t *sp = dev->priv;
2802 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2803 #ifndef CONFIG_S2IO_NAPI
2807 mac_info_t *mac_control;
2808 struct config_param *config;
2810 mac_control = &sp->mac_control;
2811 config = &sp->config;
2814 * Identify the cause for interrupt and call the appropriate
2815 * interrupt handler. Causes for the interrupt could be:
2819 * 4. Error in any functional blocks of the NIC.
2821 reason = readq(&bar0->general_int_status);
2824 /* The interrupt was not raised by Xena. */
2828 /* If Intr is because of Tx Traffic */
2829 if (reason & GEN_INTR_TXTRAFFIC) {
2830 tx_intr_handler(sp);
2833 /* If Intr is because of an error */
2834 if (reason & (GEN_ERROR_INTR))
2835 alarm_intr_handler(sp);
2837 #ifdef CONFIG_S2IO_NAPI
2838 if (reason & GEN_INTR_RXTRAFFIC) {
2839 if (netif_rx_schedule_prep(dev)) {
2840 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
2842 __netif_rx_schedule(dev);
2846 /* If Intr is because of Rx Traffic */
2847 if (reason & GEN_INTR_RXTRAFFIC) {
2848 rx_intr_handler(sp);
2853 * If the Rx buffer count is below the panic threshold then
2854 * reallocate the buffers from the interrupt handler itself,
2855 * else schedule a tasklet to reallocate the buffers.
2857 #ifndef CONFIG_S2IO_NAPI
2858 for (i = 0; i < config->rx_ring_num; i++) {
2859 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
2860 int level = rx_buffer_level(sp, rxb_size, i);
2862 if ((level == PANIC) && (!TASKLET_IN_USE)) {
2863 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
2864 DBG_PRINT(INTR_DBG, "PANIC levels\n");
2865 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
2866 DBG_PRINT(ERR_DBG, "%s:Out of memory",
2868 DBG_PRINT(ERR_DBG, " in ISR!!\n");
2869 clear_bit(0, (&sp->tasklet_status));
2872 clear_bit(0, (&sp->tasklet_status));
2873 } else if (level == LOW) {
2874 tasklet_schedule(&sp->task);
2883 * s2io_get_stats - Updates the device statistics structure.
2884 * @dev : pointer to the device structure.
2886 * This function updates the device statistics structure in the s2io_nic
2887 * structure and returns a pointer to the same.
2889 * pointer to the updated net_device_stats structure.
2892 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
2894 nic_t *sp = dev->priv;
2895 mac_info_t *mac_control;
2896 struct config_param *config;
2898 mac_control = &sp->mac_control;
2899 config = &sp->config;
2901 sp->stats.tx_errors = mac_control->stats_info->tmac_any_err_frms;
2902 sp->stats.rx_errors = mac_control->stats_info->rmac_drop_frms;
2903 sp->stats.multicast = mac_control->stats_info->rmac_vld_mcst_frms;
2904 sp->stats.rx_length_errors =
2905 mac_control->stats_info->rmac_long_frms;
2907 return (&sp->stats);
2911 * s2io_set_multicast - entry point for multicast address enable/disable.
2912 * @dev : pointer to the device structure
2914 * This function is a driver entry point which gets called by the kernel
2915 * whenever multicast addresses must be enabled/disabled. This also gets
2916 * called to set/reset promiscuous mode. Depending on the device flags, we
2917 * determine if multicast addresses must be enabled or if promiscuous mode
2918 * is to be disabled, etc.
2923 static void s2io_set_multicast(struct net_device *dev)
2926 struct dev_mc_list *mclist;
2927 nic_t *sp = dev->priv;
2928 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2929 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
2931 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
2934 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
2935 /* Enable all Multicast addresses */
2936 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
2937 &bar0->rmac_addr_data0_mem);
2938 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
2939 &bar0->rmac_addr_data1_mem);
2940 val64 = RMAC_ADDR_CMD_MEM_WE |
2941 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2942 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
2943 writeq(val64, &bar0->rmac_addr_cmd_mem);
2944 /* Wait till command completes */
2945 wait_for_cmd_complete(sp);
2948 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
2949 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
2950 /* Disable all Multicast addresses */
2951 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
2952 &bar0->rmac_addr_data0_mem);
2953 val64 = RMAC_ADDR_CMD_MEM_WE |
2954 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2955 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
2956 writeq(val64, &bar0->rmac_addr_cmd_mem);
2957 /* Wait till command completes */
2958 wait_for_cmd_complete(sp);
2961 sp->all_multi_pos = 0;
2964 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
2965 /* Put the NIC into promiscuous mode */
2966 add = &bar0->mac_cfg;
2967 val64 = readq(&bar0->mac_cfg);
2968 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
2970 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2971 writel((u32) val64, add);
2972 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2973 writel((u32) (val64 >> 32), (add + 4));
2975 val64 = readq(&bar0->mac_cfg);
2976 sp->promisc_flg = 1;
2977 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
2979 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
2980 /* Remove the NIC from promiscuous mode */
2981 add = &bar0->mac_cfg;
2982 val64 = readq(&bar0->mac_cfg);
2983 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
2985 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2986 writel((u32) val64, add);
2987 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2988 writel((u32) (val64 >> 32), (add + 4));
2990 val64 = readq(&bar0->mac_cfg);
2991 sp->promisc_flg = 0;
2992 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
2996 /* Update individual M_CAST address list */
2997 if ((!sp->m_cast_flg) && dev->mc_count) {
2999 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3000 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3002 DBG_PRINT(ERR_DBG, "can be added, please enable ");
3003 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3007 prev_cnt = sp->mc_addr_count;
3008 sp->mc_addr_count = dev->mc_count;
3010 /* Clear out the previous list of Mc in the H/W. */
3011 for (i = 0; i < prev_cnt; i++) {
3012 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3013 &bar0->rmac_addr_data0_mem);
3014 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3015 &bar0->rmac_addr_data1_mem);
3016 val64 = RMAC_ADDR_CMD_MEM_WE |
3017 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3018 RMAC_ADDR_CMD_MEM_OFFSET
3019 (MAC_MC_ADDR_START_OFFSET + i);
3020 writeq(val64, &bar0->rmac_addr_cmd_mem);
3022 /* Wait till command completes */
3023 if (wait_for_cmd_complete(sp)) {
3024 DBG_PRINT(ERR_DBG, "%s: Adding ",
3026 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3031 /* Create the new Rx filter list and update the same in H/W. */
3032 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3033 i++, mclist = mclist->next) {
3034 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
3036 for (j = 0; j < ETH_ALEN; j++) {
3037 mac_addr |= mclist->dmi_addr[j];
3041 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3042 &bar0->rmac_addr_data0_mem);
3043 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3044 &bar0->rmac_addr_data1_mem);
3046 val64 = RMAC_ADDR_CMD_MEM_WE |
3047 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3048 RMAC_ADDR_CMD_MEM_OFFSET
3049 (i + MAC_MC_ADDR_START_OFFSET);
3050 writeq(val64, &bar0->rmac_addr_cmd_mem);
3052 /* Wait till command completes */
3053 if (wait_for_cmd_complete(sp)) {
3054 DBG_PRINT(ERR_DBG, "%s: Adding ",
3056 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3064 * s2io_set_mac_addr - Programs the Xframe mac address
3065 * @dev : pointer to the device structure.
3066 * @addr: a uchar pointer to the new mac address which is to be set.
3067 * Description : This procedure will program the Xframe to receive
3068 * frames with the new MAC address.
3069 * Return value: SUCCESS on success and an appropriate (-)ve integer
3070 * as defined in errno.h file on failure.
3073 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3075 nic_t *sp = dev->priv;
3076 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3077 register u64 val64, mac_addr = 0;
3081 * Set the new MAC address as the new unicast filter and reflect this
3082 * change on the device address registered with the OS. It will be
3085 for (i = 0; i < ETH_ALEN; i++) {
3087 mac_addr |= addr[i];
3090 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3091 &bar0->rmac_addr_data0_mem);
3094 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3095 RMAC_ADDR_CMD_MEM_OFFSET(0);
3096 writeq(val64, &bar0->rmac_addr_cmd_mem);
3097 /* Wait till command completes */
3098 if (wait_for_cmd_complete(sp)) {
3099 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
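/*
 * The shift that pairs with the OR in the loop above is elided. A minimal
 * sketch of packing a 6-byte MAC address into the u64 expected by the RMAC
 * address registers (hypothetical helper, for illustration only):
 */
static u64 s2io_pack_mac_sketch(const u8 * addr)
{
	u64 mac_addr = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac_addr <<= 8;		/* make room for the next octet */
		mac_addr |= addr[i];
	}
	return mac_addr;
}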
3107 * s2io_ethtool_sset - Sets different link parameters.
3108 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3109 * @info: pointer to the structure with parameters given by ethtool to set
3112 * The function sets different link parameters provided by the user onto
3118 static int s2io_ethtool_sset(struct net_device *dev,
3119 struct ethtool_cmd *info)
3121 nic_t *sp = dev->priv;
3122 if ((info->autoneg == AUTONEG_ENABLE) ||
3123 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3126 s2io_close(sp->dev);
3134 * s2io_ethtool_gset - Return link specific information.
3135 * @sp : private member of the device structure, pointer to the
3136 * s2io_nic structure.
3137 * @info : pointer to the structure with parameters given by ethtool
3138 * to return link information.
3140 * Returns link specific information like speed, duplex etc. to ethtool.
3142 * return 0 on success.
3145 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3147 nic_t *sp = dev->priv;
3148 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3149 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3150 info->port = PORT_FIBRE;
3151 /* info->transceiver?? TODO */
3153 if (netif_carrier_ok(sp->dev)) {
3154 info->speed = 10000;
3155 info->duplex = DUPLEX_FULL;
3161 info->autoneg = AUTONEG_DISABLE;
3166 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3167 * @sp : private member of the device structure, which is a pointer to the
3168 * s2io_nic structure.
3169 * @info : pointer to the structure with parameters given by ethtool to
3170 * return driver information.
3172 * Returns driver specific information like name, version etc. to ethtool.
3177 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3178 struct ethtool_drvinfo *info)
3180 nic_t *sp = dev->priv;
3182 strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3183 strncpy(info->version, s2io_driver_version,
3184 sizeof(s2io_driver_version));
3185 strncpy(info->fw_version, "", 32);
3186 strncpy(info->bus_info, pci_name(sp->pdev), 32);
3187 info->regdump_len = XENA_REG_SPACE;
3188 info->eedump_len = XENA_EEPROM_SPACE;
3189 info->testinfo_len = S2IO_TEST_LEN;
3190 info->n_stats = S2IO_STAT_LEN;
3194 * s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
3195 * @sp: private member of the device structure, which is a pointer to the
3196 * s2io_nic structure.
3197 * @regs : pointer to the structure with parameters given by ethtool for
3198 * dumping the registers.
3199 * @reg_space: The input argument into which all the registers are dumped.
3201 * Dumps the entire register space of xFrame NIC into the user given
3207 static void s2io_ethtool_gregs(struct net_device *dev,
3208 struct ethtool_regs *regs, void *space)
3212 u8 *reg_space = (u8 *) space;
3213 nic_t *sp = dev->priv;
3215 regs->len = XENA_REG_SPACE;
3216 regs->version = sp->pdev->subsystem_device;
3218 for (i = 0; i < regs->len; i += 8) {
3219 reg = readq(sp->bar0 + i);
3220 memcpy((reg_space + i), &reg, 8);
3225 * s2io_phy_id - timer function that alternates adapter LED.
3226 * @data : address of the private member of the device structure, which
3227 * is a pointer to the s2io_nic structure, provided as an u32.
3228 * Description: This is actually the timer function that alternates the
3229 * adapter LED bit of the adapter control register to set/reset every time on
3230 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
3231 * once every second.
3233 static void s2io_phy_id(unsigned long data)
3235 nic_t *sp = (nic_t *) data;
3236 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3240 subid = sp->pdev->subsystem_device;
3241 if ((subid & 0xFF) >= 0x07) {
3242 val64 = readq(&bar0->gpio_control);
3243 val64 ^= GPIO_CTRL_GPIO_0;
3244 writeq(val64, &bar0->gpio_control);
3246 val64 = readq(&bar0->adapter_control);
3247 val64 ^= ADAPTER_LED_ON;
3248 writeq(val64, &bar0->adapter_control);
3251 mod_timer(&sp->id_timer, jiffies + HZ / 2);
3255 * s2io_ethtool_idnic - To physically identify the nic on the system.
3256 * @sp : private member of the device structure, which is a pointer to the
3257 * s2io_nic structure.
3258 * @id : pointer to the structure with identification parameters given by
3260 * Description: Used to physically identify the NIC on the system.
3261 * The Link LED will blink for a time specified by the user for
3263 * NOTE: The Link has to be Up to be able to blink the LED. Hence
3264 * identification is possible only if its link is up.
3266 * int , returns 0 on success
3269 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3271 u64 val64 = 0, last_gpio_ctrl_val;
3272 nic_t *sp = dev->priv;
3273 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3276 subid = sp->pdev->subsystem_device;
3277 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3278 if ((subid & 0xFF) < 0x07) {
3279 val64 = readq(&bar0->adapter_control);
3280 if (!(val64 & ADAPTER_CNTL_EN)) {
3282 "Adapter Link down, cannot blink LED\n");
3286 if (sp->id_timer.function == NULL) {
3287 init_timer(&sp->id_timer);
3288 sp->id_timer.function = s2io_phy_id;
3289 sp->id_timer.data = (unsigned long) sp;
3291 mod_timer(&sp->id_timer, jiffies);
3293 msleep(data * 1000);
3296 del_timer_sync(&sp->id_timer);
3298 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
3299 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3300 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3307 * s2io_ethtool_getpause_data - Pause frame generation and reception.
3308 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3309 * @ep : pointer to the structure with pause parameters given by ethtool.
3311 * Returns the Pause frame generation and reception capability of the NIC.
3315 static void s2io_ethtool_getpause_data(struct net_device *dev,
3316 struct ethtool_pauseparam *ep)
3319 nic_t *sp = dev->priv;
3320 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3322 val64 = readq(&bar0->rmac_pause_cfg);
3323 if (val64 & RMAC_PAUSE_GEN_ENABLE)
3324 ep->tx_pause = TRUE;
3325 if (val64 & RMAC_PAUSE_RX_ENABLE)
3326 ep->rx_pause = TRUE;
3327 ep->autoneg = FALSE;
3331 * s2io_ethtool_setpause_data - set/reset pause frame generation.
3332 * @sp : private member of the device structure, which is a pointer to the
3333 * s2io_nic structure.
3334 * @ep : pointer to the structure with pause parameters given by ethtool.
3336 * It can be used to set or reset Pause frame generation or reception
3337 * support of the NIC.
3339 * int, returns 0 on Success
3342 static int s2io_ethtool_setpause_data(struct net_device *dev,
3343 struct ethtool_pauseparam *ep)
3346 nic_t *sp = dev->priv;
3347 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3349 val64 = readq(&bar0->rmac_pause_cfg);
3351 val64 |= RMAC_PAUSE_GEN_ENABLE;
3353 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3355 val64 |= RMAC_PAUSE_RX_ENABLE;
3357 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3358 writeq(val64, &bar0->rmac_pause_cfg);
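/*
 * The ethtool conditionals that drive the bit updates above are elided in
 * this listing. A minimal sketch of the intended logic (hypothetical helper,
 * illustration only):
 */
static void s2io_pause_bits_sketch(struct ethtool_pauseparam *ep, u64 * val64)
{
	if (ep->tx_pause)
		*val64 |= RMAC_PAUSE_GEN_ENABLE;	/* generate pause frames */
	else
		*val64 &= ~RMAC_PAUSE_GEN_ENABLE;
	if (ep->rx_pause)
		*val64 |= RMAC_PAUSE_RX_ENABLE;		/* honour received pause */
	else
		*val64 &= ~RMAC_PAUSE_RX_ENABLE;
}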
3363 * read_eeprom - reads 4 bytes of data from user given offset.
3364 * @sp : private member of the device structure, which is a pointer to the
3365 * s2io_nic structure.
3366 * @off : offset at which the data must be written
3367 * @data : It's an output parameter where the data read at the given
3370 * Will read 4 bytes of data from the user given offset and return the
3372 * NOTE: Will allow reading only the part of the EEPROM visible through the
3375 * -1 on failure and 0 on success.
3378 #define S2IO_DEV_ID 5
3379 static int read_eeprom(nic_t * sp, int off, u32 * data)
3384 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3386 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3387 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3388 I2C_CONTROL_CNTL_START;
3389 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3391 while (exit_cnt < 5) {
3392 val64 = readq(&bar0->i2c_control);
3393 if (I2C_CONTROL_CNTL_END(val64)) {
3394 *data = I2C_CONTROL_GET_DATA(val64);
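/*
 * The remainder of the polling loop (delay, retry bookkeeping, return value)
 * is elided above. A minimal sketch of the pattern, assuming a 50 ms poll
 * interval and five attempts (hypothetical helper for illustration):
 */
static int s2io_i2c_poll_sketch(XENA_dev_config_t __iomem *bar0, u32 * data)
{
	int exit_cnt = 0;
	u64 val64;

	while (exit_cnt < 5) {
		val64 = readq(&bar0->i2c_control);
		if (I2C_CONTROL_CNTL_END(val64)) {
			*data = I2C_CONTROL_GET_DATA(val64);
			return 0;	/* transaction finished */
		}
		msleep(50);		/* assumed poll interval */
		exit_cnt++;
	}
	return -1;			/* timed out */
}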
3406 * write_eeprom - actually writes the relevant part of the data value.
3407 * @sp : private member of the device structure, which is a pointer to the
3408 * s2io_nic structure.
3409 * @off : offset at which the data must be written
3410 * @data : The data that is to be written
3411 * @cnt : Number of bytes of the data that are actually to be written into
3412 * the Eeprom. (max of 3)
3414 * Actually writes the relevant part of the data value into the Eeprom
3415 * through the I2C bus.
3417 * 0 on success, -1 on failure.
3420 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3422 int exit_cnt = 0, ret = -1;
3424 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3426 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3427 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3428 I2C_CONTROL_CNTL_START;
3429 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3431 while (exit_cnt < 5) {
3432 val64 = readq(&bar0->i2c_control);
3433 if (I2C_CONTROL_CNTL_END(val64)) {
3434 if (!(val64 & I2C_CONTROL_NACK))
3446 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
3447 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3448 * @eeprom : pointer to the user level structure provided by ethtool,
3449 * containing all relevant information.
3450 * @data_buf : user defined value to be written into Eeprom.
3451 * Description: Reads the values stored in the Eeprom at given offset
3452 * for a given length. Stores these values into the input argument data
3453 * buffer 'data_buf' and returns these to the caller (ethtool).
3458 static int s2io_ethtool_geeprom(struct net_device *dev,
3459 struct ethtool_eeprom *eeprom, u8 * data_buf)
3462 nic_t *sp = dev->priv;
3464 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
3466 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3467 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
3469 for (i = 0; i < eeprom->len; i += 4) {
3470 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
3471 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
3475 memcpy((data_buf + i), &valid, 4);
3481 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3482 * @sp : private member of the device structure, which is a pointer to the
3483 * s2io_nic structure.
3484 * @eeprom : pointer to the user level structure provided by ethtool,
3485 * containing all relevant information.
3486 * @data_buf : user defined value to be written into Eeprom.
3488 * Tries to write the user provided value in the Eeprom, at the offset
3489 * given by the user.
3491 * 0 on success, -EFAULT on failure.
3494 static int s2io_ethtool_seeprom(struct net_device *dev,
3495 struct ethtool_eeprom *eeprom,
3498 int len = eeprom->len, cnt = 0;
3499 u32 valid = 0, data;
3500 nic_t *sp = dev->priv;
3502 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
3504 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
3505 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
3511 data = (u32) data_buf[cnt] & 0x000000FF;
3513 valid = (u32) (data << 24);
3517 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
3519 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
3521 "write into the specified offset\n");
3532 * s2io_register_test - reads and writes into all clock domains.
3533 * @sp : private member of the device structure, which is a pointer to the
3534 * s2io_nic structure.
3535 * @data : variable that returns the result of each of the test conducted b
3538 * Read and write into all clock domains. The NIC has 3 clock domains;
3539 * the test verifies that registers in all three regions are accessible.
3544 static int s2io_register_test(nic_t * sp, uint64_t * data)
3546 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3550 val64 = readq(&bar0->pcc_enable);
3551 if (val64 != 0xff00000000000000ULL) {
3553 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
3556 val64 = readq(&bar0->rmac_pause_cfg);
3557 if (val64 != 0xc000ffff00000000ULL) {
3559 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
3562 val64 = readq(&bar0->rx_queue_cfg);
3563 if (val64 != 0x0808080808080808ULL) {
3565 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
3568 val64 = readq(&bar0->xgxs_efifo_cfg);
3569 if (val64 != 0x000000001923141EULL) {
3571 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
3574 val64 = 0x5A5A5A5A5A5A5A5AULL;
3575 writeq(val64, &bar0->xmsi_data);
3576 val64 = readq(&bar0->xmsi_data);
3577 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
3579 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
3582 val64 = 0xA5A5A5A5A5A5A5A5ULL;
3583 writeq(val64, &bar0->xmsi_data);
3584 val64 = readq(&bar0->xmsi_data);
3585 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
3587 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
3595 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
3596 * @sp : private member of the device structure, which is a pointer to the
3597 * s2io_nic structure.
3598 * @data:variable that returns the result of each of the test conducted by
3601 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
3607 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
3612 /* Test Write Error at offset 0 */
3613 if (!write_eeprom(sp, 0, 0, 3))
3616 /* Test Write at offset 4f0 */
3617 if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
3619 if (read_eeprom(sp, 0x4F0, &ret_data))
3622 if (ret_data != 0x01234567)
3625 /* Reset the EEPROM data to FFFF */
3626 write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
3628 /* Test Write Request Error at offset 0x7c */
3629 if (!write_eeprom(sp, 0x07C, 0, 3))
3632 /* Test Write Request at offset 0x7fc */
3633 if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
3635 if (read_eeprom(sp, 0x7FC, &ret_data))
3638 if (ret_data != 0x01234567)
3641 /* Reset the EEPROM data to FFFF */
3642 write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
3644 /* Test Write Error at offset 0x80 */
3645 if (!write_eeprom(sp, 0x080, 0, 3))
3648 /* Test Write Error at offset 0xfc */
3649 if (!write_eeprom(sp, 0x0FC, 0, 3))
3652 /* Test Write Error at offset 0x100 */
3653 if (!write_eeprom(sp, 0x100, 0, 3))
3656 /* Test Write Error at offset 4ec */
3657 if (!write_eeprom(sp, 0x4EC, 0, 3))
3665 * s2io_bist_test - invokes the MemBist test of the card .
3666 * @sp : private member of the device structure, which is a pointer to the
3667 * s2io_nic structure.
3668 * @data:variable that returns the result of each of the test conducted by
3671 * This invokes the MemBist test of the card. We give around
3672 * 2 secs time for the Test to complete. If it's still not complete
3673 * within this period, we consider that the test failed.
3675 * 0 on success and -1 on failure.
3678 static int s2io_bist_test(nic_t * sp, uint64_t * data)
3681 int cnt = 0, ret = -1;
3683 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3684 bist |= PCI_BIST_START;
3685 pci_write_config_word(sp->pdev, PCI_BIST, bist);
3688 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3689 if (!(bist & PCI_BIST_START)) {
3690 *data = (bist & PCI_BIST_CODE_MASK);
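/*
 * The retry loop that bounds the BIST wait is elided in this listing. The
 * docblock above allows roughly 2 seconds; a sketch of such a loop (interval
 * and bound are assumptions, hypothetical helper) could look like this:
 */
static int s2io_bist_poll_sketch(struct pci_dev *pdev, uint64_t * data)
{
	u8 bist;
	int cnt;

	for (cnt = 0; cnt < 20; cnt++) {	/* ~2 s at 100 ms steps */
		pci_read_config_byte(pdev, PCI_BIST, &bist);
		if (!(bist & PCI_BIST_START)) {
			*data = (bist & PCI_BIST_CODE_MASK);
			return 0;	/* BIST finished */
		}
		msleep(100);
	}
	return -1;				/* BIST did not complete */
}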
3702 * s2io_link_test - verifies the link state of the nic
3703 * @sp : private member of the device structure, which is a pointer to the
3704 * s2io_nic structure.
3705 * @data: variable that returns the result of each of the test conducted by
3708 * The function verifies the link state of the NIC and updates the input
3709 * argument 'data' appropriately.
3714 static int s2io_link_test(nic_t * sp, uint64_t * data)
3716 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3719 val64 = readq(&bar0->adapter_status);
3720 if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
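/*
 * The branch bodies are elided above; per the docblock the test simply
 * reports the fault state through @data. Illustrative sketch (the 1/0
 * convention and helper name are assumptions):
 */
static int s2io_link_state_sketch(u64 adapter_status, uint64_t * data)
{
	*data = (adapter_status & ADAPTER_STATUS_RMAC_LOCAL_FAULT) ? 1 : 0;
	return 0;
}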
3727 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
3728 * @sp - private member of the device structure, which is a pointer to the
3729 * s2io_nic structure.
3730 * @data - variable that returns the result of each of the test
3731 * conducted by the driver.
3733 * This is one of the offline test that tests the read and write
3734 * access to the RldRam chip on the NIC.
3739 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
3741 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3743 int cnt, iteration = 0, test_pass = 0;
3745 val64 = readq(&bar0->adapter_control);
3746 val64 &= ~ADAPTER_ECC_EN;
3747 writeq(val64, &bar0->adapter_control);
3749 val64 = readq(&bar0->mc_rldram_test_ctrl);
3750 val64 |= MC_RLDRAM_TEST_MODE;
3751 writeq(val64, &bar0->mc_rldram_test_ctrl);
3753 val64 = readq(&bar0->mc_rldram_mrs);
3754 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
3755 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3757 val64 |= MC_RLDRAM_MRS_ENABLE;
3758 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3760 while (iteration < 2) {
3761 val64 = 0x55555555aaaa0000ULL;
3762 if (iteration == 1) {
3763 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3765 writeq(val64, &bar0->mc_rldram_test_d0);
3767 val64 = 0xaaaa5a5555550000ULL;
3768 if (iteration == 1) {
3769 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3771 writeq(val64, &bar0->mc_rldram_test_d1);
3773 val64 = 0x55aaaaaaaa5a0000ULL;
3774 if (iteration == 1) {
3775 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3777 writeq(val64, &bar0->mc_rldram_test_d2);
3779 val64 = (u64) (0x0000003fffff0000ULL);
3780 writeq(val64, &bar0->mc_rldram_test_add);
3783 val64 = MC_RLDRAM_TEST_MODE;
3784 writeq(val64, &bar0->mc_rldram_test_ctrl);
3787 MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
3789 writeq(val64, &bar0->mc_rldram_test_ctrl);
3791 for (cnt = 0; cnt < 5; cnt++) {
3792 val64 = readq(&bar0->mc_rldram_test_ctrl);
3793 if (val64 & MC_RLDRAM_TEST_DONE)
3801 val64 = MC_RLDRAM_TEST_MODE;
3802 writeq(val64, &bar0->mc_rldram_test_ctrl);
3804 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
3805 writeq(val64, &bar0->mc_rldram_test_ctrl);
3807 for (cnt = 0; cnt < 5; cnt++) {
3808 val64 = readq(&bar0->mc_rldram_test_ctrl);
3809 if (val64 & MC_RLDRAM_TEST_DONE)
3817 val64 = readq(&bar0->mc_rldram_test_ctrl);
3818 if (val64 & MC_RLDRAM_TEST_PASS)
3833 * s2io_ethtool_test - conducts 6 tests to determine the health of the card.
3834 * @sp : private member of the device structure, which is a pointer to the
3835 * s2io_nic structure.
3836 * @ethtest : pointer to a ethtool command specific structure that will be
3837 * returned to the user.
3838 * @data : variable that returns the result of each of the test
3839 * conducted by the driver.
3841 * This function conducts 6 tests ( 4 offline and 2 online) to determine
3842 * the health of the card.
3847 static void s2io_ethtool_test(struct net_device *dev,
3848 struct ethtool_test *ethtest,
3851 nic_t *sp = dev->priv;
3852 int orig_state = netif_running(sp->dev);
3854 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
3855 /* Offline Tests. */
3857 s2io_close(sp->dev);
3858 s2io_set_swapper(sp);
3860 s2io_set_swapper(sp);
3862 if (s2io_register_test(sp, &data[0]))
3863 ethtest->flags |= ETH_TEST_FL_FAILED;
3866 s2io_set_swapper(sp);
3868 if (s2io_rldram_test(sp, &data[3]))
3869 ethtest->flags |= ETH_TEST_FL_FAILED;
3872 s2io_set_swapper(sp);
3874 if (s2io_eeprom_test(sp, &data[1]))
3875 ethtest->flags |= ETH_TEST_FL_FAILED;
3877 if (s2io_bist_test(sp, &data[4]))
3878 ethtest->flags |= ETH_TEST_FL_FAILED;
3888 "%s: is not up, cannot run test\n",
3897 if (s2io_link_test(sp, &data[2]))
3898 ethtest->flags |= ETH_TEST_FL_FAILED;
3907 static void s2io_get_ethtool_stats(struct net_device *dev,
3908 struct ethtool_stats *estats,
3912 nic_t *sp = dev->priv;
3913 StatInfo_t *stat_info = sp->mac_control.stats_info;
3915 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms);
3916 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets);
3917 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
3918 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms);
3919 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms);
3920 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
3921 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms);
3922 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
3923 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip);
3924 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip);
3925 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp);
3926 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp);
3927 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
3928 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp);
3929 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms);
3930 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets);
3931 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
3932 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
3933 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms);
3934 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms);
3935 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
3936 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
3937 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
3938 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms);
3939 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms);
3940 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms);
3941 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms);
3942 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms);
3943 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip);
3944 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
3945 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
3946 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip);
3947 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp);
3948 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
3949 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp);
3950 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp);
3951 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt);
3952 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip);
3953 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
3956 static int s2io_ethtool_get_regs_len(struct net_device *dev)
3958 return (XENA_REG_SPACE);
3962 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
3964 nic_t *sp = dev->priv;
3966 return (sp->rx_csum);
3969 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
3971 nic_t *sp = dev->priv;
3981 static int s2io_get_eeprom_len(struct net_device *dev)
3983 return (XENA_EEPROM_SPACE);
3986 static int s2io_ethtool_self_test_count(struct net_device *dev)
3988 return (S2IO_TEST_LEN);
3991 static void s2io_ethtool_get_strings(struct net_device *dev,
3992 u32 stringset, u8 * data)
3994 switch (stringset) {
3996 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
3999 memcpy(data, &ethtool_stats_keys,
4000 sizeof(ethtool_stats_keys));
4004 static int s2io_ethtool_get_stats_count(struct net_device *dev)
4006 return (S2IO_STAT_LEN);
4009 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4012 dev->features |= NETIF_F_IP_CSUM;
4014 dev->features &= ~NETIF_F_IP_CSUM;
4020 static struct ethtool_ops netdev_ethtool_ops = {
4021 .get_settings = s2io_ethtool_gset,
4022 .set_settings = s2io_ethtool_sset,
4023 .get_drvinfo = s2io_ethtool_gdrvinfo,
4024 .get_regs_len = s2io_ethtool_get_regs_len,
4025 .get_regs = s2io_ethtool_gregs,
4026 .get_link = ethtool_op_get_link,
4027 .get_eeprom_len = s2io_get_eeprom_len,
4028 .get_eeprom = s2io_ethtool_geeprom,
4029 .set_eeprom = s2io_ethtool_seeprom,
4030 .get_pauseparam = s2io_ethtool_getpause_data,
4031 .set_pauseparam = s2io_ethtool_setpause_data,
4032 .get_rx_csum = s2io_ethtool_get_rx_csum,
4033 .set_rx_csum = s2io_ethtool_set_rx_csum,
4034 .get_tx_csum = ethtool_op_get_tx_csum,
4035 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4036 .get_sg = ethtool_op_get_sg,
4037 .set_sg = ethtool_op_set_sg,
4039 .get_tso = ethtool_op_get_tso,
4040 .set_tso = ethtool_op_set_tso,
4042 .self_test_count = s2io_ethtool_self_test_count,
4043 .self_test = s2io_ethtool_test,
4044 .get_strings = s2io_ethtool_get_strings,
4045 .phys_id = s2io_ethtool_idnic,
4046 .get_stats_count = s2io_ethtool_get_stats_count,
4047 .get_ethtool_stats = s2io_get_ethtool_stats
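/*
 * For context, the ops table above is attached to the net device during
 * probe; the exact call site is outside this excerpt, but the usual idiom
 * of 2.6-era drivers is sketched here (hypothetical helper):
 */
static void s2io_attach_ethtool_sketch(struct net_device *dev)
{
	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
}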
4051 * s2io_ioctl - Entry point for the Ioctl
4052 * @dev : Device pointer.
4053 * @ifr : An IOCTL specific structure that can contain a pointer to
4054 * a proprietary structure used to pass information to the driver.
4055 * @cmd : This is used to distinguish between the different commands that
4056 * can be passed to the IOCTL functions.
4058 * This function has support for ethtool, adding multiple MAC addresses on
4059 * the NIC and some DBG commands for the util tool.
4061 * Currently the IOCTL supports no operations, hence by default this
4062 * function returns OP NOT SUPPORTED value.
4065 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
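/*
 * The body is elided in this listing; per the description above the whole
 * routine reduces to rejecting the request. Minimal sketch (hypothetical
 * helper name):
 */
static int s2io_ioctl_sketch(struct net_device *dev, struct ifreq *rq, int cmd)
{
	return -EOPNOTSUPP;	/* no private ioctls are supported */
}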
4071 * s2io_change_mtu - entry point to change MTU size for the device.
4072 * @dev : device pointer.
4073 * @new_mtu : the new MTU size for the device.
4074 * Description: A driver entry point to change MTU size for the device.
4075 * Before changing the MTU the device must be stopped.
4077 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4081 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
4083 nic_t *sp = dev->priv;
4084 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4087 if (netif_running(dev)) {
4088 DBG_PRINT(ERR_DBG, "%s: Must be stopped to ", dev->name);
4089 DBG_PRINT(ERR_DBG, "change its MTU \n");
4093 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4094 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
4099 /* Set the new MTU into the PYLD register of the NIC */
4101 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
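/*
 * The assignments feeding the write above are elided. Per the comment, the
 * new MTU is mirrored into dev->mtu and programmed into the payload-length
 * register; a sketch of that sequence (hypothetical helper):
 */
static void s2io_program_mtu_sketch(struct net_device *dev,
				    XENA_dev_config_t __iomem *bar0,
				    int new_mtu)
{
	u64 val64 = new_mtu;		/* payload length in bytes */

	dev->mtu = new_mtu;		/* reflect the change to the stack */
	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
}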
4109 * s2io_tasklet - Bottom half of the ISR.
4110 * @dev_addr : address of the device structure in dma_addr_t format.
4112 * This is the tasklet or the bottom half of the ISR. This is
4113 * an extension of the ISR which is scheduled by the scheduler to be run
4114 * when the load on the CPU is low. All low priority tasks of the ISR can
4115 * be pushed into the tasklet. For now the tasklet is used only to
4116 * replenish the Rx buffers in the Rx buffer descriptors.
4121 static void s2io_tasklet(unsigned long dev_addr)
4123 struct net_device *dev = (struct net_device *) dev_addr;
4124 nic_t *sp = dev->priv;
4126 mac_info_t *mac_control;
4127 struct config_param *config;
4129 mac_control = &sp->mac_control;
4130 config = &sp->config;
4132 if (!TASKLET_IN_USE) {
4133 for (i = 0; i < config->rx_ring_num; i++) {
4134 ret = fill_rx_buffers(sp, i);
4135 if (ret == -ENOMEM) {
4136 DBG_PRINT(ERR_DBG, "%s: Out of ",
4138 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
4140 } else if (ret == -EFILL) {
4142 "%s: Rx Ring %d is full\n",
4147 clear_bit(0, (&sp->tasklet_status));
4152 * s2io_set_link - Set the Link status
4153 * @data: long pointer to device private structure
4154 * Description: Sets the link status for the adapter
4157 static void s2io_set_link(unsigned long data)
4159 nic_t *nic = (nic_t *) data;
4160 struct net_device *dev = nic->dev;
4161 XENA_dev_config_t __iomem *bar0 = nic->bar0;
4165 if (test_and_set_bit(0, &(nic->link_state))) {
4166 /* The card is being reset, no point doing anything */
4170 subid = nic->pdev->subsystem_device;
4172 * Allow a small delay for the NIC's self-initiated
4173 * cleanup to complete.
4177 val64 = readq(&bar0->adapter_status);
4178 if (verify_xena_quiescence(val64, nic->device_enabled_once)) {
4179 if (LINK_IS_UP(val64)) {
4180 val64 = readq(&bar0->adapter_control);
4181 val64 |= ADAPTER_CNTL_EN;
4182 writeq(val64, &bar0->adapter_control);
4183 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4184 val64 = readq(&bar0->gpio_control);
4185 val64 |= GPIO_CTRL_GPIO_0;
4186 writeq(val64, &bar0->gpio_control);
4187 val64 = readq(&bar0->gpio_control);
4189 val64 |= ADAPTER_LED_ON;
4190 writeq(val64, &bar0->adapter_control);
4192 val64 = readq(&bar0->adapter_status);
4193 if (!LINK_IS_UP(val64)) {
4194 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4195 DBG_PRINT(ERR_DBG, " Link down");
4196 DBG_PRINT(ERR_DBG, "after ");
4197 DBG_PRINT(ERR_DBG, "enabling ");
4198 DBG_PRINT(ERR_DBG, "device \n");
4200 if (nic->device_enabled_once == FALSE) {
4201 nic->device_enabled_once = TRUE;
4203 s2io_link(nic, LINK_UP);
4205 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4206 val64 = readq(&bar0->gpio_control);
4207 val64 &= ~GPIO_CTRL_GPIO_0;
4208 writeq(val64, &bar0->gpio_control);
4209 val64 = readq(&bar0->gpio_control);
4211 s2io_link(nic, LINK_DOWN);
4213 } else { /* NIC is not Quiescent. */
4214 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4215 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4216 netif_stop_queue(dev);
4218 clear_bit(0, &(nic->link_state));
4221 static void s2io_card_down(nic_t * sp)
4224 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4225 unsigned long flags;
4226 register u64 val64 = 0;
4228 /* If s2io_set_link task is executing, wait till it completes. */
4229 while (test_and_set_bit(0, &(sp->link_state)))
4231 atomic_set(&sp->card_state, CARD_DOWN);
4233 /* disable Tx and Rx traffic on the NIC */
4237 tasklet_kill(&sp->task);
4239 /* Check if the device is Quiescent and then Reset the NIC */
4241 val64 = readq(&bar0->adapter_status);
4242 if (verify_xena_quiescence(val64, sp->device_enabled_once)) {
4250 "s2io_close:Device not Quiescent ");
4251 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
4252 (unsigned long long) val64);
4256 spin_lock_irqsave(&sp->tx_lock, flags);
4259 /* Free all unused Tx and Rx buffers */
4260 free_tx_buffers(sp);
4261 free_rx_buffers(sp);
4263 spin_unlock_irqrestore(&sp->tx_lock, flags);
4264 clear_bit(0, &(sp->link_state));
4267 static int s2io_card_up(nic_t * sp)
4270 mac_info_t *mac_control;
4271 struct config_param *config;
4272 struct net_device *dev = (struct net_device *) sp->dev;
4274 /* Initialize the H/W I/O registers */
4275 if (init_nic(sp) != 0) {
4276 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4282 * Initializing the Rx buffers. For now we are considering only 1
4283 * Rx ring and initializing buffers into 30 Rx blocks
4285 mac_control = &sp->mac_control;
4286 config = &sp->config;
4288 for (i = 0; i < config->rx_ring_num; i++) {
4289 if ((ret = fill_rx_buffers(sp, i))) {
4290 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
4293 free_rx_buffers(sp);
4296 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4297 atomic_read(&sp->rx_bufs_left[i]));
4300 /* Setting its receive mode */
4301 s2io_set_multicast(dev);
4303 /* Enable tasklet for the device */
4304 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4306 /* Enable Rx Traffic and interrupts on the NIC */
4307 if (start_nic(sp)) {
4308 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4309 tasklet_kill(&sp->task);
4311 free_irq(dev->irq, dev);
4312 free_rx_buffers(sp);
4316 atomic_set(&sp->card_state, CARD_UP);
4321 * s2io_restart_nic - Resets the NIC.
4322 * @data : long pointer to the device private structure
4324 * This function is scheduled to be run by the s2io_tx_watchdog
4325 * function after 0.5 secs to reset the NIC. The idea is to reduce
4326 * the run time of the watch dog routine which is run holding a
4330 static void s2io_restart_nic(unsigned long data)
4332 struct net_device *dev = (struct net_device *) data;
4333 nic_t *sp = dev->priv;
4336 if (s2io_card_up(sp)) {
4337 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4340 netif_wake_queue(dev);
4341 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4346 * s2io_tx_watchdog - Watchdog for transmit side.
4347 * @dev : Pointer to net device structure
4349 * This function is triggered if the Tx Queue is stopped
4350 * for a pre-defined amount of time when the Interface is still up.
4351 * If the Interface is jammed in such a situation, the hardware is
4352 * reset (by s2io_close) and restarted again (by s2io_open) to
4353 * overcome any problem that might have been caused in the hardware.
4358 static void s2io_tx_watchdog(struct net_device *dev)
4360 nic_t *sp = dev->priv;
4362 if (netif_carrier_ok(dev)) {
4363 schedule_work(&sp->rst_timer_task);
4368 * rx_osm_handler - To perform some OS related operations on SKB.
4369 * @sp: private member of the device structure,pointer to s2io_nic structure.
4370 * @skb : the socket buffer pointer.
4371 * @len : length of the packet
4372 * @cksum : FCS checksum of the frame.
4373 * @ring_no : the ring from which this RxD was extracted.
4375 * This function is called by the Rx interrupt service routine to perform
4376 * some OS related operations on the SKB before passing it to the upper
4377 * layers. It mainly checks if the checksum is OK, if so adds it to the
4378 * SKB's cksum variable, increments the Rx packet count and passes the SKB
4379 * to the upper layer. If the checksum is wrong, it increments the Rx
4380 * packet error count, frees the SKB and returns error.
4382 * SUCCESS on success and -1 on failure.
#ifndef CONFIG_2BUFF_MODE
static int rx_osm_handler(nic_t * sp, u16 len, RxD_t * rxdp, int ring_no)
#else
static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no,
			  buffAdd_t * ba)
#endif
{
	struct net_device *dev = (struct net_device *) sp->dev;
	struct sk_buff *skb =
	    (struct sk_buff *) ((unsigned long) rxdp->Host_Control);
	u16 l3_csum, l4_csum;
#ifdef CONFIG_2BUFF_MODE
	int buf0_len, buf2_len;
	unsigned char *buff;
#endif

	l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && (sp->rx_csum)) {
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else {
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	if (rxdp->Control_1 & RXD_T_CODE) {
		unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
		DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
			  dev->name, err);
	}
#ifdef CONFIG_2BUFF_MODE
	buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
	buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
#endif
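
	/*
	 * Hand the frame to the stack.  In 1-buffer mode the SKB already
	 * holds the whole frame; in 2-buffer mode the buffer-0 bytes
	 * (ba->ba_0) are prepended to the SKB and the buffer-2 length is
	 * accounted for before the protocol field is set below.
	 */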
#ifndef CONFIG_2BUFF_MODE
	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, dev);
#else
	buff = skb_push(skb, buf0_len);
	memcpy(buff, ba->ba_0, buf0_len);
	skb_put(skb, buf2_len);
	skb->protocol = eth_type_trans(skb, dev);
#endif

#ifdef CONFIG_S2IO_NAPI
	netif_receive_skb(skb);
#else
	netif_rx(skb);
#endif

	dev->last_rx = jiffies;
	sp->stats.rx_packets++;
#ifndef CONFIG_2BUFF_MODE
	sp->stats.rx_bytes += len;
#else
	sp->stats.rx_bytes += buf0_len + buf2_len;
#endif

	atomic_dec(&sp->rx_bufs_left[ring_no]);
	rxdp->Host_Control = 0;
	return SUCCESS;
}

/**
 * s2io_link - stops/starts the Tx queue.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @link : indicates whether link is UP/DOWN.
 * Description:
 * This function stops/starts the Tx queue depending on whether the link
 * status of the NIC is down or up. This is called by the Alarm
 * interrupt handler whenever a link change interrupt comes up.
 */
static void s2io_link(nic_t * sp, int link)
{
	struct net_device *dev = (struct net_device *) sp->dev;

	if (link != sp->last_link_state) {
		if (link == LINK_DOWN) {
			DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
			netif_carrier_off(dev);
		} else {
			DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
			netif_carrier_on(dev);
		}
	}
	sp->last_link_state = link;
}

/**
 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * Description:
 * This function initializes a few of the PCI and PCI-X configuration registers
 * with recommended values.
 */
static void s2io_init_pci(nic_t * sp)
{
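	/*
	 * All of the tweaks below poke the PCI-X command register; per the
	 * standard PCI-X command register layout, bit 0 enables data parity
	 * error recovery, bit 1 enables relaxed ordering, bits 3:2 select
	 * the maximum memory read byte count (MMRBC) and bits 6:4 the
	 * maximum number of outstanding split transactions, which is what
	 * the masks 0xFFF3, 0xFF8F and 0xfffd below are manipulating.
	 */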
	u16 pci_cmd;

	/* Enable Data Parity Error Recovery in PCI-X command register. */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			     &(sp->pcix_cmd));
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			      (sp->pcix_cmd | 1));
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			     &(sp->pcix_cmd));

	/* Set the PErr Response bit in PCI command register. */
	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(sp->pdev, PCI_COMMAND,
			      (pci_cmd | PCI_COMMAND_PARITY));
	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);

	/* Set MMRB count to 1024 in PCI-X Command register. */
	sp->pcix_cmd &= 0xFFF3;
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			      (sp->pcix_cmd | (0x1 << 2)));	/* MMRBC 1K */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			     &(sp->pcix_cmd));

	/* Setting Maximum outstanding splits based on system type. */
	sp->pcix_cmd &= 0xFF8F;
	sp->pcix_cmd |= XENA_MAX_OUTSTANDING_SPLITS(0x1);	/* 2 splits. */
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			      sp->pcix_cmd);
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			     &(sp->pcix_cmd));

	/* Forcibly disabling relaxed ordering capability of the card. */
	sp->pcix_cmd &= 0xfffd;
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			      sp->pcix_cmd);
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			     &(sp->pcix_cmd));
}

MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
MODULE_LICENSE("GPL");
module_param(tx_fifo_num, int, 0);
module_param_array(tx_fifo_len, int, NULL, 0);
module_param(rx_ring_num, int, 0);
module_param_array(rx_ring_sz, int, NULL, 0);
module_param(Stats_refresh_time, int, 0);
module_param(rmac_pause_time, int, 0);
module_param(mc_pause_threshold_q0q3, int, 0);
module_param(mc_pause_threshold_q4q7, int, 0);
module_param(shared_splits, int, 0);
module_param(tmac_util_period, int, 0);
module_param(rmac_util_period, int, 0);
#ifndef CONFIG_S2IO_NAPI
module_param(indicate_max_pkts, int, 0);
#endif
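
/*
 * Example only (hypothetical values): the FIFO/ring geometry above can be
 * overridden at load time, e.g.
 *
 *	modprobe s2io tx_fifo_num=2 tx_fifo_len=1024,1024 rx_ring_num=2
 *
 * Unspecified parameters fall back to the driver's built-in defaults.
 */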

/**
 * s2io_init_nic - Initialization of the adapter.
 * @pdev : structure containing the PCI related information of the device.
 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
 * Description:
 * The function initializes an adapter identified by the pci_dev structure.
 * All OS related initialization including memory and device structure and
 * initialization of the device private variable is done. Also the swapper
 * control register is initialized to enable read and write into the I/O
 * registers of the device.
 * Returns 0 on success and negative on failure.
 */
static int __devinit
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
	struct net_device *dev;
	char *dev_name = "S2IO 10GE NIC";
	int dma_flag = FALSE;
	u32 mac_up, mac_down;
	u64 val64 = 0, tmp64 = 0;
	XENA_dev_config_t __iomem *bar0 = NULL;
	mac_info_t *mac_control;
	struct config_param *config;

	DBG_PRINT(ERR_DBG, "Loading S2IO driver with %s\n",
		  s2io_driver_version);

	if ((ret = pci_enable_device(pdev))) {
		DBG_PRINT(ERR_DBG,
			  "s2io_init_nic: pci_enable_device failed\n");
		return ret;
	}

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
		dma_flag = TRUE;
		if (pci_set_consistent_dma_mask
		    (pdev, DMA_64BIT_MASK)) {
			DBG_PRINT(ERR_DBG,
				  "Unable to obtain 64bit DMA for \
				  consistent allocations\n");
			pci_disable_device(pdev);
			return -ENOMEM;
		}
	} else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
	} else {
		pci_disable_device(pdev);
		return -ENOMEM;
	}

	if (pci_request_regions(pdev, s2io_driver_name)) {
		DBG_PRINT(ERR_DBG, "Request Regions failed\n");
		pci_disable_device(pdev);
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(nic_t));
	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Device allocation failed\n");
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		return -ENODEV;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, dev);
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* Private member variable initialized to s2io NIC structure */
	sp = dev->priv;
	memset(sp, 0, sizeof(nic_t));
	sp->dev = dev;
	sp->pdev = pdev;
	sp->vendor_id = pdev->vendor;
	sp->device_id = pdev->device;
	sp->high_dma_flag = dma_flag;
	sp->irq = pdev->irq;
	sp->device_enabled_once = FALSE;
	strcpy(sp->name, dev_name);

	/* Initialize some PCI/PCI-X fields of the NIC. */
	s2io_init_pci(sp);

	/*
	 * Setting the device configuration parameters.
	 * Most of these parameters can be specified by the user during
	 * module insertion as they are module loadable parameters. If
	 * these parameters are not specified during load time, they
	 * are initialized with default values.
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	/* Tx side parameters. */
	tx_fifo_len[0] = DEFAULT_FIFO_LEN;	/* Default value. */
	config->tx_fifo_num = tx_fifo_num;
	for (i = 0; i < MAX_TX_FIFOS; i++) {
		config->tx_cfg[i].fifo_len = tx_fifo_len[i];
		config->tx_cfg[i].fifo_priority = i;
	}

	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
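	/*
	 * Utilization based Tx interrupts are the default; if any FIFO is
	 * configured with fewer than 65 descriptors it is treated as too
	 * small for that scheme and the whole device falls back to
	 * per-list Tx interrupts in the loop below.
	 */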
	for (i = 0; i < config->tx_fifo_num; i++) {
		config->tx_cfg[i].f_no_snoop =
		    (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
		if (config->tx_cfg[i].fifo_len < 65) {
			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
			break;
		}
	}
	config->max_txds = MAX_SKB_FRAGS;

	/* Rx side parameters. */
	rx_ring_sz[0] = SMALL_BLK_CNT;	/* Default value. */
	config->rx_ring_num = rx_ring_num;
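	/*
	 * rx_ring_sz[] is specified in Rx blocks rather than individual
	 * descriptors, so the per-ring descriptor count below is
	 * rx_ring_sz[i] * (MAX_RXDS_PER_BLOCK + 1).
	 */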
	for (i = 0; i < MAX_RX_RINGS; i++) {
		config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
		    (MAX_RXDS_PER_BLOCK + 1);
		config->rx_cfg[i].ring_priority = i;
	}

	for (i = 0; i < rx_ring_num; i++) {
		config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
		config->rx_cfg[i].f_no_snoop =
		    (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
	}

	/* Setting Mac Control parameters */
	mac_control->rmac_pause_time = rmac_pause_time;
	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;

	/* Initialize Ring buffer parameters. */
	for (i = 0; i < config->rx_ring_num; i++)
		atomic_set(&sp->rx_bufs_left[i], 0);

	/* initialize the shared memory used by the NIC and the host */
	if (init_shared_mem(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
			  dev->name);
		ret = -ENOMEM;
		goto mem_alloc_failed;
	}

	sp->bar0 = ioremap(pci_resource_start(pdev, 0),
			   pci_resource_len(pdev, 0));
	if (!sp->bar0) {
		DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar0_remap_failed;
	}

	sp->bar1 = ioremap(pci_resource_start(pdev, 2),
			   pci_resource_len(pdev, 2));
	if (!sp->bar1) {
		DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar1_remap_failed;
	}

	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) sp->bar0;

	/* Initializing the BAR1 address as the start of the FIFO pointer. */
	for (j = 0; j < MAX_TX_FIFOS; j++) {
		mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
		    (sp->bar1 + (j * 0x00020000));
	}
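	/* Each Tx FIFO gets its own 0x00020000-byte (128 KB) window in BAR1,
	 * hence the fixed stride used above. */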

	/* Driver entry points */
	dev->open = &s2io_open;
	dev->stop = &s2io_close;
	dev->hard_start_xmit = &s2io_xmit;
	dev->get_stats = &s2io_get_stats;
	dev->set_multicast_list = &s2io_set_multicast;
	dev->do_ioctl = &s2io_ioctl;
	dev->change_mtu = &s2io_change_mtu;
	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
	/*
	 * will use eth_mac_addr() for dev->set_mac_address
	 * mac address will be set every time dev->open() is called
	 */
#ifdef CONFIG_S2IO_NAPI
	dev->poll = s2io_poll;
#endif

	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	if (sp->high_dma_flag == TRUE)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_TSO;
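	/*
	 * Offloads advertised above: scatter/gather and IP checksum offload
	 * unconditionally, high-memory DMA only when a 64-bit DMA mask was
	 * obtained, and TSO.
	 */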

	dev->tx_timeout = &s2io_tx_watchdog;
	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
	INIT_WORK(&sp->rst_timer_task,
		  (void (*)(void *)) s2io_restart_nic, dev);
	INIT_WORK(&sp->set_link_task,
		  (void (*)(void *)) s2io_set_link, sp);

	pci_save_state(sp->pdev);

	/* Setting swapper control on the NIC, for proper reset operation */
	if (s2io_set_swapper(sp)) {
		DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
			  dev->name);
		goto set_swap_failed;
	}

	/* Fix for all "FFs" MAC address problems observed on Alpha platforms */
	fix_mac_address(sp);

	/*
	 * Setting swapper control on the NIC, so the MAC address can be read.
	 */
	if (s2io_set_swapper(sp)) {
		DBG_PRINT(ERR_DBG,
			  "%s: S2IO: swapper settings are wrong\n",
			  dev->name);
		goto set_swap_failed;
	}

	/*
	 * MAC address initialization.
	 * For now only one mac address will be read and used.
	 */
	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
	writeq(val64, &bar0->rmac_addr_cmd_mem);
	wait_for_cmd_complete(sp);

	tmp64 = readq(&bar0->rmac_addr_data0_mem);
	mac_down = (u32) tmp64;
	mac_up = (u32) (tmp64 >> 32);

	memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
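	/*
	 * The station address comes back in the upper six bytes of the
	 * 64-bit data register: bytes 0-3 of the MAC address are taken from
	 * the high 32 bits (mac_up) and bytes 4-5 from bits 31:16 of the
	 * low 32 bits (mac_down), as unpacked below.
	 */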
	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);

	DBG_PRINT(INIT_DBG,
		  "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
		  sp->def_mac_addr[0].mac_addr[0],
		  sp->def_mac_addr[0].mac_addr[1],
		  sp->def_mac_addr[0].mac_addr[2],
		  sp->def_mac_addr[0].mac_addr[3],
		  sp->def_mac_addr[0].mac_addr[4],
		  sp->def_mac_addr[0].mac_addr[5]);

	/* Set the factory defined MAC address initially */
	dev->addr_len = ETH_ALEN;
	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);

	/*
	 * Initialize the tasklet status and link state flags
	 * and the card state parameter.
	 */
	atomic_set(&(sp->card_state), 0);
	sp->tasklet_status = 0;

	/* Initialize spinlocks */
	spin_lock_init(&sp->tx_lock);
#ifndef CONFIG_S2IO_NAPI
	spin_lock_init(&sp->put_lock);
#endif

	/*
	 * SXE-002: Configure link and activity LED to init state
	 * on driver load.
	 */
	subid = sp->pdev->subsystem_device;
	if ((subid & 0xFF) >= 0x07) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *) bar0 + 0x2700);
		val64 = readq(&bar0->gpio_control);
	}

	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */

	if (register_netdev(dev)) {
		DBG_PRINT(ERR_DBG, "Device registration failed\n");
		goto register_failed;
	}

	/*
	 * Make Link state as off at this point, when the Link change
	 * interrupt comes the state will be automatically changed to
	 * the right state.
	 */
	netif_carrier_off(dev);
	sp->last_link_state = LINK_DOWN;

	free_shared_mem(sp);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);

/**
 * s2io_rem_nic - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device. This could
 * be in response to a Hot plug event or when the driver is to be removed
 * from memory.
 */
static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
	struct net_device *dev =
	    (struct net_device *) pci_get_drvdata(pdev);
	nic_t *sp;

	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
		return;
	}

	sp = dev->priv;
	unregister_netdev(dev);

	free_shared_mem(sp);

	pci_disable_device(pdev);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}

/**
 * s2io_starter - Entry point for the driver
 * Description: This function is the entry point for the driver. It verifies
 * the module loadable parameters and initializes PCI configuration space.
 */

int __init s2io_starter(void)
{
	return pci_module_init(&s2io_driver);
}

/**
 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
 */

static void s2io_closer(void)
{
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}

module_init(s2io_starter);
module_exit(s2io_closer);