1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code parts that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used
31 * rx_ring_len: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO.
36 * in PCI Configuration space.
37 ************************************************************************/
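/*
 * Illustrative usage of the parameters documented above, assuming the
 * module parameters are exported under the same names as the variables
 * declared further down (tx_fifo_len, rx_ring_sz); the values shown are
 * only examples, not a recommended configuration:
 *
 *     modprobe s2io tx_fifo_num=2 tx_fifo_len=4096,4096 \
 *                   rx_ring_num=2 rx_ring_sz=16,16
 *
 * i.e. tx_fifo_len and rx_ring_sz are arrays with one entry per Tx FIFO
 * and per Rx ring respectively.
 */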
39 #include <linux/config.h>
40 #include <linux/module.h>
41 #include <linux/types.h>
42 #include <linux/errno.h>
43 #include <linux/ioport.h>
44 #include <linux/pci.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/kernel.h>
47 #include <linux/netdevice.h>
48 #include <linux/etherdevice.h>
49 #include <linux/skbuff.h>
50 #include <linux/init.h>
51 #include <linux/delay.h>
52 #include <linux/stddef.h>
53 #include <linux/ioctl.h>
54 #include <linux/timex.h>
55 #include <linux/sched.h>
56 #include <linux/ethtool.h>
57 #include <linux/version.h>
58 #include <linux/workqueue.h>
61 #include <asm/system.h>
62 #include <asm/uaccess.h>
66 #include "s2io-regs.h"
68 /* S2io Driver name & version. */
69 static char s2io_driver_name[] = "s2io";
70 static char s2io_driver_version[] = "Version 1.7.7.1";
73 * Cards with the following subsystem_ids have a link state indication
74 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
75 * The macro below identifies these cards given the subsystem_id.
77 #define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
78 (((subid >= 0x600B) && (subid <= 0x600D)) || \
79 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0
81 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
82 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
83 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
86 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
89 if ((sp->pkt_cnt[ring] - rxb_size) > 16) {
91 if ((sp->pkt_cnt[ring] - rxb_size) < MAX_RXDS_PER_BLOCK) {
99 /* Ethtool related variables and Macros. */
100 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
101 "Register test\t(offline)",
102 "Eeprom test\t(offline)",
103 "Link test\t(online)",
104 "RLDRAM test\t(offline)",
105 "BIST Test\t(offline)"
108 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
110 {"tmac_data_octets"},
114 {"tmac_pause_ctrl_frms"},
115 {"tmac_any_err_frms"},
116 {"tmac_vld_ip_octets"},
124 {"rmac_data_octets"},
125 {"rmac_fcs_err_frms"},
127 {"rmac_vld_mcst_frms"},
128 {"rmac_vld_bcst_frms"},
129 {"rmac_in_rng_len_err_frms"},
131 {"rmac_pause_ctrl_frms"},
132 {"rmac_discarded_frms"},
133 {"rmac_usized_frms"},
134 {"rmac_osized_frms"},
136 {"rmac_jabber_frms"},
144 {"rmac_err_drp_udp"},
146 {"rmac_accepted_ip"},
150 #define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
151 #define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
153 #define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
154 #define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
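/*
 * For reference, with the five self-test strings defined in s2io_gstrings[]
 * above, S2IO_TEST_LEN evaluates to 5 and S2IO_STRINGS_LEN to
 * 5 * ETH_GSTRING_LEN bytes; S2IO_STAT_LEN is likewise the number of
 * entries in ethtool_stats_keys[].
 */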
158 * Constants to be programmed into the Xena's registers, to configure
162 #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
165 static u64 default_mdio_cfg[] = {
167 0xC001010000000000ULL, 0xC0010100000000E0ULL,
168 0xC0010100008000E4ULL,
169 /* Remove Reset from PMA PLL */
170 0xC001010000000000ULL, 0xC0010100000000E0ULL,
171 0xC0010100000000E4ULL,
175 static u64 default_dtx_cfg[] = {
176 0x8000051500000000ULL, 0x80000515000000E0ULL,
177 0x80000515D93500E4ULL, 0x8001051500000000ULL,
178 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
179 0x8002051500000000ULL, 0x80020515000000E0ULL,
180 0x80020515F21000E4ULL,
181 /* Set PADLOOPBACKN */
182 0x8002051500000000ULL, 0x80020515000000E0ULL,
183 0x80020515B20000E4ULL, 0x8003051500000000ULL,
184 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
185 0x8004051500000000ULL, 0x80040515000000E0ULL,
186 0x80040515B20000E4ULL, 0x8005051500000000ULL,
187 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
189 /* Remove PADLOOPBACKN */
190 0x8002051500000000ULL, 0x80020515000000E0ULL,
191 0x80020515F20000E4ULL, 0x8003051500000000ULL,
192 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
193 0x8004051500000000ULL, 0x80040515000000E0ULL,
194 0x80040515F20000E4ULL, 0x8005051500000000ULL,
195 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
201 * Constants for Fixing the MacAddress problem seen mostly on
204 static u64 fix_mac[] = {
205 0x0060000000000000ULL, 0x0060600000000000ULL,
206 0x0040600000000000ULL, 0x0000600000000000ULL,
207 0x0020600000000000ULL, 0x0060600000000000ULL,
208 0x0020600000000000ULL, 0x0060600000000000ULL,
209 0x0020600000000000ULL, 0x0060600000000000ULL,
210 0x0020600000000000ULL, 0x0060600000000000ULL,
211 0x0020600000000000ULL, 0x0060600000000000ULL,
212 0x0020600000000000ULL, 0x0060600000000000ULL,
213 0x0020600000000000ULL, 0x0060600000000000ULL,
214 0x0020600000000000ULL, 0x0060600000000000ULL,
215 0x0020600000000000ULL, 0x0060600000000000ULL,
216 0x0020600000000000ULL, 0x0060600000000000ULL,
217 0x0020600000000000ULL, 0x0000600000000000ULL,
218 0x0040600000000000ULL, 0x0060600000000000ULL,
222 /* Module Loadable parameters. */
223 static unsigned int tx_fifo_num = 1;
224 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
225 {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
226 static unsigned int rx_ring_num = 1;
227 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
228 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
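/*
 * The "[0 ...(N - 1)] = 0" form used above is a GCC range designated
 * initializer: it zero-initializes every element, so e.g.
 *
 *     static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
 *         {[0 ...(MAX_TX_FIFOS - 1)] = 0};
 *
 * leaves all MAX_TX_FIFOS entries at 0 until overridden at module load.
 */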
229 static unsigned int Stats_refresh_time = 4;
230 static unsigned int rmac_pause_time = 65535;
231 static unsigned int mc_pause_threshold_q0q3 = 187;
232 static unsigned int mc_pause_threshold_q4q7 = 187;
233 static unsigned int shared_splits;
234 static unsigned int tmac_util_period = 5;
235 static unsigned int rmac_util_period = 5;
236 #ifndef CONFIG_S2IO_NAPI
237 static unsigned int indicate_max_pkts;
242 * This table lists all the devices that this driver supports.
244 static struct pci_device_id s2io_tbl[] __devinitdata = {
245 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
246 PCI_ANY_ID, PCI_ANY_ID},
247 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
248 PCI_ANY_ID, PCI_ANY_ID},
249 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
250 PCI_ANY_ID, PCI_ANY_ID},
251 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
252 PCI_ANY_ID, PCI_ANY_ID},
256 MODULE_DEVICE_TABLE(pci, s2io_tbl);
258 static struct pci_driver s2io_driver = {
260 .id_table = s2io_tbl,
261 .probe = s2io_init_nic,
262 .remove = __devexit_p(s2io_rem_nic),
265 /* A helper macro used by both the init and free shared_mem functions. */
266 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
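/*
 * Worked example of TXD_MEM_PAGE_CNT, which is simply a ceiling division.
 * Assuming (for illustration only) that lst_per_page works out to 8 TxD
 * lists per page:
 *
 *     TXD_MEM_PAGE_CNT(100, 8) = (100 + 8 - 1) / 8 = 13
 *
 * i.e. a 100-entry FIFO needs 13 pages, not the 12 a plain integer
 * division would suggest.
 */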
269 * init_shared_mem - Allocation and Initialization of Memory
270 * @nic: Device private variable.
271 * Description: The function allocates all the memory areas shared
272 * between the NIC and the driver. This includes Tx descriptors,
273 * Rx descriptors and the statistics block.
276 static int init_shared_mem(struct s2io_nic *nic)
279 void *tmp_v_addr, *tmp_v_addr_next;
280 dma_addr_t tmp_p_addr, tmp_p_addr_next;
281 RxD_block_t *pre_rxd_blk = NULL;
283 int lst_size, lst_per_page;
284 struct net_device *dev = nic->dev;
285 #ifdef CONFIG_2BUFF_MODE
290 mac_info_t *mac_control;
291 struct config_param *config;
293 mac_control = &nic->mac_control;
294 config = &nic->config;
297 /* Allocation and initialization of TXDLs in FIFOs */
299 for (i = 0; i < config->tx_fifo_num; i++) {
300 size += config->tx_cfg[i].fifo_len;
302 if (size > MAX_AVAILABLE_TXDS) {
303 DBG_PRINT(ERR_DBG, "%s: Total number of Tx FIFOs ",
305 DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
306 DBG_PRINT(ERR_DBG, "that can be used\n");
310 lst_size = (sizeof(TxD_t) * config->max_txds);
311 lst_per_page = PAGE_SIZE / lst_size;
313 for (i = 0; i < config->tx_fifo_num; i++) {
314 int fifo_len = config->tx_cfg[i].fifo_len;
315 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
316 nic->list_info[i] = kmalloc(list_holder_size, GFP_KERNEL);
317 if (!nic->list_info[i]) {
319 "Malloc failed for list_info\n");
322 memset(nic->list_info[i], 0, list_holder_size);
324 for (i = 0; i < config->tx_fifo_num; i++) {
325 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
327 mac_control->tx_curr_put_info[i].offset = 0;
328 mac_control->tx_curr_put_info[i].fifo_len =
329 config->tx_cfg[i].fifo_len - 1;
330 mac_control->tx_curr_get_info[i].offset = 0;
331 mac_control->tx_curr_get_info[i].fifo_len =
332 config->tx_cfg[i].fifo_len - 1;
333 for (j = 0; j < page_num; j++) {
337 tmp_v = pci_alloc_consistent(nic->pdev,
341 "pci_alloc_consistent ");
342 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
345 while (k < lst_per_page) {
346 int l = (j * lst_per_page) + k;
347 if (l == config->tx_cfg[i].fifo_len)
349 nic->list_info[i][l].list_virt_addr =
350 tmp_v + (k * lst_size);
351 nic->list_info[i][l].list_phy_addr =
352 tmp_p + (k * lst_size);
359 /* Allocation and initialization of RXDs in Rings */
361 for (i = 0; i < config->rx_ring_num; i++) {
362 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
363 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
364 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
366 DBG_PRINT(ERR_DBG, "RxDs per Block");
369 size += config->rx_cfg[i].num_rxd;
370 nic->block_count[i] =
371 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
373 config->rx_cfg[i].num_rxd - nic->block_count[i];
376 for (i = 0; i < config->rx_ring_num; i++) {
377 mac_control->rx_curr_get_info[i].block_index = 0;
378 mac_control->rx_curr_get_info[i].offset = 0;
379 mac_control->rx_curr_get_info[i].ring_len =
380 config->rx_cfg[i].num_rxd - 1;
381 mac_control->rx_curr_put_info[i].block_index = 0;
382 mac_control->rx_curr_put_info[i].offset = 0;
383 mac_control->rx_curr_put_info[i].ring_len =
384 config->rx_cfg[i].num_rxd - 1;
386 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
387 /* Allocating all the Rx blocks */
388 for (j = 0; j < blk_cnt; j++) {
389 #ifndef CONFIG_2BUFF_MODE
390 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
392 size = SIZE_OF_BLOCK;
394 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
396 if (tmp_v_addr == NULL) {
398 * In case of failure, free_shared_mem()
399 * is called, which should free any
400 * memory that was allocated till the
403 nic->rx_blocks[i][j].block_virt_addr =
407 memset(tmp_v_addr, 0, size);
408 nic->rx_blocks[i][j].block_virt_addr = tmp_v_addr;
409 nic->rx_blocks[i][j].block_dma_addr = tmp_p_addr;
411 /* Interlinking all Rx Blocks */
412 for (j = 0; j < blk_cnt; j++) {
413 tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr;
415 nic->rx_blocks[i][(j + 1) %
416 blk_cnt].block_virt_addr;
417 tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr;
419 nic->rx_blocks[i][(j + 1) %
420 blk_cnt].block_dma_addr;
422 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
423 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
426 #ifndef CONFIG_2BUFF_MODE
427 pre_rxd_blk->reserved_2_pNext_RxD_block =
428 (unsigned long) tmp_v_addr_next;
430 pre_rxd_blk->pNext_RxD_Blk_physical =
431 (u64) tmp_p_addr_next;
435 #ifdef CONFIG_2BUFF_MODE
437 * Allocation of storage for buffer addresses in 2BUFF mode,
438 * and of the buffers as well.
440 for (i = 0; i < config->rx_ring_num; i++) {
442 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
443 nic->ba[i] = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
447 for (j = 0; j < blk_cnt; j++) {
449 nic->ba[i][j] = kmalloc((sizeof(buffAdd_t) *
450 (MAX_RXDS_PER_BLOCK + 1)),
454 while (k != MAX_RXDS_PER_BLOCK) {
455 ba = &nic->ba[i][j][k];
457 ba->ba_0_org = kmalloc
458 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
461 tmp = (unsigned long) ba->ba_0_org;
463 tmp &= ~((unsigned long) ALIGN_SIZE);
464 ba->ba_0 = (void *) tmp;
466 ba->ba_1_org = kmalloc
467 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
470 tmp = (unsigned long) ba->ba_1_org;
472 tmp &= ~((unsigned long) ALIGN_SIZE);
473 ba->ba_1 = (void *) tmp;
480 /* Allocation and initialization of Statistics block */
481 size = sizeof(StatInfo_t);
482 mac_control->stats_mem = pci_alloc_consistent
483 (nic->pdev, size, &mac_control->stats_mem_phy);
485 if (!mac_control->stats_mem) {
487 * In case of failure, free_shared_mem() is called, which
488 * should free any memory that was allocated till the
493 mac_control->stats_mem_sz = size;
495 tmp_v_addr = mac_control->stats_mem;
496 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
497 memset(tmp_v_addr, 0, size);
499 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
500 (unsigned long long) tmp_p_addr);
506 * free_shared_mem - Free the allocated Memory
507 * @nic: Device private variable.
508 * Description: This function is to free all memory locations allocated by
509 * the init_shared_mem() function and return it to the kernel.
512 static void free_shared_mem(struct s2io_nic *nic)
514 int i, j, blk_cnt, size;
516 dma_addr_t tmp_p_addr;
517 mac_info_t *mac_control;
518 struct config_param *config;
519 int lst_size, lst_per_page;
525 mac_control = &nic->mac_control;
526 config = &nic->config;
528 lst_size = (sizeof(TxD_t) * config->max_txds);
529 lst_per_page = PAGE_SIZE / lst_size;
531 for (i = 0; i < config->tx_fifo_num; i++) {
532 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
534 for (j = 0; j < page_num; j++) {
535 int mem_blks = (j * lst_per_page);
536 if (!nic->list_info[i][mem_blks].list_virt_addr)
538 pci_free_consistent(nic->pdev, PAGE_SIZE,
539 nic->list_info[i][mem_blks].
541 nic->list_info[i][mem_blks].
544 kfree(nic->list_info[i]);
547 #ifndef CONFIG_2BUFF_MODE
548 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
550 size = SIZE_OF_BLOCK;
552 for (i = 0; i < config->rx_ring_num; i++) {
553 blk_cnt = nic->block_count[i];
554 for (j = 0; j < blk_cnt; j++) {
555 tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr;
556 tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr;
557 if (tmp_v_addr == NULL)
559 pci_free_consistent(nic->pdev, size,
560 tmp_v_addr, tmp_p_addr);
564 #ifdef CONFIG_2BUFF_MODE
565 /* Freeing buffer storage addresses in 2BUFF mode. */
566 for (i = 0; i < config->rx_ring_num; i++) {
568 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
571 for (j = 0; j < blk_cnt; j++) {
573 if (!nic->ba[i][j]) {
577 while (k != MAX_RXDS_PER_BLOCK) {
578 buffAdd_t *ba = &nic->ba[i][j][k];
579 if (!ba || !ba->ba_0_org || !ba->ba_1_org)
582 kfree(nic->ba[i][j]);
593 kfree(nic->ba[i][j]);
600 if (mac_control->stats_mem) {
601 pci_free_consistent(nic->pdev,
602 mac_control->stats_mem_sz,
603 mac_control->stats_mem,
604 mac_control->stats_mem_phy);
609 * init_nic - Initialization of hardware
610 * @nic: device private variable
611 * Description: The function sequentially configures every block
612 * of the H/W from their reset values.
613 * Return Value: SUCCESS on success and
614 * '-1' on failure (endian settings incorrect).
617 static int init_nic(struct s2io_nic *nic)
619 XENA_dev_config_t __iomem *bar0 = nic->bar0;
620 struct net_device *dev = nic->dev;
621 register u64 val64 = 0;
625 mac_info_t *mac_control;
626 struct config_param *config;
627 int mdio_cnt = 0, dtx_cnt = 0;
628 unsigned long long mem_share;
630 mac_control = &nic->mac_control;
631 config = &nic->config;
633 /* Initialize swapper control register */
634 if (s2io_set_swapper(nic)) {
635 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
639 /* Remove XGXS from reset state */
641 writeq(val64, &bar0->sw_reset);
642 val64 = readq(&bar0->sw_reset);
645 /* Enable Receiving broadcasts */
646 add = &bar0->mac_cfg;
647 val64 = readq(&bar0->mac_cfg);
648 val64 |= MAC_RMAC_BCAST_ENABLE;
649 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
650 writel((u32) val64, add);
651 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
652 writel((u32) (val64 >> 32), (add + 4));
654 /* Read registers in all blocks */
655 val64 = readq(&bar0->mac_int_mask);
656 val64 = readq(&bar0->mc_int_mask);
657 val64 = readq(&bar0->xgxs_int_mask);
661 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
664 * Configuring the XAUI Interface of Xena.
665 * ***************************************
666 * To Configure the Xena's XAUI, one has to write a series
667 * of 64 bit values into two registers in a particular
668 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
669 * which will be defined in the array of configuration values
670 * (default_dtx_cfg & default_mdio_cfg) at appropriate places
671 * to switch writing from one register to another. We continue
672 * writing these values until we encounter the 'END_SIGN' macro.
673 * For example, after making a series of 21 writes into the
674 * dtx_control register, the 'SWITCH_SIGN' appears and hence we
675 * start writing into mdio_control until we encounter END_SIGN.
679 while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
680 if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
684 SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
685 &bar0->dtx_control, UF);
686 val64 = readq(&bar0->dtx_control);
690 while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
691 if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
695 SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
696 &bar0->mdio_control, UF);
697 val64 = readq(&bar0->mdio_control);
700 if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
701 (default_mdio_cfg[mdio_cnt] == END_SIGN)) {
708 /* Tx DMA Initialization */
710 writeq(val64, &bar0->tx_fifo_partition_0);
711 writeq(val64, &bar0->tx_fifo_partition_1);
712 writeq(val64, &bar0->tx_fifo_partition_2);
713 writeq(val64, &bar0->tx_fifo_partition_3);
716 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
718 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
719 13) | vBIT(config->tx_cfg[i].fifo_priority,
722 if (i == (config->tx_fifo_num - 1)) {
729 writeq(val64, &bar0->tx_fifo_partition_0);
733 writeq(val64, &bar0->tx_fifo_partition_1);
737 writeq(val64, &bar0->tx_fifo_partition_2);
741 writeq(val64, &bar0->tx_fifo_partition_3);
746 /* Enable Tx FIFO partition 0. */
747 val64 = readq(&bar0->tx_fifo_partition_0);
748 val64 |= BIT(0); /* To enable the FIFO partition. */
749 writeq(val64, &bar0->tx_fifo_partition_0);
751 val64 = readq(&bar0->tx_fifo_partition_0);
752 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
753 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
756 * Initialization of Tx_PA_CONFIG register to ignore packet
757 * integrity checking.
759 val64 = readq(&bar0->tx_pa_cfg);
760 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
761 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
762 writeq(val64, &bar0->tx_pa_cfg);
764 /* Rx DMA initialization. */
766 for (i = 0; i < config->rx_ring_num; i++) {
768 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
771 writeq(val64, &bar0->rx_queue_priority);
774 * Allocating equal share of memory to all the
778 for (i = 0; i < config->rx_ring_num; i++) {
781 mem_share = (64 / config->rx_ring_num +
782 64 % config->rx_ring_num);
783 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
786 mem_share = (64 / config->rx_ring_num);
787 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
790 mem_share = (64 / config->rx_ring_num);
791 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
794 mem_share = (64 / config->rx_ring_num);
795 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
798 mem_share = (64 / config->rx_ring_num);
799 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
802 mem_share = (64 / config->rx_ring_num);
803 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
806 mem_share = (64 / config->rx_ring_num);
807 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
810 mem_share = (64 / config->rx_ring_num);
811 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
815 writeq(val64, &bar0->rx_queue_cfg);
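/*
 * Worked example of the memory share computed above: with rx_ring_num = 3,
 * queue 0 gets 64/3 + 64%3 = 21 + 1 = 22 units of the 64-unit share while
 * queues 1 and 2 get 64/3 = 21 each, so all 64 units are accounted for.
 * Queues beyond rx_ring_num are presumably left with a zero share.
 */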
818 * Initializing the Tx round robin registers to 0.
819 * Filling Tx and Rx round robin registers as per the
820 * number of FIFOs and Rings is still TODO.
822 writeq(0, &bar0->tx_w_round_robin_0);
823 writeq(0, &bar0->tx_w_round_robin_1);
824 writeq(0, &bar0->tx_w_round_robin_2);
825 writeq(0, &bar0->tx_w_round_robin_3);
826 writeq(0, &bar0->tx_w_round_robin_4);
830 * Disable Rx steering. Hard coding all packets to be steered to
833 val64 = 0x8080808080808080ULL;
834 writeq(val64, &bar0->rts_qos_steering);
838 for (i = 1; i < 8; i++)
839 writeq(val64, &bar0->rts_frm_len_n[i]);
841 /* Set rts_frm_len register for fifo 0 */
842 writeq(MAC_RTS_FRM_LEN_SET(dev->mtu + 22),
843 &bar0->rts_frm_len_n[0]);
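/*
 * The "dev->mtu + 22" programmed above most likely covers the bytes that
 * sit outside the MTU: a 14 byte Ethernet header, a 4 byte VLAN tag and
 * a 4 byte FCS (14 + 4 + 4 = 22).  This breakdown is an assumption based
 * on the usual Ethernet framing, not a statement from the hardware spec.
 */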
845 /* Enable statistics */
846 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
847 val64 = SET_UPDT_PERIOD(Stats_refresh_time) |
848 STAT_CFG_STAT_RO | STAT_CFG_STAT_EN;
849 writeq(val64, &bar0->stat_cfg);
852 * Initializing the sampling rate for the device to calculate the
853 * bandwidth utilization.
855 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
856 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
857 writeq(val64, &bar0->mac_link_util);
861 * Initializing the Transmit and Receive Traffic Interrupt
864 /* TTI Initialization. Default Tx timer gets us about
865 * 250 interrupts per sec. Continuous interrupts are enabled
868 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) |
869 TTI_DATA1_MEM_TX_URNG_A(0xA) |
870 TTI_DATA1_MEM_TX_URNG_B(0x10) |
871 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN |
872 TTI_DATA1_MEM_TX_TIMER_CI_EN;
873 writeq(val64, &bar0->tti_data1_mem);
875 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
876 TTI_DATA2_MEM_TX_UFC_B(0x20) |
877 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
878 writeq(val64, &bar0->tti_data2_mem);
880 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
881 writeq(val64, &bar0->tti_command_mem);
884 * Once the operation completes, the Strobe bit of the command
885 * register will be reset. We poll for this particular condition.
886 * We wait for a maximum of 500ms for the operation to complete;
887 * if it's not complete by then, we return an error.
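/*
 * A minimal sketch of the poll-with-timeout pattern described above; the
 * loop bound, sleep interval and failure handling shown here are only
 * illustrative:
 *
 *     time = 0;
 *     while (1) {
 *             val64 = readq(&bar0->tti_command_mem);
 *             if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD))
 *                     break;
 *             if (time > 10)
 *                     return -1;
 *             msleep(50);
 *             time++;
 *     }
 *
 * Roughly 10 iterations of msleep(50) give the 500ms bound mentioned above.
 */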
891 val64 = readq(&bar0->tti_command_mem);
892 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
896 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
904 /* RTI Initialization */
905 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) |
906 RTI_DATA1_MEM_RX_URNG_A(0xA) |
907 RTI_DATA1_MEM_RX_URNG_B(0x10) |
908 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
910 writeq(val64, &bar0->rti_data1_mem);
912 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
913 RTI_DATA2_MEM_RX_UFC_B(0x2) |
914 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
915 writeq(val64, &bar0->rti_data2_mem);
917 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
918 writeq(val64, &bar0->rti_command_mem);
921 * Once the operation completes, the Strobe bit of the command
922 * register will be reset. We poll for this particular condition.
923 * We wait for a maximum of 500ms for the operation to complete;
924 * if it's not complete by then, we return an error.
928 val64 = readq(&bar0->rti_command_mem);
929 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
933 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
942 * Initializing proper values as the pause threshold for all
943 * the 8 queues on the Rx side.
945 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
946 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
948 /* Disable RMAC PAD STRIPPING */
949 add = &bar0->mac_cfg;
950 val64 = readq(&bar0->mac_cfg);
951 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
952 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
953 writel((u32) (val64), add);
954 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
955 writel((u32) (val64 >> 32), (add + 4));
956 val64 = readq(&bar0->mac_cfg);
959 * Set the time value to be inserted in the pause frame
962 val64 = readq(&bar0->rmac_pause_cfg);
963 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
964 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
965 writeq(val64, &bar0->rmac_pause_cfg);
968 * Set the threshold limit for generating the pause frame.
969 * If the amount of data in any queue exceeds the ratio of
970 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
971 * a pause frame is generated.
974 for (i = 0; i < 4; i++) {
976 (((u64) 0xFF00 | nic->mac_control.
977 mc_pause_threshold_q0q3)
980 writeq(val64, &bar0->mc_pause_thresh_q0q3);
983 for (i = 0; i < 4; i++) {
985 (((u64) 0xFF00 | nic->mac_control.
986 mc_pause_threshold_q4q7)
989 writeq(val64, &bar0->mc_pause_thresh_q4q7);
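/*
 * Worked example for the thresholds programmed above: with the default
 * mc_pause_threshold_q0q3/q4q7 value of 187, the ratio described in the
 * comment above is 187/256, i.e. a queue has to be roughly 73% full
 * before pause frames start being generated.
 */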
992 * TxDMA will stop issuing read requests if the number of read
993 * splits exceeds the limit set by shared_splits.
995 val64 = readq(&bar0->pic_control);
996 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
997 writeq(val64, &bar0->pic_control);
1003 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1004 * @nic: device private variable,
1005 * @mask: A mask indicating which Intr block must be modified and,
1006 * @flag: A flag indicating whether to enable or disable the Intrs.
1007 * Description: This function will either disable or enable the interrupts
1008 * depending on the flag argument. The mask argument can be used to
1009 * enable/disable any Intr block.
1010 * Return Value: NONE.
1013 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1015 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1016 register u64 val64 = 0, temp64 = 0;
1018 /* Top level interrupt classification */
1019 /* PIC Interrupts */
1020 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1021 /* Enable PIC Intrs in the general intr mask register */
1022 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1023 if (flag == ENABLE_INTRS) {
1024 temp64 = readq(&bar0->general_int_mask);
1025 temp64 &= ~((u64) val64);
1026 writeq(temp64, &bar0->general_int_mask);
1028 * Disabled all PCIX, Flash, MDIO, IIC and GPIO
1029 * interrupts for now.
1032 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1034 * No MSI Support is available presently, so TTI and
1035 * RTI interrupts are also disabled.
1037 } else if (flag == DISABLE_INTRS) {
1039 * Disable PIC Intrs in the general
1040 * intr mask register
1042 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1043 temp64 = readq(&bar0->general_int_mask);
1045 writeq(val64, &bar0->general_int_mask);
1049 /* DMA Interrupts */
1050 /* Enabling/Disabling Tx DMA interrupts */
1051 if (mask & TX_DMA_INTR) {
1052 /* Enable TxDMA Intrs in the general intr mask register */
1053 val64 = TXDMA_INT_M;
1054 if (flag == ENABLE_INTRS) {
1055 temp64 = readq(&bar0->general_int_mask);
1056 temp64 &= ~((u64) val64);
1057 writeq(temp64, &bar0->general_int_mask);
1059 * Keep all interrupts other than PFC interrupt
1060 * and PCC interrupt disabled in DMA level.
1062 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1064 writeq(val64, &bar0->txdma_int_mask);
1066 * Enable only the MISC error 1 interrupt in PFC block
1068 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1069 writeq(val64, &bar0->pfc_err_mask);
1071 * Enable only the FB_ECC error interrupt in PCC block
1073 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1074 writeq(val64, &bar0->pcc_err_mask);
1075 } else if (flag == DISABLE_INTRS) {
1077 * Disable TxDMA Intrs in the general intr mask
1080 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1081 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1082 temp64 = readq(&bar0->general_int_mask);
1084 writeq(val64, &bar0->general_int_mask);
1088 /* Enabling/Disabling Rx DMA interrupts */
1089 if (mask & RX_DMA_INTR) {
1090 /* Enable RxDMA Intrs in the general intr mask register */
1091 val64 = RXDMA_INT_M;
1092 if (flag == ENABLE_INTRS) {
1093 temp64 = readq(&bar0->general_int_mask);
1094 temp64 &= ~((u64) val64);
1095 writeq(temp64, &bar0->general_int_mask);
1097 * All RxDMA block interrupts are disabled for now
1100 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1101 } else if (flag == DISABLE_INTRS) {
1103 * Disable RxDMA Intrs in the general intr mask
1106 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1107 temp64 = readq(&bar0->general_int_mask);
1109 writeq(val64, &bar0->general_int_mask);
1113 /* MAC Interrupts */
1114 /* Enabling/Disabling MAC interrupts */
1115 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1116 val64 = TXMAC_INT_M | RXMAC_INT_M;
1117 if (flag == ENABLE_INTRS) {
1118 temp64 = readq(&bar0->general_int_mask);
1119 temp64 &= ~((u64) val64);
1120 writeq(temp64, &bar0->general_int_mask);
1122 * All MAC block error interrupts are disabled for now
1123 * except the link status change interrupt.
1126 val64 = MAC_INT_STATUS_RMAC_INT;
1127 temp64 = readq(&bar0->mac_int_mask);
1128 temp64 &= ~((u64) val64);
1129 writeq(temp64, &bar0->mac_int_mask);
1131 val64 = readq(&bar0->mac_rmac_err_mask);
1132 val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1133 writeq(val64, &bar0->mac_rmac_err_mask);
1134 } else if (flag == DISABLE_INTRS) {
1136 * Disable MAC Intrs in the general intr mask register
1138 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1139 writeq(DISABLE_ALL_INTRS,
1140 &bar0->mac_rmac_err_mask);
1142 temp64 = readq(&bar0->general_int_mask);
1144 writeq(val64, &bar0->general_int_mask);
1148 /* XGXS Interrupts */
1149 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1150 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1151 if (flag == ENABLE_INTRS) {
1152 temp64 = readq(&bar0->general_int_mask);
1153 temp64 &= ~((u64) val64);
1154 writeq(temp64, &bar0->general_int_mask);
1156 * All XGXS block error interrupts are disabled for now
1159 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1160 } else if (flag == DISABLE_INTRS) {
1162 * Disable XGXS Intrs in the general intr mask register
1164 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1165 temp64 = readq(&bar0->general_int_mask);
1167 writeq(val64, &bar0->general_int_mask);
1171 /* Memory Controller(MC) interrupts */
1172 if (mask & MC_INTR) {
1174 if (flag == ENABLE_INTRS) {
1175 temp64 = readq(&bar0->general_int_mask);
1176 temp64 &= ~((u64) val64);
1177 writeq(temp64, &bar0->general_int_mask);
1179 * All MC block error interrupts are disabled for now
1182 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1183 } else if (flag == DISABLE_INTRS) {
1185 * Disable MC Intrs in the general intr mask register
1187 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1188 temp64 = readq(&bar0->general_int_mask);
1190 writeq(val64, &bar0->general_int_mask);
1195 /* Tx traffic interrupts */
1196 if (mask & TX_TRAFFIC_INTR) {
1197 val64 = TXTRAFFIC_INT_M;
1198 if (flag == ENABLE_INTRS) {
1199 temp64 = readq(&bar0->general_int_mask);
1200 temp64 &= ~((u64) val64);
1201 writeq(temp64, &bar0->general_int_mask);
1203 * Enable all the Tx side interrupts
1204 * writing 0 enables all 64 TX interrupt levels
1206 writeq(0x0, &bar0->tx_traffic_mask);
1207 } else if (flag == DISABLE_INTRS) {
1209 * Disable Tx Traffic Intrs in the general intr mask
1212 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1213 temp64 = readq(&bar0->general_int_mask);
1215 writeq(val64, &bar0->general_int_mask);
1219 /* Rx traffic interrupts */
1220 if (mask & RX_TRAFFIC_INTR) {
1221 val64 = RXTRAFFIC_INT_M;
1222 if (flag == ENABLE_INTRS) {
1223 temp64 = readq(&bar0->general_int_mask);
1224 temp64 &= ~((u64) val64);
1225 writeq(temp64, &bar0->general_int_mask);
1226 /* writing 0 enables all 8 RX interrupt levels */
1227 writeq(0x0, &bar0->rx_traffic_mask);
1228 } else if (flag == DISABLE_INTRS) {
1230 * Disable Rx Traffic Intrs in the general intr mask
1233 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1234 temp64 = readq(&bar0->general_int_mask);
1236 writeq(val64, &bar0->general_int_mask);
1242 * verify_xena_quiescence - Checks whether the H/W is ready
1243 * @val64 : Value read from adapter status register.
1244 * @flag : indicates if the adapter enable bit was ever written once
1246 * Description: Returns whether the H/W is ready to go or not. Depending
1247 * on whether the adapter enable bit was written or not, the comparison
1248 * differs and the calling function passes the input argument flag to
1250 * Return: 1 if Xena is quiescent
1251 * 0 if Xena is not quiescent
1254 static int verify_xena_quiescence(u64 val64, int flag)
1257 u64 tmp64 = ~((u64) val64);
1261 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1262 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1263 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1264 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1265 ADAPTER_STATUS_P_PLL_LOCK))) {
1266 if (flag == FALSE) {
1267 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1268 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1269 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1275 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1276 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1277 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1278 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1279 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1291 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1292 * @sp: Pointer to device specific structure
1294 * New procedure to clear mac address reading problems on Alpha platforms
1298 static void fix_mac_address(nic_t * sp)
1300 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1304 while (fix_mac[i] != END_SIGN) {
1305 writeq(fix_mac[i++], &bar0->gpio_control);
1306 val64 = readq(&bar0->gpio_control);
1311 * start_nic - Turns the device on
1312 * @nic : device private variable.
1314 * This function actually turns the device on. Before this function is
1315 * called, all registers are configured from their reset states
1316 * and shared memory is allocated but the NIC is still quiescent. On
1317 * calling this function, the device interrupts are cleared and the NIC is
1318 * literally switched on by writing into the adapter control register.
1320 * SUCCESS on success and -1 on failure.
1323 static int start_nic(struct s2io_nic *nic)
1325 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1326 struct net_device *dev = nic->dev;
1327 register u64 val64 = 0;
1328 u16 interruptible, i;
1330 mac_info_t *mac_control;
1331 struct config_param *config;
1333 mac_control = &nic->mac_control;
1334 config = &nic->config;
1336 /* PRC Initialization and configuration */
1337 for (i = 0; i < config->rx_ring_num; i++) {
1338 writeq((u64) nic->rx_blocks[i][0].block_dma_addr,
1339 &bar0->prc_rxd0_n[i]);
1341 val64 = readq(&bar0->prc_ctrl_n[i]);
1342 #ifndef CONFIG_2BUFF_MODE
1343 val64 |= PRC_CTRL_RC_ENABLED;
1345 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1347 writeq(val64, &bar0->prc_ctrl_n[i]);
1350 #ifdef CONFIG_2BUFF_MODE
1351 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1352 val64 = readq(&bar0->rx_pa_cfg);
1353 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1354 writeq(val64, &bar0->rx_pa_cfg);
1358 * Enabling MC-RLDRAM. After enabling the device, we wait
1359 * for around 100ms, which is approximately the time required
1360 * for the device to be ready for operation.
1362 val64 = readq(&bar0->mc_rldram_mrs);
1363 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1364 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1365 val64 = readq(&bar0->mc_rldram_mrs);
1367 msleep(100); /* Delay by around 100 ms. */
1369 /* Enabling ECC Protection. */
1370 val64 = readq(&bar0->adapter_control);
1371 val64 &= ~ADAPTER_ECC_EN;
1372 writeq(val64, &bar0->adapter_control);
1375 * Clearing any possible Link state change interrupts that
1376 * could have popped up just before Enabling the card.
1378 val64 = readq(&bar0->mac_rmac_err_reg);
1380 writeq(val64, &bar0->mac_rmac_err_reg);
1383 * Verify if the device is ready to be enabled, if so enable
1386 val64 = readq(&bar0->adapter_status);
1387 if (!verify_xena_quiescence(val64, nic->device_enabled_once)) {
1388 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1389 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1390 (unsigned long long) val64);
1394 /* Enable select interrupts */
1395 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1397 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1400 * With some switches, link might be already up at this point.
1401 * Because of this weird behavior, when we enable laser,
1402 * we may not get link. We need to handle this. We cannot
1403 * figure out which switch is misbehaving. So we are forced to
1404 * make a global change.
1407 /* Enabling Laser. */
1408 val64 = readq(&bar0->adapter_control);
1409 val64 |= ADAPTER_EOI_TX_ON;
1410 writeq(val64, &bar0->adapter_control);
1412 /* SXE-002: Initialize link and activity LED */
1413 subid = nic->pdev->subsystem_device;
1414 if ((subid & 0xFF) >= 0x07) {
1415 val64 = readq(&bar0->gpio_control);
1416 val64 |= 0x0000800000000000ULL;
1417 writeq(val64, &bar0->gpio_control);
1418 val64 = 0x0411040400000000ULL;
1419 writeq(val64, (void __iomem *) bar0 + 0x2700);
1423 * We don't see link state interrupts on certain switches, so we
1424 * directly schedule a link state task from here.
1426 schedule_work(&nic->set_link_task);
1429 * Here we are performing soft reset on XGXS to
1430 * force link down. Since link is already up, we will get
1431 * link state change interrupt after this reset
1433 SPECIAL_REG_WRITE(0x80010515001E0000ULL, &bar0->dtx_control, UF);
1434 val64 = readq(&bar0->dtx_control);
1436 SPECIAL_REG_WRITE(0x80010515001E00E0ULL, &bar0->dtx_control, UF);
1437 val64 = readq(&bar0->dtx_control);
1439 SPECIAL_REG_WRITE(0x80070515001F00E4ULL, &bar0->dtx_control, UF);
1440 val64 = readq(&bar0->dtx_control);
1447 * free_tx_buffers - Free all queued Tx buffers
1448 * @nic : device private variable.
1450 * Free all queued Tx buffers.
1451 * Return Value: void
1454 static void free_tx_buffers(struct s2io_nic *nic)
1456 struct net_device *dev = nic->dev;
1457 struct sk_buff *skb;
1460 mac_info_t *mac_control;
1461 struct config_param *config;
1464 mac_control = &nic->mac_control;
1465 config = &nic->config;
1467 for (i = 0; i < config->tx_fifo_num; i++) {
1468 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1469 txdp = (TxD_t *) nic->list_info[i][j].
1472 (struct sk_buff *) ((unsigned long) txdp->
1475 memset(txdp, 0, sizeof(TxD_t));
1479 memset(txdp, 0, sizeof(TxD_t));
1483 "%s:forcibly freeing %d skbs on FIFO%d\n",
1485 mac_control->tx_curr_get_info[i].offset = 0;
1486 mac_control->tx_curr_put_info[i].offset = 0;
1491 * stop_nic - To stop the nic
1492 * @nic : device private variable.
1494 * This function does exactly the opposite of what the start_nic()
1495 * function does. This function is called to stop the device.
1500 static void stop_nic(struct s2io_nic *nic)
1502 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1503 register u64 val64 = 0;
1504 u16 interruptible, i;
1505 mac_info_t *mac_control;
1506 struct config_param *config;
1508 mac_control = &nic->mac_control;
1509 config = &nic->config;
1511 /* Disable all interrupts */
1512 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1514 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
1517 for (i = 0; i < config->rx_ring_num; i++) {
1518 val64 = readq(&bar0->prc_ctrl_n[i]);
1519 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
1520 writeq(val64, &bar0->prc_ctrl_n[i]);
1525 * fill_rx_buffers - Allocates the Rx side skbs
1526 * @nic: device private variable
1527 * @ring_no: ring number
1529 * The function allocates Rx side skbs and puts the physical
1530 * address of these buffers into the RxD buffer pointers, so that the NIC
1531 * can DMA the received frame into these locations.
1532 * The NIC supports 3 receive modes, viz.
1533 * 1. single buffer,
1534 * 2. three buffer and
1535 * 3. five buffer modes.
1536 * Each mode defines how many fragments the received frame will be split
1537 * up into by the NIC. In three buffer mode the frame is split into the
1538 * L3 header, L4 header and L4 payload; in five buffer mode the L4 payload
1539 * itself is split into 3 fragments. As of now only single buffer mode is
1542 * SUCCESS on success or an appropriate -ve value on failure.
1545 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1547 struct net_device *dev = nic->dev;
1548 struct sk_buff *skb;
1550 int off, off1, size, block_no, block_no1;
1551 int offset, offset1;
1553 u32 alloc_cnt = nic->pkt_cnt[ring_no] -
1554 atomic_read(&nic->rx_bufs_left[ring_no]);
1555 mac_info_t *mac_control;
1556 struct config_param *config;
1557 #ifdef CONFIG_2BUFF_MODE
1562 dma_addr_t rxdpphys;
1564 #ifndef CONFIG_S2IO_NAPI
1565 unsigned long flags;
1568 mac_control = &nic->mac_control;
1569 config = &nic->config;
1571 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
1572 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
1574 while (alloc_tab < alloc_cnt) {
1575 block_no = mac_control->rx_curr_put_info[ring_no].
1577 block_no1 = mac_control->rx_curr_get_info[ring_no].
1579 off = mac_control->rx_curr_put_info[ring_no].offset;
1580 off1 = mac_control->rx_curr_get_info[ring_no].offset;
1581 #ifndef CONFIG_2BUFF_MODE
1582 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
1583 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
1585 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
1586 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
1589 rxdp = nic->rx_blocks[ring_no][block_no].
1590 block_virt_addr + off;
1591 if ((offset == offset1) && (rxdp->Host_Control)) {
1592 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
1593 DBG_PRINT(INTR_DBG, " info equated\n");
1596 #ifndef CONFIG_2BUFF_MODE
1597 if (rxdp->Control_1 == END_OF_BLOCK) {
1598 mac_control->rx_curr_put_info[ring_no].
1600 mac_control->rx_curr_put_info[ring_no].
1601 block_index %= nic->block_count[ring_no];
1602 block_no = mac_control->rx_curr_put_info
1603 [ring_no].block_index;
1605 off %= (MAX_RXDS_PER_BLOCK + 1);
1606 mac_control->rx_curr_put_info[ring_no].offset =
1608 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
1609 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
1612 #ifndef CONFIG_S2IO_NAPI
1613 spin_lock_irqsave(&nic->put_lock, flags);
1614 nic->put_pos[ring_no] =
1615 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
1616 spin_unlock_irqrestore(&nic->put_lock, flags);
1619 if (rxdp->Host_Control == END_OF_BLOCK) {
1620 mac_control->rx_curr_put_info[ring_no].
1622 mac_control->rx_curr_put_info[ring_no].
1623 block_index %= nic->block_count[ring_no];
1624 block_no = mac_control->rx_curr_put_info
1625 [ring_no].block_index;
1627 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
1628 dev->name, block_no,
1629 (unsigned long long) rxdp->Control_1);
1630 mac_control->rx_curr_put_info[ring_no].offset =
1632 rxdp = nic->rx_blocks[ring_no][block_no].
1635 #ifndef CONFIG_S2IO_NAPI
1636 spin_lock_irqsave(&nic->put_lock, flags);
1637 nic->put_pos[ring_no] = (block_no *
1638 (MAX_RXDS_PER_BLOCK + 1)) + off;
1639 spin_unlock_irqrestore(&nic->put_lock, flags);
1643 #ifndef CONFIG_2BUFF_MODE
1644 if (rxdp->Control_1 & RXD_OWN_XENA)
1646 if (rxdp->Control_2 & BIT(0))
1649 mac_control->rx_curr_put_info[ring_no].
1653 #ifdef CONFIG_2BUFF_MODE
1655 * RxDs spanning cache lines will be replenished only
1656 * if the succeeding RxD is also owned by the host. It
1657 * will always be the ((8*i)+3) and ((8*i)+6)
1658 * descriptors for the 48 byte descriptor. The offending
1659 * descriptor is of course the 3rd descriptor.
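/*
 * Worked example for the check below, assuming 48 byte RxDs packed from
 * the start of a 128 byte aligned block (pci_alloc_consistent memory is
 * page aligned): descriptor k starts at offset k * 48, so within every
 * group of 8 the 3rd and 6th descriptors start at offsets 96 and 112
 * (mod 128).  Only those two satisfy "offset % 128 > 80" and only those
 * two end past the next 128 byte boundary, i.e. they are exactly the
 * cache line spanning RxDs referred to above.
 */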
1661 rxdpphys = nic->rx_blocks[ring_no][block_no].
1662 block_dma_addr + (off * sizeof(RxD_t));
1663 if (((u64) (rxdpphys)) % 128 > 80) {
1664 rxdpnext = nic->rx_blocks[ring_no][block_no].
1665 block_virt_addr + (off + 1);
1666 if (rxdpnext->Host_Control == END_OF_BLOCK) {
1667 nextblk = (block_no + 1) %
1668 (nic->block_count[ring_no]);
1669 rxdpnext = nic->rx_blocks[ring_no]
1670 [nextblk].block_virt_addr;
1672 if (rxdpnext->Control_2 & BIT(0))
1677 #ifndef CONFIG_2BUFF_MODE
1678 skb = dev_alloc_skb(size + NET_IP_ALIGN);
1680 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
1683 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
1684 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
1687 #ifndef CONFIG_2BUFF_MODE
1688 skb_reserve(skb, NET_IP_ALIGN);
1689 memset(rxdp, 0, sizeof(RxD_t));
1690 rxdp->Buffer0_ptr = pci_map_single
1691 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
1692 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
1693 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
1694 rxdp->Host_Control = (unsigned long) (skb);
1695 rxdp->Control_1 |= RXD_OWN_XENA;
1697 off %= (MAX_RXDS_PER_BLOCK + 1);
1698 mac_control->rx_curr_put_info[ring_no].offset = off;
1700 ba = &nic->ba[ring_no][block_no][off];
1701 skb_reserve(skb, BUF0_LEN);
1702 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
1704 skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
1706 memset(rxdp, 0, sizeof(RxD_t));
1707 rxdp->Buffer2_ptr = pci_map_single
1708 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
1709 PCI_DMA_FROMDEVICE);
1711 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
1712 PCI_DMA_FROMDEVICE);
1714 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
1715 PCI_DMA_FROMDEVICE);
1717 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
1718 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
1719 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
1720 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
1721 rxdp->Host_Control = (u64) ((unsigned long) (skb));
1722 rxdp->Control_1 |= RXD_OWN_XENA;
1724 mac_control->rx_curr_put_info[ring_no].offset = off;
1726 atomic_inc(&nic->rx_bufs_left[ring_no]);
1735 * free_rx_buffers - Frees all Rx buffers
1736 * @sp: device private variable.
1738 * This function will free all Rx buffers allocated by host.
1743 static void free_rx_buffers(struct s2io_nic *sp)
1745 struct net_device *dev = sp->dev;
1746 int i, j, blk = 0, off, buf_cnt = 0;
1748 struct sk_buff *skb;
1749 mac_info_t *mac_control;
1750 struct config_param *config;
1751 #ifdef CONFIG_2BUFF_MODE
1755 mac_control = &sp->mac_control;
1756 config = &sp->config;
1758 for (i = 0; i < config->rx_ring_num; i++) {
1759 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
1760 off = j % (MAX_RXDS_PER_BLOCK + 1);
1761 rxdp = sp->rx_blocks[i][blk].block_virt_addr + off;
1763 #ifndef CONFIG_2BUFF_MODE
1764 if (rxdp->Control_1 == END_OF_BLOCK) {
1766 (RxD_t *) ((unsigned long) rxdp->
1772 if (rxdp->Host_Control == END_OF_BLOCK) {
1778 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
1779 memset(rxdp, 0, sizeof(RxD_t));
1784 (struct sk_buff *) ((unsigned long) rxdp->
1787 #ifndef CONFIG_2BUFF_MODE
1788 pci_unmap_single(sp->pdev, (dma_addr_t)
1791 HEADER_ETHERNET_II_802_3_SIZE
1792 + HEADER_802_2_SIZE +
1794 PCI_DMA_FROMDEVICE);
1796 ba = &sp->ba[i][blk][off];
1797 pci_unmap_single(sp->pdev, (dma_addr_t)
1800 PCI_DMA_FROMDEVICE);
1801 pci_unmap_single(sp->pdev, (dma_addr_t)
1804 PCI_DMA_FROMDEVICE);
1805 pci_unmap_single(sp->pdev, (dma_addr_t)
1807 dev->mtu + BUF0_LEN + 4,
1808 PCI_DMA_FROMDEVICE);
1811 atomic_dec(&sp->rx_bufs_left[i]);
1814 memset(rxdp, 0, sizeof(RxD_t));
1816 mac_control->rx_curr_put_info[i].block_index = 0;
1817 mac_control->rx_curr_get_info[i].block_index = 0;
1818 mac_control->rx_curr_put_info[i].offset = 0;
1819 mac_control->rx_curr_get_info[i].offset = 0;
1820 atomic_set(&sp->rx_bufs_left[i], 0);
1821 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
1822 dev->name, buf_cnt, i);
1827 * s2io_poll - Rx interrupt handler for NAPI support
1828 * @dev : pointer to the device structure.
1829 * @budget : The number of packets that were budgeted to be processed
1830 * during one pass through the 'Poll' function.
1832 * Comes into picture only if NAPI support has been incorporated. It does
1833 * the same thing that rx_intr_handler does, but not in a interrupt context
1834 * also It will process only a given number of packets.
1836 * 0 on success and 1 if there are No Rx packets to be processed.
1839 #ifdef CONFIG_S2IO_NAPI
1840 static int s2io_poll(struct net_device *dev, int *budget)
1842 nic_t *nic = dev->priv;
1843 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1844 int pkts_to_process = *budget, pkt_cnt = 0;
1845 register u64 val64 = 0;
1846 rx_curr_get_info_t get_info, put_info;
1847 int i, get_block, put_block, get_offset, put_offset, ring_bufs;
1848 #ifndef CONFIG_2BUFF_MODE
1851 struct sk_buff *skb;
1853 mac_info_t *mac_control;
1854 struct config_param *config;
1855 #ifdef CONFIG_2BUFF_MODE
1859 mac_control = &nic->mac_control;
1860 config = &nic->config;
1862 if (pkts_to_process > dev->quota)
1863 pkts_to_process = dev->quota;
1865 val64 = readq(&bar0->rx_traffic_int);
1866 writeq(val64, &bar0->rx_traffic_int);
1868 for (i = 0; i < config->rx_ring_num; i++) {
1869 get_info = mac_control->rx_curr_get_info[i];
1870 get_block = get_info.block_index;
1871 put_info = mac_control->rx_curr_put_info[i];
1872 put_block = put_info.block_index;
1873 ring_bufs = config->rx_cfg[i].num_rxd;
1874 rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
1876 #ifndef CONFIG_2BUFF_MODE
1877 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1879 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
1881 while ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
1882 (((get_offset + 1) % ring_bufs) != put_offset)) {
1883 if (--pkts_to_process < 0) {
1886 if (rxdp->Control_1 == END_OF_BLOCK) {
1888 (RxD_t *) ((unsigned long) rxdp->
1892 (MAX_RXDS_PER_BLOCK + 1);
1894 get_block %= nic->block_count[i];
1895 mac_control->rx_curr_get_info[i].
1896 offset = get_info.offset;
1897 mac_control->rx_curr_get_info[i].
1898 block_index = get_block;
1902 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1905 (struct sk_buff *) ((unsigned long) rxdp->
1908 DBG_PRINT(ERR_DBG, "%s: The skb is ",
1910 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
1913 val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
1914 val16 = (u16) (val64 >> 48);
1915 cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
1916 pci_unmap_single(nic->pdev, (dma_addr_t)
1919 HEADER_ETHERNET_II_802_3_SIZE +
1922 PCI_DMA_FROMDEVICE);
1923 rx_osm_handler(nic, val16, rxdp, i);
1926 get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
1928 nic->rx_blocks[i][get_block].block_virt_addr +
1930 mac_control->rx_curr_get_info[i].offset =
1934 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1936 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
1938 while (((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
1939 !(rxdp->Control_2 & BIT(0))) &&
1940 (((get_offset + 1) % ring_bufs) != put_offset)) {
1941 if (--pkts_to_process < 0) {
1944 skb = (struct sk_buff *) ((unsigned long)
1945 rxdp->Host_Control);
1947 DBG_PRINT(ERR_DBG, "%s: The skb is ",
1949 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
1953 pci_unmap_single(nic->pdev, (dma_addr_t)
1955 BUF0_LEN, PCI_DMA_FROMDEVICE);
1956 pci_unmap_single(nic->pdev, (dma_addr_t)
1958 BUF1_LEN, PCI_DMA_FROMDEVICE);
1959 pci_unmap_single(nic->pdev, (dma_addr_t)
1961 dev->mtu + BUF0_LEN + 4,
1962 PCI_DMA_FROMDEVICE);
1963 ba = &nic->ba[i][get_block][get_info.offset];
1965 rx_osm_handler(nic, rxdp, i, ba);
1968 mac_control->rx_curr_get_info[i].offset =
1971 nic->rx_blocks[i][get_block].block_virt_addr +
1974 if (get_info.offset &&
1975 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
1976 get_info.offset = 0;
1977 mac_control->rx_curr_get_info[i].
1978 offset = get_info.offset;
1980 get_block %= nic->block_count[i];
1981 mac_control->rx_curr_get_info[i].
1982 block_index = get_block;
1984 nic->rx_blocks[i][get_block].
1988 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1997 dev->quota -= pkt_cnt;
1999 netif_rx_complete(dev);
2001 for (i = 0; i < config->rx_ring_num; i++) {
2002 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2003 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2004 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2008 /* Re enable the Rx interrupts. */
2009 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2013 dev->quota -= pkt_cnt;
2016 for (i = 0; i < config->rx_ring_num; i++) {
2017 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2018 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2019 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2027 * rx_intr_handler - Rx interrupt handler
2028 * @nic: device private variable.
2030 * If the interrupt is because of a received frame or if the
2031 * receive ring contains fresh, as yet unprocessed frames, this function is
2032 * called. It picks up the RxD at which the last Rx processing had
2033 * stopped, sends the skb to the OSM's Rx handler and then increments
2039 static void rx_intr_handler(struct s2io_nic *nic)
2041 struct net_device *dev = (struct net_device *) nic->dev;
2042 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
2043 rx_curr_get_info_t get_info, put_info;
2045 struct sk_buff *skb;
2046 #ifndef CONFIG_2BUFF_MODE
2049 register u64 val64 = 0;
2050 int get_block, get_offset, put_block, put_offset, ring_bufs;
2052 mac_info_t *mac_control;
2053 struct config_param *config;
2054 #ifdef CONFIG_2BUFF_MODE
2058 mac_control = &nic->mac_control;
2059 config = &nic->config;
2062 * rx_traffic_int reg is an R1 register, hence we read and write back
2063 * the same value in the register to clear it.
2065 val64 = readq(&bar0->rx_traffic_int);
2066 writeq(val64, &bar0->rx_traffic_int);
2068 for (i = 0; i < config->rx_ring_num; i++) {
2069 get_info = mac_control->rx_curr_get_info[i];
2070 get_block = get_info.block_index;
2071 put_info = mac_control->rx_curr_put_info[i];
2072 put_block = put_info.block_index;
2073 ring_bufs = config->rx_cfg[i].num_rxd;
2074 rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
2076 #ifndef CONFIG_2BUFF_MODE
2077 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2079 spin_lock(&nic->put_lock);
2080 put_offset = nic->put_pos[i];
2081 spin_unlock(&nic->put_lock);
2082 while ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
2083 (((get_offset + 1) % ring_bufs) != put_offset)) {
2084 if (rxdp->Control_1 == END_OF_BLOCK) {
2085 rxdp = (RxD_t *) ((unsigned long)
2089 (MAX_RXDS_PER_BLOCK + 1);
2091 get_block %= nic->block_count[i];
2092 mac_control->rx_curr_get_info[i].
2093 offset = get_info.offset;
2094 mac_control->rx_curr_get_info[i].
2095 block_index = get_block;
2099 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2101 skb = (struct sk_buff *) ((unsigned long)
2102 rxdp->Host_Control);
2104 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2106 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2109 val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
2110 val16 = (u16) (val64 >> 48);
2111 cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
2112 pci_unmap_single(nic->pdev, (dma_addr_t)
2115 HEADER_ETHERNET_II_802_3_SIZE +
2118 PCI_DMA_FROMDEVICE);
2119 rx_osm_handler(nic, val16, rxdp, i);
2121 get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
2123 nic->rx_blocks[i][get_block].block_virt_addr +
2125 mac_control->rx_curr_get_info[i].offset =
2128 if ((indicate_max_pkts)
2129 && (pkt_cnt > indicate_max_pkts))
2133 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2135 spin_lock(&nic->put_lock);
2136 put_offset = nic->put_pos[i];
2137 spin_unlock(&nic->put_lock);
2138 while (((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
2139 !(rxdp->Control_2 & BIT(0))) &&
2140 (((get_offset + 1) % ring_bufs) != put_offset)) {
2141 skb = (struct sk_buff *) ((unsigned long)
2142 rxdp->Host_Control);
2144 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2146 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2150 pci_unmap_single(nic->pdev, (dma_addr_t)
2152 BUF0_LEN, PCI_DMA_FROMDEVICE);
2153 pci_unmap_single(nic->pdev, (dma_addr_t)
2155 BUF1_LEN, PCI_DMA_FROMDEVICE);
2156 pci_unmap_single(nic->pdev, (dma_addr_t)
2158 dev->mtu + BUF0_LEN + 4,
2159 PCI_DMA_FROMDEVICE);
2160 ba = &nic->ba[i][get_block][get_info.offset];
2162 rx_osm_handler(nic, rxdp, i, ba);
2165 mac_control->rx_curr_get_info[i].offset =
2168 nic->rx_blocks[i][get_block].block_virt_addr +
2171 if (get_info.offset &&
2172 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2173 get_info.offset = 0;
2174 mac_control->rx_curr_get_info[i].
2175 offset = get_info.offset;
2177 get_block %= nic->block_count[i];
2178 mac_control->rx_curr_get_info[i].
2179 block_index = get_block;
2181 nic->rx_blocks[i][get_block].
2185 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2188 if ((indicate_max_pkts)
2189 && (pkt_cnt > indicate_max_pkts))
2193 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2199 * tx_intr_handler - Transmit interrupt handler
2200 * @nic : device private variable
2202 * If an interrupt was raised to indicate DMA complete of the
2203 * Tx packet, this function is called. It identifies the last TxD
2204 * whose buffer was freed and frees all skbs whose data has already been
2205 * DMA'ed into the NIC's internal memory.
2210 static void tx_intr_handler(struct s2io_nic *nic)
2212 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2213 struct net_device *dev = (struct net_device *) nic->dev;
2214 tx_curr_get_info_t get_info, put_info;
2215 struct sk_buff *skb;
2217 register u64 val64 = 0;
2220 mac_info_t *mac_control;
2221 struct config_param *config;
2223 mac_control = &nic->mac_control;
2224 config = &nic->config;
2227 * tx_traffic_int reg is an R1 register, hence we read and write
2228 * back the same value in the register to clear it.
2230 val64 = readq(&bar0->tx_traffic_int);
2231 writeq(val64, &bar0->tx_traffic_int);
2233 for (i = 0; i < config->tx_fifo_num; i++) {
2234 get_info = mac_control->tx_curr_get_info[i];
2235 put_info = mac_control->tx_curr_put_info[i];
2236 txdlp = (TxD_t *) nic->list_info[i][get_info.offset].
2238 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2239 (get_info.offset != put_info.offset) &&
2240 (txdlp->Host_Control)) {
2241 /* Check for TxD errors */
2242 if (txdlp->Control_1 & TXD_T_CODE) {
2243 unsigned long long err;
2244 err = txdlp->Control_1 & TXD_T_CODE;
2245 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2249 skb = (struct sk_buff *) ((unsigned long)
2250 txdlp->Host_Control);
2252 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2254 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2257 nic->tx_pkt_count++;
2259 frg_cnt = skb_shinfo(skb)->nr_frags;
2261 /* For unfragmented skb */
2262 pci_unmap_single(nic->pdev, (dma_addr_t)
2263 txdlp->Buffer_Pointer,
2264 skb->len - skb->data_len,
2267 TxD_t *temp = txdlp;
2269 for (j = 0; j < frg_cnt; j++, txdlp++) {
2271 &skb_shinfo(skb)->frags[j];
2272 pci_unmap_page(nic->pdev,
2282 (sizeof(TxD_t) * config->max_txds));
2284 /* Updating the statistics block */
2285 nic->stats.tx_packets++;
2286 nic->stats.tx_bytes += skb->len;
2287 dev_kfree_skb_irq(skb);
2290 get_info.offset %= get_info.fifo_len + 1;
2291 txdlp = (TxD_t *) nic->list_info[i]
2292 [get_info.offset].list_virt_addr;
2293 mac_control->tx_curr_get_info[i].offset =
2298 spin_lock(&nic->tx_lock);
2299 if (netif_queue_stopped(dev))
2300 netif_wake_queue(dev);
2301 spin_unlock(&nic->tx_lock);
2305 * alarm_intr_handler - Alarm Interrupt handler
2306 * @nic: device private variable
2307 * Description: If the interrupt was neither due to an Rx packet nor a Tx
2308 * completion, this function is called. If the interrupt indicates a loss
2309 * of link, the OSM link status handler is invoked; for any other alarm
2310 * interrupt, the block that raised the interrupt is displayed and a H/W
2311 * reset is issued.
2316 static void alarm_intr_handler(struct s2io_nic *nic)
2318 struct net_device *dev = (struct net_device *) nic->dev;
2319 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2320 register u64 val64 = 0, err_reg = 0;
2322 /* Handling link status change error Intr */
2323 err_reg = readq(&bar0->mac_rmac_err_reg);
2324 writeq(err_reg, &bar0->mac_rmac_err_reg);
2325 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2326 schedule_work(&nic->set_link_task);
2329 /* In case of a serious error, the device will be Reset. */
2330 val64 = readq(&bar0->serr_source);
2331 if (val64 & SERR_SOURCE_ANY) {
2332 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2333 DBG_PRINT(ERR_DBG, "serious error!!\n");
2334 netif_stop_queue(dev);
2335 schedule_work(&nic->rst_timer_task);
2339 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2340 * Error occurs, the adapter will be recycled by disabling the
2341 * adapter enable bit and enabling it again after the device
2342 * becomes Quiescent.
2344 val64 = readq(&bar0->pcc_err_reg);
2345 writeq(val64, &bar0->pcc_err_reg);
2346 if (val64 & PCC_FB_ECC_DB_ERR) {
2347 u64 ac = readq(&bar0->adapter_control);
2348 ac &= ~(ADAPTER_CNTL_EN);
2349 writeq(ac, &bar0->adapter_control);
2350 ac = readq(&bar0->adapter_control);
2351 schedule_work(&nic->set_link_task);
2354 /* Other type of interrupts are not being handled now, TODO */
2358 * wait_for_cmd_complete - waits for a command to complete.
2359 * @sp : private member of the device structure, which is a pointer to the
2360 * s2io_nic structure.
2361 * Description: Function that waits for a command written to the RMAC
2362 * ADDR/DATA registers to complete and returns either success or
2363 * failure depending on whether the command completed or not.
2365 * SUCCESS on success and FAILURE on failure.
2368 static int wait_for_cmd_complete(nic_t * sp)
2370 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2371 int ret = FAILURE, cnt = 0;
2375 val64 = readq(&bar0->rmac_addr_cmd_mem);
2376 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
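/*
 * Illustrative sketch (not part of the driver as listed): the body of
 * wait_for_cmd_complete() is truncated above.  The completion wait it
 * describes is a bounded poll on the STROBE_CMD_EXECUTING bit; the
 * retry bound and per-iteration delay below are assumptions, not
 * values taken from the driver.
 */
static int wait_for_cmd_complete_sketch(nic_t *sp)
{
	XENA_dev_config_t __iomem *bar0 = sp->bar0;
	int cnt = 0;
	u64 val64;

	do {
		val64 = readq(&bar0->rmac_addr_cmd_mem);
		if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING))
			return SUCCESS;	/* command has completed */
		msleep(50);		/* assumed polling interval */
	} while (++cnt < 10);		/* assumed retry bound */

	return FAILURE;
}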
2389 * s2io_reset - Resets the card.
2390 * @sp : private member of the device structure.
2391 * Description: Function to Reset the card. This function then also
2392 * restores the previously saved PCI configuration space registers as
2393 * the card reset also resets the configuration space.
2398 static void s2io_reset(nic_t * sp)
2400 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2404 val64 = SW_RESET_ALL;
2405 writeq(val64, &bar0->sw_reset);
2408 * At this stage, if the PCI write is indeed completed, the
2409 * card is reset and so is the PCI Config space of the device.
2410 * So a read cannot be issued at this stage on any of the
2411 * registers to ensure the write into "sw_reset" register
2413 * Question: Is there any system call that will explicitly force
2414 * all the write commands still pending on the bus to be pushed
2416 * As of now I am just giving a 250ms delay and hoping that the
2417 * PCI write to sw_reset register is done by this time.
2421 /* Restore the PCI state saved during initialization. */
2422 pci_restore_state(sp->pdev);
2427 /* SXE-002: Configure link and activity LED to turn it off */
2428 subid = sp->pdev->subsystem_device;
2429 if ((subid & 0xFF) >= 0x07) {
2430 val64 = readq(&bar0->gpio_control);
2431 val64 |= 0x0000800000000000ULL;
2432 writeq(val64, &bar0->gpio_control);
2433 val64 = 0x0411040400000000ULL;
2434 writeq(val64, (void __iomem *) bar0 + 0x2700);
2437 sp->device_enabled_once = FALSE;
2441 * s2io_set_swapper - to set the swapper control on the card
2442 * @sp : private member of the device structure,
2443 * pointer to the s2io_nic structure.
2444 * Description: Function to set the swapper control on the card
2445 * correctly depending on the 'endianness' of the system.
2447 * SUCCESS on success and FAILURE on failure.
2450 static int s2io_set_swapper(nic_t * sp)
2452 struct net_device *dev = sp->dev;
2453 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2454 u64 val64, valt, valr;
2457 * Set proper endian settings and verify the same by reading
2458 * the PIF Feed-back register.
2461 val64 = readq(&bar0->pif_rd_swapper_fb);
2462 if (val64 != 0x0123456789ABCDEFULL) {
2464 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2465 0x8100008181000081ULL, /* FE=1, SE=0 */
2466 0x4200004242000042ULL, /* FE=0, SE=1 */
2467 0}; /* FE=0, SE=0 */
2470 writeq(value[i], &bar0->swapper_ctrl);
2471 val64 = readq(&bar0->pif_rd_swapper_fb);
2472 if (val64 == 0x0123456789ABCDEFULL)
2477 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2479 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2480 (unsigned long long) val64);
2485 valr = readq(&bar0->swapper_ctrl);
2488 valt = 0x0123456789ABCDEFULL;
2489 writeq(valt, &bar0->xmsi_address);
2490 val64 = readq(&bar0->xmsi_address);
2494 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2495 0x0081810000818100ULL, /* FE=1, SE=0 */
2496 0x0042420000424200ULL, /* FE=0, SE=1 */
2497 0}; /* FE=0, SE=0 */
2500 writeq((value[i] | valr), &bar0->swapper_ctrl);
2501 writeq(valt, &bar0->xmsi_address);
2502 val64 = readq(&bar0->xmsi_address);
2508 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2509 DBG_PRINT(ERR_DBG, "reads:0x%llx\n",val64);
2513 val64 = readq(&bar0->swapper_ctrl);
2514 val64 &= 0xFFFF000000000000ULL;
2518 * The device is by default set to a big endian format, so a
2519 * big endian driver need not set anything.
2521 val64 |= (SWAPPER_CTRL_TXP_FE |
2522 SWAPPER_CTRL_TXP_SE |
2523 SWAPPER_CTRL_TXD_R_FE |
2524 SWAPPER_CTRL_TXD_W_FE |
2525 SWAPPER_CTRL_TXF_R_FE |
2526 SWAPPER_CTRL_RXD_R_FE |
2527 SWAPPER_CTRL_RXD_W_FE |
2528 SWAPPER_CTRL_RXF_W_FE |
2529 SWAPPER_CTRL_XMSI_FE |
2530 SWAPPER_CTRL_XMSI_SE |
2531 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2532 writeq(val64, &bar0->swapper_ctrl);
2535 * Initially we enable all bits to make it accessible by the
2536 * driver, then we selectively enable only those bits that
2539 val64 |= (SWAPPER_CTRL_TXP_FE |
2540 SWAPPER_CTRL_TXP_SE |
2541 SWAPPER_CTRL_TXD_R_FE |
2542 SWAPPER_CTRL_TXD_R_SE |
2543 SWAPPER_CTRL_TXD_W_FE |
2544 SWAPPER_CTRL_TXD_W_SE |
2545 SWAPPER_CTRL_TXF_R_FE |
2546 SWAPPER_CTRL_RXD_R_FE |
2547 SWAPPER_CTRL_RXD_R_SE |
2548 SWAPPER_CTRL_RXD_W_FE |
2549 SWAPPER_CTRL_RXD_W_SE |
2550 SWAPPER_CTRL_RXF_W_FE |
2551 SWAPPER_CTRL_XMSI_FE |
2552 SWAPPER_CTRL_XMSI_SE |
2553 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2554 writeq(val64, &bar0->swapper_ctrl);
2556 val64 = readq(&bar0->swapper_ctrl);
2559 * Verifying if endian settings are accurate by reading a
2560 * feedback register.
2562 val64 = readq(&bar0->pif_rd_swapper_fb);
2563 if (val64 != 0x0123456789ABCDEFULL) {
2564 /* Endian settings are incorrect, calls for another dekko. */
2565 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2567 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2568 (unsigned long long) val64);
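/*
 * Illustrative helper (hypothetical, not part of the driver as listed):
 * the swapper verification above reduces to checking that the PIF
 * feed-back register reads back the expected byte pattern.  Returns
 * non-zero when the endian settings are correct.
 */
static inline int swapper_feedback_ok(XENA_dev_config_t __iomem *bar0)
{
	return readq(&bar0->pif_rd_swapper_fb) == 0x0123456789ABCDEFULL;
}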
2575 /* ********************************************************* *
2576 * Functions defined below concern the OS part of the driver *
2577 * ********************************************************* */
2580 * s2io_open - open entry point of the driver
2581 * @dev : pointer to the device structure.
2583 * This function is the open entry point of the driver. It mainly calls a
2584 * function to allocate Rx buffers and inserts them into the buffer
2585 * descriptors and then enables the Rx part of the NIC.
2587 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2591 static int s2io_open(struct net_device *dev)
2593 nic_t *sp = dev->priv;
2597 * Make sure you have link off by default every time
2598 * the NIC is initialized
2600 netif_carrier_off(dev);
2601 sp->last_link_state = LINK_DOWN;
2603 /* Initialize H/W and enable interrupts */
2604 if (s2io_card_up(sp)) {
2605 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2610 /* After proper initialization of H/W, register ISR */
2611 err = request_irq((int) sp->irq, s2io_isr, SA_SHIRQ,
2615 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2620 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2621 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2626 netif_start_queue(dev);
2631 * s2io_close - close entry point of the driver
2632 * @dev : device pointer.
2634 * This is the stop entry point of the driver. It needs to undo exactly
2635 * whatever was done by the open entry point, thus it's usually referred to
2636 * as the close function. Among other things this function mainly stops the
2637 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2639 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2643 static int s2io_close(struct net_device *dev)
2645 nic_t *sp = dev->priv;
2647 flush_scheduled_work();
2648 netif_stop_queue(dev);
2649 /* Reset card, kill tasklet and free Tx and Rx buffers. */
2652 free_irq(dev->irq, dev);
2653 sp->device_close_flag = TRUE; /* Device is shut down. */
2658 * s2io_xmit - Tx entry point of the driver
2659 * @skb : the socket buffer containing the Tx data.
2660 * @dev : device pointer.
2662 * This function is the Tx entry point of the driver. S2IO NIC supports
2663 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
2664 * NOTE: when the device can't queue the pkt, just the trans_start variable will
2667 * 0 on success & 1 on failure.
2670 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2672 nic_t *sp = dev->priv;
2673 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
2676 TxFIFO_element_t __iomem *tx_fifo;
2677 unsigned long flags;
2681 mac_info_t *mac_control;
2682 struct config_param *config;
2683 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2685 mac_control = &sp->mac_control;
2686 config = &sp->config;
2688 DBG_PRINT(TX_DBG, "%s: In S2IO Tx routine\n", dev->name);
2689 spin_lock_irqsave(&sp->tx_lock, flags);
2691 if (atomic_read(&sp->card_state) == CARD_DOWN) {
2692 DBG_PRINT(ERR_DBG, "%s: Card going down for reset\n",
2694 spin_unlock_irqrestore(&sp->tx_lock, flags);
2699 put_off = (u16) mac_control->tx_curr_put_info[queue].offset;
2700 get_off = (u16) mac_control->tx_curr_get_info[queue].offset;
2701 txdp = (TxD_t *) sp->list_info[queue][put_off].list_virt_addr;
2703 queue_len = mac_control->tx_curr_put_info[queue].fifo_len + 1;
2704 /* Avoid "put" pointer going beyond "get" pointer */
2705 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
2706 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
2707 netif_stop_queue(dev);
2709 spin_unlock_irqrestore(&sp->tx_lock, flags);
2713 mss = skb_shinfo(skb)->tso_size;
2715 txdp->Control_1 |= TXD_TCP_LSO_EN;
2716 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
2720 frg_cnt = skb_shinfo(skb)->nr_frags;
2721 frg_len = skb->len - skb->data_len;
2723 txdp->Host_Control = (unsigned long) skb;
2724 txdp->Buffer_Pointer = pci_map_single
2725 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
2726 if (skb->ip_summed == CHECKSUM_HW) {
2728 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
2732 txdp->Control_2 |= config->tx_intr_type;
2734 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
2735 TXD_GATHER_CODE_FIRST);
2736 txdp->Control_1 |= TXD_LIST_OWN_XENA;
2738 /* For fragmented SKB. */
2739 for (i = 0; i < frg_cnt; i++) {
2740 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2742 txdp->Buffer_Pointer = (u64) pci_map_page
2743 (sp->pdev, frag->page, frag->page_offset,
2744 frag->size, PCI_DMA_TODEVICE);
2745 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
2747 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
2749 tx_fifo = mac_control->tx_FIFO_start[queue];
2750 val64 = sp->list_info[queue][put_off].list_phy_addr;
2751 writeq(val64, &tx_fifo->TxDL_Pointer);
2753 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
2757 val64 |= TX_FIFO_SPECIAL_FUNC;
2759 writeq(val64, &tx_fifo->List_Control);
2761 /* Perform a PCI read to flush previous writes */
2762 val64 = readq(&bar0->general_int_status);
2765 put_off %= mac_control->tx_curr_put_info[queue].fifo_len + 1;
2766 mac_control->tx_curr_put_info[queue].offset = put_off;
2768 /* Avoid "put" pointer going beyond "get" pointer */
2769 if (((put_off + 1) % queue_len) == get_off) {
2771 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
2773 netif_stop_queue(dev);
2776 dev->trans_start = jiffies;
2777 spin_unlock_irqrestore(&sp->tx_lock, flags);
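/*
 * Illustrative helper (hypothetical, not part of the driver as listed):
 * each Tx FIFO is a ring of (fifo_len + 1) descriptor lists indexed by
 * the "put" and "get" offsets used in s2io_xmit() above.  One slot is
 * always left unused so that "put" never catches up with "get", so the
 * number of lists still free for transmission can be computed as:
 */
static inline u16 s2io_tx_free_slots(u16 put_off, u16 get_off, u16 queue_len)
{
	return (get_off + queue_len - put_off - 1) % queue_len;
}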
2783 * s2io_isr - ISR handler of the device .
2784 * @irq: the irq of the device.
2785 * @dev_id: a void pointer to the dev structure of the NIC.
2786 * @pt_regs: pointer to the registers pushed on the stack.
2787 * Description: This function is the ISR handler of the device. It
2788 * identifies the reason for the interrupt and calls the relevant
2789 * service routines. As a contingency measure, this ISR allocates the
2790 * recv buffers, if their numbers are below the panic value which is
2791 * presently set to 25% of the original number of rcv buffers allocated.
2793 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
2794 * IRQ_NONE: will be returned if interrupt is not from our device
2796 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2798 struct net_device *dev = (struct net_device *) dev_id;
2799 nic_t *sp = dev->priv;
2800 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2801 #ifndef CONFIG_S2IO_NAPI
2805 mac_info_t *mac_control;
2806 struct config_param *config;
2808 mac_control = &sp->mac_control;
2809 config = &sp->config;
2812 * Identify the cause for the interrupt and call the appropriate
2813 * interrupt handler. Causes for the interrupt could be:
2817 * 4. Error in any functional blocks of the NIC.
2819 reason = readq(&bar0->general_int_status);
2822 /* The interrupt was not raised by Xena. */
2826 /* If Intr is because of Tx Traffic */
2827 if (reason & GEN_INTR_TXTRAFFIC) {
2828 tx_intr_handler(sp);
2831 /* If Intr is because of an error */
2832 if (reason & (GEN_ERROR_INTR))
2833 alarm_intr_handler(sp);
2835 #ifdef CONFIG_S2IO_NAPI
2836 if (reason & GEN_INTR_RXTRAFFIC) {
2837 if (netif_rx_schedule_prep(dev)) {
2838 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
2840 __netif_rx_schedule(dev);
2844 /* If Intr is because of Rx Traffic */
2845 if (reason & GEN_INTR_RXTRAFFIC) {
2846 rx_intr_handler(sp);
2851 * If the Rx buffer count is below the panic threshold then
2852 * reallocate the buffers from the interrupt handler itself,
2853 * else schedule a tasklet to reallocate the buffers.
2855 #ifndef CONFIG_S2IO_NAPI
2856 for (i = 0; i < config->rx_ring_num; i++) {
2857 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
2858 int level = rx_buffer_level(sp, rxb_size, i);
2860 if ((level == PANIC) && (!TASKLET_IN_USE)) {
2861 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
2862 DBG_PRINT(INTR_DBG, "PANIC levels\n");
2863 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
2864 DBG_PRINT(ERR_DBG, "%s:Out of memory",
2866 DBG_PRINT(ERR_DBG, " in ISR!!\n");
2867 clear_bit(0, (&sp->tasklet_status));
2870 clear_bit(0, (&sp->tasklet_status));
2871 } else if (level == LOW) {
2872 tasklet_schedule(&sp->task);
2881 * s2io_get_stats - Updates the device statistics structure.
2882 * @dev : pointer to the device structure.
2884 * This function updates the device statistics structure in the s2io_nic
2885 * structure and returns a pointer to the same.
2887 * pointer to the updated net_device_stats structure.
2890 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
2892 nic_t *sp = dev->priv;
2893 mac_info_t *mac_control;
2894 struct config_param *config;
2896 mac_control = &sp->mac_control;
2897 config = &sp->config;
2899 sp->stats.tx_errors = mac_control->stats_info->tmac_any_err_frms;
2900 sp->stats.rx_errors = mac_control->stats_info->rmac_drop_frms;
2901 sp->stats.multicast = mac_control->stats_info->rmac_vld_mcst_frms;
2902 sp->stats.rx_length_errors =
2903 mac_control->stats_info->rmac_long_frms;
2905 return (&sp->stats);
2909 * s2io_set_multicast - entry point for multicast address enable/disable.
2910 * @dev : pointer to the device structure
2912 * This function is a driver entry point which gets called by the kernel
2913 * whenever multicast addresses must be enabled/disabled. This also gets
2914 * called to set/reset promiscuous mode. Depending on the device flag, we
2915 * determine if multicast addresses must be enabled or if promiscuous mode
2916 * is to be disabled, etc.
2921 static void s2io_set_multicast(struct net_device *dev)
2924 struct dev_mc_list *mclist;
2925 nic_t *sp = dev->priv;
2926 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2927 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
2929 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
2932 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
2933 /* Enable all Multicast addresses */
2934 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
2935 &bar0->rmac_addr_data0_mem);
2936 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
2937 &bar0->rmac_addr_data1_mem);
2938 val64 = RMAC_ADDR_CMD_MEM_WE |
2939 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2940 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
2941 writeq(val64, &bar0->rmac_addr_cmd_mem);
2942 /* Wait till command completes */
2943 wait_for_cmd_complete(sp);
2946 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
2947 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
2948 /* Disable all Multicast addresses */
2949 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
2950 &bar0->rmac_addr_data0_mem);
2951 val64 = RMAC_ADDR_CMD_MEM_WE |
2952 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2953 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
2954 writeq(val64, &bar0->rmac_addr_cmd_mem);
2955 /* Wait till command completes */
2956 wait_for_cmd_complete(sp);
2959 sp->all_multi_pos = 0;
2962 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
2963 /* Put the NIC into promiscuous mode */
2964 add = &bar0->mac_cfg;
2965 val64 = readq(&bar0->mac_cfg);
2966 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
2968 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2969 writel((u32) val64, add);
2970 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2971 writel((u32) (val64 >> 32), (add + 4));
2973 val64 = readq(&bar0->mac_cfg);
2974 sp->promisc_flg = 1;
2975 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
2977 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
2978 /* Remove the NIC from promiscuous mode */
2979 add = &bar0->mac_cfg;
2980 val64 = readq(&bar0->mac_cfg);
2981 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
2983 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2984 writel((u32) val64, add);
2985 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2986 writel((u32) (val64 >> 32), (add + 4));
2988 val64 = readq(&bar0->mac_cfg);
2989 sp->promisc_flg = 0;
2990 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
2994 /* Update individual M_CAST address list */
2995 if ((!sp->m_cast_flg) && dev->mc_count) {
2997 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
2998 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3000 DBG_PRINT(ERR_DBG, "can be added, please enable ");
3001 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3005 prev_cnt = sp->mc_addr_count;
3006 sp->mc_addr_count = dev->mc_count;
3008 /* Clear out the previous list of Mc in the H/W. */
3009 for (i = 0; i < prev_cnt; i++) {
3010 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3011 &bar0->rmac_addr_data0_mem);
3012 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3013 &bar0->rmac_addr_data1_mem);
3014 val64 = RMAC_ADDR_CMD_MEM_WE |
3015 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3016 RMAC_ADDR_CMD_MEM_OFFSET
3017 (MAC_MC_ADDR_START_OFFSET + i);
3018 writeq(val64, &bar0->rmac_addr_cmd_mem);
3020 /* Wait till command completes */
3021 if (wait_for_cmd_complete(sp)) {
3022 DBG_PRINT(ERR_DBG, "%s: Adding ",
3024 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3029 /* Create the new Rx filter list and update the same in H/W. */
3030 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3031 i++, mclist = mclist->next) {
3032 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
3034 for (j = 0; j < ETH_ALEN; j++) {
3035 mac_addr |= mclist->dmi_addr[j];
3039 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3040 &bar0->rmac_addr_data0_mem);
3041 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3042 &bar0->rmac_addr_data1_mem);
3044 val64 = RMAC_ADDR_CMD_MEM_WE |
3045 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3046 RMAC_ADDR_CMD_MEM_OFFSET
3047 (i + MAC_MC_ADDR_START_OFFSET);
3048 writeq(val64, &bar0->rmac_addr_cmd_mem);
3050 /* Wait till command completes */
3051 if (wait_for_cmd_complete(sp)) {
3052 DBG_PRINT(ERR_DBG, "%s: Adding ",
3054 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3062 * s2io_set_mac_addr - Programs the Xframe mac address
3063 * @dev : pointer to the device structure.
3064 * @addr: a uchar pointer to the new mac address which is to be set.
3065 * Description : This procedure will program the Xframe to receive
3066 * frames with the new MAC address
3067 * Return value: SUCCESS on success and an appropriate (-)ve integer
3068 * as defined in errno.h file on failure.
3071 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3073 nic_t *sp = dev->priv;
3074 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3075 register u64 val64, mac_addr = 0;
3079 * Set the new MAC address as the new unicast filter and reflect this
3080 * change on the device address registered with the OS. It will be
3083 for (i = 0; i < ETH_ALEN; i++) {
3085 mac_addr |= addr[i];
3088 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3089 &bar0->rmac_addr_data0_mem);
3092 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3093 RMAC_ADDR_CMD_MEM_OFFSET(0);
3094 writeq(val64, &bar0->rmac_addr_cmd_mem);
3095 /* Wait till command completes */
3096 if (wait_for_cmd_complete(sp)) {
3097 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
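/*
 * Illustrative helper (hypothetical, not part of the driver as listed):
 * s2io_set_mac_addr() above packs the six address bytes, most
 * significant byte first, into a u64 before writing it to
 * rmac_addr_data0_mem.  An equivalent stand-alone form:
 */
static inline u64 s2io_mac_to_u64(const u8 *addr)
{
	u64 val = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		val = (val << 8) | addr[i];
	return val;
}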
3105 * s2io_ethtool_sset - Sets different link parameters.
3106 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3107 * @info: pointer to the structure with parameters given by ethtool to set
3110 * The function sets different link parameters provided by the user onto
3116 static int s2io_ethtool_sset(struct net_device *dev,
3117 struct ethtool_cmd *info)
3119 nic_t *sp = dev->priv;
3120 if ((info->autoneg == AUTONEG_ENABLE) ||
3121 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3124 s2io_close(sp->dev);
3132 * s2io_ethtool_gset - Return link specific information.
3133 * @sp : private member of the device structure, pointer to the
3134 * s2io_nic structure.
3135 * @info : pointer to the structure with parameters given by ethtool
3136 * to return link information.
3138 * Returns link specific information like speed, duplex etc.. to ethtool.
3140 * return 0 on success.
3143 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3145 nic_t *sp = dev->priv;
3146 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3147 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3148 info->port = PORT_FIBRE;
3149 /* info->transceiver?? TODO */
3151 if (netif_carrier_ok(sp->dev)) {
3152 info->speed = 10000;
3153 info->duplex = DUPLEX_FULL;
3159 info->autoneg = AUTONEG_DISABLE;
3164 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3165 * @sp : private member of the device structure, which is a pointer to the
3166 * s2io_nic structure.
3167 * @info : pointer to the structure with parameters given by ethtool to
3168 * return driver information.
3170 * Returns driver specific information like name, version, etc. to ethtool.
3175 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3176 struct ethtool_drvinfo *info)
3178 nic_t *sp = dev->priv;
3180 strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3181 strncpy(info->version, s2io_driver_version,
3182 sizeof(s2io_driver_version));
3183 strncpy(info->fw_version, "", 32);
3184 strncpy(info->bus_info, pci_name(sp->pdev), 32);
3185 info->regdump_len = XENA_REG_SPACE;
3186 info->eedump_len = XENA_EEPROM_SPACE;
3187 info->testinfo_len = S2IO_TEST_LEN;
3188 info->n_stats = S2IO_STAT_LEN;
3192 * s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
3193 * @sp: private member of the device structure, which is a pointer to the
3194 * s2io_nic structure.
3195 * @regs : pointer to the structure with parameters given by ethtool for
3196 * dumping the registers.
3197 * @reg_space: The input argument into which all the registers are dumped.
3199 * Dumps the entire register space of the Xframe NIC into the user given
3205 static void s2io_ethtool_gregs(struct net_device *dev,
3206 struct ethtool_regs *regs, void *space)
3210 u8 *reg_space = (u8 *) space;
3211 nic_t *sp = dev->priv;
3213 regs->len = XENA_REG_SPACE;
3214 regs->version = sp->pdev->subsystem_device;
3216 for (i = 0; i < regs->len; i += 8) {
3217 reg = readq(sp->bar0 + i);
3218 memcpy((reg_space + i), &reg, 8);
3223 * s2io_phy_id - timer function that alternates adapter LED.
3224 * @data : address of the private member of the device structure, which
3225 * is a pointer to the s2io_nic structure, provided as an u32.
3226 * Description: This is actually the timer function that toggles the
3227 * adapter LED bit in the adapter control register on every
3228 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
3229 * once every second.
3231 static void s2io_phy_id(unsigned long data)
3233 nic_t *sp = (nic_t *) data;
3234 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3238 subid = sp->pdev->subsystem_device;
3239 if ((subid & 0xFF) >= 0x07) {
3240 val64 = readq(&bar0->gpio_control);
3241 val64 ^= GPIO_CTRL_GPIO_0;
3242 writeq(val64, &bar0->gpio_control);
3244 val64 = readq(&bar0->adapter_control);
3245 val64 ^= ADAPTER_LED_ON;
3246 writeq(val64, &bar0->adapter_control);
3249 mod_timer(&sp->id_timer, jiffies + HZ / 2);
3253 * s2io_ethtool_idnic - To physically identify the nic on the system.
3254 * @sp : private member of the device structure, which is a pointer to the
3255 * s2io_nic structure.
3256 * @id : pointer to the structure with identification parameters given by
3258 * Description: Used to physically identify the NIC on the system.
3259 * The Link LED will blink for a time specified by the user for
3261 * NOTE: The Link has to be Up to be able to blink the LED. Hence
3262 * identification is possible only if its link is up.
3264 * int , returns 0 on success
3267 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3269 u64 val64 = 0, last_gpio_ctrl_val;
3270 nic_t *sp = dev->priv;
3271 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3274 subid = sp->pdev->subsystem_device;
3275 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3276 if ((subid & 0xFF) < 0x07) {
3277 val64 = readq(&bar0->adapter_control);
3278 if (!(val64 & ADAPTER_CNTL_EN)) {
3280 "Adapter Link down, cannot blink LED\n");
3284 if (sp->id_timer.function == NULL) {
3285 init_timer(&sp->id_timer);
3286 sp->id_timer.function = s2io_phy_id;
3287 sp->id_timer.data = (unsigned long) sp;
3289 mod_timer(&sp->id_timer, jiffies);
3291 msleep(data * 1000);
3294 del_timer_sync(&sp->id_timer);
3296 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
3297 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3298 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3305 * s2io_ethtool_getpause_data - Pause frame generation and reception.
3306 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3307 * @ep : pointer to the structure with pause parameters given by ethtool.
3309 * Returns the Pause frame generation and reception capability of the NIC.
3313 static void s2io_ethtool_getpause_data(struct net_device *dev,
3314 struct ethtool_pauseparam *ep)
3317 nic_t *sp = dev->priv;
3318 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3320 val64 = readq(&bar0->rmac_pause_cfg);
3321 if (val64 & RMAC_PAUSE_GEN_ENABLE)
3322 ep->tx_pause = TRUE;
3323 if (val64 & RMAC_PAUSE_RX_ENABLE)
3324 ep->rx_pause = TRUE;
3325 ep->autoneg = FALSE;
3329 * s2io_ethtool_setpause_data - set/reset pause frame generation.
3330 * @sp : private member of the device structure, which is a pointer to the
3331 * s2io_nic structure.
3332 * @ep : pointer to the structure with pause parameters given by ethtool.
3334 * It can be used to set or reset Pause frame generation or reception
3335 * support of the NIC.
3337 * int, returns 0 on Success
3340 static int s2io_ethtool_setpause_data(struct net_device *dev,
3341 struct ethtool_pauseparam *ep)
3344 nic_t *sp = dev->priv;
3345 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3347 val64 = readq(&bar0->rmac_pause_cfg);
3349 val64 |= RMAC_PAUSE_GEN_ENABLE;
3351 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3353 val64 |= RMAC_PAUSE_RX_ENABLE;
3355 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3356 writeq(val64, &bar0->rmac_pause_cfg);
3361 * read_eeprom - reads 4 bytes of data from user given offset.
3362 * @sp : private member of the device structure, which is a pointer to the
3363 * s2io_nic structure.
3364 * @off : offset from which the data is to be read
3365 * @data : It's an output parameter where the data read at the given
3368 * Will read 4 bytes of data from the user given offset and return the
3370 * NOTE: Will allow reading only the part of the EEPROM visible through the
3373 * -1 on failure and 0 on success.
3376 #define S2IO_DEV_ID 5
3377 static int read_eeprom(nic_t * sp, int off, u32 * data)
3382 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3384 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3385 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3386 I2C_CONTROL_CNTL_START;
3387 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3389 while (exit_cnt < 5) {
3390 val64 = readq(&bar0->i2c_control);
3391 if (I2C_CONTROL_CNTL_END(val64)) {
3392 *data = I2C_CONTROL_GET_DATA(val64);
3404 * write_eeprom - actually writes the relevant part of the data value.
3405 * @sp : private member of the device structure, which is a pointer to the
3406 * s2io_nic structure.
3407 * @off : offset at which the data must be written
3408 * @data : The data that is to be written
3409 * @cnt : Number of bytes of the data that are actually to be written into
3410 * the Eeprom. (max of 3)
3412 * Actually writes the relevant part of the data value into the Eeprom
3413 * through the I2C bus.
3415 * 0 on success, -1 on failure.
3418 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3420 int exit_cnt = 0, ret = -1;
3422 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3424 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3425 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3426 I2C_CONTROL_CNTL_START;
3427 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3429 while (exit_cnt < 5) {
3430 val64 = readq(&bar0->i2c_control);
3431 if (I2C_CONTROL_CNTL_END(val64)) {
3432 if (!(val64 & I2C_CONTROL_NACK))
3444 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
3445 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3446 * @eeprom : pointer to the user level structure provided by ethtool,
3447 * containing all relevant information.
3448 * @data_buf : user defined value to be written into Eeprom.
3449 * Description: Reads the values stored in the Eeprom at the given offset
3450 * for a given length. Stores these values into the input argument data
3451 * buffer 'data_buf' and returns these to the caller (ethtool).
3456 static int s2io_ethtool_geeprom(struct net_device *dev,
3457 struct ethtool_eeprom *eeprom, u8 * data_buf)
3460 nic_t *sp = dev->priv;
3462 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
3464 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3465 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
3467 for (i = 0; i < eeprom->len; i += 4) {
3468 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
3469 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
3473 memcpy((data_buf + i), &valid, 4);
3479 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3480 * @sp : private member of the device structure, which is a pointer to the
3481 * s2io_nic structure.
3482 * @eeprom : pointer to the user level structure provided by ethtool,
3483 * containing all relevant information.
3484 * @data_buf : user defined value to be written into Eeprom.
3486 * Tries to write the user provided value in the Eeprom, at the offset
3487 * given by the user.
3489 * 0 on success, -EFAULT on failure.
3492 static int s2io_ethtool_seeprom(struct net_device *dev,
3493 struct ethtool_eeprom *eeprom,
3496 int len = eeprom->len, cnt = 0;
3497 u32 valid = 0, data;
3498 nic_t *sp = dev->priv;
3500 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
3502 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
3503 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
3509 data = (u32) data_buf[cnt] & 0x000000FF;
3511 valid = (u32) (data << 24);
3515 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
3517 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
3519 "write into the specified offset\n");
3530 * s2io_register_test - reads and writes into all clock domains.
3531 * @sp : private member of the device structure, which is a pointer to the
3532 * s2io_nic structure.
3533 * @data : variable that returns the result of each of the tests conducted by the driver.
3536 * Read and write into all clock domains. The NIC has 3 clock domains;
3537 * the test verifies that registers in all three regions are accessible.
3542 static int s2io_register_test(nic_t * sp, uint64_t * data)
3544 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3548 val64 = readq(&bar0->pcc_enable);
3549 if (val64 != 0xff00000000000000ULL) {
3551 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
3554 val64 = readq(&bar0->rmac_pause_cfg);
3555 if (val64 != 0xc000ffff00000000ULL) {
3557 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
3560 val64 = readq(&bar0->rx_queue_cfg);
3561 if (val64 != 0x0808080808080808ULL) {
3563 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
3566 val64 = readq(&bar0->xgxs_efifo_cfg);
3567 if (val64 != 0x000000001923141EULL) {
3569 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
3572 val64 = 0x5A5A5A5A5A5A5A5AULL;
3573 writeq(val64, &bar0->xmsi_data);
3574 val64 = readq(&bar0->xmsi_data);
3575 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
3577 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
3580 val64 = 0xA5A5A5A5A5A5A5A5ULL;
3581 writeq(val64, &bar0->xmsi_data);
3582 val64 = readq(&bar0->xmsi_data);
3583 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
3585 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
3593 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
3594 * @sp : private member of the device structure, which is a pointer to the
3595 * s2io_nic structure.
3596 * @data: variable that returns the result of each of the tests conducted by
3599 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
3605 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
3610 /* Test Write Error at offset 0 */
3611 if (!write_eeprom(sp, 0, 0, 3))
3614 /* Test Write at offset 4f0 */
3615 if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
3617 if (read_eeprom(sp, 0x4F0, &ret_data))
3620 if (ret_data != 0x01234567)
3623 /* Reset the EEPROM data to FFFF */
3624 write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
3626 /* Test Write Request Error at offset 0x7c */
3627 if (!write_eeprom(sp, 0x07C, 0, 3))
3630 /* Test Write Request at offset 0x7fc */
3631 if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
3633 if (read_eeprom(sp, 0x7FC, &ret_data))
3636 if (ret_data != 0x01234567)
3639 /* Reset the EEPROM data to FFFF */
3640 write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
3642 /* Test Write Error at offset 0x80 */
3643 if (!write_eeprom(sp, 0x080, 0, 3))
3646 /* Test Write Error at offset 0xfc */
3647 if (!write_eeprom(sp, 0x0FC, 0, 3))
3650 /* Test Write Error at offset 0x100 */
3651 if (!write_eeprom(sp, 0x100, 0, 3))
3654 /* Test Write Error at offset 4ec */
3655 if (!write_eeprom(sp, 0x4EC, 0, 3))
3663 * s2io_bist_test - invokes the MemBist test of the card .
3664 * @sp : private member of the device structure, which is a pointer to the
3665 * s2io_nic structure.
3666 * @data: variable that returns the result of each of the tests conducted by
3669 * This invokes the MemBist test of the card. We give around
3670 * 2 secs for the test to complete. If it's still not complete
3671 * within this period, we consider that the test failed.
3673 * 0 on success and -1 on failure.
3676 static int s2io_bist_test(nic_t * sp, uint64_t * data)
3679 int cnt = 0, ret = -1;
3681 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3682 bist |= PCI_BIST_START;
3683 pci_write_config_word(sp->pdev, PCI_BIST, bist);
3686 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3687 if (!(bist & PCI_BIST_START)) {
3688 *data = (bist & PCI_BIST_CODE_MASK);
3700 * s2io_link_test - verifies the link state of the nic
3701 * @sp : private member of the device structure, which is a pointer to the
3702 * s2io_nic structure.
3703 * @data: variable that returns the result of each of the tests conducted by
3706 * The function verifies the link state of the NIC and updates the input
3707 * argument 'data' appropriately.
3712 static int s2io_link_test(nic_t * sp, uint64_t * data)
3714 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3717 val64 = readq(&bar0->adapter_status);
3718 if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
3725 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
3726 * @sp : private member of the device structure, which is a pointer to the
3727 * s2io_nic structure.
3728 * @data : variable that returns the result of each of the tests
3729 * conducted by the driver.
3731 * This is one of the offline tests that tests the read and write
3732 * access to the RldRam chip on the NIC.
3737 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
3739 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3741 int cnt, iteration = 0, test_pass = 0;
3743 val64 = readq(&bar0->adapter_control);
3744 val64 &= ~ADAPTER_ECC_EN;
3745 writeq(val64, &bar0->adapter_control);
3747 val64 = readq(&bar0->mc_rldram_test_ctrl);
3748 val64 |= MC_RLDRAM_TEST_MODE;
3749 writeq(val64, &bar0->mc_rldram_test_ctrl);
3751 val64 = readq(&bar0->mc_rldram_mrs);
3752 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
3753 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3755 val64 |= MC_RLDRAM_MRS_ENABLE;
3756 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3758 while (iteration < 2) {
3759 val64 = 0x55555555aaaa0000ULL;
3760 if (iteration == 1) {
3761 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3763 writeq(val64, &bar0->mc_rldram_test_d0);
3765 val64 = 0xaaaa5a5555550000ULL;
3766 if (iteration == 1) {
3767 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3769 writeq(val64, &bar0->mc_rldram_test_d1);
3771 val64 = 0x55aaaaaaaa5a0000ULL;
3772 if (iteration == 1) {
3773 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3775 writeq(val64, &bar0->mc_rldram_test_d2);
3777 val64 = (u64) (0x0000003fffff0000ULL);
3778 writeq(val64, &bar0->mc_rldram_test_add);
3781 val64 = MC_RLDRAM_TEST_MODE;
3782 writeq(val64, &bar0->mc_rldram_test_ctrl);
3785 MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
3787 writeq(val64, &bar0->mc_rldram_test_ctrl);
3789 for (cnt = 0; cnt < 5; cnt++) {
3790 val64 = readq(&bar0->mc_rldram_test_ctrl);
3791 if (val64 & MC_RLDRAM_TEST_DONE)
3799 val64 = MC_RLDRAM_TEST_MODE;
3800 writeq(val64, &bar0->mc_rldram_test_ctrl);
3802 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
3803 writeq(val64, &bar0->mc_rldram_test_ctrl);
3805 for (cnt = 0; cnt < 5; cnt++) {
3806 val64 = readq(&bar0->mc_rldram_test_ctrl);
3807 if (val64 & MC_RLDRAM_TEST_DONE)
3815 val64 = readq(&bar0->mc_rldram_test_ctrl);
3816 if (val64 & MC_RLDRAM_TEST_PASS)
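/*
 * Illustrative helper (hypothetical, not part of the driver as listed):
 * the RLDRAM test above runs two iterations; on the second pass the
 * data patterns are complemented in their upper 48 bits, which is what
 * the "^= 0xFFFFFFFFFFFF0000ULL" statements do.
 */
static inline u64 s2io_rldram_pattern(u64 pattern, int iteration)
{
	return iteration ? (pattern ^ 0xFFFFFFFFFFFF0000ULL) : pattern;
}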
3831 * s2io_ethtool_test - conducts 6 tests to determine the health of the card.
3832 * @sp : private member of the device structure, which is a pointer to the
3833 * s2io_nic structure.
3834 * @ethtest : pointer to a ethtool command specific structure that will be
3835 * returned to the user.
3836 * @data : variable that returns the result of each of the tests
3837 * conducted by the driver.
3839 * This function conducts 6 tests (4 offline and 2 online) to determine
3840 * the health of the card.
3845 static void s2io_ethtool_test(struct net_device *dev,
3846 struct ethtool_test *ethtest,
3849 nic_t *sp = dev->priv;
3850 int orig_state = netif_running(sp->dev);
3852 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
3853 /* Offline Tests. */
3855 s2io_close(sp->dev);
3856 s2io_set_swapper(sp);
3858 s2io_set_swapper(sp);
3860 if (s2io_register_test(sp, &data[0]))
3861 ethtest->flags |= ETH_TEST_FL_FAILED;
3864 s2io_set_swapper(sp);
3866 if (s2io_rldram_test(sp, &data[3]))
3867 ethtest->flags |= ETH_TEST_FL_FAILED;
3870 s2io_set_swapper(sp);
3872 if (s2io_eeprom_test(sp, &data[1]))
3873 ethtest->flags |= ETH_TEST_FL_FAILED;
3875 if (s2io_bist_test(sp, &data[4]))
3876 ethtest->flags |= ETH_TEST_FL_FAILED;
3886 "%s: is not up, cannot run test\n",
3895 if (s2io_link_test(sp, &data[2]))
3896 ethtest->flags |= ETH_TEST_FL_FAILED;
3905 static void s2io_get_ethtool_stats(struct net_device *dev,
3906 struct ethtool_stats *estats,
3910 nic_t *sp = dev->priv;
3911 StatInfo_t *stat_info = sp->mac_control.stats_info;
3913 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms);
3914 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets);
3915 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
3916 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms);
3917 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms);
3918 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
3919 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms);
3920 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
3921 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip);
3922 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip);
3923 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp);
3924 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp);
3925 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
3926 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp);
3927 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms);
3928 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets);
3929 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
3930 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
3931 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms);
3932 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms);
3933 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
3934 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
3935 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
3936 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms);
3937 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms);
3938 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms);
3939 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms);
3940 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms);
3941 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip);
3942 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
3943 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
3944 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip);
3945 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp);
3946 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
3947 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp);
3948 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp);
3949 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt);
3950 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip);
3951 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
3954 static int s2io_ethtool_get_regs_len(struct net_device *dev)
3956 return (XENA_REG_SPACE);
3960 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
3962 nic_t *sp = dev->priv;
3964 return (sp->rx_csum);
3967 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
3969 nic_t *sp = dev->priv;
3979 static int s2io_get_eeprom_len(struct net_device *dev)
3981 return (XENA_EEPROM_SPACE);
3984 static int s2io_ethtool_self_test_count(struct net_device *dev)
3986 return (S2IO_TEST_LEN);
3989 static void s2io_ethtool_get_strings(struct net_device *dev,
3990 u32 stringset, u8 * data)
3992 switch (stringset) {
3994 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
3997 memcpy(data, &ethtool_stats_keys,
3998 sizeof(ethtool_stats_keys));
4002 static int s2io_ethtool_get_stats_count(struct net_device *dev)
4004 return (S2IO_STAT_LEN);
4007 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4010 dev->features |= NETIF_F_IP_CSUM;
4012 dev->features &= ~NETIF_F_IP_CSUM;
4018 static struct ethtool_ops netdev_ethtool_ops = {
4019 .get_settings = s2io_ethtool_gset,
4020 .set_settings = s2io_ethtool_sset,
4021 .get_drvinfo = s2io_ethtool_gdrvinfo,
4022 .get_regs_len = s2io_ethtool_get_regs_len,
4023 .get_regs = s2io_ethtool_gregs,
4024 .get_link = ethtool_op_get_link,
4025 .get_eeprom_len = s2io_get_eeprom_len,
4026 .get_eeprom = s2io_ethtool_geeprom,
4027 .set_eeprom = s2io_ethtool_seeprom,
4028 .get_pauseparam = s2io_ethtool_getpause_data,
4029 .set_pauseparam = s2io_ethtool_setpause_data,
4030 .get_rx_csum = s2io_ethtool_get_rx_csum,
4031 .set_rx_csum = s2io_ethtool_set_rx_csum,
4032 .get_tx_csum = ethtool_op_get_tx_csum,
4033 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4034 .get_sg = ethtool_op_get_sg,
4035 .set_sg = ethtool_op_set_sg,
4037 .get_tso = ethtool_op_get_tso,
4038 .set_tso = ethtool_op_set_tso,
4040 .self_test_count = s2io_ethtool_self_test_count,
4041 .self_test = s2io_ethtool_test,
4042 .get_strings = s2io_ethtool_get_strings,
4043 .phys_id = s2io_ethtool_idnic,
4044 .get_stats_count = s2io_ethtool_get_stats_count,
4045 .get_ethtool_stats = s2io_get_ethtool_stats
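/*
 * Usage note (illustrative, not part of this listing): an ethtool_ops
 * table like the one above is attached to the net_device during probe,
 * for example:
 *
 *	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
 *
 * after which the ethtool core dispatches user requests to the
 * handlers registered here.
 */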
4049 * s2io_ioctl - Entry point for the Ioctl
4050 * @dev : Device pointer.
4051 * @ifr : An IOCTL specific structure that can contain a pointer to
4052 * a proprietary structure used to pass information to the driver.
4053 * @cmd : This is used to distinguish between the different commands that
4054 * can be passed to the IOCTL functions.
4056 * This function has support for ethtool, adding multiple MAC addresses on
4057 * the NIC and some DBG commands for the util tool.
4059 * Currently the IOCTL supports no operations, hence by default this
4060 * function returns an 'operation not supported' value.
4063 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4069 * s2io_change_mtu - entry point to change MTU size for the device.
4070 * @dev : device pointer.
4071 * @new_mtu : the new MTU size for the device.
4072 * Description: A driver entry point to change MTU size for the device.
4073 * Before changing the MTU the device must be stopped.
4075 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4079 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
4081 nic_t *sp = dev->priv;
4082 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4085 if (netif_running(dev)) {
4086 DBG_PRINT(ERR_DBG, "%s: Must be stopped to ", dev->name);
4087 DBG_PRINT(ERR_DBG, "change its MTU \n");
4091 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4092 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
4097 /* Set the new MTU into the PYLD register of the NIC */
4099 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4107 * s2io_tasklet - Bottom half of the ISR.
4108 * @dev_addr : address of the device structure, passed as an unsigned long.
4110 * This is the tasklet or the bottom half of the ISR. This is
4111 * an extension of the ISR which is scheduled by the scheduler to be run
4112 * when the load on the CPU is low. All low priority tasks of the ISR can
4113 * be pushed into the tasklet. For now the tasklet is used only to
4114 * replenish the Rx buffers in the Rx buffer descriptors.
4119 static void s2io_tasklet(unsigned long dev_addr)
4121 struct net_device *dev = (struct net_device *) dev_addr;
4122 nic_t *sp = dev->priv;
4124 mac_info_t *mac_control;
4125 struct config_param *config;
4127 mac_control = &sp->mac_control;
4128 config = &sp->config;
4130 if (!TASKLET_IN_USE) {
4131 for (i = 0; i < config->rx_ring_num; i++) {
4132 ret = fill_rx_buffers(sp, i);
4133 if (ret == -ENOMEM) {
4134 DBG_PRINT(ERR_DBG, "%s: Out of ",
4136 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
4138 } else if (ret == -EFILL) {
4140 "%s: Rx Ring %d is full\n",
4145 clear_bit(0, (&sp->tasklet_status));
4150 * s2io_set_link - Set the Link status
4151 * @data: long pointer to device private structure
4152 * Description: Sets the link status for the adapter
4155 static void s2io_set_link(unsigned long data)
4157 nic_t *nic = (nic_t *) data;
4158 struct net_device *dev = nic->dev;
4159 XENA_dev_config_t __iomem *bar0 = nic->bar0;
4163 if (test_and_set_bit(0, &(nic->link_state))) {
4164 /* The card is being reset, no point doing anything */
4168 subid = nic->pdev->subsystem_device;
4170 * Allow a small delay for the NIC's self-initiated
4171 * cleanup to complete.
4175 val64 = readq(&bar0->adapter_status);
4176 if (verify_xena_quiescence(val64, nic->device_enabled_once)) {
4177 if (LINK_IS_UP(val64)) {
4178 val64 = readq(&bar0->adapter_control);
4179 val64 |= ADAPTER_CNTL_EN;
4180 writeq(val64, &bar0->adapter_control);
4181 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4182 val64 = readq(&bar0->gpio_control);
4183 val64 |= GPIO_CTRL_GPIO_0;
4184 writeq(val64, &bar0->gpio_control);
4185 val64 = readq(&bar0->gpio_control);
4187 val64 |= ADAPTER_LED_ON;
4188 writeq(val64, &bar0->adapter_control);
4190 val64 = readq(&bar0->adapter_status);
4191 if (!LINK_IS_UP(val64)) {
4192 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4193 DBG_PRINT(ERR_DBG, " Link down");
4194 DBG_PRINT(ERR_DBG, "after ");
4195 DBG_PRINT(ERR_DBG, "enabling ");
4196 DBG_PRINT(ERR_DBG, "device \n");
4198 if (nic->device_enabled_once == FALSE) {
4199 nic->device_enabled_once = TRUE;
4201 s2io_link(nic, LINK_UP);
4203 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4204 val64 = readq(&bar0->gpio_control);
4205 val64 &= ~GPIO_CTRL_GPIO_0;
4206 writeq(val64, &bar0->gpio_control);
4207 val64 = readq(&bar0->gpio_control);
4209 s2io_link(nic, LINK_DOWN);
4211 } else { /* NIC is not Quiescent. */
4212 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4213 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4214 netif_stop_queue(dev);
4216 clear_bit(0, &(nic->link_state));
4219 static void s2io_card_down(nic_t * sp)
4222 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4223 unsigned long flags;
4224 register u64 val64 = 0;
4226 /* If s2io_set_link task is executing, wait till it completes. */
4227 while (test_and_set_bit(0, &(sp->link_state)))
4229 atomic_set(&sp->card_state, CARD_DOWN);
4231 /* disable Tx and Rx traffic on the NIC */
4235 tasklet_kill(&sp->task);
4237 /* Check if the device is Quiescent and then Reset the NIC */
4239 val64 = readq(&bar0->adapter_status);
4240 if (verify_xena_quiescence(val64, sp->device_enabled_once)) {
4248 "s2io_close:Device not Quiescent ");
4249 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
4250 (unsigned long long) val64);
4254 spin_lock_irqsave(&sp->tx_lock, flags);
4257 /* Free all unused Tx and Rx buffers */
4258 free_tx_buffers(sp);
4259 free_rx_buffers(sp);
4261 spin_unlock_irqrestore(&sp->tx_lock, flags);
4262 clear_bit(0, &(sp->link_state));
4265 static int s2io_card_up(nic_t * sp)
4268 mac_info_t *mac_control;
4269 struct config_param *config;
4270 struct net_device *dev = (struct net_device *) sp->dev;
4272 /* Initialize the H/W I/O registers */
4273 if (init_nic(sp) != 0) {
4274 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4280 * Initializing the Rx buffers. For now we are considering only 1
4281 * Rx ring and initializing buffers into 30 Rx blocks
4283 mac_control = &sp->mac_control;
4284 config = &sp->config;
4286 for (i = 0; i < config->rx_ring_num; i++) {
4287 if ((ret = fill_rx_buffers(sp, i))) {
4288 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
4291 free_rx_buffers(sp);
4294 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4295 atomic_read(&sp->rx_bufs_left[i]));
4298 /* Setting its receive mode */
4299 s2io_set_multicast(dev);
4301 /* Enable tasklet for the device */
4302 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4304 /* Enable Rx Traffic and interrupts on the NIC */
4305 if (start_nic(sp)) {
4306 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4307 tasklet_kill(&sp->task);
4309 free_irq(dev->irq, dev);
4310 free_rx_buffers(sp);
4314 atomic_set(&sp->card_state, CARD_UP);
4319 * s2io_restart_nic - Resets the NIC.
4320 * @data : long pointer to the device private structure
4322 * This function is scheduled to be run by the s2io_tx_watchdog
4323 * function after 0.5 secs to reset the NIC. The idea is to reduce
4324 * the run time of the watch dog routine which is run holding a
4328 static void s2io_restart_nic(unsigned long data)
4330 struct net_device *dev = (struct net_device *) data;
4331 nic_t *sp = dev->priv;
4334 if (s2io_card_up(sp)) {
4335 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4338 netif_wake_queue(dev);
4339 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4344 * s2io_tx_watchdog - Watchdog for transmit side.
4345 * @dev : Pointer to net device structure
4347 * This function is triggered if the Tx Queue is stopped
4348 * for a pre-defined amount of time when the Interface is still up.
4349 * If the Interface is jammed in such a situation, the hardware is
4350 * reset (by s2io_close) and restarted again (by s2io_open) to
4351 * overcome any problem that might have been caused in the hardware.
4356 static void s2io_tx_watchdog(struct net_device *dev)
4358 nic_t *sp = dev->priv;
4360 if (netif_carrier_ok(dev)) {
4361 schedule_work(&sp->rst_timer_task);
4366 * rx_osm_handler - To perform some OS related operations on SKB.
4367 * @sp: private member of the device structure, pointer to s2io_nic structure.
4368 * @skb : the socket buffer pointer.
4369 * @len : length of the packet
4370 * @cksum : FCS checksum of the frame.
4371 * @ring_no : the ring from which this RxD was extracted.
4373 * This function is called by the Rx interrupt service routine to perform
4374 * some OS related operations on the SKB before passing it to the upper
4375 * layers. It mainly checks if the checksum is OK, if so adds it to the
4376 * SKB's cksum variable, increments the Rx packet count and passes the SKB
4377 * to the upper layer. If the checksum is wrong, it increments the Rx
4378 * packet error count, frees the SKB and returns error.
4380 * SUCCESS on success and -1 on failure.
4382 #ifndef CONFIG_2BUFF_MODE
4383 static int rx_osm_handler(nic_t * sp, u16 len, RxD_t * rxdp, int ring_no)
4385 static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no,
4389 struct net_device *dev = (struct net_device *) sp->dev;
4390 struct sk_buff *skb =
4391 (struct sk_buff *) ((unsigned long) rxdp->Host_Control);
4392 u16 l3_csum, l4_csum;
4393 #ifdef CONFIG_2BUFF_MODE
4394 int buf0_len, buf2_len;
4395 unsigned char *buff;
4398 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
4399 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && (sp->rx_csum)) {
4400 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
4401 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
4403 * NIC verifies if the Checksum of the received
4404 * frame is Ok or not and accordingly returns
4405 * a flag in the RxD.
4407 skb->ip_summed = CHECKSUM_UNNECESSARY;
4410 * Packet with erroneous checksum, let the
4411 * upper layers deal with it.
4413 skb->ip_summed = CHECKSUM_NONE;
4416 skb->ip_summed = CHECKSUM_NONE;
4419 if (rxdp->Control_1 & RXD_T_CODE) {
4420 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4421 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4424 #ifdef CONFIG_2BUFF_MODE
4425 buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4426 buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
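/*
 * Note (descriptive, inferred from the code below): in 2-buffer mode the
 * RxD appears to carry the frame header in buffer 0 (ba->ba_0) and the
 * payload in buffer 2; the header bytes are copied in front of the payload
 * before the SKB is passed up the stack.
 */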
4430 #ifndef CONFIG_2BUFF_MODE
4432 skb->protocol = eth_type_trans(skb, dev);
4434 buff = skb_push(skb, buf0_len);
4435 memcpy(buff, ba->ba_0, buf0_len);
4436 skb_put(skb, buf2_len);
4437 skb->protocol = eth_type_trans(skb, dev);
4440 #ifdef CONFIG_S2IO_NAPI
4441 netif_receive_skb(skb);
4446 dev->last_rx = jiffies;
4448 sp->stats.rx_packets++;
4449 #ifndef CONFIG_2BUFF_MODE
4450 sp->stats.rx_bytes += len;
4452 sp->stats.rx_bytes += buf0_len + buf2_len;
4455 atomic_dec(&sp->rx_bufs_left[ring_no]);
4456 rxdp->Host_Control = 0;
4461 * s2io_link - stops/starts the Tx queue.
4462 * @sp : private member of the device structure, which is a pointer to the
4463 * s2io_nic structure.
4464 * @link : indicates whether link is UP/DOWN.
4466 * This function stops/starts the Tx queue depending on whether the link
4467 * status of the NIC is down or up. This is called by the Alarm
4468 * interrupt handler whenever a link change interrupt comes up.
4473 static void s2io_link(nic_t * sp, int link)
4475 struct net_device *dev = (struct net_device *) sp->dev;
4477 if (link != sp->last_link_state) {
4478 if (link == LINK_DOWN) {
4479 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
4480 netif_carrier_off(dev);
4482 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
4483 netif_carrier_on(dev);
4486 sp->last_link_state = link;
4490 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
4491 * @sp : private member of the device structure, which is a pointer to the
4492 * s2io_nic structure.
4494 * This function initializes a few of the PCI and PCI-X configuration registers
4495 * with recommended values.
4500 static void s2io_init_pci(nic_t * sp)
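/*
 * PCI-X Command Register fields touched below (per the PCI-X spec):
 *   bit 0    - Data Parity Error Recovery Enable (set)
 *   bit 1    - Enable Relaxed Ordering (cleared)
 *   bits 3:2 - Maximum Memory Read Byte Count (0x1 selects 1024 bytes)
 *   bits 6:4 - Maximum Outstanding Split Transactions
 */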
4504 /* Enable Data Parity Error Recovery in PCI-X command register. */
4505 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4507 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4508 (sp->pcix_cmd | 1));
4509 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4512 /* Set the PErr Response bit in PCI command register. */
4513 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4514 pci_write_config_word(sp->pdev, PCI_COMMAND,
4515 (pci_cmd | PCI_COMMAND_PARITY));
4516 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4518 /* Set MMRB count to 1024 in PCI-X Command register. */
4519 sp->pcix_cmd &= 0xFFF3;
4520 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, (sp->pcix_cmd | (0x1 << 2))); /* MMRBC 1K */
4521 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4524 /* Setting Maximum outstanding splits based on system type. */
4525 sp->pcix_cmd &= 0xFF8F;
4527 sp->pcix_cmd |= XENA_MAX_OUTSTANDING_SPLITS(0x1); /* 2 splits. */
4528 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4530 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4532 /* Forcibly disabling relaxed ordering capability of the card. */
4533 sp->pcix_cmd &= 0xfffd;
4534 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4536 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4540 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
4541 MODULE_LICENSE("GPL");
4542 module_param(tx_fifo_num, int, 0);
4543 module_param_array(tx_fifo_len, int, NULL, 0);
4544 module_param(rx_ring_num, int, 0);
4545 module_param_array(rx_ring_sz, int, NULL, 0);
4546 module_param(Stats_refresh_time, int, 0);
4547 module_param(rmac_pause_time, int, 0);
4548 module_param(mc_pause_threshold_q0q3, int, 0);
4549 module_param(mc_pause_threshold_q4q7, int, 0);
4550 module_param(shared_splits, int, 0);
4551 module_param(tmac_util_period, int, 0);
4552 module_param(rmac_util_period, int, 0);
4553 #ifndef CONFIG_S2IO_NAPI
4554 module_param(indicate_max_pkts, int, 0);
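/*
 * Illustrative module-load example; the parameter values below are
 * hypothetical, not recommendations:
 *
 *   insmod s2io.ko tx_fifo_num=2 tx_fifo_len=2048,2048 \
 *                  rx_ring_num=2 rx_ring_sz=120,120
 */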
4557 * s2io_init_nic - Initialization of the adapter .
4558 * @pdev : structure containing the PCI related information of the device.
4559 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
4561 * The function initializes an adapter identified by the pci_dev structure.
4562 * All OS related initialization, including memory and device structure
4563 * allocation and initialization of the device private variables, is done. Also the swapper
4564 * control register is initialized to enable read and write into the I/O
4565 * registers of the device.
4567 * returns 0 on success and negative on failure.
4570 static int __devinit
4571 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4574 struct net_device *dev;
4575 char *dev_name = "S2IO 10GE NIC";
4577 int dma_flag = FALSE;
4578 u32 mac_up, mac_down;
4579 u64 val64 = 0, tmp64 = 0;
4580 XENA_dev_config_t __iomem *bar0 = NULL;
4582 mac_info_t *mac_control;
4583 struct config_param *config;
4586 DBG_PRINT(ERR_DBG, "Loading S2IO driver with %s\n",
4587 s2io_driver_version);
4589 if ((ret = pci_enable_device(pdev))) {
4591 "s2io_init_nic: pci_enable_device failed\n");
4595 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
4596 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
4599 if (pci_set_consistent_dma_mask
4600 (pdev, DMA_64BIT_MASK)) {
4602 "Unable to obtain 64bit DMA for "
4603 "consistent allocations\n");
4604 pci_disable_device(pdev);
4607 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
4608 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
4610 pci_disable_device(pdev);
4614 if (pci_request_regions(pdev, s2io_driver_name)) {
4615 DBG_PRINT(ERR_DBG, "Request Regions failed\n");
4616 pci_disable_device(pdev);
4620 dev = alloc_etherdev(sizeof(nic_t));
4622 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
4623 pci_disable_device(pdev);
4624 pci_release_regions(pdev);
4628 pci_set_master(pdev);
4629 pci_set_drvdata(pdev, dev);
4630 SET_MODULE_OWNER(dev);
4631 SET_NETDEV_DEV(dev, &pdev->dev);
4633 /* Private member variable initialized to s2io NIC structure */
4635 memset(sp, 0, sizeof(nic_t));
4638 sp->vendor_id = pdev->vendor;
4639 sp->device_id = pdev->device;
4640 sp->high_dma_flag = dma_flag;
4641 sp->irq = pdev->irq;
4642 sp->device_enabled_once = FALSE;
4643 strcpy(sp->name, dev_name);
4645 /* Initialize some PCI/PCI-X fields of the NIC. */
4649 * Setting the device configuration parameters.
4650 * Most of these parameters can be specified by the user during
4651 * module insertion as they are module loadable parameters. If
4652 * these parameters are not specified during load time, they
4653 * are initialized with default values.
4655 mac_control = &sp->mac_control;
4656 config = &sp->config;
4658 /* Tx side parameters. */
4659 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
4660 config->tx_fifo_num = tx_fifo_num;
4661 for (i = 0; i < MAX_TX_FIFOS; i++) {
4662 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
4663 config->tx_cfg[i].fifo_priority = i;
4666 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
4667 for (i = 0; i < config->tx_fifo_num; i++) {
4668 config->tx_cfg[i].f_no_snoop =
4669 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
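/*
 * For very short FIFOs (fewer than 65 descriptors) fall back to
 * per-list Tx interrupts below; presumably utilization-based
 * moderation would fire too rarely on such a small FIFO.
 */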
4670 if (config->tx_cfg[i].fifo_len < 65) {
4671 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
4675 config->max_txds = MAX_SKB_FRAGS;
4677 /* Rx side parameters. */
4678 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
4679 config->rx_ring_num = rx_ring_num;
4680 for (i = 0; i < MAX_RX_RINGS; i++) {
4681 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
4682 (MAX_RXDS_PER_BLOCK + 1);
4683 config->rx_cfg[i].ring_priority = i;
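/*
 * Note: rx_ring_sz[] is given in Rx blocks; each block provides
 * MAX_RXDS_PER_BLOCK usable descriptors plus one more, presumably the
 * descriptor that links to the next block, hence the
 * (MAX_RXDS_PER_BLOCK + 1) multiplier above.
 */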
4686 for (i = 0; i < rx_ring_num; i++) {
4687 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
4688 config->rx_cfg[i].f_no_snoop =
4689 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
4692 /* Setting Mac Control parameters */
4693 mac_control->rmac_pause_time = rmac_pause_time;
4694 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
4695 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
4698 /* Initialize Ring buffer parameters. */
4699 for (i = 0; i < config->rx_ring_num; i++)
4700 atomic_set(&sp->rx_bufs_left[i], 0);
4702 /* initialize the shared memory used by the NIC and the host */
4703 if (init_shared_mem(sp)) {
4704 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
4707 goto mem_alloc_failed;
4710 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
4711 pci_resource_len(pdev, 0));
4713 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
4716 goto bar0_remap_failed;
4719 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
4720 pci_resource_len(pdev, 2));
4722 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
4725 goto bar1_remap_failed;
4728 dev->irq = pdev->irq;
4729 dev->base_addr = (unsigned long) sp->bar0;
4731 /* Initializing the BAR1 address as the start of the FIFO pointer. */
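/*
 * Each Tx FIFO's register window in BAR1 is laid out at a 0x20000-byte
 * (128 KiB) stride, hence the per-FIFO offset in the loop below.
 */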
4732 for (j = 0; j < MAX_TX_FIFOS; j++) {
4733 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
4734 (sp->bar1 + (j * 0x00020000));
4737 /* Driver entry points */
4738 dev->open = &s2io_open;
4739 dev->stop = &s2io_close;
4740 dev->hard_start_xmit = &s2io_xmit;
4741 dev->get_stats = &s2io_get_stats;
4742 dev->set_multicast_list = &s2io_set_multicast;
4743 dev->do_ioctl = &s2io_ioctl;
4744 dev->change_mtu = &s2io_change_mtu;
4745 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
4747 * We will use eth_mac_addr() for dev->set_mac_address;
4748 * the MAC address will be set every time dev->open() is called.
4750 #ifdef CONFIG_S2IO_NAPI
4751 dev->poll = s2io_poll;
4755 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
4756 if (sp->high_dma_flag == TRUE)
4757 dev->features |= NETIF_F_HIGHDMA;
4759 dev->features |= NETIF_F_TSO;
4762 dev->tx_timeout = &s2io_tx_watchdog;
4763 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
4764 INIT_WORK(&sp->rst_timer_task,
4765 (void (*)(void *)) s2io_restart_nic, dev);
4766 INIT_WORK(&sp->set_link_task,
4767 (void (*)(void *)) s2io_set_link, sp);
4769 pci_save_state(sp->pdev);
4771 /* Setting swapper control on the NIC, for proper reset operation */
4772 if (s2io_set_swapper(sp)) {
4773 DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
4776 goto set_swap_failed;
4779 /* Fix for all "FFs" MAC address problems observed on Alpha platforms */
4780 fix_mac_address(sp);
4784 * Setting swapper control on the NIC, so the MAC address can be read.
4786 if (s2io_set_swapper(sp)) {
4788 "%s: S2IO: swapper settings are wrong\n",
4791 goto set_swap_failed;
4795 * MAC address initialization.
4796 * For now only one mac address will be read and used.
4799 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4800 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
4801 writeq(val64, &bar0->rmac_addr_cmd_mem);
4802 wait_for_cmd_complete(sp);
4804 tmp64 = readq(&bar0->rmac_addr_data0_mem);
4805 mac_down = (u32) tmp64;
4806 mac_up = (u32) (tmp64 >> 32);
4808 memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
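/*
 * Unpack the 48-bit station address read from rmac_addr_data0_mem:
 * MAC bytes 0-3 come from the upper 32 bits of the register value and
 * bytes 4-5 from the top two bytes of the lower 32 bits, as assigned below.
 */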
4810 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
4811 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
4812 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
4813 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
4814 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
4815 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
4818 "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
4819 sp->def_mac_addr[0].mac_addr[0],
4820 sp->def_mac_addr[0].mac_addr[1],
4821 sp->def_mac_addr[0].mac_addr[2],
4822 sp->def_mac_addr[0].mac_addr[3],
4823 sp->def_mac_addr[0].mac_addr[4],
4824 sp->def_mac_addr[0].mac_addr[5]);
4826 /* Set the factory defined MAC address initially */
4827 dev->addr_len = ETH_ALEN;
4828 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
4831 * Initialize the tasklet status and link state flags
4832 * and the card state parameter
4834 atomic_set(&(sp->card_state), 0);
4835 sp->tasklet_status = 0;
4839 /* Initialize spinlocks */
4840 spin_lock_init(&sp->tx_lock);
4841 #ifndef CONFIG_S2IO_NAPI
4842 spin_lock_init(&sp->put_lock);
4846 * SXE-002: Configure link and activity LED to init state
4849 subid = sp->pdev->subsystem_device;
4850 if ((subid & 0xFF) >= 0x07) {
4851 val64 = readq(&bar0->gpio_control);
4852 val64 |= 0x0000800000000000ULL;
4853 writeq(val64, &bar0->gpio_control);
4854 val64 = 0x0411040400000000ULL;
4855 writeq(val64, (void __iomem *) bar0 + 0x2700);
4856 val64 = readq(&bar0->gpio_control);
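/* The readq() above presumably serves to flush the preceding posted writes. */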
4859 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
4861 if (register_netdev(dev)) {
4862 DBG_PRINT(ERR_DBG, "Device registration failed\n");
4864 goto register_failed;
4868 * Make the link state off at this point; when the link change
4869 * interrupt comes, the state will be automatically changed to the correct state.
4872 netif_carrier_off(dev);
4873 sp->last_link_state = LINK_DOWN;
4884 free_shared_mem(sp);
4885 pci_disable_device(pdev);
4886 pci_release_regions(pdev);
4887 pci_set_drvdata(pdev, NULL);
4894 * s2io_rem_nic - Free the PCI device
4895 * @pdev: structure containing the PCI related information of the device.
4896 * Description: This function is called by the PCI subsystem to release a
4897 * PCI device and free up all resources held by the device. This could
4898 * be in response to a Hot plug event or when the driver is to be removed from memory.
4902 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
4904 struct net_device *dev =
4905 (struct net_device *) pci_get_drvdata(pdev);
4909 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
4914 unregister_netdev(dev);
4916 free_shared_mem(sp);
4919 pci_disable_device(pdev);
4920 pci_release_regions(pdev);
4921 pci_set_drvdata(pdev, NULL);
4927 * s2io_starter - Entry point for the driver
4928 * Description: This function is the entry point for the driver. It verifies
4929 * the module loadable parameters and initializes PCI configuration space.
4932 int __init s2io_starter(void)
4934 return pci_module_init(&s2io_driver);
4938 * s2io_closer - Cleanup routine for the driver
4939 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
4942 static void s2io_closer(void)
4944 pci_unregister_driver(&s2io_driver);
4945 DBG_PRINT(INIT_DBG, "cleanup done\n");
4948 module_init(s2io_starter);
4949 module_exit(s2io_closer);