1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used
31 * rx_ring_len: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO.
36 * in PCI Configuration space.
37 ************************************************************************/
39 #include <linux/config.h>
40 #include <linux/module.h>
41 #include <linux/types.h>
42 #include <linux/errno.h>
43 #include <linux/ioport.h>
44 #include <linux/pci.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
60 #include <asm/system.h>
61 #include <asm/uaccess.h>
65 #include "s2io-regs.h"
67 /* S2io Driver name & version. */
/* Driver identification strings — presumably reported via ethtool get_drvinfo;
 * confirm against the ethtool ops elsewhere in this file. */
68 static char s2io_driver_name[] = "s2io";
69 static char s2io_driver_version[] = "Version 1.7.7.1";
72 * Cards with following subsystem_id have a link state indication
73 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
74 * macro below identifies these cards given the subsystem_id.
/*
 * Identify adapter subsystem IDs (600B-600D, 640B-640D) known to have a
 * faulty link-state indicator.  Evaluates to 1 for an affected card, 0
 * otherwise.
 *
 * Fix: the original expansion ended in an unparenthesized ternary and did
 * not parenthesize the 'subid' argument, so uses such as
 * !CARDS_WITH_FAULTY_LINK_INDICATORS(x) or composing it with && parsed
 * incorrectly.  Both the argument and the whole expansion are now wrapped
 * in parentheses.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
	((((subid) >= 0x600B) && ((subid) <= 0x600D)) || \
	 (((subid) >= 0x640B) && ((subid) <= 0x640D)) ? 1 : 0)
/*
 * True when neither the remote-fault nor the local-fault bit is set in the
 * adapter status value 'val64' (read from the adapter_status register).
 * Fix: parenthesize the macro argument so callers may pass any expression.
 */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically claim the Rx tasklet; non-zero if it was already running.
 * NOTE(review): relies on a variable named 'sp' being in scope at every
 * expansion site — fragile, but kept as-is. */
82 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/*
 * Classify how full ring 'ring' currently is, given 'rxb_size' RxDs still
 * holding buffers.  Presumably returns PANIC/LOW/OK-style levels used by the
 * Rx refill path — the return statements are not visible in this view;
 * confirm against the complete function body.
 */
85 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
88 if ((sp->pkt_cnt[ring] - rxb_size) > 16) {
90 if ((sp->pkt_cnt[ring] - rxb_size) < MAX_RXDS_PER_BLOCK) {
98 /* Ethtool related variables and Macros. */
/* Names of the ethtool self-tests, in the order the test routine runs them. */
99 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
100 "Register test\t(offline)",
101 "Eeprom test\t(offline)",
102 "Link test\t(online)",
103 "RLDRAM test\t(offline)",
104 "BIST Test\t(offline)"
/* Key names for the hardware statistics exposed through ethtool -S.
 * Order must match the per-counter copy in the ethtool stats handler. */
107 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
109 {"tmac_data_octets"},
113 {"tmac_pause_ctrl_frms"},
114 {"tmac_any_err_frms"},
115 {"tmac_vld_ip_octets"},
123 {"rmac_data_octets"},
124 {"rmac_fcs_err_frms"},
126 {"rmac_vld_mcst_frms"},
127 {"rmac_vld_bcst_frms"},
128 {"rmac_in_rng_len_err_frms"},
130 {"rmac_pause_ctrl_frms"},
131 {"rmac_discarded_frms"},
132 {"rmac_usized_frms"},
133 {"rmac_osized_frms"},
135 {"rmac_jabber_frms"},
143 {"rmac_err_drp_udp"},
145 {"rmac_accepted_ip"},
/*
 * Element counts and byte lengths for the ethtool stats/self-test string
 * tables above.
 *
 * Fix: each expansion is now fully parenthesized.  The originals expanded
 * bare 'sizeof(x)/ETH_GSTRING_LEN' and 'LEN * ETH_GSTRING_LEN', which
 * compose wrongly inside a larger expression (e.g. 'x / S2IO_STAT_STRINGS_LEN').
 */
#define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
157 * Constants to be programmed into the Xena's registers, to configure
/* Sentinel in the dtx/mdio config arrays below: switch the write target
 * from dtx_control to mdio_control (see the XAUI comment in init_nic). */
161 #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
/* MDIO configuration sequence written to bar0->mdio_control by init_nic();
 * terminated elsewhere by END_SIGN (terminator not visible in this view). */
164 static u64 default_mdio_cfg[] = {
166 0xC001010000000000ULL, 0xC0010100000000E0ULL,
167 0xC0010100008000E4ULL,
168 /* Remove Reset from PMA PLL */
169 0xC001010000000000ULL, 0xC0010100000000E0ULL,
170 0xC0010100000000E4ULL,
/* DTX (XGXS/serdes) configuration sequence written to bar0->dtx_control by
 * init_nic(); terminated by END_SIGN (terminator not visible in this view). */
174 static u64 default_dtx_cfg[] = {
175 0x8000051500000000ULL, 0x80000515000000E0ULL,
176 0x80000515D93500E4ULL, 0x8001051500000000ULL,
177 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
178 0x8002051500000000ULL, 0x80020515000000E0ULL,
179 0x80020515F21000E4ULL,
180 /* Set PADLOOPBACKN */
181 0x8002051500000000ULL, 0x80020515000000E0ULL,
182 0x80020515B20000E4ULL, 0x8003051500000000ULL,
183 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
184 0x8004051500000000ULL, 0x80040515000000E0ULL,
185 0x80040515B20000E4ULL, 0x8005051500000000ULL,
186 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
188 /* Remove PADLOOPBACKN */
189 0x8002051500000000ULL, 0x80020515000000E0ULL,
190 0x80020515F20000E4ULL, 0x8003051500000000ULL,
191 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
192 0x8004051500000000ULL, 0x80040515000000E0ULL,
193 0x80040515F20000E4ULL, 0x8005051500000000ULL,
194 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
200 * Constants for Fixing the MacAddress problem seen mostly on
/* GPIO write sequence used by fix_mac_address() to recover MAC-address
 * reads; terminated by END_SIGN (terminator not visible in this view). */
203 static u64 fix_mac[] = {
204 0x0060000000000000ULL, 0x0060600000000000ULL,
205 0x0040600000000000ULL, 0x0000600000000000ULL,
206 0x0020600000000000ULL, 0x0060600000000000ULL,
207 0x0020600000000000ULL, 0x0060600000000000ULL,
208 0x0020600000000000ULL, 0x0060600000000000ULL,
209 0x0020600000000000ULL, 0x0060600000000000ULL,
210 0x0020600000000000ULL, 0x0060600000000000ULL,
211 0x0020600000000000ULL, 0x0060600000000000ULL,
212 0x0020600000000000ULL, 0x0060600000000000ULL,
213 0x0020600000000000ULL, 0x0060600000000000ULL,
214 0x0020600000000000ULL, 0x0060600000000000ULL,
215 0x0020600000000000ULL, 0x0060600000000000ULL,
216 0x0020600000000000ULL, 0x0000600000000000ULL,
217 0x0040600000000000ULL, 0x0060600000000000ULL,
221 /* Module Loadable parameters. */
/* Number of Tx FIFOs / Rx rings actually used (arrays below give per-FIFO
 * and per-ring sizes; 0 presumably means "use driver default" — confirm). */
222 static unsigned int tx_fifo_num = 1;
223 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
224 {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
225 static unsigned int rx_ring_num = 1;
226 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
227 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
/* Hardware statistics refresh period, fed to SET_UPDT_PERIOD() in init_nic. */
228 static unsigned int Stats_refresh_time = 4;
/* Pause-frame time value and per-queue pause thresholds (see init_nic). */
229 static unsigned int rmac_pause_time = 65535;
230 static unsigned int mc_pause_threshold_q0q3 = 187;
231 static unsigned int mc_pause_threshold_q4q7 = 187;
/* TxDMA read-split limit programmed into pic_control; zero-initialized. */
232 static unsigned int shared_splits;
/* Sampling periods for Tx/Rx link-utilization calculation. */
233 static unsigned int tmac_util_period = 5;
234 static unsigned int rmac_util_period = 5;
235 #ifndef CONFIG_S2IO_NAPI
236 static unsigned int indicate_max_pkts;
241 * This table lists all the devices that this driver supports.
/* PCI IDs this driver binds to: Xena (WIN/UNI) and Herc (WIN/UNI) variants.
 * The terminating all-zero entry is not visible in this view. */
243 static struct pci_device_id s2io_tbl[] __devinitdata = {
244 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
245 PCI_ANY_ID, PCI_ANY_ID},
246 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
247 PCI_ANY_ID, PCI_ANY_ID},
248 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
249 PCI_ANY_ID, PCI_ANY_ID},
250 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
251 PCI_ANY_ID, PCI_ANY_ID},
255 MODULE_DEVICE_TABLE(pci, s2io_tbl);
/* PCI driver registration: probe/remove entry points for the device IDs in
 * s2io_tbl (the .name member is not visible in this view). */
257 static struct pci_driver s2io_driver = {
259 .id_table = s2io_tbl,
260 .probe = s2io_init_nic,
261 .remove = __devexit_p(s2io_rem_nic),
/* A simplifier macro used both by init and free shared_mem Fns().
 * Number of pages needed to hold 'len' items at 'per_each' items per page
 * (ceiling division).
 * Fix: both arguments and the whole expansion are parenthesized; the
 * original expanded 'len+per_each - 1) / per_each' bare, so an expression
 * argument such as 'a + b' for per_each divided by 'a' and then added 'b'. */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
268 * init_shared_mem - Allocation and Initialization of Memory
269 * @nic: Device private variable.
270 * Description: The function allocates all the memory areas shared
271 * between the NIC and the driver. This includes Tx descriptors,
272 * Rx descriptors and the statistics block.
/* Returns SUCCESS or a negative/failure code — return statements are not
 * visible in this elided view; on failure the caller is expected to invoke
 * free_shared_mem() (per the inline comments below). */
275 static int init_shared_mem(struct s2io_nic *nic)
278 void *tmp_v_addr, *tmp_v_addr_next;
279 dma_addr_t tmp_p_addr, tmp_p_addr_next;
280 RxD_block_t *pre_rxd_blk = NULL;
282 int lst_size, lst_per_page;
283 struct net_device *dev = nic->dev;
284 #ifdef CONFIG_2BUFF_MODE
289 mac_info_t *mac_control;
290 struct config_param *config;
292 mac_control = &nic->mac_control;
293 config = &nic->config;
296 /* Allocation and initialization of TXDLs in FIFOs */
/* Reject configurations whose summed FIFO lengths exceed the HW limit. */
298 for (i = 0; i < config->tx_fifo_num; i++) {
299 size += config->tx_cfg[i].fifo_len;
301 if (size > MAX_AVAILABLE_TXDS) {
302 DBG_PRINT(ERR_DBG, "%s: Total number of Tx FIFOs ",
304 DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
305 DBG_PRINT(ERR_DBG, "that can be used\n");
/* One TxD list per descriptor; lst_per_page lists fit in one DMA page. */
309 lst_size = (sizeof(TxD_t) * config->max_txds);
310 lst_per_page = PAGE_SIZE / lst_size;
312 for (i = 0; i < config->tx_fifo_num; i++) {
313 int fifo_len = config->tx_cfg[i].fifo_len;
314 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
315 nic->list_info[i] = kmalloc(list_holder_size, GFP_KERNEL);
316 if (!nic->list_info[i]) {
318 "Malloc failed for list_info\n");
321 memset(nic->list_info[i], 0, list_holder_size);
/* Allocate page-sized DMA chunks and carve them into per-TxD list slots. */
323 for (i = 0; i < config->tx_fifo_num; i++) {
324 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
326 mac_control->tx_curr_put_info[i].offset = 0;
327 mac_control->tx_curr_put_info[i].fifo_len =
328 config->tx_cfg[i].fifo_len - 1;
329 mac_control->tx_curr_get_info[i].offset = 0;
330 mac_control->tx_curr_get_info[i].fifo_len =
331 config->tx_cfg[i].fifo_len - 1;
332 for (j = 0; j < page_num; j++) {
336 tmp_v = pci_alloc_consistent(nic->pdev,
340 "pci_alloc_consistent ");
341 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
344 while (k < lst_per_page) {
345 int l = (j * lst_per_page) + k;
346 if (l == config->tx_cfg[i].fifo_len)
348 nic->list_info[i][l].list_virt_addr =
349 tmp_v + (k * lst_size);
350 nic->list_info[i][l].list_phy_addr =
351 tmp_p + (k * lst_size);
358 /* Allocation and initialization of RXDs in Rings */
/* Each ring must hold a whole number of RxD blocks. */
360 for (i = 0; i < config->rx_ring_num; i++) {
361 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
362 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
363 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
365 DBG_PRINT(ERR_DBG, "RxDs per Block");
368 size += config->rx_cfg[i].num_rxd;
369 nic->block_count[i] =
370 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
372 config->rx_cfg[i].num_rxd - nic->block_count[i];
375 for (i = 0; i < config->rx_ring_num; i++) {
376 mac_control->rx_curr_get_info[i].block_index = 0;
377 mac_control->rx_curr_get_info[i].offset = 0;
378 mac_control->rx_curr_get_info[i].ring_len =
379 config->rx_cfg[i].num_rxd - 1;
380 mac_control->rx_curr_put_info[i].block_index = 0;
381 mac_control->rx_curr_put_info[i].offset = 0;
382 mac_control->rx_curr_put_info[i].ring_len =
383 config->rx_cfg[i].num_rxd - 1;
385 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
386 /* Allocating all the Rx blocks */
387 for (j = 0; j < blk_cnt; j++) {
388 #ifndef CONFIG_2BUFF_MODE
389 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
391 size = SIZE_OF_BLOCK;
393 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
395 if (tmp_v_addr == NULL) {
397 * In case of failure, free_shared_mem()
398 * is called, which should free any
399 * memory that was alloced till the
402 nic->rx_blocks[i][j].block_virt_addr =
406 memset(tmp_v_addr, 0, size);
407 nic->rx_blocks[i][j].block_virt_addr = tmp_v_addr;
408 nic->rx_blocks[i][j].block_dma_addr = tmp_p_addr;
410 /* Interlinking all Rx Blocks */
/* Chain each block to the next (circularly, via % blk_cnt) so the NIC can
 * follow the ring in hardware. */
411 for (j = 0; j < blk_cnt; j++) {
412 tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr;
414 nic->rx_blocks[i][(j + 1) %
415 blk_cnt].block_virt_addr;
416 tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr;
418 nic->rx_blocks[i][(j + 1) %
419 blk_cnt].block_dma_addr;
421 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
422 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
425 #ifndef CONFIG_2BUFF_MODE
426 pre_rxd_blk->reserved_2_pNext_RxD_block =
427 (unsigned long) tmp_v_addr_next;
429 pre_rxd_blk->pNext_RxD_Blk_physical =
430 (u64) tmp_p_addr_next;
434 #ifdef CONFIG_2BUFF_MODE
436 * Allocation of Storages for buffer addresses in 2BUFF mode
437 * and the buffers as well.
439 for (i = 0; i < config->rx_ring_num; i++) {
441 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
442 nic->ba[i] = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
446 for (j = 0; j < blk_cnt; j++) {
448 nic->ba[i][j] = kmalloc((sizeof(buffAdd_t) *
449 (MAX_RXDS_PER_BLOCK + 1)),
453 while (k != MAX_RXDS_PER_BLOCK) {
454 ba = &nic->ba[i][j][k];
/* Over-allocate by ALIGN_SIZE, then round the pointer down to an aligned
 * boundary; ba_0_org/ba_1_org keep the original pointers for kfree(). */
456 ba->ba_0_org = kmalloc
457 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
460 tmp = (unsigned long) ba->ba_0_org;
462 tmp &= ~((unsigned long) ALIGN_SIZE);
463 ba->ba_0 = (void *) tmp;
465 ba->ba_1_org = kmalloc
466 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
469 tmp = (unsigned long) ba->ba_1_org;
471 tmp &= ~((unsigned long) ALIGN_SIZE);
472 ba->ba_1 = (void *) tmp;
479 /* Allocation and initialization of Statistics block */
480 size = sizeof(StatInfo_t);
481 mac_control->stats_mem = pci_alloc_consistent
482 (nic->pdev, size, &mac_control->stats_mem_phy);
484 if (!mac_control->stats_mem) {
486 * In case of failure, free_shared_mem() is called, which
487 * should free any memory that was alloced till the
492 mac_control->stats_mem_sz = size;
494 tmp_v_addr = mac_control->stats_mem;
495 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
496 memset(tmp_v_addr, 0, size);
498 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
499 (unsigned long long) tmp_p_addr);
505 * free_shared_mem - Free the allocated Memory
506 * @nic: Device private variable.
507 * Description: This function is to free all memory locations allocated by
508 * the init_shared_mem() function and return it to the kernel.
/* Must mirror init_shared_mem() exactly; tolerates partial allocation
 * (NULL checks) so it can run on the failure path. */
511 static void free_shared_mem(struct s2io_nic *nic)
513 int i, j, blk_cnt, size;
515 dma_addr_t tmp_p_addr;
516 mac_info_t *mac_control;
517 struct config_param *config;
518 int lst_size, lst_per_page;
524 mac_control = &nic->mac_control;
525 config = &nic->config;
527 lst_size = (sizeof(TxD_t) * config->max_txds);
528 lst_per_page = PAGE_SIZE / lst_size;
/* Free the page-sized TxDL DMA chunks; each page's address is recorded in
 * the first list_info slot of that page (j * lst_per_page). */
530 for (i = 0; i < config->tx_fifo_num; i++) {
531 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
533 for (j = 0; j < page_num; j++) {
534 int mem_blks = (j * lst_per_page);
535 if (!nic->list_info[i][mem_blks].list_virt_addr)
537 pci_free_consistent(nic->pdev, PAGE_SIZE,
538 nic->list_info[i][mem_blks].
540 nic->list_info[i][mem_blks].
543 kfree(nic->list_info[i]);
546 #ifndef CONFIG_2BUFF_MODE
547 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
549 size = SIZE_OF_BLOCK;
/* Free every Rx block that was successfully allocated. */
551 for (i = 0; i < config->rx_ring_num; i++) {
552 blk_cnt = nic->block_count[i];
553 for (j = 0; j < blk_cnt; j++) {
554 tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr;
555 tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr;
556 if (tmp_v_addr == NULL)
558 pci_free_consistent(nic->pdev, size,
559 tmp_v_addr, tmp_p_addr);
563 #ifdef CONFIG_2BUFF_MODE
564 /* Freeing buffer storage addresses in 2BUFF mode. */
565 for (i = 0; i < config->rx_ring_num; i++) {
567 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
570 for (j = 0; j < blk_cnt; j++) {
572 if (!nic->ba[i][j]) {
576 while (k != MAX_RXDS_PER_BLOCK) {
577 buffAdd_t *ba = &nic->ba[i][j][k];
578 if (!ba || !ba->ba_0_org || !ba->ba_1_org)
581 kfree(nic->ba[i][j]);
592 kfree(nic->ba[i][j]);
599 if (mac_control->stats_mem) {
600 pci_free_consistent(nic->pdev,
601 mac_control->stats_mem_sz,
602 mac_control->stats_mem,
603 mac_control->stats_mem_phy);
608 * init_nic - Initialization of hardware
609 * @nic: device private variable
610 * Description: The function sequentially configures every block
611 * of the H/W from their reset values.
612 * Return Value: SUCCESS on success and
613 * '-1' on failure (endian settings incorrect).
616 static int init_nic(struct s2io_nic *nic)
618 XENA_dev_config_t __iomem *bar0 = nic->bar0;
619 struct net_device *dev = nic->dev;
620 register u64 val64 = 0;
624 mac_info_t *mac_control;
625 struct config_param *config;
626 int mdio_cnt = 0, dtx_cnt = 0;
627 unsigned long long mem_share;
629 mac_control = &nic->mac_control;
630 config = &nic->config;
632 /* Initialize swapper control register */
633 if (s2io_set_swapper(nic)) {
634 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
638 /* Remove XGXS from reset state */
640 writeq(val64, &bar0->sw_reset);
641 val64 = readq(&bar0->sw_reset);
644 /* Enable Receiving broadcasts */
/* mac_cfg is key-protected: each 32-bit half must be preceded by a write
 * of RMAC_CFG_KEY to rmac_cfg_key. */
645 add = &bar0->mac_cfg;
646 val64 = readq(&bar0->mac_cfg);
647 val64 |= MAC_RMAC_BCAST_ENABLE;
648 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
649 writel((u32) val64, add);
650 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
651 writel((u32) (val64 >> 32), (add + 4));
653 /* Read registers in all blocks */
654 val64 = readq(&bar0->mac_int_mask);
655 val64 = readq(&bar0->mc_int_mask);
656 val64 = readq(&bar0->xgxs_int_mask);
660 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
663 * Configuring the XAUI Interface of Xena.
664 * ***************************************
665 * To Configure the Xena's XAUI, one has to write a series
666 * of 64 bit values into two registers in a particular
667 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
668 * which will be defined in the array of configuration values
669 * (default_dtx_cfg & default_mdio_cfg) at appropriate places
670 * to switch writing from one register to another. We continue
671 * writing these values until we encounter the 'END_SIGN' macro.
672 * For example, After making a series of 21 writes into
673 * dtx_control register the 'SWITCH_SIGN' appears and hence we
674 * start writing into mdio_control until we encounter END_SIGN.
678 while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
679 if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
683 SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
684 &bar0->dtx_control, UF);
685 val64 = readq(&bar0->dtx_control);
689 while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
690 if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
694 SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
695 &bar0->mdio_control, UF);
696 val64 = readq(&bar0->mdio_control);
699 if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
700 (default_mdio_cfg[mdio_cnt] == END_SIGN)) {
707 /* Tx DMA Initialization */
709 writeq(val64, &bar0->tx_fifo_partition_0);
710 writeq(val64, &bar0->tx_fifo_partition_1);
711 writeq(val64, &bar0->tx_fifo_partition_2);
712 writeq(val64, &bar0->tx_fifo_partition_3);
/* Pack each FIFO's length and priority into the partition registers; each
 * 64-bit register describes two FIFOs. */
715 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
717 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
718 13) | vBIT(config->tx_cfg[i].fifo_priority,
721 if (i == (config->tx_fifo_num - 1)) {
728 writeq(val64, &bar0->tx_fifo_partition_0);
732 writeq(val64, &bar0->tx_fifo_partition_1);
736 writeq(val64, &bar0->tx_fifo_partition_2);
740 writeq(val64, &bar0->tx_fifo_partition_3);
745 /* Enable Tx FIFO partition 0. */
746 val64 = readq(&bar0->tx_fifo_partition_0);
747 val64 |= BIT(0); /* To enable the FIFO partition. */
748 writeq(val64, &bar0->tx_fifo_partition_0);
750 val64 = readq(&bar0->tx_fifo_partition_0);
751 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
752 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
755 * Initialization of Tx_PA_CONFIG register to ignore packet
756 * integrity checking.
758 val64 = readq(&bar0->tx_pa_cfg);
759 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
760 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
761 writeq(val64, &bar0->tx_pa_cfg);
763 /* Rx DMA initialization. */
765 for (i = 0; i < config->rx_ring_num; i++) {
767 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
770 writeq(val64, &bar0->rx_queue_priority);
773 * Allocating equal share of memory to all the
/* 64 memory units split evenly across rings; queue 0 also takes the
 * remainder so the whole 64 is always accounted for. */
777 for (i = 0; i < config->rx_ring_num; i++) {
780 mem_share = (64 / config->rx_ring_num +
781 64 % config->rx_ring_num);
782 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
785 mem_share = (64 / config->rx_ring_num);
786 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
789 mem_share = (64 / config->rx_ring_num);
790 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
793 mem_share = (64 / config->rx_ring_num);
794 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
797 mem_share = (64 / config->rx_ring_num);
798 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
801 mem_share = (64 / config->rx_ring_num);
802 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
805 mem_share = (64 / config->rx_ring_num);
806 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
809 mem_share = (64 / config->rx_ring_num);
810 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
814 writeq(val64, &bar0->rx_queue_cfg);
817 * Initializing the Tx round robin registers to 0.
818 * Filling Tx and Rx round robin registers as per the
819 * number of FIFOs and Rings is still TODO.
821 writeq(0, &bar0->tx_w_round_robin_0);
822 writeq(0, &bar0->tx_w_round_robin_1);
823 writeq(0, &bar0->tx_w_round_robin_2);
824 writeq(0, &bar0->tx_w_round_robin_3);
825 writeq(0, &bar0->tx_w_round_robin_4);
829 * Disable Rx steering. Hard coding all packets be steered to
832 val64 = 0x8080808080808080ULL;
833 writeq(val64, &bar0->rts_qos_steering);
837 for (i = 1; i < 8; i++)
838 writeq(val64, &bar0->rts_frm_len_n[i]);
840 /* Set rts_frm_len register for fifo 0 */
/* +22 presumably covers Ethernet header + FCS + VLAN overhead on top of
 * the MTU — confirm against the hardware spec. */
841 writeq(MAC_RTS_FRM_LEN_SET(dev->mtu + 22),
842 &bar0->rts_frm_len_n[0]);
844 /* Enable statistics */
845 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
846 val64 = SET_UPDT_PERIOD(Stats_refresh_time) |
847 STAT_CFG_STAT_RO | STAT_CFG_STAT_EN;
848 writeq(val64, &bar0->stat_cfg);
851 * Initializing the sampling rate for the device to calculate the
852 * bandwidth utilization.
854 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
855 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
856 writeq(val64, &bar0->mac_link_util);
860 * Initializing the Transmit and Receive Traffic Interrupt
863 /* TTI Initialization. Default Tx timer gets us about
864 * 250 interrupts per sec. Continuous interrupts are enabled
867 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) |
868 TTI_DATA1_MEM_TX_URNG_A(0xA) |
869 TTI_DATA1_MEM_TX_URNG_B(0x10) |
870 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN |
871 TTI_DATA1_MEM_TX_TIMER_CI_EN;
872 writeq(val64, &bar0->tti_data1_mem);
874 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
875 TTI_DATA2_MEM_TX_UFC_B(0x20) |
876 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
877 writeq(val64, &bar0->tti_data2_mem);
879 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
880 writeq(val64, &bar0->tti_command_mem);
883 * Once the operation completes, the Strobe bit of the command
884 * register will be reset. We poll for this particular condition
885 * We wait for a maximum of 500ms for the operation to complete,
886 * if it's not complete by then we return error.
890 val64 = readq(&bar0->tti_command_mem);
891 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
895 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
903 /* RTI Initialization */
904 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) |
905 RTI_DATA1_MEM_RX_URNG_A(0xA) |
906 RTI_DATA1_MEM_RX_URNG_B(0x10) |
907 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
909 writeq(val64, &bar0->rti_data1_mem);
911 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
912 RTI_DATA2_MEM_RX_UFC_B(0x2) |
913 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
914 writeq(val64, &bar0->rti_data2_mem);
916 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
917 writeq(val64, &bar0->rti_command_mem);
920 * Once the operation completes, the Strobe bit of the command
921 * register will be reset. We poll for this particular condition
922 * We wait for a maximum of 500ms for the operation to complete,
923 * if it's not complete by then we return error.
927 val64 = readq(&bar0->rti_command_mem);
/* NOTE(review): this RTI poll tests the TTI strobe macro; harmless only if
 * TTI_CMD_MEM_STROBE_NEW_CMD and RTI_CMD_MEM_STROBE_NEW_CMD share the same
 * bit position — verify against s2io-regs.h. */
928 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
932 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
941 * Initializing proper values as Pause threshold into all
942 * the 8 Queues on Rx side.
944 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
945 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
947 /* Disable RMAC PAD STRIPPING */
948 add = &bar0->mac_cfg;
949 val64 = readq(&bar0->mac_cfg);
950 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
951 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
952 writel((u32) (val64), add);
953 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
954 writel((u32) (val64 >> 32), (add + 4));
955 val64 = readq(&bar0->mac_cfg);
958 * Set the time value to be inserted in the pause frame
961 val64 = readq(&bar0->rmac_pause_cfg);
962 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
963 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
964 writeq(val64, &bar0->rmac_pause_cfg);
967 * Set the Threshold Limit for Generating the pause frame
968 * If the amount of data in any Queue exceeds ratio of
969 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
970 * pause frame is generated
973 for (i = 0; i < 4; i++) {
975 (((u64) 0xFF00 | nic->mac_control.
976 mc_pause_threshold_q0q3)
979 writeq(val64, &bar0->mc_pause_thresh_q0q3);
982 for (i = 0; i < 4; i++) {
984 (((u64) 0xFF00 | nic->mac_control.
985 mc_pause_threshold_q4q7)
988 writeq(val64, &bar0->mc_pause_thresh_q4q7);
991 * TxDMA will stop Read request if the number of read split has
992 * exceeded the limit pointed by shared_splits
994 val64 = readq(&bar0->pic_control);
995 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
996 writeq(val64, &bar0->pic_control);
1002 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1003 * @nic: device private variable,
1004 * @mask: A mask indicating which Intr block must be modified and,
1005 * @flag: A flag indicating whether to enable or disable the Intrs.
1006 * Description: This function will either disable or enable the interrupts
1007 * depending on the flag argument. The mask argument can be used to
1008 * enable/disable any Intr block.
1009 * Return Value: NONE.
/* Pattern throughout: clearing a bit in general_int_mask enables that
 * block's interrupts; setting it (or writing DISABLE_ALL_INTRS to the
 * block-level mask) disables them. */
1012 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1014 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1015 register u64 val64 = 0, temp64 = 0;
1017 /* Top level interrupt classification */
1018 /* PIC Interrupts */
1019 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1020 /* Enable PIC Intrs in the general intr mask register */
1021 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1022 if (flag == ENABLE_INTRS) {
1023 temp64 = readq(&bar0->general_int_mask);
1024 temp64 &= ~((u64) val64);
1025 writeq(temp64, &bar0->general_int_mask);
1027 * Disabled all PCIX, Flash, MDIO, IIC and GPIO
1028 * interrupts for now.
1031 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1033 * No MSI Support is available presently, so TTI and
1034 * RTI interrupts are also disabled.
1036 } else if (flag == DISABLE_INTRS) {
1038 * Disable PIC Intrs in the general
1039 * intr mask register
1041 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1042 temp64 = readq(&bar0->general_int_mask);
1044 writeq(val64, &bar0->general_int_mask);
1048 /* DMA Interrupts */
1049 /* Enabling/Disabling Tx DMA interrupts */
1050 if (mask & TX_DMA_INTR) {
1051 /* Enable TxDMA Intrs in the general intr mask register */
1052 val64 = TXDMA_INT_M;
1053 if (flag == ENABLE_INTRS) {
1054 temp64 = readq(&bar0->general_int_mask);
1055 temp64 &= ~((u64) val64);
1056 writeq(temp64, &bar0->general_int_mask);
1058 * Keep all interrupts other than PFC interrupt
1059 * and PCC interrupt disabled in DMA level.
1061 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1063 writeq(val64, &bar0->txdma_int_mask);
1065 * Enable only the MISC error 1 interrupt in PFC block
1067 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1068 writeq(val64, &bar0->pfc_err_mask);
1070 * Enable only the FB_ECC error interrupt in PCC block
1072 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1073 writeq(val64, &bar0->pcc_err_mask);
1074 } else if (flag == DISABLE_INTRS) {
1076 * Disable TxDMA Intrs in the general intr mask
1079 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1080 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1081 temp64 = readq(&bar0->general_int_mask);
1083 writeq(val64, &bar0->general_int_mask);
1087 /* Enabling/Disabling Rx DMA interrupts */
1088 if (mask & RX_DMA_INTR) {
1089 /* Enable RxDMA Intrs in the general intr mask register */
1090 val64 = RXDMA_INT_M;
1091 if (flag == ENABLE_INTRS) {
1092 temp64 = readq(&bar0->general_int_mask);
1093 temp64 &= ~((u64) val64);
1094 writeq(temp64, &bar0->general_int_mask);
1096 * All RxDMA block interrupts are disabled for now
1099 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1100 } else if (flag == DISABLE_INTRS) {
1102 * Disable RxDMA Intrs in the general intr mask
1105 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1106 temp64 = readq(&bar0->general_int_mask);
1108 writeq(val64, &bar0->general_int_mask);
1112 /* MAC Interrupts */
1113 /* Enabling/Disabling MAC interrupts */
1114 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1115 val64 = TXMAC_INT_M | RXMAC_INT_M;
1116 if (flag == ENABLE_INTRS) {
1117 temp64 = readq(&bar0->general_int_mask);
1118 temp64 &= ~((u64) val64);
1119 writeq(temp64, &bar0->general_int_mask);
1121 * All MAC block error interrupts are disabled for now
1122 * except the link status change interrupt.
1125 val64 = MAC_INT_STATUS_RMAC_INT;
1126 temp64 = readq(&bar0->mac_int_mask);
1127 temp64 &= ~((u64) val64);
1128 writeq(temp64, &bar0->mac_int_mask);
1130 val64 = readq(&bar0->mac_rmac_err_mask);
1131 val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1132 writeq(val64, &bar0->mac_rmac_err_mask);
1133 } else if (flag == DISABLE_INTRS) {
1135 * Disable MAC Intrs in the general intr mask register
1137 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1138 writeq(DISABLE_ALL_INTRS,
1139 &bar0->mac_rmac_err_mask);
1141 temp64 = readq(&bar0->general_int_mask);
1143 writeq(val64, &bar0->general_int_mask);
1147 /* XGXS Interrupts */
1148 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1149 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1150 if (flag == ENABLE_INTRS) {
1151 temp64 = readq(&bar0->general_int_mask);
1152 temp64 &= ~((u64) val64);
1153 writeq(temp64, &bar0->general_int_mask);
1155 * All XGXS block error interrupts are disabled for now
1158 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1159 } else if (flag == DISABLE_INTRS) {
1161 * Disable MC Intrs in the general intr mask register
1163 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1164 temp64 = readq(&bar0->general_int_mask);
1166 writeq(val64, &bar0->general_int_mask);
1170 /* Memory Controller(MC) interrupts */
1171 if (mask & MC_INTR) {
1173 if (flag == ENABLE_INTRS) {
1174 temp64 = readq(&bar0->general_int_mask);
1175 temp64 &= ~((u64) val64);
1176 writeq(temp64, &bar0->general_int_mask);
1178 * All MC block error interrupts are disabled for now
1181 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1182 } else if (flag == DISABLE_INTRS) {
1184 * Disable MC Intrs in the general intr mask register
1186 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1187 temp64 = readq(&bar0->general_int_mask);
1189 writeq(val64, &bar0->general_int_mask);
1194 /* Tx traffic interrupts */
1195 if (mask & TX_TRAFFIC_INTR) {
1196 val64 = TXTRAFFIC_INT_M;
1197 if (flag == ENABLE_INTRS) {
1198 temp64 = readq(&bar0->general_int_mask);
1199 temp64 &= ~((u64) val64);
1200 writeq(temp64, &bar0->general_int_mask);
1202 * Enable all the Tx side interrupts
1203 * writing 0 Enables all 64 TX interrupt levels
1205 writeq(0x0, &bar0->tx_traffic_mask);
1206 } else if (flag == DISABLE_INTRS) {
1208 * Disable Tx Traffic Intrs in the general intr mask
1211 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1212 temp64 = readq(&bar0->general_int_mask);
1214 writeq(val64, &bar0->general_int_mask);
1218 /* Rx traffic interrupts */
1219 if (mask & RX_TRAFFIC_INTR) {
1220 val64 = RXTRAFFIC_INT_M;
1221 if (flag == ENABLE_INTRS) {
1222 temp64 = readq(&bar0->general_int_mask);
1223 temp64 &= ~((u64) val64);
1224 writeq(temp64, &bar0->general_int_mask);
1225 /* writing 0 Enables all 8 RX interrupt levels */
1226 writeq(0x0, &bar0->rx_traffic_mask);
1227 } else if (flag == DISABLE_INTRS) {
1229 * Disable Rx Traffic Intrs in the general intr mask
1232 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1233 temp64 = readq(&bar0->general_int_mask);
1235 writeq(val64, &bar0->general_int_mask);
1241 * verify_xena_quiescence - Checks whether the H/W is ready
1242 * @val64 : Value read from adapter status register.
1243 * @flag : indicates if the adapter enable bit was ever written once
1245 * Description: Returns whether the H/W is ready to go or not. Depending
1246 * on whether adapter enable bit was written or not the comparison
1247 * differs and the calling function passes the input argument flag to
1249 * Return: 1 If xena is quiescent
1250 * 0 If Xena is not quiescent
1253 static int verify_xena_quiescence(u64 val64, int flag)
/* Invert the status word so that "ready" bits read as zero in tmp64. */
1256 u64 tmp64 = ~((u64) val64);
1260 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1261 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1262 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1263 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1264 ADAPTER_STATUS_P_PLL_LOCK))) {
/*
 * Adapter enable bit never written (flag == FALSE): RMAC PCC must
 * NOT be idle while RC PRC must be quiescent.
 */
1265 if (flag == FALSE) {
1266 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1267 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1268 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
/*
 * Adapter was enabled before: RMAC PCC must be idle; either state
 * of the RC PRC quiescent bit is accepted here.
 */
1274 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1275 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1276 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1277 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1278 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1290 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1291 * @sp: Pointer to device specific structure
1293 * New procedure to clear mac address reading problems on Alpha platforms
1297 static void fix_mac_address(nic_t * sp)
1299 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Replay the fix_mac[] write sequence until the END_SIGN sentinel. */
1303 while (fix_mac[i] != END_SIGN) {
1304 writeq(fix_mac[i++], &bar0->gpio_control);
/* Read back gpio_control — presumably to flush the posted write; confirm. */
1305 val64 = readq(&bar0->gpio_control);
1310 * start_nic - Turns the device on
1311 * @nic : device private variable.
1313 * This function actually turns the device on. Before this function is
1314 * called, all Registers are configured from their reset states
1315 * and shared memory is allocated but the NIC is still quiescent. On
1316 * calling this function, the device interrupts are cleared and the NIC is
1317 * literally switched on by writing into the adapter control register.
1319 * SUCCESS on success and -1 on failure.
1322 static int start_nic(struct s2io_nic *nic)
1324 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1325 struct net_device *dev = nic->dev;
1326 register u64 val64 = 0;
1327 u16 interruptible, i;
1329 mac_info_t *mac_control;
1330 struct config_param *config;
1332 mac_control = &nic->mac_control;
1333 config = &nic->config;
1335 /* PRC Initialization and configuration */
/* Point each PRC at the first Rx block's DMA address and enable it. */
1336 for (i = 0; i < config->rx_ring_num; i++) {
1337 writeq((u64) nic->rx_blocks[i][0].block_dma_addr,
1338 &bar0->prc_rxd0_n[i]);
1340 val64 = readq(&bar0->prc_ctrl_n[i]);
1341 #ifndef CONFIG_2BUFF_MODE
1342 val64 |= PRC_CTRL_RC_ENABLED;
1344 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1346 writeq(val64, &bar0->prc_ctrl_n[i]);
1349 #ifdef CONFIG_2BUFF_MODE
1350 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1351 val64 = readq(&bar0->rx_pa_cfg);
1352 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1353 writeq(val64, &bar0->rx_pa_cfg);
1357 * Enabling MC-RLDRAM. After enabling the device, we timeout
1358 * for around 100ms, which is approximately the time required
1359 * for the device to be ready for operation.
1361 val64 = readq(&bar0->mc_rldram_mrs);
1362 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1363 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
/* Readback presumably flushes the posted write before the delay — confirm. */
1364 val64 = readq(&bar0->mc_rldram_mrs);
1366 msleep(100); /* Delay by around 100 ms. */
1368 /* Enabling ECC Protection. */
1369 val64 = readq(&bar0->adapter_control);
1370 val64 &= ~ADAPTER_ECC_EN;
1371 writeq(val64, &bar0->adapter_control);
1374 * Clearing any possible Link state change interrupts that
1375 * could have popped up just before Enabling the card.
1377 val64 = readq(&bar0->mac_rmac_err_reg);
/* Write-back of the read value clears the latched bits (R1 semantics). */
1379 writeq(val64, &bar0->mac_rmac_err_reg);
1382 * Verify if the device is ready to be enabled, if so enable
1385 val64 = readq(&bar0->adapter_status);
1386 if (!verify_xena_quiescence(val64, nic->device_enabled_once)) {
1387 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1388 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1389 (unsigned long long) val64);
1393 /* Enable select interrupts */
1394 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1396 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1399 * With some switches, link might be already up at this point.
1400 * Because of this weird behavior, when we enable laser,
1401 * we may not get link. We need to handle this. We cannot
1402 * figure out which switch is misbehaving. So we are forced to
1403 * make a global change.
1406 /* Enabling Laser. */
1407 val64 = readq(&bar0->adapter_control);
1408 val64 |= ADAPTER_EOI_TX_ON;
1409 writeq(val64, &bar0->adapter_control);
1411 /* SXE-002: Initialize link and activity LED */
1412 subid = nic->pdev->subsystem_device;
/* LED workaround applies only to subsystem revisions >= 0x07. */
1413 if ((subid & 0xFF) >= 0x07) {
1414 val64 = readq(&bar0->gpio_control);
1415 val64 |= 0x0000800000000000ULL;
1416 writeq(val64, &bar0->gpio_control);
1417 val64 = 0x0411040400000000ULL;
/* Magic write to undocumented offset 0x2700 — part of the SXE-002 fix. */
1418 writeq(val64, (void __iomem *) bar0 + 0x2700);
1422 * Don't see link state interrupts on certain switches, so
1423 * directly scheduling a link state task from here.
1425 schedule_work(&nic->set_link_task);
1428 * Here we are performing soft reset on XGXS to
1429 * force link down. Since link is already up, we will get
1430 * link state change interrupt after this reset
1432 SPECIAL_REG_WRITE(0x80010515001E0000ULL, &bar0->dtx_control, UF);
1433 val64 = readq(&bar0->dtx_control);
1435 SPECIAL_REG_WRITE(0x80010515001E00E0ULL, &bar0->dtx_control, UF);
1436 val64 = readq(&bar0->dtx_control);
1438 SPECIAL_REG_WRITE(0x80070515001F00E4ULL, &bar0->dtx_control, UF);
1439 val64 = readq(&bar0->dtx_control);
1446 * free_tx_buffers - Free all queued Tx buffers
1447 * @nic : device private variable.
1449 * Free all queued Tx buffers.
1450 * Return Value: void
1453 static void free_tx_buffers(struct s2io_nic *nic)
1455 struct net_device *dev = nic->dev;
1456 struct sk_buff *skb;
1459 mac_info_t *mac_control;
1460 struct config_param *config;
1463 mac_control = &nic->mac_control;
1464 config = &nic->config;
/* Walk every descriptor of every Tx FIFO and discard any attached skb. */
1466 for (i = 0; i < config->tx_fifo_num; i++) {
1467 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1468 txdp = (TxD_t *) nic->list_info[i][j].
1471 (struct sk_buff *) ((unsigned long) txdp->
/* Clear the TxD so the NIC cannot re-use a stale buffer pointer. */
1474 memset(txdp, 0, sizeof(TxD_t));
1478 memset(txdp, 0, sizeof(TxD_t));
1482 "%s:forcibly freeing %d skbs on FIFO%d\n",
/* Reset both ring cursors; the FIFO is now empty. */
1484 mac_control->tx_curr_get_info[i].offset = 0;
1485 mac_control->tx_curr_put_info[i].offset = 0;
1490 * stop_nic - To stop the nic
1491 * @nic : device private variable.
1493 * This function does exactly the opposite of what the start_nic()
1494 * function does. This function is called to stop the device.
1499 static void stop_nic(struct s2io_nic *nic)
1501 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1502 register u64 val64 = 0;
1503 u16 interruptible, i;
1504 mac_info_t *mac_control;
1505 struct config_param *config;
1507 mac_control = &nic->mac_control;
1508 config = &nic->config;
1510 /* Disable all interrupts */
1511 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1513 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
/* Turn off the PRC (receive engine) on every configured Rx ring. */
1516 for (i = 0; i < config->rx_ring_num; i++) {
1517 val64 = readq(&bar0->prc_ctrl_n[i]);
1518 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
1519 writeq(val64, &bar0->prc_ctrl_n[i]);
1524 * fill_rx_buffers - Allocates the Rx side skbs
1525 * @nic: device private variable
1526 * @ring_no: ring number
1528 * The function allocates Rx side skbs and puts the physical
1529 * address of these buffers into the RxD buffer pointers, so that the NIC
1530 * can DMA the received frame into these locations.
1531 * The NIC supports 3 receive modes, viz
1533 * 2. three buffer and
1534 * 3. Five buffer modes.
1535 * Each mode defines how many fragments the received frame will be split
1536 * up into by the NIC. The frame is split into L3 header, L4 Header,
1537 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
1538 * is split into 3 fragments. As of now only single buffer mode is
1541 * SUCCESS on success or an appropriate -ve value on failure.
1544 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1546 struct net_device *dev = nic->dev;
1547 struct sk_buff *skb;
1549 int off, off1, size, block_no, block_no1;
1550 int offset, offset1;
/* Number of buffers still needed = budget minus those already posted. */
1552 u32 alloc_cnt = nic->pkt_cnt[ring_no] -
1553 atomic_read(&nic->rx_bufs_left[ring_no]);
1554 mac_info_t *mac_control;
1555 struct config_param *config;
1556 #ifdef CONFIG_2BUFF_MODE
1561 dma_addr_t rxdpphys;
1563 #ifndef CONFIG_S2IO_NAPI
1564 unsigned long flags;
1567 mac_control = &nic->mac_control;
1568 config = &nic->config;
/* Single-buffer skb size: MTU plus worst-case L2 headers. */
1570 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
1571 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
1573 while (alloc_tab < alloc_cnt) {
1574 block_no = mac_control->rx_curr_put_info[ring_no].
1576 block_no1 = mac_control->rx_curr_get_info[ring_no].
1578 off = mac_control->rx_curr_put_info[ring_no].offset;
1579 off1 = mac_control->rx_curr_get_info[ring_no].offset;
1580 #ifndef CONFIG_2BUFF_MODE
1581 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
1582 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
1584 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
1585 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
1588 rxdp = nic->rx_blocks[ring_no][block_no].
1589 block_virt_addr + off;
/*
 * Put cursor has caught up with the get cursor and the RxD still
 * owns an skb — the ring appears full, stop replenishing.
 */
1590 if ((offset == offset1) && (rxdp->Host_Control)) {
1591 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
1592 DBG_PRINT(INTR_DBG, " info equated\n");
1595 #ifndef CONFIG_2BUFF_MODE
/* Hit the end-of-block marker descriptor: advance to the next block. */
1596 if (rxdp->Control_1 == END_OF_BLOCK) {
1597 mac_control->rx_curr_put_info[ring_no].
1599 mac_control->rx_curr_put_info[ring_no].
1600 block_index %= nic->block_count[ring_no];
1601 block_no = mac_control->rx_curr_put_info
1602 [ring_no].block_index;
1604 off %= (MAX_RXDS_PER_BLOCK + 1);
1605 mac_control->rx_curr_put_info[ring_no].offset =
/* The marker RxD's Control_2 holds the next block's virtual address. */
1607 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
1608 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
1611 #ifndef CONFIG_S2IO_NAPI
/* Publish the new put position for the (non-NAPI) Rx interrupt path. */
1612 spin_lock_irqsave(&nic->put_lock, flags);
1613 nic->put_pos[ring_no] =
1614 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
1615 spin_unlock_irqrestore(&nic->put_lock, flags);
1618 if (rxdp->Host_Control == END_OF_BLOCK) {
1619 mac_control->rx_curr_put_info[ring_no].
1621 mac_control->rx_curr_put_info[ring_no].
1622 block_index %= nic->block_count[ring_no];
1623 block_no = mac_control->rx_curr_put_info
1624 [ring_no].block_index;
1626 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
1627 dev->name, block_no,
1628 (unsigned long long) rxdp->Control_1);
1629 mac_control->rx_curr_put_info[ring_no].offset =
1631 rxdp = nic->rx_blocks[ring_no][block_no].
1634 #ifndef CONFIG_S2IO_NAPI
1635 spin_lock_irqsave(&nic->put_lock, flags);
1636 nic->put_pos[ring_no] = (block_no *
1637 (MAX_RXDS_PER_BLOCK + 1)) + off;
1638 spin_unlock_irqrestore(&nic->put_lock, flags);
1642 #ifndef CONFIG_2BUFF_MODE
/* Descriptor still owned by the NIC — nothing to replenish here. */
1643 if (rxdp->Control_1 & RXD_OWN_XENA)
1645 if (rxdp->Control_2 & BIT(0))
1648 mac_control->rx_curr_put_info[ring_no].
1652 #ifdef CONFIG_2BUFF_MODE
1654 * RxDs Spanning cache lines will be replenished only
1655 * if the succeeding RxD is also owned by Host. It
1656 * will always be the ((8*i)+3) and ((8*i)+6)
1657 * descriptors for the 48 byte descriptor. The offending
1658 * descriptor is of course the 3rd descriptor.
1660 rxdpphys = nic->rx_blocks[ring_no][block_no].
1661 block_dma_addr + (off * sizeof(RxD_t));
/* Guard per the note above: only if this RxD ends past byte 80 of a 128B line. */
1662 if (((u64) (rxdpphys)) % 128 > 80) {
1663 rxdpnext = nic->rx_blocks[ring_no][block_no].
1664 block_virt_addr + (off + 1);
1665 if (rxdpnext->Host_Control == END_OF_BLOCK) {
1666 nextblk = (block_no + 1) %
1667 (nic->block_count[ring_no]);
1668 rxdpnext = nic->rx_blocks[ring_no]
1669 [nextblk].block_virt_addr;
1671 if (rxdpnext->Control_2 & BIT(0))
1676 #ifndef CONFIG_2BUFF_MODE
1677 skb = dev_alloc_skb(size + NET_IP_ALIGN);
1679 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
1682 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
1683 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
1686 #ifndef CONFIG_2BUFF_MODE
/* Single-buffer mode: whole frame DMAs into Buffer0. */
1687 skb_reserve(skb, NET_IP_ALIGN);
1688 memset(rxdp, 0, sizeof(RxD_t));
1689 rxdp->Buffer0_ptr = pci_map_single
1690 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
1691 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
1692 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
/* Stash the skb pointer in the descriptor for retrieval on completion. */
1693 rxdp->Host_Control = (unsigned long) (skb);
/* Hand ownership to the NIC last, after the descriptor is fully set up. */
1694 rxdp->Control_1 |= RXD_OWN_XENA;
1696 off %= (MAX_RXDS_PER_BLOCK + 1);
1697 mac_control->rx_curr_put_info[ring_no].offset = off;
1699 ba = &nic->ba[ring_no][block_no][off];
1700 skb_reserve(skb, BUF0_LEN);
1701 tmp = (unsigned long) skb->data;
1704 skb->data = (void *) tmp;
1705 skb->tail = (void *) tmp;
1707 memset(rxdp, 0, sizeof(RxD_t));
/* 2-buffer mode: payload in Buffer2, headers in the ba->ba_0/ba_1 areas. */
1708 rxdp->Buffer2_ptr = pci_map_single
1709 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
1710 PCI_DMA_FROMDEVICE);
1712 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
1713 PCI_DMA_FROMDEVICE);
1715 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
1716 PCI_DMA_FROMDEVICE);
1718 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
1719 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
1720 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
1721 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
1722 rxdp->Host_Control = (u64) ((unsigned long) (skb));
1723 rxdp->Control_1 |= RXD_OWN_XENA;
1725 mac_control->rx_curr_put_info[ring_no].offset = off;
1727 atomic_inc(&nic->rx_bufs_left[ring_no]);
1736 * free_rx_buffers - Frees all Rx buffers
1737 * @sp: device private variable.
1739 * This function will free all Rx buffers allocated by host.
1744 static void free_rx_buffers(struct s2io_nic *sp)
1746 struct net_device *dev = sp->dev;
1747 int i, j, blk = 0, off, buf_cnt = 0;
1749 struct sk_buff *skb;
1750 mac_info_t *mac_control;
1751 struct config_param *config;
1752 #ifdef CONFIG_2BUFF_MODE
1756 mac_control = &sp->mac_control;
1757 config = &sp->config;
/* Walk every RxD of every ring, unmapping and freeing any attached skb. */
1759 for (i = 0; i < config->rx_ring_num; i++) {
1760 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
1761 off = j % (MAX_RXDS_PER_BLOCK + 1);
1762 rxdp = sp->rx_blocks[i][blk].block_virt_addr + off;
1764 #ifndef CONFIG_2BUFF_MODE
/* End-of-block marker: chase the link to the next descriptor block. */
1765 if (rxdp->Control_1 == END_OF_BLOCK) {
1767 (RxD_t *) ((unsigned long) rxdp->
1773 if (rxdp->Host_Control == END_OF_BLOCK) {
/* Descriptor already owned by host with no pending DMA: just clear it. */
1779 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
1780 memset(rxdp, 0, sizeof(RxD_t));
1785 (struct sk_buff *) ((unsigned long) rxdp->
1788 #ifndef CONFIG_2BUFF_MODE
1789 pci_unmap_single(sp->pdev, (dma_addr_t)
1792 HEADER_ETHERNET_II_802_3_SIZE
1793 + HEADER_802_2_SIZE +
1795 PCI_DMA_FROMDEVICE);
1797 ba = &sp->ba[i][blk][off];
/* 2-buffer mode: unmap all three mapped regions before freeing the skb. */
1798 pci_unmap_single(sp->pdev, (dma_addr_t)
1801 PCI_DMA_FROMDEVICE);
1802 pci_unmap_single(sp->pdev, (dma_addr_t)
1805 PCI_DMA_FROMDEVICE);
1806 pci_unmap_single(sp->pdev, (dma_addr_t)
1808 dev->mtu + BUF0_LEN + 4,
1809 PCI_DMA_FROMDEVICE);
1812 atomic_dec(&sp->rx_bufs_left[i]);
1815 memset(rxdp, 0, sizeof(RxD_t));
/* Ring is now empty: rewind both cursors and zero the posted count. */
1817 mac_control->rx_curr_put_info[i].block_index = 0;
1818 mac_control->rx_curr_get_info[i].block_index = 0;
1819 mac_control->rx_curr_put_info[i].offset = 0;
1820 mac_control->rx_curr_get_info[i].offset = 0;
1821 atomic_set(&sp->rx_bufs_left[i], 0);
1822 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
1823 dev->name, buf_cnt, i);
1828 * s2io_poll - Rx interrupt handler for NAPI support
1829 * @dev : pointer to the device structure.
1830 * @budget : The number of packets that were budgeted to be processed
1831 * during one pass through the 'Poll" function.
1833 * Comes into picture only if NAPI support has been incorporated. It does
1834 * the same thing that rx_intr_handler does, but not in an interrupt context;
1835 * also it will process only a given number of packets.
1837 * 0 on success and 1 if there are No Rx packets to be processed.
1840 #ifdef CONFIG_S2IO_NAPI
1841 static int s2io_poll(struct net_device *dev, int *budget)
1843 nic_t *nic = dev->priv;
1844 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1845 int pkts_to_process = *budget, pkt_cnt = 0;
1846 register u64 val64 = 0;
1847 rx_curr_get_info_t get_info, put_info;
1848 int i, get_block, put_block, get_offset, put_offset, ring_bufs;
1849 #ifndef CONFIG_2BUFF_MODE
1852 struct sk_buff *skb;
1854 mac_info_t *mac_control;
1855 struct config_param *config;
1856 #ifdef CONFIG_2BUFF_MODE
1860 mac_control = &nic->mac_control;
1861 config = &nic->config;
/* Never process more than the per-device quota allows. */
1863 if (pkts_to_process > dev->quota)
1864 pkts_to_process = dev->quota;
/* rx_traffic_int is an R1 register: write back the read value to clear it. */
1866 val64 = readq(&bar0->rx_traffic_int);
1867 writeq(val64, &bar0->rx_traffic_int);
1869 for (i = 0; i < config->rx_ring_num; i++) {
1870 get_info = mac_control->rx_curr_get_info[i];
1871 get_block = get_info.block_index;
1872 put_info = mac_control->rx_curr_put_info[i];
1873 put_block = put_info.block_index;
1874 ring_bufs = config->rx_cfg[i].num_rxd;
1875 rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
1877 #ifndef CONFIG_2BUFF_MODE
1878 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1880 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
/* Consume host-owned descriptors until budget runs out or we meet put. */
1882 while ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
1883 (((get_offset + 1) % ring_bufs) != put_offset)) {
1884 if (--pkts_to_process < 0) {
1887 if (rxdp->Control_1 == END_OF_BLOCK) {
1889 (RxD_t *) ((unsigned long) rxdp->
1893 (MAX_RXDS_PER_BLOCK + 1);
1895 get_block %= nic->block_count[i];
1896 mac_control->rx_curr_get_info[i].
1897 offset = get_info.offset;
1898 mac_control->rx_curr_get_info[i].
1899 block_index = get_block;
1903 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1906 (struct sk_buff *) ((unsigned long) rxdp->
1909 DBG_PRINT(ERR_DBG, "%s: The skb is ",
1911 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
/* Frame length is packed in the top 16 bits of the Buffer0 size field. */
1914 val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
1915 val16 = (u16) (val64 >> 48);
1916 cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
1917 pci_unmap_single(nic->pdev, (dma_addr_t)
1920 HEADER_ETHERNET_II_802_3_SIZE +
1923 PCI_DMA_FROMDEVICE);
1924 rx_osm_handler(nic, val16, rxdp, i);
1927 get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
1929 nic->rx_blocks[i][get_block].block_virt_addr +
1931 mac_control->rx_curr_get_info[i].offset =
1935 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1937 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
/* 2-buffer mode also requires the Buffer_Empty bit (BIT(0)) to be clear. */
1939 while (((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
1940 !(rxdp->Control_2 & BIT(0))) &&
1941 (((get_offset + 1) % ring_bufs) != put_offset)) {
1942 if (--pkts_to_process < 0) {
1945 skb = (struct sk_buff *) ((unsigned long)
1946 rxdp->Host_Control);
1948 DBG_PRINT(ERR_DBG, "%s: The skb is ",
1950 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
1954 pci_unmap_single(nic->pdev, (dma_addr_t)
1956 BUF0_LEN, PCI_DMA_FROMDEVICE);
1957 pci_unmap_single(nic->pdev, (dma_addr_t)
1959 BUF1_LEN, PCI_DMA_FROMDEVICE);
1960 pci_unmap_single(nic->pdev, (dma_addr_t)
1962 dev->mtu + BUF0_LEN + 4,
1963 PCI_DMA_FROMDEVICE);
1964 ba = &nic->ba[i][get_block][get_info.offset];
1966 rx_osm_handler(nic, rxdp, i, ba);
1969 mac_control->rx_curr_get_info[i].offset =
1972 nic->rx_blocks[i][get_block].block_virt_addr +
1975 if (get_info.offset &&
1976 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
1977 get_info.offset = 0;
1978 mac_control->rx_curr_get_info[i].
1979 offset = get_info.offset;
1981 get_block %= nic->block_count[i];
1982 mac_control->rx_curr_get_info[i].
1983 block_index = get_block;
1985 nic->rx_blocks[i][get_block].
1989 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
/* Budget not exhausted: report done and re-arm Rx interrupts. */
1998 dev->quota -= pkt_cnt;
2000 netif_rx_complete(dev);
2002 for (i = 0; i < config->rx_ring_num; i++) {
2003 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2004 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2005 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2009 /* Re enable the Rx interrupts. */
2010 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
/* Budget exhausted: stay on the poll list, replenish, and return. */
2014 dev->quota -= pkt_cnt;
2017 for (i = 0; i < config->rx_ring_num; i++) {
2018 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2019 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2020 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2028 * rx_intr_handler - Rx interrupt handler
2029 * @nic: device private variable.
2031 * If the interrupt is because of a received frame or if the
2032 * receive ring contains fresh as yet un-processed frames, this function is
2033 * called. It picks out the RxD at which place the last Rx processing had
2034 * stopped and sends the skb to the OSM's Rx handler and then increments
2040 static void rx_intr_handler(struct s2io_nic *nic)
2042 struct net_device *dev = (struct net_device *) nic->dev;
2043 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
2044 rx_curr_get_info_t get_info, put_info;
2046 struct sk_buff *skb;
2047 #ifndef CONFIG_2BUFF_MODE
2050 register u64 val64 = 0;
2051 int get_block, get_offset, put_block, put_offset, ring_bufs;
2053 mac_info_t *mac_control;
2054 struct config_param *config;
2055 #ifdef CONFIG_2BUFF_MODE
2059 mac_control = &nic->mac_control;
2060 config = &nic->config;
2063 * rx_traffic_int reg is an R1 register, hence we read and write back
2064 * the same value in the register to clear it.
2066 val64 = readq(&bar0->rx_traffic_int);
2067 writeq(val64, &bar0->rx_traffic_int);
2069 for (i = 0; i < config->rx_ring_num; i++) {
2070 get_info = mac_control->rx_curr_get_info[i];
2071 get_block = get_info.block_index;
2072 put_info = mac_control->rx_curr_put_info[i];
2073 put_block = put_info.block_index;
2074 ring_bufs = config->rx_cfg[i].num_rxd;
2075 rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
2077 #ifndef CONFIG_2BUFF_MODE
2078 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
/* put_pos is shared with fill_rx_buffers(); read it under put_lock. */
2080 spin_lock(&nic->put_lock);
2081 put_offset = nic->put_pos[i];
2082 spin_unlock(&nic->put_lock);
/* Consume descriptors the NIC has handed back until we meet put. */
2083 while ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
2084 (((get_offset + 1) % ring_bufs) != put_offset)) {
2085 if (rxdp->Control_1 == END_OF_BLOCK) {
2086 rxdp = (RxD_t *) ((unsigned long)
2090 (MAX_RXDS_PER_BLOCK + 1);
2092 get_block %= nic->block_count[i];
2093 mac_control->rx_curr_get_info[i].
2094 offset = get_info.offset;
2095 mac_control->rx_curr_get_info[i].
2096 block_index = get_block;
2100 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2102 skb = (struct sk_buff *) ((unsigned long)
2103 rxdp->Host_Control);
2105 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2107 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
/* Frame length is packed in the top 16 bits of the Buffer0 size field. */
2110 val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
2111 val16 = (u16) (val64 >> 48);
2112 cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
2113 pci_unmap_single(nic->pdev, (dma_addr_t)
2116 HEADER_ETHERNET_II_802_3_SIZE +
2119 PCI_DMA_FROMDEVICE);
2120 rx_osm_handler(nic, val16, rxdp, i);
2122 get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
2124 nic->rx_blocks[i][get_block].block_virt_addr +
2126 mac_control->rx_curr_get_info[i].offset =
/* Optional cap on packets handled per invocation (module parameter). */
2129 if ((indicate_max_pkts)
2130 && (pkt_cnt > indicate_max_pkts))
2134 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2136 spin_lock(&nic->put_lock);
2137 put_offset = nic->put_pos[i];
2138 spin_unlock(&nic->put_lock);
/* 2-buffer mode also requires the Buffer_Empty bit (BIT(0)) to be clear. */
2139 while (((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
2140 !(rxdp->Control_2 & BIT(0))) &&
2141 (((get_offset + 1) % ring_bufs) != put_offset)) {
2142 skb = (struct sk_buff *) ((unsigned long)
2143 rxdp->Host_Control);
2145 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2147 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2151 pci_unmap_single(nic->pdev, (dma_addr_t)
2153 BUF0_LEN, PCI_DMA_FROMDEVICE);
2154 pci_unmap_single(nic->pdev, (dma_addr_t)
2156 BUF1_LEN, PCI_DMA_FROMDEVICE);
2157 pci_unmap_single(nic->pdev, (dma_addr_t)
2159 dev->mtu + BUF0_LEN + 4,
2160 PCI_DMA_FROMDEVICE);
2161 ba = &nic->ba[i][get_block][get_info.offset];
2163 rx_osm_handler(nic, rxdp, i, ba);
2166 mac_control->rx_curr_get_info[i].offset =
2169 nic->rx_blocks[i][get_block].block_virt_addr +
/* Wrap to the next descriptor block at a block boundary. */
2172 if (get_info.offset &&
2173 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2174 get_info.offset = 0;
2175 mac_control->rx_curr_get_info[i].
2176 offset = get_info.offset;
2178 get_block %= nic->block_count[i];
2179 mac_control->rx_curr_get_info[i].
2180 block_index = get_block;
2182 nic->rx_blocks[i][get_block].
2186 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2189 if ((indicate_max_pkts)
2190 && (pkt_cnt > indicate_max_pkts))
2194 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2200 * tx_intr_handler - Transmit interrupt handler
2201 * @nic : device private variable
2203 * If an interrupt was raised to indicate DMA complete of the
2204 * Tx packet, this function is called. It identifies the last TxD
2205 * whose buffer was freed and frees all skbs whose data have already
2206 * DMA'ed into the NICs internal memory.
2211 static void tx_intr_handler(struct s2io_nic *nic)
2213 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2214 struct net_device *dev = (struct net_device *) nic->dev;
2215 tx_curr_get_info_t get_info, put_info;
2216 struct sk_buff *skb;
2218 register u64 val64 = 0;
2221 mac_info_t *mac_control;
2222 struct config_param *config;
2224 mac_control = &nic->mac_control;
2225 config = &nic->config;
2228 * tx_traffic_int reg is an R1 register, hence we read and write
2229 * back the same value in the register to clear it.
2231 val64 = readq(&bar0->tx_traffic_int);
2232 writeq(val64, &bar0->tx_traffic_int);
2234 for (i = 0; i < config->tx_fifo_num; i++) {
2235 get_info = mac_control->tx_curr_get_info[i];
2236 put_info = mac_control->tx_curr_put_info[i];
2237 txdlp = (TxD_t *) nic->list_info[i][get_info.offset].
/* Reap completed TxD lists: host-owned, unconsumed, with an skb attached. */
2239 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2240 (get_info.offset != put_info.offset) &&
2241 (txdlp->Host_Control)) {
2242 /* Check for TxD errors */
2243 if (txdlp->Control_1 & TXD_T_CODE) {
2244 unsigned long long err;
2245 err = txdlp->Control_1 & TXD_T_CODE;
2246 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2250 skb = (struct sk_buff *) ((unsigned long)
2251 txdlp->Host_Control);
2253 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2255 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2258 nic->tx_pkt_count++;
2260 frg_cnt = skb_shinfo(skb)->nr_frags;
2262 /* For unfragmented skb */
/* Linear part length is len minus the paged (fragment) portion. */
2263 pci_unmap_single(nic->pdev, (dma_addr_t)
2264 txdlp->Buffer_Pointer,
2265 skb->len - skb->data_len,
2268 TxD_t *temp = txdlp;
/* One additional TxD per page fragment; unmap each in turn. */
2270 for (j = 0; j < frg_cnt; j++, txdlp++) {
2272 &skb_shinfo(skb)->frags[j];
2273 pci_unmap_page(nic->pdev,
2283 (sizeof(TxD_t) * config->max_txds));
2285 /* Updating the statistics block */
2286 nic->stats.tx_packets++;
2287 nic->stats.tx_bytes += skb->len;
/* IRQ-safe free: this path runs in interrupt context. */
2288 dev_kfree_skb_irq(skb);
2291 get_info.offset %= get_info.fifo_len + 1;
2292 txdlp = (TxD_t *) nic->list_info[i]
2293 [get_info.offset].list_virt_addr;
2294 mac_control->tx_curr_get_info[i].offset =
/* Descriptors were freed; restart the queue if s2io_xmit stopped it. */
2299 spin_lock(&nic->tx_lock);
2300 if (netif_queue_stopped(dev))
2301 netif_wake_queue(dev);
2302 spin_unlock(&nic->tx_lock);
2306 * alarm_intr_handler - Alarm Interrupt handler
2307 * @nic: device private variable
2308 * Description: If the interrupt was neither because of Rx packet or Tx
2309 * complete, this function is called. If the interrupt was to indicate
2310 * a loss of link, the OSM link status handler is invoked for any other
2311 * alarm interrupt the block that raised the interrupt is displayed
2312 * and a H/W reset is issued.
2317 static void alarm_intr_handler(struct s2io_nic *nic)
2319 struct net_device *dev = (struct net_device *) nic->dev;
2320 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2321 register u64 val64 = 0, err_reg = 0;
2323 /* Handling link status change error Intr */
2324 err_reg = readq(&bar0->mac_rmac_err_reg);
/* Write-back of the read value clears the latched error bits. */
2325 writeq(err_reg, &bar0->mac_rmac_err_reg);
2326 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
/* Defer link handling to process context via the set_link work item. */
2327 schedule_work(&nic->set_link_task);
2330 /* In case of a serious error, the device will be Reset. */
2331 val64 = readq(&bar0->serr_source);
2332 if (val64 & SERR_SOURCE_ANY) {
2333 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2334 DBG_PRINT(ERR_DBG, "serious error!!\n");
2335 netif_stop_queue(dev);
2336 schedule_work(&nic->rst_timer_task);
2340 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2341 * Error occurs, the adapter will be recycled by disabling the
2342 * adapter enable bit and enabling it again after the device
2343 * becomes Quiescent.
2345 val64 = readq(&bar0->pcc_err_reg);
2346 writeq(val64, &bar0->pcc_err_reg);
2347 if (val64 & PCC_FB_ECC_DB_ERR) {
2348 u64 ac = readq(&bar0->adapter_control);
2349 ac &= ~(ADAPTER_CNTL_EN);
2350 writeq(ac, &bar0->adapter_control);
2351 ac = readq(&bar0->adapter_control);
2352 schedule_work(&nic->set_link_task);
2355 /* Other type of interrupts are not being handled now, TODO */
2359 * wait_for_cmd_complete - waits for a command to complete.
2360 * @sp : private member of the device structure, which is a pointer to the
2361 * s2io_nic structure.
2362 * Description: Function that waits for a command to Write into RMAC
2363 * ADDR DATA registers to be completed and returns either success or
2364 * error depending on whether the command was complete or not.
2366 * SUCCESS on success and FAILURE on failure.
2369 static int wait_for_cmd_complete(nic_t * sp)
2371 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Pessimistic default: only flipped to success once the strobe clears. */
2372 int ret = FAILURE, cnt = 0;
/* Poll the command register until the EXECUTING strobe bit drops. */
2376 val64 = readq(&bar0->rmac_addr_cmd_mem);
2377 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2390 * s2io_reset - Resets the card.
2391 * @sp : private member of the device structure.
2392 * Description: Function to Reset the card. This function then also
2393 * restores the previously saved PCI configuration space registers as
2394 * the card reset also resets the configuration space.
2399 static void s2io_reset(nic_t * sp)
2401 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2405 val64 = SW_RESET_ALL;
2406 writeq(val64, &bar0->sw_reset);
2409 * At this stage, if the PCI write is indeed completed, the
2410 * card is reset and so is the PCI Config space of the device.
2411 * So a read cannot be issued at this stage on any of the
2412 * registers to ensure the write into "sw_reset" register
2414 * Question: Is there any system call that will explicitly force
2415 * all the write commands still pending on the bus to be pushed
2417 * As of now I am just giving a 250ms delay and hoping that the
2418 * PCI write to sw_reset register is done by this time.
2422 /* Restore the PCI state saved during initialization. */
2423 pci_restore_state(sp->pdev);
2428 /* SXE-002: Configure link and activity LED to turn it off */
2429 subid = sp->pdev->subsystem_device;
/* Same LED workaround as start_nic(), for subsystem revisions >= 0x07. */
2430 if ((subid & 0xFF) >= 0x07) {
2431 val64 = readq(&bar0->gpio_control);
2432 val64 |= 0x0000800000000000ULL;
2433 writeq(val64, &bar0->gpio_control);
2434 val64 = 0x0411040400000000ULL;
2435 writeq(val64, (void __iomem *) bar0 + 0x2700);
/* After a reset the adapter-enable history no longer applies. */
2438 sp->device_enabled_once = FALSE;
/*
 * NOTE(review): sampled listing — the embedded original line numbers are
 * non-contiguous, so loop headers, braces and #ifdef lines are elided.
 * Code below left byte-identical; only comments added.
 */
2442 * s2io_set_swapper - to set the swapper control on the card
2443 * @sp : private member of the device structure,
2444 * pointer to the s2io_nic structure.
2445 * Description: Function to set the swapper control on the card
2446 * correctly depending on the 'endianness' of the system.
2448 * SUCCESS on success and FAILURE on failure.
2451 static int s2io_set_swapper(nic_t * sp)
2453 struct net_device *dev = sp->dev;
2454 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2455 u64 val64, valt, valr;
2458 * Set proper endian settings and verify the same by reading
2459 * the PIF Feed-back register.
2462 val64 = readq(&bar0->pif_rd_swapper_fb);
/* Feed-back register reads back this magic only when swapper is correct. */
2463 if (val64 != 0x0123456789ABCDEFULL) {
2465 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2466 0x8100008181000081ULL, /* FE=1, SE=0 */
2467 0x4200004242000042ULL, /* FE=0, SE=1 */
2468 0}; /* FE=0, SE=0 */
/* Try each candidate swapper_ctrl value until the feed-back magic
 * matches (the for-loop header is elided in this listing). */
2471 writeq(value[i], &bar0->swapper_ctrl);
2472 val64 = readq(&bar0->pif_rd_swapper_fb);
2473 if (val64 == 0x0123456789ABCDEFULL)
2478 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2480 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2481 (unsigned long long) val64);
2486 valr = readq(&bar0->swapper_ctrl);
/* Second check: round-trip a known pattern through xmsi_address. */
2489 valt = 0x0123456789ABCDEFULL;
2490 writeq(valt, &bar0->xmsi_address);
2491 val64 = readq(&bar0->xmsi_address);
2495 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2496 0x0081810000818100ULL, /* FE=1, SE=0 */
2497 0x0042420000424200ULL, /* FE=0, SE=1 */
2498 0}; /* FE=0, SE=0 */
2501 writeq((value[i] | valr), &bar0->swapper_ctrl);
2502 writeq(valt, &bar0->xmsi_address);
2503 val64 = readq(&bar0->xmsi_address);
2509 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2510 DBG_PRINT(ERR_DBG, "reads:0x%llx\n",val64);
2514 val64 = readq(&bar0->swapper_ctrl);
/* Keep only the upper 16 bits; per-path swap-enable bits rebuilt below. */
2515 val64 &= 0xFFFF000000000000ULL;
2519 * The device by default set to a big endian format, so a
2520 * big endian driver need not set anything.
/* Presumably the big-endian (#ifdef) branch — flip-enable (FE) bits on
 * most paths, SE only on a few; preprocessor lines elided, confirm. */
2522 val64 |= (SWAPPER_CTRL_TXP_FE |
2523 SWAPPER_CTRL_TXP_SE |
2524 SWAPPER_CTRL_TXD_R_FE |
2525 SWAPPER_CTRL_TXD_W_FE |
2526 SWAPPER_CTRL_TXF_R_FE |
2527 SWAPPER_CTRL_RXD_R_FE |
2528 SWAPPER_CTRL_RXD_W_FE |
2529 SWAPPER_CTRL_RXF_W_FE |
2530 SWAPPER_CTRL_XMSI_FE |
2531 SWAPPER_CTRL_XMSI_SE |
2532 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2533 writeq(val64, &bar0->swapper_ctrl);
2536 * Initially we enable all bits to make it accessible by the
2537 * driver, then we selectively enable only those bits that
/* Presumably the little-endian branch — both FE and SE bits set so the
 * card byte-swaps on every DMA/PIO path; confirm against full source. */
2540 val64 |= (SWAPPER_CTRL_TXP_FE |
2541 SWAPPER_CTRL_TXP_SE |
2542 SWAPPER_CTRL_TXD_R_FE |
2543 SWAPPER_CTRL_TXD_R_SE |
2544 SWAPPER_CTRL_TXD_W_FE |
2545 SWAPPER_CTRL_TXD_W_SE |
2546 SWAPPER_CTRL_TXF_R_FE |
2547 SWAPPER_CTRL_RXD_R_FE |
2548 SWAPPER_CTRL_RXD_R_SE |
2549 SWAPPER_CTRL_RXD_W_FE |
2550 SWAPPER_CTRL_RXD_W_SE |
2551 SWAPPER_CTRL_RXF_W_FE |
2552 SWAPPER_CTRL_XMSI_FE |
2553 SWAPPER_CTRL_XMSI_SE |
2554 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2555 writeq(val64, &bar0->swapper_ctrl);
2557 val64 = readq(&bar0->swapper_ctrl);
2560 * Verifying if endian settings are accurate by reading a
2561 * feedback register.
2563 val64 = readq(&bar0->pif_rd_swapper_fb);
2564 if (val64 != 0x0123456789ABCDEFULL) {
2565 /* Endian settings are incorrect, calls for another dekko. */
2566 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2568 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2569 (unsigned long long) val64);
2576 /* ********************************************************* *
2577 * Functions defined below concern the OS part of the driver *
2578 * ********************************************************* */
2581 * s2io_open - open entry point of the driver
2582 * @dev : pointer to the device structure.
2584 * This function is the open entry point of the driver. It mainly calls a
2585 * function to allocate Rx buffers and inserts them into the buffer
2586 * descriptors and then enables the Rx part of the NIC.
2588 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2592 static int s2io_open(struct net_device *dev)
2594 nic_t *sp = dev->priv;
2598 * Make sure you have link off by default every time
2599 * Nic is initialized
2601 netif_carrier_off(dev);
2602 sp->last_link_state = LINK_DOWN;
2604 /* Initialize H/W and enable interrupts */
2605 if (s2io_card_up(sp)) {
2606 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2611 /* After proper initialization of H/W, register ISR */
/* NOTE(review): the (int) cast on sp->irq is suspect — confirm the
 * declared type of sp->irq; request_irq takes an unsigned int. */
2612 err = request_irq((int) sp->irq, s2io_isr, SA_SHIRQ,
2616 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2621 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2622 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
/* All initialization succeeded — allow the stack to start transmitting. */
2627 netif_start_queue(dev);
2632 * s2io_close - close entry point of the driver
2633 * @dev : device pointer.
2635 * This is the stop entry point of the driver. It needs to undo exactly
2636 * whatever was done by the open entry point, thus it's usually referred to
2637 * as the close function. Among other things this function mainly stops the
2638 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2640 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2644 static int s2io_close(struct net_device *dev)
2646 nic_t *sp = dev->priv;
/* Drain any deferred work (e.g. link/reset handlers) before teardown. */
2648 flush_scheduled_work();
2649 netif_stop_queue(dev);
2650 /* Reset card, kill tasklet and free Tx and Rx buffers. */
2653 free_irq(dev->irq, dev);
2654 sp->device_close_flag = TRUE; /* Device is shut down. */
2659 * s2io_xmit - Tx entry point of the driver
2660 * @skb : the socket buffer containing the Tx data.
2661 * @dev : device pointer.
2663 * This function is the Tx entry point of the driver. S2IO NIC supports
2664 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
2665 * NOTE: when device cant queue the pkt, just the trans_start variable will
2668 * 0 on success & 1 on failure.
2671 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2673 nic_t *sp = dev->priv;
2674 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
2677 TxFIFO_element_t __iomem *tx_fifo;
2678 unsigned long flags;
2682 mac_info_t *mac_control;
2683 struct config_param *config;
2684 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2686 mac_control = &sp->mac_control;
2687 config = &sp->config;
2689 DBG_PRINT(TX_DBG, "%s: In S2IO Tx routine\n", dev->name);
/* Whole Tx path runs under tx_lock with IRQs off. */
2690 spin_lock_irqsave(&sp->tx_lock, flags);
2692 if (atomic_read(&sp->card_state) == CARD_DOWN) {
2693 DBG_PRINT(ERR_DBG, "%s: Card going down for reset\n",
2695 spin_unlock_irqrestore(&sp->tx_lock, flags);
/* Pick up the ring's put/get offsets and the descriptor to fill. */
2700 put_off = (u16) mac_control->tx_curr_put_info[queue].offset;
2701 get_off = (u16) mac_control->tx_curr_get_info[queue].offset;
2702 txdp = (TxD_t *) sp->list_info[queue][put_off].list_virt_addr;
2704 queue_len = mac_control->tx_curr_put_info[queue].fifo_len + 1;
2705 /* Avoid "put" pointer going beyond "get" pointer */
2706 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
2707 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
2708 netif_stop_queue(dev);
2710 spin_unlock_irqrestore(&sp->tx_lock, flags);
/* LSO (TSO) setup: program MSS into the first descriptor when present. */
2714 mss = skb_shinfo(skb)->tso_size;
2716 txdp->Control_1 |= TXD_TCP_LSO_EN;
2717 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
2721 frg_cnt = skb_shinfo(skb)->nr_frags;
/* Linear part of the skb (total length minus paged-fragment length). */
2722 frg_len = skb->len - skb->data_len;
2724 txdp->Host_Control = (unsigned long) skb;
2725 txdp->Buffer_Pointer = pci_map_single
2726 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
2727 if (skb->ip_summed == CHECKSUM_HW) {
2729 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
2733 txdp->Control_2 |= config->tx_intr_type;
2735 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
2736 TXD_GATHER_CODE_FIRST);
/* Hand descriptor ownership to the NIC. */
2737 txdp->Control_1 |= TXD_LIST_OWN_XENA;
2739 /* For fragmented SKB. */
2740 for (i = 0; i < frg_cnt; i++) {
2741 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2743 txdp->Buffer_Pointer = (u64) pci_map_page
2744 (sp->pdev, frag->page, frag->page_offset,
2745 frag->size, PCI_DMA_TODEVICE);
2746 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
2748 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
/* Kick the FIFO: write the descriptor-list physical address, then the
 * list-control word with first/last list and TXD count. */
2750 tx_fifo = mac_control->tx_FIFO_start[queue];
2751 val64 = sp->list_info[queue][put_off].list_phy_addr;
2752 writeq(val64, &tx_fifo->TxDL_Pointer);
2754 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
2758 val64 |= TX_FIFO_SPECIAL_FUNC;
2760 writeq(val64, &tx_fifo->List_Control);
2762 /* Perform a PCI read to flush previous writes */
2763 val64 = readq(&bar0->general_int_status);
/* Advance and wrap the put pointer. */
2766 put_off %= mac_control->tx_curr_put_info[queue].fifo_len + 1;
2767 mac_control->tx_curr_put_info[queue].offset = put_off;
2769 /* Avoid "put" pointer going beyond "get" pointer */
2770 if (((put_off + 1) % queue_len) == get_off) {
2772 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
2774 netif_stop_queue(dev);
2777 dev->trans_start = jiffies;
2778 spin_unlock_irqrestore(&sp->tx_lock, flags);
2784 * s2io_isr - ISR handler of the device .
2785 * @irq: the irq of the device.
2786 * @dev_id: a void pointer to the dev structure of the NIC.
2787 * @pt_regs: pointer to the registers pushed on the stack.
2788 * Description: This function is the ISR handler of the device. It
2789 * identifies the reason for the interrupt and calls the relevant
2790 * service routines. As a contingency measure, this ISR allocates the
2791 * recv buffers, if their numbers are below the panic value which is
2792 * presently set to 25% of the original number of rcv buffers allocated.
2794 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
2795 * IRQ_NONE: will be returned if interrupt is not from our device
2797 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2799 struct net_device *dev = (struct net_device *) dev_id;
2800 nic_t *sp = dev->priv;
2801 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2802 #ifndef CONFIG_S2IO_NAPI
2806 mac_info_t *mac_control;
2807 struct config_param *config;
2809 mac_control = &sp->mac_control;
2810 config = &sp->config;
2813 * Identify the cause for interrupt and call the appropriate
2814 * interrupt handler. Causes for the interrupt could be;
2818 * 4. Error in any functional blocks of the NIC.
2820 reason = readq(&bar0->general_int_status);
2823 /* The interrupt was not raised by Xena. */
2827 /* If Intr is because of Tx Traffic */
2828 if (reason & GEN_INTR_TXTRAFFIC) {
2829 tx_intr_handler(sp);
2832 /* If Intr is because of an error */
2833 if (reason & (GEN_ERROR_INTR))
2834 alarm_intr_handler(sp);
/* NAPI path: mask Rx interrupts and hand Rx work to the poll routine. */
2836 #ifdef CONFIG_S2IO_NAPI
2837 if (reason & GEN_INTR_RXTRAFFIC) {
2838 if (netif_rx_schedule_prep(dev)) {
2839 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
2841 __netif_rx_schedule(dev);
2845 /* If Intr is because of Rx Traffic */
2846 if (reason & GEN_INTR_RXTRAFFIC) {
2847 rx_intr_handler(sp);
2852 * If the Rx buffer count is below the panic threshold then
2853 * reallocate the buffers from the interrupt handler itself,
2854 * else schedule a tasklet to reallocate the buffers.
2856 #ifndef CONFIG_S2IO_NAPI
2857 for (i = 0; i < config->rx_ring_num; i++) {
2858 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
2859 int level = rx_buffer_level(sp, rxb_size, i);
/* PANIC level: refill inline here; the tasklet_status bit doubles as a
 * mutual-exclusion flag against the refill tasklet. */
2861 if ((level == PANIC) && (!TASKLET_IN_USE)) {
2862 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
2863 DBG_PRINT(INTR_DBG, "PANIC levels\n");
2864 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
2865 DBG_PRINT(ERR_DBG, "%s:Out of memory",
2867 DBG_PRINT(ERR_DBG, " in ISR!!\n");
2868 clear_bit(0, (&sp->tasklet_status));
2871 clear_bit(0, (&sp->tasklet_status));
2872 } else if (level == LOW) {
2873 tasklet_schedule(&sp->task);
2882 * s2io_get_stats - Updates the device statistics structure.
2883 * @dev : pointer to the device structure.
2885 * This function updates the device statistics structure in the s2io_nic
2886 * structure and returns a pointer to the same.
2888 * pointer to the updated net_device_stats structure.
2891 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
2893 nic_t *sp = dev->priv;
2894 mac_info_t *mac_control;
2895 struct config_param *config;
2897 mac_control = &sp->mac_control;
2898 config = &sp->config;
/* Copy the relevant hardware MAC counters into net_device_stats. */
2900 sp->stats.tx_errors = mac_control->stats_info->tmac_any_err_frms;
2901 sp->stats.rx_errors = mac_control->stats_info->rmac_drop_frms;
2902 sp->stats.multicast = mac_control->stats_info->rmac_vld_mcst_frms;
2903 sp->stats.rx_length_errors =
2904 mac_control->stats_info->rmac_long_frms;
2906 return (&sp->stats);
2910 * s2io_set_multicast - entry point for multicast address enable/disable.
2911 * @dev : pointer to the device structure
2913 * This function is a driver entry point which gets called by the kernel
2914 * whenever multicast addresses must be enabled/disabled. This also gets
2915 * called to set/reset promiscuous mode. Depending on the device flag, we
2916 * determine, if multicast address must be enabled or if promiscuous mode
2917 * is to be disabled etc.
2922 static void s2io_set_multicast(struct net_device *dev)
2925 struct dev_mc_list *mclist;
2926 nic_t *sp = dev->priv;
2927 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2928 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
2930 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
2933 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
2934 /* Enable all Multicast addresses */
2935 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
2936 &bar0->rmac_addr_data0_mem),
2937 &bar0->rmac_addr_data0_mem);
2937 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
2938 &bar0->rmac_addr_data1_mem);
2939 val64 = RMAC_ADDR_CMD_MEM_WE |
2940 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2941 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
2942 writeq(val64, &bar0->rmac_addr_cmd_mem);
2943 /* Wait till command completes */
2944 wait_for_cmd_complete(sp);
2947 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
2948 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
2949 /* Disable all Multicast addresses */
2950 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
2951 &bar0->rmac_addr_data0_mem);
2952 val64 = RMAC_ADDR_CMD_MEM_WE |
2953 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2954 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
2955 writeq(val64, &bar0->rmac_addr_cmd_mem);
2956 /* Wait till command completes */
2957 wait_for_cmd_complete(sp);
2960 sp->all_multi_pos = 0;
2963 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
2964 /* Put the NIC into promiscuous mode */
2965 add = &bar0->mac_cfg;
2966 val64 = readq(&bar0->mac_cfg);
2967 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
/* mac_cfg is key-protected: each 32-bit half-write must be preceded by
 * writing the unlock key to rmac_cfg_key. */
2969 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2970 writel((u32) val64, add);
2971 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2972 writel((u32) (val64 >> 32), (add + 4));
2974 val64 = readq(&bar0->mac_cfg);
2975 sp->promisc_flg = 1;
2976 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
2978 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
2979 /* Remove the NIC from promiscuous mode */
2980 add = &bar0->mac_cfg;
2981 val64 = readq(&bar0->mac_cfg);
2982 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
2984 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2985 writel((u32) val64, add);
2986 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2987 writel((u32) (val64 >> 32), (add + 4));
2989 val64 = readq(&bar0->mac_cfg);
2990 sp->promisc_flg = 0;
2991 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
2995 /* Update individual M_CAST address list */
2996 if ((!sp->m_cast_flg) && dev->mc_count) {
2998 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
2999 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3001 DBG_PRINT(ERR_DBG, "can be added, please enable ");
3002 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3006 prev_cnt = sp->mc_addr_count;
3007 sp->mc_addr_count = dev->mc_count;
3009 /* Clear out the previous list of Mc in the H/W. */
3010 for (i = 0; i < prev_cnt; i++) {
3011 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3012 &bar0->rmac_addr_data0_mem);
3013 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3014 &bar0->rmac_addr_data1_mem);
3015 val64 = RMAC_ADDR_CMD_MEM_WE |
3016 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3017 RMAC_ADDR_CMD_MEM_OFFSET
3018 (MAC_MC_ADDR_START_OFFSET + i);
3019 writeq(val64, &bar0->rmac_addr_cmd_mem);
3021 /* Wait for command completes */
3022 if (wait_for_cmd_complete(sp)) {
3023 DBG_PRINT(ERR_DBG, "%s: Adding ",
3025 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3030 /* Create the new Rx filter list and update the same in H/W. */
3031 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3032 i++, mclist = mclist->next) {
3033 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
/* Pack the 6 address bytes into a u64 (shift lines elided in listing). */
3035 for (j = 0; j < ETH_ALEN; j++) {
3036 mac_addr |= mclist->dmi_addr[j];
3040 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3041 &bar0->rmac_addr_data0_mem);
3042 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3043 &bar0->rmac_addr_data1_mem);
3045 val64 = RMAC_ADDR_CMD_MEM_WE |
3046 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3047 RMAC_ADDR_CMD_MEM_OFFSET
3048 (i + MAC_MC_ADDR_START_OFFSET);
3049 writeq(val64, &bar0->rmac_addr_cmd_mem);
3051 /* Wait for command completes */
3052 if (wait_for_cmd_complete(sp)) {
3053 DBG_PRINT(ERR_DBG, "%s: Adding ",
3055 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3063 * s2io_set_mac_addr - Programs the Xframe mac address
3064 * @dev : pointer to the device structure.
3065 * @addr: a uchar pointer to the new mac address which is to be set.
3066 * Description : This procedure will program the Xframe to receive
3067 * frames with new Mac Address
3068 * Return value: SUCCESS on success and an appropriate (-)ve integer
3069 * as defined in errno.h file on failure.
3072 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3074 nic_t *sp = dev->priv;
3075 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3076 register u64 val64, mac_addr = 0;
3080 * Set the new MAC address as the new unicast filter and reflect this
3081 * change on the device address registered with the OS. It will be
/* Pack the 6 address bytes into mac_addr (shift line elided in listing). */
3084 for (i = 0; i < ETH_ALEN; i++) {
3086 mac_addr |= addr[i];
3089 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3090 &bar0->rmac_addr_data0_mem);
/* Unicast filter lives at offset 0 of the RMAC address memory. */
3093 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3094 RMAC_ADDR_CMD_MEM_OFFSET(0);
3095 writeq(val64, &bar0->rmac_addr_cmd_mem);
3096 /* Wait till command completes */
3097 if (wait_for_cmd_complete(sp)) {
3098 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3106 * s2io_ethtool_sset - Sets different link parameters.
3107 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
3108 * @info: pointer to the structure with parameters given by ethtool to set
3111 * The function sets different link parameters provided by the user onto
3117 static int s2io_ethtool_sset(struct net_device *dev,
3118 struct ethtool_cmd *info)
3120 nic_t *sp = dev->priv;
/* Only fixed 10G full-duplex without autoneg is supported; anything
 * else is rejected. */
3121 if ((info->autoneg == AUTONEG_ENABLE) ||
3122 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
/* Otherwise bounce the interface so settings take effect. */
3125 s2io_close(sp->dev);
3133 * s2io_ethtool_gset - Return link specific information.
3134 * @sp : private member of the device structure, pointer to the
3135 * s2io_nic structure.
3136 * @info : pointer to the structure with parameters given by ethtool
3137 * to return link information.
3139 * Returns link specific information like speed, duplex etc.. to ethtool.
3141 * return 0 on success.
3144 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3146 nic_t *sp = dev->priv;
3147 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
/* NOTE(review): advertising should use the ADVERTISED_* constants
 * (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE), not SUPPORTED_*;
 * the values coincide numerically but the flags are distinct APIs. */
3148 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3149 info->port = PORT_FIBRE;
3150 /* info->transceiver?? TODO */
3152 if (netif_carrier_ok(sp->dev)) {
3153 info->speed = 10000;
3154 info->duplex = DUPLEX_FULL;
/* (Link-down branch setting speed/duplex to unknown is elided here.) */
3160 info->autoneg = AUTONEG_DISABLE;
3165 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3166 * @sp : private member of the device structure, which is a pointer to the
3167 * s2io_nic structure.
3168 * @info : pointer to the structure with parameters given by ethtool to
3169 * return driver information.
3171 * Returns driver specific information like name, version etc.. to ethtool.
3176 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3177 struct ethtool_drvinfo *info)
3179 nic_t *sp = dev->priv;
/* NOTE(review): these strncpy calls bound the copy by the SOURCE size
 * (sizeof(s2io_driver_name)/version) rather than the destination field
 * size, and strncpy does not guarantee NUL termination — should use the
 * destination's size (e.g. sizeof(info->driver)); confirm field widths. */
3181 strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3182 strncpy(info->version, s2io_driver_version,
3183 sizeof(s2io_driver_version));
3184 strncpy(info->fw_version, "", 32);
3185 strncpy(info->bus_info, pci_name(sp->pdev), 32);
3186 info->regdump_len = XENA_REG_SPACE;
3187 info->eedump_len = XENA_EEPROM_SPACE;
3188 info->testinfo_len = S2IO_TEST_LEN;
3189 info->n_stats = S2IO_STAT_LEN;
3193 * s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
3194 * @sp: private member of the device structure, which is a pointer to the
3195 * s2io_nic structure.
3196 * @regs : pointer to the structure with parameters given by ethtool for
3197 * dumping the registers.
3198 * @reg_space: The input argument into which all the registers are dumped.
3200 * Dumps the entire register space of xFrame NIC into the user given
3206 static void s2io_ethtool_gregs(struct net_device *dev,
3207 struct ethtool_regs *regs, void *space)
3211 u8 *reg_space = (u8 *) space;
3212 nic_t *sp = dev->priv;
3214 regs->len = XENA_REG_SPACE;
3215 regs->version = sp->pdev->subsystem_device;
/* Walk BAR0 in 64-bit strides, copying each register value out. */
3217 for (i = 0; i < regs->len; i += 8) {
3218 reg = readq(sp->bar0 + i);
3219 memcpy((reg_space + i), &reg, 8);
3224 * s2io_phy_id - timer function that alternates adapter LED.
3225 * @data : address of the private member of the device structure, which
3226 * is a pointer to the s2io_nic structure, provided as an u32.
3227 * Description: This is actually the timer function that alternates the
3228 * adapter LED bit of the adapter control bit to set/reset every time on
3229 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
3230 * once every second.
3232 static void s2io_phy_id(unsigned long data)
3234 nic_t *sp = (nic_t *) data;
3235 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Subsystem IDs with low byte >= 0x07 drive the LED through gpio_control;
 * older boards toggle the LED bit in adapter_control instead. */
3239 subid = sp->pdev->subsystem_device;
3240 if ((subid & 0xFF) >= 0x07) {
3241 val64 = readq(&bar0->gpio_control);
3242 val64 ^= GPIO_CTRL_GPIO_0;
3243 writeq(val64, &bar0->gpio_control);
3245 val64 = readq(&bar0->adapter_control);
3246 val64 ^= ADAPTER_LED_ON;
3247 writeq(val64, &bar0->adapter_control);
/* Re-arm for the next half-second toggle. */
3250 mod_timer(&sp->id_timer, jiffies + HZ / 2);
3254 * s2io_ethtool_idnic - To physically identify the nic on the system.
3255 * @sp : private member of the device structure, which is a pointer to the
3256 * s2io_nic structure.
3257 * @id : pointer to the structure with identification parameters given by
3259 * Description: Used to physically identify the NIC on the system.
3260 * The Link LED will blink for a time specified by the user for
3262 * NOTE: The Link has to be Up to be able to blink the LED. Hence
3263 * identification is possible only if it's link is up.
3265 * int , returns 0 on success
3268 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3270 u64 val64 = 0, last_gpio_ctrl_val;
3271 nic_t *sp = dev->priv;
3272 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3275 subid = sp->pdev->subsystem_device;
/* Remember current LED state so it can be restored after blinking. */
3276 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3277 if ((subid & 0xFF) < 0x07) {
3278 val64 = readq(&bar0->adapter_control);
3279 if (!(val64 & ADAPTER_CNTL_EN)) {
3281 "Adapter Link down, cannot blink LED\n");
/* Lazily initialize the blink timer on first use. */
3285 if (sp->id_timer.function == NULL) {
3286 init_timer(&sp->id_timer);
3287 sp->id_timer.function = s2io_phy_id;
3288 sp->id_timer.data = (unsigned long) sp;
3290 mod_timer(&sp->id_timer, jiffies);
/* NOTE(review): ethtool convention treats data==0 as "blink until
 * interrupted"; handling for that case is not visible here — confirm. */
3292 msleep(data * 1000);
3295 del_timer_sync(&sp->id_timer);
3297 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
3298 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3299 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3306 * s2io_ethtool_getpause_data - Pause frame generation and reception.
3307 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
3308 * @ep : pointer to the structure with pause parameters given by ethtool.
3310 * Returns the Pause frame generation and reception capability of the NIC.
3314 static void s2io_ethtool_getpause_data(struct net_device *dev,
3315 struct ethtool_pauseparam *ep)
3318 nic_t *sp = dev->priv;
3319 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Report current pause configuration straight from rmac_pause_cfg. */
3321 val64 = readq(&bar0->rmac_pause_cfg);
3322 if (val64 & RMAC_PAUSE_GEN_ENABLE)
3323 ep->tx_pause = TRUE;
3324 if (val64 & RMAC_PAUSE_RX_ENABLE)
3325 ep->rx_pause = TRUE;
3326 ep->autoneg = FALSE;
3330 * s2io_ethtool_setpause_data - set/reset pause frame generation.
3331 * @sp : private member of the device structure, which is a pointer to the
3332 * s2io_nic structure.
3333 * @ep : pointer to the structure with pause parameters given by ethtool.
3335 * It can be used to set or reset Pause frame generation or reception
3336 * support of the NIC.
3338 * int, returns 0 on Success
3341 static int s2io_ethtool_setpause_data(struct net_device *dev,
3342 struct ethtool_pauseparam *ep)
3345 nic_t *sp = dev->priv;
3346 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Read-modify-write rmac_pause_cfg per the requested tx/rx settings
 * (the if-conditions on ep->tx_pause/ep->rx_pause are elided here). */
3348 val64 = readq(&bar0->rmac_pause_cfg);
3350 val64 |= RMAC_PAUSE_GEN_ENABLE;
3352 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3354 val64 |= RMAC_PAUSE_RX_ENABLE;
3356 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3357 writeq(val64, &bar0->rmac_pause_cfg);
3362 * read_eeprom - reads 4 bytes of data from user given offset.
3363 * @sp : private member of the device structure, which is a pointer to the
3364 * s2io_nic structure.
3365 * @off : offset at which the data must be written
3366 * @data : Its an output parameter where the data read at the given
3369 * Will read 4 bytes of data from the user given offset and return the
3371 * NOTE: Will allow to read only part of the EEPROM visible through the
3374 * -1 on failure and 0 on success.
3377 #define S2IO_DEV_ID 5
3378 static int read_eeprom(nic_t * sp, int off, u32 * data)
3383 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Kick off a 4-byte I2C read from the serial EEPROM. */
3385 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3386 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3387 I2C_CONTROL_CNTL_START;
3388 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
/* Poll (bounded to 5 attempts; delay between polls elided in listing)
 * for the controller's END bit, then extract the data. */
3390 while (exit_cnt < 5) {
3391 val64 = readq(&bar0->i2c_control);
3392 if (I2C_CONTROL_CNTL_END(val64)) {
3393 *data = I2C_CONTROL_GET_DATA(val64);
3405 * write_eeprom - actually writes the relevant part of the data value.
3406 * @sp : private member of the device structure, which is a pointer to the
3407 * s2io_nic structure.
3408 * @off : offset at which the data must be written
3409 * @data : The data that is to be written
3410 * @cnt : Number of bytes of the data that are actually to be written into
3411 * the Eeprom. (max of 3)
3413 * Actually writes the relevant part of the data value into the Eeprom
3414 * through the I2C bus.
3416 * 0 on success, -1 on failure.
3419 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3421 int exit_cnt = 0, ret = -1;
3423 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Kick off an I2C write of 'cnt' bytes of 'data' at 'off'. */
3425 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3426 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3427 I2C_CONTROL_CNTL_START;
3428 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
/* Poll for completion; a NACK from the EEPROM means the write failed. */
3430 while (exit_cnt < 5) {
3431 val64 = readq(&bar0->i2c_control);
3432 if (I2C_CONTROL_CNTL_END(val64)) {
3433 if (!(val64 & I2C_CONTROL_NACK))
3445 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
3446 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
3447 * @eeprom : pointer to the user level structure provided by ethtool,
3448 * containing all relevant information.
3449 * @data_buf : user defined value to be written into Eeprom.
3450 * Description: Reads the values stored in the Eeprom at given offset
3451 * for a given length. Stores these values int the input argument data
3452 * buffer 'data_buf' and returns these to the caller (ethtool.)
3457 static int s2io_ethtool_geeprom(struct net_device *dev,
3458 struct ethtool_eeprom *eeprom, u8 * data_buf)
3461 nic_t *sp = dev->priv;
3463 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
/* Clamp the request so offset+len stays within the EEPROM window. */
3465 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3466 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
3468 for (i = 0; i < eeprom->len; i += 4) {
3469 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
3470 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
/* NOTE(review): read fills 'data' but 'valid' is copied out — presumably
 * an elided line converts data into valid (byte-order fixup); confirm. */
3474 memcpy((data_buf + i), &valid, 4);
3480 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3481 * @sp : private member of the device structure, which is a pointer to the
3482 * s2io_nic structure.
3483 * @eeprom : pointer to the user level structure provided by ethtool,
3484 * containing all relevant information.
3485 * @data_buf : user defined value to be written into Eeprom.
3487 * Tries to write the user provided value in the Eeprom, at the offset
3488 * given by the user.
3490 * 0 on success, -EFAULT on failure.
3493 static int s2io_ethtool_seeprom(struct net_device *dev,
3494 struct ethtool_eeprom *eeprom,
3497 int len = eeprom->len, cnt = 0;
3498 u32 valid = 0, data;
3499 nic_t *sp = dev->priv;
/* Refuse to write unless the caller echoes back the device magic. */
3501 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
3503 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
3504 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
/* Write one byte per iteration, positioned in the high byte of the
 * 32-bit word handed to write_eeprom (loop header elided in listing). */
3510 data = (u32) data_buf[cnt] & 0x000000FF;
3512 valid = (u32) (data << 24);
3516 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
3518 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
3520 "write into the specified offset\n");
3531 * s2io_register_test - reads and writes into all clock domains.
3532 * @sp : private member of the device structure, which is a pointer to the
3533 * s2io_nic structure.
3534 * @data : variable that returns the result of each of the test conducted b
3537 * Read and write into all clock domains. The NIC has 3 clock domains,
3538 * see that registers in all the three regions are accessible.
3543 static int s2io_register_test(nic_t * sp, uint64_t * data)
3545 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Read tests: each register below has a known power-on/reset value. */
3549 val64 = readq(&bar0->pcc_enable);
3550 if (val64 != 0xff00000000000000ULL) {
3552 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
3555 val64 = readq(&bar0->rmac_pause_cfg);
3556 if (val64 != 0xc000ffff00000000ULL) {
3558 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
3561 val64 = readq(&bar0->rx_queue_cfg);
3562 if (val64 != 0x0808080808080808ULL) {
3564 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
3567 val64 = readq(&bar0->xgxs_efifo_cfg);
3568 if (val64 != 0x000000001923141EULL) {
3570 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
/* Write tests: round-trip complementary bit patterns through xmsi_data. */
3573 val64 = 0x5A5A5A5A5A5A5A5AULL;
3574 writeq(val64, &bar0->xmsi_data);
3575 val64 = readq(&bar0->xmsi_data);
3576 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
3578 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
3581 val64 = 0xA5A5A5A5A5A5A5A5ULL;
3582 writeq(val64, &bar0->xmsi_data);
3583 val64 = readq(&bar0->xmsi_data);
3584 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
3586 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
3594 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
3595 * @sp : private member of the device structure, which is a pointer to the
3596 * s2io_nic structure.
3597 * @data:variable that returns the result of each of the test conducted by
3600 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
3606 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
/* Negative tests below expect write_eeprom to FAIL on protected
 * offsets (hence the inverted '!' conditions). */
3611 /* Test Write Error at offset 0 */
3612 if (!write_eeprom(sp, 0, 0, 3))
3615 /* Test Write at offset 4f0 */
3616 if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
3618 if (read_eeprom(sp, 0x4F0, &ret_data))
3621 if (ret_data != 0x01234567)
3624 /* Reset the EEPROM data go FFFF */
3625 write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
3627 /* Test Write Request Error at offset 0x7c */
3628 if (!write_eeprom(sp, 0x07C, 0, 3))
3631 /* Test Write Request at offset 0x7fc */
3632 if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
3634 if (read_eeprom(sp, 0x7FC, &ret_data))
3637 if (ret_data != 0x01234567)
3640 /* Reset the EEPROM data go FFFF */
3641 write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
3643 /* Test Write Error at offset 0x80 */
3644 if (!write_eeprom(sp, 0x080, 0, 3))
3647 /* Test Write Error at offset 0xfc */
3648 if (!write_eeprom(sp, 0x0FC, 0, 3))
3651 /* Test Write Error at offset 0x100 */
3652 if (!write_eeprom(sp, 0x100, 0, 3))
3655 /* Test Write Error at offset 4ec */
3656 if (!write_eeprom(sp, 0x4EC, 0, 3))
3664 * s2io_bist_test - invokes the MemBist test of the card .
3665 * @sp : private member of the device structure, which is a pointer to the
3666 * s2io_nic structure.
3667 * @data:variable that returns the result of each of the test conducted by
3670 * This invokes the MemBist test of the card. We give around
3671 * 2 secs time for the Test to complete. If it's still not complete
3672 * within this period, we consider that the test failed.
3674 * 0 on success and -1 on failure.
3677 static int s2io_bist_test(nic_t * sp, uint64_t * data)
3680 int cnt = 0, ret = -1;
3682 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3683 bist |= PCI_BIST_START;
/* NOTE(review): PCI_BIST is a one-byte config register (read above with
 * pci_read_config_byte) but is written here with the word-sized
 * accessor from a u8 — likely should be pci_write_config_byte; confirm. */
3684 pci_write_config_word(sp->pdev, PCI_BIST, bist);
/* Poll until the device clears the BIST start bit, then report the
 * completion code (loop/delay lines elided in this listing). */
3687 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3688 if (!(bist & PCI_BIST_START)) {
3689 *data = (bist & PCI_BIST_CODE_MASK);
3701 * s2io_link_test - verifies the link state of the nic
3702 * @sp : private member of the device structure, which is a pointer to the
3703 * s2io_nic structure.
3704 * @data: variable that returns the result of each of the test conducted by
3707 * The function verifies the link state of the NIC and updates the input
3708 * argument 'data' appropriately.
/*
 * Reads the adapter status register and reports a local RMAC fault
 * (link down) through *data.
 * NOTE(review): the assignment to *data and the return are elided in
 * this view of the file.
 */
static int s2io_link_test(nic_t * sp, uint64_t * data)
XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* RMAC_LOCAL_FAULT set in adapter_status means the link is down. */
val64 = readq(&bar0->adapter_status);
if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
3726 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
3727 * @sp - private member of the device structure, which is a pointer to the
3728 * s2io_nic structure.
3729 * @data - variable that returns the result of each of the test
3730 * conducted by the driver.
3732 * This is one of the offline test that tests the read and write
3733 * access to the RldRam chip on the NIC.
static int s2io_rldram_test(nic_t * sp, uint64_t * data)
XENA_dev_config_t __iomem *bar0 = sp->bar0;
int cnt, iteration = 0, test_pass = 0;
/* Disable ECC while exercising the RLDRAM so injected patterns
 * are not "corrected" behind our back. */
val64 = readq(&bar0->adapter_control);
val64 &= ~ADAPTER_ECC_EN;
writeq(val64, &bar0->adapter_control);
/* Put the memory controller into RLDRAM test mode. */
val64 = readq(&bar0->mc_rldram_test_ctrl);
val64 |= MC_RLDRAM_TEST_MODE;
writeq(val64, &bar0->mc_rldram_test_ctrl);
/* Program the RLDRAM mode register set (queue size, then enable).
 * SPECIAL_REG_WRITE with UF presumably performs the split
 * upper/lower-word write this register requires — TODO confirm. */
val64 = readq(&bar0->mc_rldram_mrs);
val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
val64 |= MC_RLDRAM_MRS_ENABLE;
SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
/* Two iterations: the second writes the bit-inverted patterns
 * (upper 48 bits XORed) to catch stuck-at faults both ways. */
while (iteration < 2) {
val64 = 0x55555555aaaa0000ULL;
if (iteration == 1) {
val64 ^= 0xFFFFFFFFFFFF0000ULL;
writeq(val64, &bar0->mc_rldram_test_d0);
val64 = 0xaaaa5a5555550000ULL;
if (iteration == 1) {
val64 ^= 0xFFFFFFFFFFFF0000ULL;
writeq(val64, &bar0->mc_rldram_test_d1);
val64 = 0x55aaaaaaaa5a0000ULL;
if (iteration == 1) {
val64 ^= 0xFFFFFFFFFFFF0000ULL;
writeq(val64, &bar0->mc_rldram_test_d2);
/* Test address (word count / start address for the sweep). */
val64 = (u64) (0x0000003fffff0000ULL);
writeq(val64, &bar0->mc_rldram_test_add);
/* Kick off the write pass and poll (bounded) for completion. */
val64 = MC_RLDRAM_TEST_MODE;
writeq(val64, &bar0->mc_rldram_test_ctrl);
MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
writeq(val64, &bar0->mc_rldram_test_ctrl);
for (cnt = 0; cnt < 5; cnt++) {
val64 = readq(&bar0->mc_rldram_test_ctrl);
if (val64 & MC_RLDRAM_TEST_DONE)
/* Kick off the read-back/compare pass and poll again. */
val64 = MC_RLDRAM_TEST_MODE;
writeq(val64, &bar0->mc_rldram_test_ctrl);
val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
writeq(val64, &bar0->mc_rldram_test_ctrl);
for (cnt = 0; cnt < 5; cnt++) {
val64 = readq(&bar0->mc_rldram_test_ctrl);
if (val64 & MC_RLDRAM_TEST_DONE)
/* PASS bit tells whether the read-back matched the patterns. */
val64 = readq(&bar0->mc_rldram_test_ctrl);
if (val64 & MC_RLDRAM_TEST_PASS)
3832 * s2io_ethtool_test - conducts 6 tests to determine the health of card.
3833 * @sp : private member of the device structure, which is a pointer to the
3834 * s2io_nic structure.
3835 * @ethtest : pointer to a ethtool command specific structure that will be
3836 * returned to the user.
3837 * @data : variable that returns the result of each of the test
3838 * conducted by the driver.
3840 * This function conducts 6 tests ( 4 offline and 2 online) to determine
3841 * the health of the card.
static void s2io_ethtool_test(struct net_device *dev,
struct ethtool_test *ethtest,
nic_t *sp = dev->priv;
/* Remember whether the interface was up so it can be restored after
 * the (disruptive) offline tests. */
int orig_state = netif_running(sp->dev);
if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
/* Offline Tests: require the device closed; swapper settings are
 * re-applied before each test because the tests reset the card. */
s2io_close(sp->dev);
s2io_set_swapper(sp);
s2io_set_swapper(sp);
/* data[0]: register read/write test */
if (s2io_register_test(sp, &data[0]))
ethtest->flags |= ETH_TEST_FL_FAILED;
s2io_set_swapper(sp);
/* data[3]: RLDRAM access test */
if (s2io_rldram_test(sp, &data[3]))
ethtest->flags |= ETH_TEST_FL_FAILED;
s2io_set_swapper(sp);
/* data[1]: EEPROM program/read-back test */
if (s2io_eeprom_test(sp, &data[1]))
ethtest->flags |= ETH_TEST_FL_FAILED;
/* data[4]: PCI BIST test */
if (s2io_bist_test(sp, &data[4]))
ethtest->flags |= ETH_TEST_FL_FAILED;
"%s: is not up, cannot run test\n",
/* Online test — data[2]: link state check. */
if (s2io_link_test(sp, &data[2]))
ethtest->flags |= ETH_TEST_FL_FAILED;
/*
 * ethtool get_ethtool_stats hook: copies the hardware MAC statistics
 * block into the u64 array handed over by ethtool.
 * The order of the tmp_stats[i++] lines below is the wire format seen
 * by userspace and must match the ethtool_stats_keys string table —
 * do not reorder.  Mixed le32/le64 conversions follow the width of
 * each counter in the adapter's statistics block.
 */
static void s2io_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *estats,
nic_t *sp = dev->priv;
StatInfo_t *stat_info = sp->mac_control.stats_info;
/* Tx MAC counters */
tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms);
tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets);
tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms);
tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms);
tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms);
tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip);
tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip);
tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp);
tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp);
tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp);
/* Rx MAC counters */
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets);
tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip);
tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp);
tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip);
tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
3955 static int s2io_ethtool_get_regs_len(struct net_device *dev)
3957 return (XENA_REG_SPACE);
3961 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
3963 nic_t *sp = dev->priv;
3965 return (sp->rx_csum);
/* ethtool set_rx_csum hook.
 * NOTE(review): the body that stores `data` into sp->rx_csum and the
 * return statement are elided in this view. */
static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
nic_t *sp = dev->priv;
3980 static int s2io_get_eeprom_len(struct net_device *dev)
3982 return (XENA_EEPROM_SPACE);
3985 static int s2io_ethtool_self_test_count(struct net_device *dev)
3987 return (S2IO_TEST_LEN);
3990 static void s2io_ethtool_get_strings(struct net_device *dev,
3991 u32 stringset, u8 * data)
3993 switch (stringset) {
3995 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
3998 memcpy(data, ðtool_stats_keys,
3999 sizeof(ethtool_stats_keys));
4003 static int s2io_ethtool_get_stats_count(struct net_device *dev)
4005 return (S2IO_STAT_LEN);
4008 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4011 dev->features |= NETIF_F_IP_CSUM;
4013 dev->features &= ~NETIF_F_IP_CSUM;
/* ethtool operations table registered via SET_ETHTOOL_OPS() in
 * s2io_init_nic().  Generic ethtool_op_* helpers are used where the
 * stock behaviour suffices; everything else maps to the s2io_*
 * implementations above. */
static struct ethtool_ops netdev_ethtool_ops = {
.get_settings = s2io_ethtool_gset,
.set_settings = s2io_ethtool_sset,
.get_drvinfo = s2io_ethtool_gdrvinfo,
.get_regs_len = s2io_ethtool_get_regs_len,
.get_regs = s2io_ethtool_gregs,
.get_link = ethtool_op_get_link,
.get_eeprom_len = s2io_get_eeprom_len,
.get_eeprom = s2io_ethtool_geeprom,
.set_eeprom = s2io_ethtool_seeprom,
.get_pauseparam = s2io_ethtool_getpause_data,
.set_pauseparam = s2io_ethtool_setpause_data,
.get_rx_csum = s2io_ethtool_get_rx_csum,
.set_rx_csum = s2io_ethtool_set_rx_csum,
.get_tx_csum = ethtool_op_get_tx_csum,
.set_tx_csum = s2io_ethtool_op_set_tx_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
.get_tso = ethtool_op_get_tso,
.set_tso = ethtool_op_set_tso,
.self_test_count = s2io_ethtool_self_test_count,
.self_test = s2io_ethtool_test,
.get_strings = s2io_ethtool_get_strings,
.phys_id = s2io_ethtool_idnic,
.get_stats_count = s2io_ethtool_get_stats_count,
.get_ethtool_stats = s2io_get_ethtool_stats
4050 * s2io_ioctl - Entry point for the Ioctl
4051 * @dev : Device pointer.
4052 * @ifr : An IOCTL specific structure, that can contain a pointer to
4053 * a proprietary structure used to pass information to the driver.
4054 * @cmd : This is used to distinguish between the different commands that
4055 * can be passed to the IOCTL functions.
4057 * This function has support for ethtool, adding multiple MAC addresses on
4058 * the NIC and some DBG commands for the util tool.
4060 * Currently the IOCTL supports no operations, hence by default this
4061 * function returns OP NOT SUPPORTED value.
/* ioctl entry point.  NOTE(review): the body is elided in this view;
 * per the kernel-doc above, no operations are currently supported. */
static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4070 * s2io_change_mtu - entry point to change MTU size for the device.
4071 * @dev : device pointer.
4072 * @new_mtu : the new MTU size for the device.
4073 * Description: A driver entry point to change MTU size for the device.
4074 * Before changing the MTU the device must be stopped.
4076 * 0 on success and an appropriate (-)ve integer as defined in errno.h
static int s2io_change_mtu(struct net_device *dev, int new_mtu)
nic_t *sp = dev->priv;
XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* The device must be down before its MTU can change. */
if (netif_running(dev)) {
DBG_PRINT(ERR_DBG, "%s: Must be stopped to ", dev->name);
DBG_PRINT(ERR_DBG, "change its MTU \n");
/* Reject MTUs outside the [MIN_MTU, S2IO_JUMBO_SIZE] range. */
if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
/* Set the new MTU into the PYLD register of the NIC */
writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4108 * s2io_tasklet - Bottom half of the ISR.
4109 * @dev_adr : address of the device structure in dma_addr_t format.
4111 * This is the tasklet or the bottom half of the ISR. This is
4112 * an extension of the ISR which is scheduled by the scheduler to be run
4113 * when the load on the CPU is low. All low priority tasks of the ISR can
4114 * be pushed into the tasklet. For now the tasklet is used only to
4115 * replenish the Rx buffers in the Rx buffer descriptors.
static void s2io_tasklet(unsigned long dev_addr)
struct net_device *dev = (struct net_device *) dev_addr;
nic_t *sp = dev->priv;
mac_info_t *mac_control;
struct config_param *config;
mac_control = &sp->mac_control;
config = &sp->config;
/* TASKLET_IN_USE guards against re-entering the replenish loop;
 * the matching bit is cleared below when we are done. */
if (!TASKLET_IN_USE) {
/* Replenish the Rx descriptors of every configured ring. */
for (i = 0; i < config->rx_ring_num; i++) {
ret = fill_rx_buffers(sp, i);
if (ret == -ENOMEM) {
DBG_PRINT(ERR_DBG, "%s: Out of ",
DBG_PRINT(ERR_DBG, "memory in tasklet\n");
} else if (ret == -EFILL) {
"%s: Rx Ring %d is full\n",
clear_bit(0, (&sp->tasklet_status));
4151 * s2io_set_link - Set the LInk status
4152 * @data: long pointer to device private structue
4153 * Description: Sets the link status for the adapter
static void s2io_set_link(unsigned long data)
nic_t *nic = (nic_t *) data;
struct net_device *dev = nic->dev;
XENA_dev_config_t __iomem *bar0 = nic->bar0;
/* link_state bit 0 doubles as a mutex against s2io_card_down();
 * if it is already set the card is being reset — bail out. */
if (test_and_set_bit(0, &(nic->link_state))) {
/* The card is being reset, no point doing anything */
subid = nic->pdev->subsystem_device;
/*
 * Allow a small delay for the NICs self initiated
 * cleanup to complete.
 */
val64 = readq(&bar0->adapter_status);
/* Only act when the adapter reports itself quiescent. */
if (verify_xena_quiescence(val64, nic->device_enabled_once)) {
if (LINK_IS_UP(val64)) {
/* Enable the adapter and, on cards with faulty link
 * indicators, drive the link LED via GPIO 0. */
val64 = readq(&bar0->adapter_control);
val64 |= ADAPTER_CNTL_EN;
writeq(val64, &bar0->adapter_control);
if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
val64 = readq(&bar0->gpio_control);
val64 |= GPIO_CTRL_GPIO_0;
writeq(val64, &bar0->gpio_control);
val64 = readq(&bar0->gpio_control);
val64 |= ADAPTER_LED_ON;
writeq(val64, &bar0->adapter_control);
/* Re-check: the link may have dropped while enabling. */
val64 = readq(&bar0->adapter_status);
if (!LINK_IS_UP(val64)) {
DBG_PRINT(ERR_DBG, "%s:", dev->name);
DBG_PRINT(ERR_DBG, " Link down");
DBG_PRINT(ERR_DBG, "after ");
DBG_PRINT(ERR_DBG, "enabling ");
DBG_PRINT(ERR_DBG, "device \n");
if (nic->device_enabled_once == FALSE) {
nic->device_enabled_once = TRUE;
/* Propagate the new state to the net core. */
s2io_link(nic, LINK_UP);
if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
/* Turn the GPIO-driven link LED off again. */
val64 = readq(&bar0->gpio_control);
val64 &= ~GPIO_CTRL_GPIO_0;
writeq(val64, &bar0->gpio_control);
val64 = readq(&bar0->gpio_control);
s2io_link(nic, LINK_DOWN);
} else {		/* NIC is not Quiescent. */
DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
netif_stop_queue(dev);
/* Release the link_state "mutex" taken above. */
clear_bit(0, &(nic->link_state));
/* Brings the adapter down: stops traffic, kills the tasklet, waits
 * for quiescence and frees all outstanding Tx/Rx buffers. */
static void s2io_card_down(nic_t * sp)
XENA_dev_config_t __iomem *bar0 = sp->bar0;
unsigned long flags;
register u64 val64 = 0;
/* If s2io_set_link task is executing, wait till it completes. */
while (test_and_set_bit(0, &(sp->link_state)))
atomic_set(&sp->card_state, CARD_DOWN);
/* disable Tx and Rx traffic on the NIC */
tasklet_kill(&sp->task);
/* Check if the device is Quiescent and then Reset the NIC */
val64 = readq(&bar0->adapter_status);
if (verify_xena_quiescence(val64, sp->device_enabled_once)) {
"s2io_close:Device not Quiescent ");
DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
(unsigned long long) val64);
/* Hold tx_lock while tearing down the buffer rings so the Tx
 * path cannot race with the teardown. */
spin_lock_irqsave(&sp->tx_lock, flags);
/* Free all unused Tx and Rx buffers */
free_tx_buffers(sp);
free_rx_buffers(sp);
spin_unlock_irqrestore(&sp->tx_lock, flags);
/* Release the link_state "mutex" taken at the top. */
clear_bit(0, &(sp->link_state));
/* Brings the adapter up: programs the H/W registers, fills the Rx
 * rings, sets the receive mode, arms the tasklet and starts the NIC. */
static int s2io_card_up(nic_t * sp)
mac_info_t *mac_control;
struct config_param *config;
struct net_device *dev = (struct net_device *) sp->dev;
/* Initialize the H/W I/O registers */
if (init_nic(sp) != 0) {
DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
/*
 * Initializing the Rx buffers. For now we are considering only 1
 * Rx ring and initializing buffers into 30 Rx blocks
 */
mac_control = &sp->mac_control;
config = &sp->config;
for (i = 0; i < config->rx_ring_num; i++) {
if ((ret = fill_rx_buffers(sp, i))) {
DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
/* Undo any partially-filled rings before failing. */
free_rx_buffers(sp);
DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
atomic_read(&sp->rx_bufs_left[i]));
/* Setting its receive mode */
s2io_set_multicast(dev);
/* Enable tasklet for the device */
tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
/* Enable Rx Traffic and interrupts on the NIC */
if (start_nic(sp)) {
DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
/* Roll back everything set up above on failure. */
tasklet_kill(&sp->task);
free_irq(dev->irq, dev);
free_rx_buffers(sp);
atomic_set(&sp->card_state, CARD_UP);
4320 * s2io_restart_nic - Resets the NIC.
4321 * @data : long pointer to the device private structure
4323 * This function is scheduled to be run by the s2io_tx_watchdog
4324 * function after 0.5 secs to reset the NIC. The idea is to reduce
4325 * the run time of the watch dog routine which is run holding a
/* Work-queue handler scheduled by s2io_tx_watchdog(); brings the card
 * back up after a Tx timeout and re-enables the Tx queue. */
static void s2io_restart_nic(unsigned long data)
struct net_device *dev = (struct net_device *) data;
nic_t *sp = dev->priv;
if (s2io_card_up(sp)) {
DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
netif_wake_queue(dev);
DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4345 * s2io_tx_watchdog - Watchdog for transmit side.
4346 * @dev : Pointer to net device structure
4348 * This function is triggered if the Tx Queue is stopped
4349 * for a pre-defined amount of time when the Interface is still up.
4350 * If the Interface is jammed in such a situation, the hardware is
4351 * reset (by s2io_close) and restarted again (by s2io_open) to
4352 * overcome any problem that might have been caused in the hardware.
4357 static void s2io_tx_watchdog(struct net_device *dev)
4359 nic_t *sp = dev->priv;
4361 if (netif_carrier_ok(dev)) {
4362 schedule_work(&sp->rst_timer_task);
4367 * rx_osm_handler - To perform some OS related operations on SKB.
4368 * @sp: private member of the device structure,pointer to s2io_nic structure.
4369 * @skb : the socket buffer pointer.
4370 * @len : length of the packet
4371 * @cksum : FCS checksum of the frame.
4372 * @ring_no : the ring from which this RxD was extracted.
4374 * This function is called by the Rx interrupt service routine to perform
4375 * some OS related operations on the SKB before passing it to the upper
4376 * layers. It mainly checks if the checksum is OK, if so adds it to the
4377 * SKBs cksum variable, increments the Rx packet count and passes the SKB
4378 * to the upper layer. If the checksum is wrong, it increments the Rx
4379 * packet error count, frees the SKB and returns error.
4381 * SUCCESS on success and -1 on failure.
#ifndef CONFIG_2BUFF_MODE
static int rx_osm_handler(nic_t * sp, u16 len, RxD_t * rxdp, int ring_no)
static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no,
struct net_device *dev = (struct net_device *) sp->dev;
/* The skb pointer was stashed in the RxD's Host_Control field when
 * the buffer was posted; recover it here. */
struct sk_buff *skb =
(struct sk_buff *) ((unsigned long) rxdp->Host_Control);
u16 l3_csum, l4_csum;
#ifdef CONFIG_2BUFF_MODE
int buf0_len, buf2_len;
unsigned char *buff;
l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
/* Honour the hardware checksum only for TCP/UDP frames and only
 * when Rx checksum offload is enabled. */
if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && (sp->rx_csum)) {
l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
/*
 * NIC verifies if the Checksum of the received
 * frame is Ok or not and accordingly returns
 * a flag in the RxD.
 */
skb->ip_summed = CHECKSUM_UNNECESSARY;
/*
 * Packet with erroneous checksum, let the
 * upper layers deal with it.
 */
skb->ip_summed = CHECKSUM_NONE;
skb->ip_summed = CHECKSUM_NONE;
/* A non-zero T_CODE marks a receive error reported by the NIC. */
if (rxdp->Control_1 & RXD_T_CODE) {
unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
#ifdef CONFIG_2BUFF_MODE
buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
#ifndef CONFIG_2BUFF_MODE
skb->protocol = eth_type_trans(skb, dev);
/* 2-buffer mode: buffer 0 (header) is copied in front of
 * buffer 2 (payload) to form one linear skb. */
buff = skb_push(skb, buf0_len);
memcpy(buff, ba->ba_0, buf0_len);
skb_put(skb, buf2_len);
skb->protocol = eth_type_trans(skb, dev);
#ifdef CONFIG_S2IO_NAPI
netif_receive_skb(skb);
dev->last_rx = jiffies;
sp->stats.rx_packets++;
#ifndef CONFIG_2BUFF_MODE
sp->stats.rx_bytes += len;
sp->stats.rx_bytes += buf0_len + buf2_len;
/* The buffer is handed to the stack; the descriptor slot is free. */
atomic_dec(&sp->rx_bufs_left[ring_no]);
rxdp->Host_Control = 0;
4462 * s2io_link - stops/starts the Tx queue.
4463 * @sp : private member of the device structure, which is a pointer to the
4464 * s2io_nic structure.
4465 * @link : indicates whether link is UP/DOWN.
4467 * This function stops/starts the Tx queue depending on whether the link
4468 * status of the NIC is down or up. This is called by the Alarm
4469 * interrupt handler whenever a link change interrupt comes up.
4474 static void s2io_link(nic_t * sp, int link)
4476 struct net_device *dev = (struct net_device *) sp->dev;
4478 if (link != sp->last_link_state) {
4479 if (link == LINK_DOWN) {
4480 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
4481 netif_carrier_off(dev);
4483 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
4484 netif_carrier_on(dev);
4487 sp->last_link_state = link;
4491 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
4492 * @sp : private member of the device structure, which is a pointer to the
4493 * s2io_nic structure.
4495 * This function initializes a few of the PCI and PCI-X configuration registers
4496 * with recommended values.
/* Programs recommended values into the PCI and PCI-X command
 * registers.  Each write is followed by a read-back so sp->pcix_cmd /
 * pci_cmd track what the device actually accepted. */
static void s2io_init_pci(nic_t * sp)
/* Enable Data Parity Error Recovery in PCI-X command register. */
pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
(sp->pcix_cmd | 1));
pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
/* Set the PErr Response bit in PCI command register. */
pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
pci_write_config_word(sp->pdev, PCI_COMMAND,
(pci_cmd | PCI_COMMAND_PARITY));
pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
/* Set MMRB count to 1024 in PCI-X Command register. */
sp->pcix_cmd &= 0xFFF3;
pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, (sp->pcix_cmd | (0x1 << 2)));	/* MMRBC 1K */
pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
/* Setting Maximum outstanding splits based on system type. */
sp->pcix_cmd &= 0xFF8F;
sp->pcix_cmd |= XENA_MAX_OUTSTANDING_SPLITS(0x1);	/* 2 splits. */
pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
/* Forcibly disabling relaxed ordering capability of the card. */
sp->pcix_cmd &= 0xfffd;
pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
MODULE_LICENSE("GPL");
/* Module loadable parameters — see the header comment of this file
 * for a description of each knob. */
module_param(tx_fifo_num, int, 0);
module_param_array(tx_fifo_len, int, NULL, 0);
module_param(rx_ring_num, int, 0);
module_param_array(rx_ring_sz, int, NULL, 0);
module_param(Stats_refresh_time, int, 0);
module_param(rmac_pause_time, int, 0);
module_param(mc_pause_threshold_q0q3, int, 0);
module_param(mc_pause_threshold_q4q7, int, 0);
module_param(shared_splits, int, 0);
module_param(tmac_util_period, int, 0);
module_param(rmac_util_period, int, 0);
/* indicate_max_pkts only exists in the non-NAPI Rx path. */
#ifndef CONFIG_S2IO_NAPI
module_param(indicate_max_pkts, int, 0);
4558 * s2io_init_nic - Initialization of the adapter .
4559 * @pdev : structure containing the PCI related information of the device.
4560 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
4562 * The function initializes an adapter identified by the pci_dev structure.
4563 * All OS related initialization including memory and device structure and
4564 * initialization of the device private variable is done. Also the swapper
4565 * control register is initialized to enable read and write into the I/O
4566 * registers of the device.
4568 * returns 0 on success and negative on failure.
4571 static int __devinit
4572 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4575 struct net_device *dev;
4576 char *dev_name = "S2IO 10GE NIC";
4578 int dma_flag = FALSE;
4579 u32 mac_up, mac_down;
4580 u64 val64 = 0, tmp64 = 0;
4581 XENA_dev_config_t __iomem *bar0 = NULL;
4583 mac_info_t *mac_control;
4584 struct config_param *config;
4587 DBG_PRINT(ERR_DBG, "Loading S2IO driver with %s\n",
4588 s2io_driver_version);
4590 if ((ret = pci_enable_device(pdev))) {
4592 "s2io_init_nic: pci_enable_device failed\n");
4596 if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
4597 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
4600 if (pci_set_consistent_dma_mask
4601 (pdev, 0xffffffffffffffffULL)) {
4603 "Unable to obtain 64bit DMA for \
4604 consistent allocations\n");
4605 pci_disable_device(pdev);
4608 } else if (!pci_set_dma_mask(pdev, 0xffffffffUL)) {
4609 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
4611 pci_disable_device(pdev);
4615 if (pci_request_regions(pdev, s2io_driver_name)) {
4616 DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
4617 pci_disable_device(pdev);
4621 dev = alloc_etherdev(sizeof(nic_t));
4623 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
4624 pci_disable_device(pdev);
4625 pci_release_regions(pdev);
4629 pci_set_master(pdev);
4630 pci_set_drvdata(pdev, dev);
4631 SET_MODULE_OWNER(dev);
4632 SET_NETDEV_DEV(dev, &pdev->dev);
4634 /* Private member variable initialized to s2io NIC structure */
4636 memset(sp, 0, sizeof(nic_t));
4639 sp->vendor_id = pdev->vendor;
4640 sp->device_id = pdev->device;
4641 sp->high_dma_flag = dma_flag;
4642 sp->irq = pdev->irq;
4643 sp->device_enabled_once = FALSE;
4644 strcpy(sp->name, dev_name);
4646 /* Initialize some PCI/PCI-X fields of the NIC. */
4650 * Setting the device configuration parameters.
4651 * Most of these parameters can be specified by the user during
4652 * module insertion as they are module loadable parameters. If
4653 * these parameters are not not specified during load time, they
4654 * are initialized with default values.
4656 mac_control = &sp->mac_control;
4657 config = &sp->config;
4659 /* Tx side parameters. */
4660 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
4661 config->tx_fifo_num = tx_fifo_num;
4662 for (i = 0; i < MAX_TX_FIFOS; i++) {
4663 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
4664 config->tx_cfg[i].fifo_priority = i;
4667 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
4668 for (i = 0; i < config->tx_fifo_num; i++) {
4669 config->tx_cfg[i].f_no_snoop =
4670 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
4671 if (config->tx_cfg[i].fifo_len < 65) {
4672 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
4676 config->max_txds = MAX_SKB_FRAGS;
4678 /* Rx side parameters. */
4679 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
4680 config->rx_ring_num = rx_ring_num;
4681 for (i = 0; i < MAX_RX_RINGS; i++) {
4682 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
4683 (MAX_RXDS_PER_BLOCK + 1);
4684 config->rx_cfg[i].ring_priority = i;
4687 for (i = 0; i < rx_ring_num; i++) {
4688 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
4689 config->rx_cfg[i].f_no_snoop =
4690 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
4693 /* Setting Mac Control parameters */
4694 mac_control->rmac_pause_time = rmac_pause_time;
4695 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
4696 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
4699 /* Initialize Ring buffer parameters. */
4700 for (i = 0; i < config->rx_ring_num; i++)
4701 atomic_set(&sp->rx_bufs_left[i], 0);
4703 /* initialize the shared memory used by the NIC and the host */
4704 if (init_shared_mem(sp)) {
4705 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
4708 goto mem_alloc_failed;
4711 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
4712 pci_resource_len(pdev, 0));
4714 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
4717 goto bar0_remap_failed;
4720 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
4721 pci_resource_len(pdev, 2));
4723 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
4726 goto bar1_remap_failed;
4729 dev->irq = pdev->irq;
4730 dev->base_addr = (unsigned long) sp->bar0;
4732 /* Initializing the BAR1 address as the start of the FIFO pointer. */
4733 for (j = 0; j < MAX_TX_FIFOS; j++) {
4734 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
4735 (sp->bar1 + (j * 0x00020000));
4738 /* Driver entry points */
4739 dev->open = &s2io_open;
4740 dev->stop = &s2io_close;
4741 dev->hard_start_xmit = &s2io_xmit;
4742 dev->get_stats = &s2io_get_stats;
4743 dev->set_multicast_list = &s2io_set_multicast;
4744 dev->do_ioctl = &s2io_ioctl;
4745 dev->change_mtu = &s2io_change_mtu;
4746 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
4748 * will use eth_mac_addr() for dev->set_mac_address
4749 * mac address will be set every time dev->open() is called
4751 #ifdef CONFIG_S2IO_NAPI
4752 dev->poll = s2io_poll;
4756 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
4757 if (sp->high_dma_flag == TRUE)
4758 dev->features |= NETIF_F_HIGHDMA;
4760 dev->features |= NETIF_F_TSO;
4763 dev->tx_timeout = &s2io_tx_watchdog;
4764 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
4765 INIT_WORK(&sp->rst_timer_task,
4766 (void (*)(void *)) s2io_restart_nic, dev);
4767 INIT_WORK(&sp->set_link_task,
4768 (void (*)(void *)) s2io_set_link, sp);
4770 pci_save_state(sp->pdev);
4772 /* Setting swapper control on the NIC, for proper reset operation */
4773 if (s2io_set_swapper(sp)) {
4774 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
4777 goto set_swap_failed;
4780 /* Fix for all "FFs" MAC address problems observed on Alpha platforms */
4781 fix_mac_address(sp);
4785 * Setting swapper control on the NIC, so the MAC address can be read.
4787 if (s2io_set_swapper(sp)) {
4789 "%s: S2IO: swapper settings are wrong\n",
4792 goto set_swap_failed;
4796 * MAC address initialization.
4797 * For now only one mac address will be read and used.
4800 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4801 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
4802 writeq(val64, &bar0->rmac_addr_cmd_mem);
4803 wait_for_cmd_complete(sp);
4805 tmp64 = readq(&bar0->rmac_addr_data0_mem);
4806 mac_down = (u32) tmp64;
4807 mac_up = (u32) (tmp64 >> 32);
4809 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
4811 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
4812 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
4813 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
4814 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
4815 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
4816 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
4819 "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
4820 sp->def_mac_addr[0].mac_addr[0],
4821 sp->def_mac_addr[0].mac_addr[1],
4822 sp->def_mac_addr[0].mac_addr[2],
4823 sp->def_mac_addr[0].mac_addr[3],
4824 sp->def_mac_addr[0].mac_addr[4],
4825 sp->def_mac_addr[0].mac_addr[5]);
4827 /* Set the factory defined MAC address initially */
4828 dev->addr_len = ETH_ALEN;
4829 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
4832 * Initialize the tasklet status and link state flags
4833 * and the card statte parameter
4835 atomic_set(&(sp->card_state), 0);
4836 sp->tasklet_status = 0;
4840 /* Initialize spinlocks */
4841 spin_lock_init(&sp->tx_lock);
4842 #ifndef CONFIG_S2IO_NAPI
4843 spin_lock_init(&sp->put_lock);
4847 * SXE-002: Configure link and activity LED to init state
4850 subid = sp->pdev->subsystem_device;
4851 if ((subid & 0xFF) >= 0x07) {
4852 val64 = readq(&bar0->gpio_control);
4853 val64 |= 0x0000800000000000ULL;
4854 writeq(val64, &bar0->gpio_control);
4855 val64 = 0x0411040400000000ULL;
4856 writeq(val64, (void __iomem *) bar0 + 0x2700);
4857 val64 = readq(&bar0->gpio_control);
4860 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
4862 if (register_netdev(dev)) {
4863 DBG_PRINT(ERR_DBG, "Device registration failed\n");
4865 goto register_failed;
4869 * Make Link state as off at this point, when the Link change
4870 * interrupt comes the state will be automatically changed to
4873 netif_carrier_off(dev);
4874 sp->last_link_state = LINK_DOWN;
4885 free_shared_mem(sp);
4886 pci_disable_device(pdev);
4887 pci_release_regions(pdev);
4888 pci_set_drvdata(pdev, NULL);
4895 * s2io_rem_nic - Free the PCI device
4896 * @pdev: structure containing the PCI related information of the device.
4897 * Description: This function is called by the Pci subsystem to release a
4898 * PCI device and free up all resource held up by the device. This could
4899 * be in response to a Hot plug event or when the driver is to be removed
/* PCI remove routine: unregisters the netdev and releases every
 * resource acquired in s2io_init_nic(). */
static void __devexit s2io_rem_nic(struct pci_dev *pdev)
struct net_device *dev =
(struct net_device *) pci_get_drvdata(pdev);
/* Nothing to tear down if probe never stored the netdev. */
DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
unregister_netdev(dev);
free_shared_mem(sp);
pci_disable_device(pdev);
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
4928 * s2io_starter - Entry point for the driver
4929 * Description: This function is the entry point for the driver. It verifies
4930 * the module loadable parameters and initializes PCI configuration space.
4933 int __init s2io_starter(void)
4935 return pci_module_init(&s2io_driver);
4939 * s2io_closer - Cleanup routine for the driver
4940 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
4943 static void s2io_closer(void)
4945 pci_unregister_driver(&s2io_driver);
4946 DBG_PRINT(INIT_DBG, "cleanup done\n");
/* Wire the module entry/exit points to the kernel module loader. */
module_init(s2io_starter);
module_exit(s2io_closer);