1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used
31 * rx_ring_len: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO.
36 ************************************************************************/
38 #include <linux/config.h>
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/errno.h>
42 #include <linux/ioport.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
59 #include <asm/system.h>
60 #include <asm/uaccess.h>
65 #include "s2io-regs.h"
/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";		/* name reported by the driver */
static char s2io_driver_version[] = "Version 1.7.7";	/* version string reported by the driver */
/*
 * RXD_IS_UP2DT - true when a Rx descriptor has been handed back to the
 * host: the adapter ownership bit (RXD_OWN_XENA) is clear and the marker
 * field in Control_2 no longer holds the software marker.
 * NOTE(review): fragment — the 'ret' declaration, braces and return are
 * not visible in this extract; verify against the full source.
 */
static inline int RXD_IS_UP2DT(RxD_t *rxdp)
	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 * Arguments and the whole expansion are parenthesized so the macro
 * binds correctly when used inside larger expressions.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
	(((((subid) >= 0x600B) && ((subid) <= 0x600D)) || \
	  (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0)

/* Link is up when neither remote- nor local-fault is set in the adapter status. */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically claim the tasklet; relies on a local 'sp' (nic_t *) in scope. */
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/*
 * rx_buffer_level - classify how full ring 'ring' currently is, given
 * 'rxb_size' (number of RxDs still holding buffers).
 * NOTE(review): fragment — return statements and closing braces are not
 * visible in this extract; the visible tests compare the ring's packet
 * capacity against the current buffer count.
 */
static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
	mac_info_t *mac_control;
	mac_control = &sp->mac_control;
	/* more than 16 descriptors shy of capacity: ring is running low */
	if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
	/* down to at most one block's worth of RxDs — critical level */
	if (rxb_size <= MAX_RXDS_PER_BLOCK) {
/* Ethtool related variables and Macros. */
/* Names of the ethtool self-test modes (offline vs online as labelled). */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
/* Names of the ethtool statistics counters: tmac_* are transmit-MAC
 * counters, rmac_* receive-MAC counters, followed by driver-private
 * counters.  Order presumably mirrors the hardware stats block
 * (StatInfo_t) — verify against the stats read-out code.
 */
static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_data_octets"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_any_err_frms"},
	{"tmac_vld_ip_octets"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_discarded_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_jabber_frms"},
	{"rmac_err_drp_udp"},
	{"rmac_accepted_ip"},
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
/* Sizes of the ethtool string tables above.  Each expansion is fully
 * parenthesized so the macros can be embedded safely in arithmetic
 * expressions (the originals expanded to unparenthesized a/b and a*b).
 */
#define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
 * Constants to be programmed into the Xena's registers, to configure
/* Sentinel in the cfg arrays below: tells the init loop to switch
 * writing to the other register (see XAUI comment in init_nic()).
 */
#define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL

/* MDIO configuration write sequence for the PMA PLL. */
static u64 default_mdio_cfg[] = {
	/* Reset PMA PLL */
	0xC001010000000000ULL, 0xC0010100000000E0ULL,
	0xC0010100008000E4ULL,
	/* Remove Reset from PMA PLL */
	0xC001010000000000ULL, 0xC0010100000000E0ULL,
	0xC0010100000000E4ULL,
/* DTX control write sequence consumed by init_nic(); values are written
 * to bar0->dtx_control until END_SIGN/SWITCH_SIGN is met.
 * NOTE(review): the terminating END_SIGN entry and closing brace are not
 * visible in this extract.
 */
static u64 default_dtx_cfg[] = {
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	0x80000515D93500E4ULL, 0x8001051500000000ULL,
	0x80010515000000E0ULL, 0x80010515001E00E4ULL,
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515F21000E4ULL,
	/* Set PADLOOPBACKN */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515B20000E4ULL, 0x8003051500000000ULL,
	0x80030515000000E0ULL, 0x80030515B20000E4ULL,
	0x8004051500000000ULL, 0x80040515000000E0ULL,
	0x80040515B20000E4ULL, 0x8005051500000000ULL,
	0x80050515000000E0ULL, 0x80050515B20000E4ULL,
	/* Remove PADLOOPBACKN */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515F20000E4ULL, 0x8003051500000000ULL,
	0x80030515000000E0ULL, 0x80030515F20000E4ULL,
	0x8004051500000000ULL, 0x80040515000000E0ULL,
	0x80040515F20000E4ULL, 0x8005051500000000ULL,
	0x80050515000000E0ULL, 0x80050515F20000E4ULL,
 * Constants for Fixing the MacAddress problem seen mostly on
/* Register write sequence used as a workaround for the MAC-address
 * problem mentioned above.  NOTE(review): terminator/closing brace not
 * visible in this extract.
 */
static u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
/* Module Loadable parameters. */
static unsigned int tx_fifo_num = 1;	/* number of Tx FIFOs used */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {[0 ...(MAX_TX_FIFOS - 1)] = 0 };	/* Tx descriptors per FIFO */
static unsigned int rx_ring_num = 1;	/* number of Rx rings used */
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };	/* descriptors per Rx ring */
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };	/* per-ring frame-length steering (0 = unset), see init_nic() */
static unsigned int use_continuous_tx_intrs = 1; /* enable TTI continuous-interrupt mode */
static unsigned int rmac_pause_time = 65535;	/* pause-time inserted in generated pause frames */
static unsigned int mc_pause_threshold_q0q3 = 187; /* pause threshold, Rx queues 0-3 */
static unsigned int mc_pause_threshold_q4q7 = 187; /* pause threshold, Rx queues 4-7 */
static unsigned int shared_splits;	/* TxDMA read-split limit (PIC_CNTL_SHARED_SPLITS) */
static unsigned int tmac_util_period = 5; /* Tx link-utilization sampling period */
static unsigned int rmac_util_period = 5; /* Rx link-utilization sampling period */
#ifndef CONFIG_S2IO_NAPI
static unsigned int indicate_max_pkts;	/* non-NAPI only; usage not visible in this extract */
 * This table lists all the devices that this driver supports.
/* PCI IDs matched by this driver: Xena (S2IO_*) and Hercules (HERC_*),
 * each in Windows/Unix flavours; subsystem IDs are wildcarded.
 * NOTE(review): the terminating {0,} entry is not visible in this extract.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},

MODULE_DEVICE_TABLE(pci, s2io_tbl);
/* PCI driver glue: binds s2io_tbl devices to probe/remove handlers. */
static struct pci_driver s2io_driver = {
	.id_table = s2io_tbl,			/* devices we claim */
	.probe = s2io_init_nic,			/* called on device discovery */
	.remove = __devexit_p(s2io_rem_nic),	/* called on device removal */
/* A simplifier macro used both by init and free shared_mem Fns(). */
/* Number of PAGE_SIZE pages needed for 'len' TxD lists at 'per_each'
 * lists per page, rounded up.  Arguments are fully parenthesized so
 * expressions (e.g. a sum) can be passed safely — the original
 * expansion ((len+per_each - 1) / per_each) misbinds for compound
 * 'per_each' arguments.
 */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
284 * init_shared_mem - Allocation and Initialization of Memory
285 * @nic: Device private variable.
286 * Description: The function allocates all the memory areas shared
287 * between the NIC and the driver. This includes Tx descriptors,
288 * Rx descriptors and the statistics block.
/* NOTE(review): this function is incomplete in the extract — many lines
 * (braces, allocations' argument lists, returns) are missing; comments
 * below describe only what the visible code shows. */
static int init_shared_mem(struct s2io_nic *nic)
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	RxD_block_t *pre_rxd_blk = NULL;
	int i, j, blk_cnt, rx_sz, tx_sz;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
#ifdef CONFIG_2BUFF_MODE
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Allocation and initialization of TXDLs in FIFOs */
	/* Sum the configured FIFO lengths and reject over-large totals. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		size += config->tx_cfg[i].fifo_len;
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG, "%s: Total number of Tx FIFOs ",
		DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
		DBG_PRINT(ERR_DBG, "that can be used\n");

	/* One TxD list = max_txds descriptors; lst_per_page lists fit per page. */
	lst_size = (sizeof(TxD_t) * config->max_txds);
	tx_sz = lst_size * size;
	lst_per_page = PAGE_SIZE / lst_size;

	/* Per-FIFO bookkeeping array (one list_info_hold_t per TxD list). */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int fifo_len = config->tx_cfg[i].fifo_len;
		int list_holder_size = fifo_len * sizeof(list_info_hold_t);
		mac_control->fifos[i].list_info = kmalloc(list_holder_size,
		if (!mac_control->fifos[i].list_info) {
			"Malloc failed for list_info\n");
		memset(mac_control->fifos[i].list_info, 0, list_holder_size);
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
		/* put/get cursors both start at descriptor 0 */
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_get_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].fifo_no = i;
		mac_control->fifos[i].nic = nic;
		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;

		/* DMA-coherent pages holding the TxD lists themselves. */
		for (j = 0; j < page_num; j++) {
			tmp_v = pci_alloc_consistent(nic->pdev,
				"pci_alloc_consistent ");
			DBG_PRINT(ERR_DBG, "failed for TxDL\n");
			/* Record virtual+physical address of each list in the page. */
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == config->tx_cfg[i].fifo_len)
				mac_control->fifos[i].list_info[l].list_virt_addr =
				    tmp_v + (k * lst_size);
				mac_control->fifos[i].list_info[l].list_phy_addr =
				    tmp_p + (k * lst_size);

	/* Allocation and initialization of RXDs in Rings */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Ring size must be a whole number of RxD blocks. */
		if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
			DBG_PRINT(ERR_DBG, "RxDs per Block");
		size += config->rx_cfg[i].num_rxd;
		mac_control->rings[i].block_count =
		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
		/* pkt_cnt excludes the one link-RxD consumed per block. */
		mac_control->rings[i].pkt_cnt =
		    config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
	size = (size * (sizeof(RxD_t)));

	for (i = 0; i < config->rx_ring_num; i++) {
		/* put/get cursors start at block 0, offset 0. */
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_put_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].nic = nic;
		mac_control->rings[i].ring_no = i;

		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
#ifndef CONFIG_2BUFF_MODE
			size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
			size = SIZE_OF_BLOCK;
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
			if (tmp_v_addr == NULL) {
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				mac_control->rings[i].rx_blocks[j].block_virt_addr =
			memset(tmp_v_addr, 0, size);
			mac_control->rings[i].rx_blocks[j].block_virt_addr =
			mac_control->rings[i].rx_blocks[j].block_dma_addr =
		/* Interlinking all Rx Blocks: each block's trailer points to
		 * the next block (wrapping at blk_cnt) by both virtual and
		 * DMA address, forming a circular list. */
		for (j = 0; j < blk_cnt; j++) {
			    mac_control->rings[i].rx_blocks[j].block_virt_addr;
			    mac_control->rings[i].rx_blocks[(j + 1) %
					    blk_cnt].block_virt_addr;
			    mac_control->rings[i].rx_blocks[j].block_dma_addr;
			    mac_control->rings[i].rx_blocks[(j + 1) %
					    blk_cnt].block_dma_addr;

			pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
			pre_rxd_blk->reserved_1 = END_OF_BLOCK;	/* last RxD
#ifndef CONFIG_2BUFF_MODE
			pre_rxd_blk->reserved_2_pNext_RxD_block =
			    (unsigned long) tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
			    (u64) tmp_p_addr_next;

#ifdef CONFIG_2BUFF_MODE
	 * Allocation of Storages for buffer addresses in 2BUFF mode
	 * and the buffers as well.
	for (i = 0; i < config->rx_ring_num; i++) {
		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
		mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
		if (!mac_control->rings[i].ba)
		for (j = 0; j < blk_cnt; j++) {
			mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
						(MAX_RXDS_PER_BLOCK + 1)),
			if (!mac_control->rings[i].ba[j])
			while (k != MAX_RXDS_PER_BLOCK) {
				ba = &mac_control->rings[i].ba[j][k];

				/* Over-allocate by ALIGN_SIZE, then round the
				 * pointer down to an aligned boundary; the
				 * original (_org) pointer is kept for kfree(). */
				ba->ba_0_org = (void *) kmalloc
				    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
				tmp = (u64) ba->ba_0_org;
				tmp &= ~((u64) ALIGN_SIZE);
				ba->ba_0 = (void *) tmp;

				ba->ba_1_org = (void *) kmalloc
				    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
				tmp = (u64) ba->ba_1_org;
				tmp &= ~((u64) ALIGN_SIZE);
				ba->ba_1 = (void *) tmp;

	/* Allocation and initialization of Statistics block */
	size = sizeof(StatInfo_t);
	mac_control->stats_mem = pci_alloc_consistent
	    (nic->pdev, size, &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
		  (unsigned long long) tmp_p_addr);
534 * free_shared_mem - Free the allocated Memory
535 * @nic: Device private variable.
536 * Description: This function is to free all memory locations allocated by
537 * the init_shared_mem() function and return it to the kernel.
/* NOTE(review): incomplete in this extract — several argument lists and
 * braces are missing; comments describe only the visible code. */
static void free_shared_mem(struct s2io_nic *nic)
	int i, j, blk_cnt, size;
	dma_addr_t tmp_p_addr;
	mac_info_t *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Same page geometry as computed in init_shared_mem(). */
	lst_size = (sizeof(TxD_t) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	/* Free the DMA pages backing each FIFO's TxD lists, then the
	 * list_info bookkeeping array itself. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			/* skip pages that were never allocated */
			if (!mac_control->fifos[i].list_info[mem_blks].
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->fifos[i].
					    mac_control->fifos[i].
		kfree(mac_control->fifos[i].list_info);

#ifndef CONFIG_2BUFF_MODE
	size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
	size = SIZE_OF_BLOCK;
	/* Free every allocated Rx block in every ring. */
	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt = mac_control->rings[i].block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
			if (tmp_v_addr == NULL)
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);

#ifdef CONFIG_2BUFF_MODE
	/* Freeing buffer storage addresses in 2BUFF mode. */
	for (i = 0; i < config->rx_ring_num; i++) {
		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
		for (j = 0; j < blk_cnt; j++) {
			if (!mac_control->rings[i].ba[j])
			while (k != MAX_RXDS_PER_BLOCK) {
				buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
			kfree(mac_control->rings[i].ba[j]);
		if (mac_control->rings[i].ba)
			kfree(mac_control->rings[i].ba);

	/* Finally release the DMA-coherent statistics block. */
	if (mac_control->stats_mem) {
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
628 * init_nic - Initialization of hardware
 * @nic: device private variable
630 * Description: The function sequentially configures every block
631 * of the H/W from their reset values.
632 * Return Value: SUCCESS on success and
633 * '-1' on failure (endian settings incorrect).
/* NOTE(review): incomplete in this extract — switch case labels, some
 * assignments and returns are missing; comments describe only what the
 * visible code shows. */
static int init_nic(struct s2io_nic *nic)
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	mac_info_t *mac_control;
	struct config_param *config;
	int mdio_cnt = 0, dtx_cnt = 0;
	unsigned long long mem_share;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* to set the swapper control on the card */
	if(s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");

	/* Remove XGXS from reset state */
	writeq(val64, &bar0->sw_reset);
	val64 = readq(&bar0->sw_reset);

	/* Enable Receiving broadcasts */
	/* mac_cfg is key-protected: the cfg key must be rewritten before
	 * each 32-bit half of the register is written. */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_RMAC_BCAST_ENABLE;
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) val64, add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));

	/* Read registers in all blocks */
	val64 = readq(&bar0->mac_int_mask);
	val64 = readq(&bar0->mc_int_mask);
	val64 = readq(&bar0->xgxs_int_mask);

	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);

	/*
	 * Configuring the XAUI Interface of Xena.
	 * ***************************************
	 * To Configure the Xena's XAUI, one has to write a series
	 * of 64 bit values into two registers in a particular
	 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
	 * which will be defined in the array of configuration values
	 * (default_dtx_cfg & default_mdio_cfg) at appropriate places
	 * to switch writing from one register to another. We continue
	 * writing these values until we encounter the 'END_SIGN' macro.
	 * For example, After making a series of 21 writes into
	 * dtx_control register the 'SWITCH_SIGN' appears and hence we
	 * start writing into mdio_control until we encounter END_SIGN.
	 */
	while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
		if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
		SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
				  &bar0->dtx_control, UF);
		val64 = readq(&bar0->dtx_control);
	while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
		if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
		SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
				  &bar0->mdio_control, UF);
		val64 = readq(&bar0->mdio_control);
	/* Both tables must end on END_SIGN for the sequence to be valid. */
	if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
	    (default_mdio_cfg[mdio_cnt] == END_SIGN)) {

	/* Tx DMA Initialization */
	writeq(val64, &bar0->tx_fifo_partition_0);
	writeq(val64, &bar0->tx_fifo_partition_1);
	writeq(val64, &bar0->tx_fifo_partition_2);
	writeq(val64, &bar0->tx_fifo_partition_3);

	/* Program length-1 and priority of each FIFO into the partition
	 * registers (four FIFOs per 64-bit register). */
	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
		    vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
			 13) | vBIT(config->tx_cfg[i].fifo_priority,
		if (i == (config->tx_fifo_num - 1)) {
		writeq(val64, &bar0->tx_fifo_partition_0);
		writeq(val64, &bar0->tx_fifo_partition_1);
		writeq(val64, &bar0->tx_fifo_partition_2);
		writeq(val64, &bar0->tx_fifo_partition_3);

	/* Enable Tx FIFO partition 0. */
	val64 = readq(&bar0->tx_fifo_partition_0);
	val64 |= BIT(0);	/* To enable the FIFO partition. */
	writeq(val64, &bar0->tx_fifo_partition_0);

	/*
	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
	 */
	if (get_xena_rev_id(nic->pdev) < 4)
		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

	val64 = readq(&bar0->tx_fifo_partition_0);
	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
		  &bar0->tx_fifo_partition_0, (unsigned long long) val64);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = readq(&bar0->tx_pa_cfg);
	val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
	    TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->tx_pa_cfg);

	/* Rx DMA initialization. */
	/* Program each ring's priority into rx_queue_priority. */
	for (i = 0; i < config->rx_ring_num; i++) {
		    vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
	writeq(val64, &bar0->rx_queue_priority);

	/*
	 * Allocating equal share of memory to all the
	 * configured Rx queues; queue 0's share additionally absorbs the
	 * division remainder so the whole memory is accounted for.
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
			mem_share = (mem_size / config->rx_ring_num +
				     mem_size % config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
	writeq(val64, &bar0->rx_queue_cfg);

	/*
	 * Filling Tx round robin registers
	 * as per the number of FIFOs
	 * (NOTE(review): the per-count case labels are not visible here).
	 */
	switch (config->tx_fifo_num) {
		val64 = 0x0000000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		writeq(val64, &bar0->tx_w_round_robin_4);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0100000100000100ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0001000001000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0100000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0001020000010001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0200000100010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		val64 = 0x0001020300010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0100000102030001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0200010000010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001000001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203000100000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0304050001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0203000100000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304000102030405ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000200000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		val64 = 0x0001020001020300ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0405060001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304050000010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0102030000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		val64 = 0x0001020300040105ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0200030106000204ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0103000502010007ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304010002060500ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0103020400000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);

	/* Filling the Rx round robin registers as per the
	 * number of Rings and steering based on QoS
	 * (NOTE(review): case labels not visible in this extract).
	 */
	switch (config->rx_ring_num) {
		val64 = 0x8080808080808080ULL;
		writeq(val64, &bar0->rts_qos_steering);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000100000100ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0000010000010000ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0100000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);
		val64 = 0x8080808040404040ULL;
		writeq(val64, &bar0->rts_qos_steering);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020000010001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200000100010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001000102000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);
		val64 = 0x8080804040402020ULL;
		writeq(val64, &bar0->rts_qos_steering);
		val64 = 0x0001020300010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0100000102030001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0200010000010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001000001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203000100000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);
		val64 = 0x8080404020201010ULL;
		writeq(val64, &bar0->rts_qos_steering);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0001000203000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001030004ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);
		val64 = 0x8080404020201008ULL;
		writeq(val64, &bar0->rts_qos_steering);
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0304050001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0203000100000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304000102030405ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000200000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);
		val64 = 0x8080404020100804ULL;
		writeq(val64, &bar0->rts_qos_steering);
		val64 = 0x0001020001020300ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0405060001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304050000010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0102030000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);
		val64 = 0x8080402010080402ULL;
		writeq(val64, &bar0->rts_qos_steering);
		val64 = 0x0001020300040105ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0200030106000204ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0103000502010007ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304010002060500ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0103020400000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);
		val64 = 0x8040201008040201ULL;
		writeq(val64, &bar0->rts_qos_steering);

	for (i = 0; i < 8; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the default rts frame length for the rings configured */
	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
	for (i = 0 ; i < config->rx_ring_num ; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the frame length for the configured rings
	 * desired by the user
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 then it is assumed that user not
		 * specified frame length steering.
		 * If the user provides the frame length then program
		 * the rts_frm_len register for those values or else
		 * leave it as it is.
		 */
		if (rts_frm_len[i] != 0) {
			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
			       &bar0->rts_frm_len_n[i]);

	/* Program statistics memory */
	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

	/*
	 * Initializing the sampling rate for the device to calculate the
	 * bandwidth utilization.
	 */
	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
	    MAC_RX_LINK_UTIL_VAL(rmac_util_period);
	writeq(val64, &bar0->mac_link_util);

	/*
	 * Initializing the Transmit and Receive Traffic Interrupt
	 */
	/*
	 * TTI Initialization. Default Tx timer gets us about
	 * 250 interrupts per sec. Continuous interrupts are enabled
	 */
	val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) |
	    TTI_DATA1_MEM_TX_URNG_A(0xA) |
	    TTI_DATA1_MEM_TX_URNG_B(0x10) |
	    TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
	if (use_continuous_tx_intrs)
		val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
	writeq(val64, &bar0->tti_data1_mem);

	val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
	    TTI_DATA2_MEM_TX_UFC_B(0x20) |
	    TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
	writeq(val64, &bar0->tti_data2_mem);

	val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
	writeq(val64, &bar0->tti_command_mem);

	/*
	 * Once the operation completes, the Strobe bit of the command
	 * register will be reset. We poll for this particular condition
	 * We wait for a maximum of 500ms for the operation to complete,
	 * if it's not complete by then we return error.
	 */
	val64 = readq(&bar0->tti_command_mem);
	if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
		DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",

	/* RTI Initialization */
	val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) |
	    RTI_DATA1_MEM_RX_URNG_A(0xA) |
	    RTI_DATA1_MEM_RX_URNG_B(0x10) |
	    RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
	writeq(val64, &bar0->rti_data1_mem);

	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
	    RTI_DATA2_MEM_RX_UFC_B(0x2) |
	    RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
	writeq(val64, &bar0->rti_data2_mem);

	val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
	writeq(val64, &bar0->rti_command_mem);

	/*
	 * Once the operation completes, the Strobe bit of the
	 * command register will be reset. We poll for this
	 * particular condition. We wait for a maximum of 500ms
	 * for the operation to complete, if it's not complete
	 * by then we return error.
	 */
	val64 = readq(&bar0->rti_command_mem);
	if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
		DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",

	/*
	 * Initializing proper values as Pause threshold into all
	 * the 8 Queues on Rx side.
	 */
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);

	/* Disable RMAC PAD STRIPPING */
	/* mac_cfg is key-protected (see broadcast enable above). */
	add = (void *) &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64), add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));
	val64 = readq(&bar0->mac_cfg);

	/*
	 * Set the time value to be inserted in the pause frame
	 * generated by xena.
	 */
	val64 = readq(&bar0->rmac_pause_cfg);
	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
	writeq(val64, &bar0->rmac_pause_cfg);

	/*
	 * Set the Threshold Limit for Generating the pause frame
	 * If the amount of data in any Queue exceeds ratio of
	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
	 * pause frame is generated
	 */
	for (i = 0; i < 4; i++) {
		    (((u64) 0xFF00 | nic->mac_control.
		      mc_pause_threshold_q0q3)
	writeq(val64, &bar0->mc_pause_thresh_q0q3);

	for (i = 0; i < 4; i++) {
		    (((u64) 0xFF00 | nic->mac_control.
		      mc_pause_threshold_q4q7)
	writeq(val64, &bar0->mc_pause_thresh_q4q7);

	/*
	 * TxDMA will stop Read request if the number of read split has
	 * exceeded the limit pointed by shared_splits
	 */
	val64 = readq(&bar0->pic_control);
	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
	writeq(val64, &bar0->pic_control);
1243 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1244 * @nic: device private variable,
1245 * @mask: A mask indicating which Intr block must be modified and,
1246 * @flag: A flag indicating whether to enable or disable the Intrs.
1247 * Description: This function will either disable or enable the interrupts
1248 * depending on the flag argument. The mask argument can be used to
1249 * enable/disable any Intr block.
1250 * Return Value: NONE.
/*
 * en_dis_able_nic_intrs - enable or disable the interrupt blocks selected
 * by @mask (@flag is ENABLE_INTRS or DISABLE_INTRS).  For each selected
 * block the corresponding bit(s) in general_int_mask are cleared (enable)
 * or set (disable), then the block-level mask register is programmed.
 *
 * NOTE(review): this listing is an elided extract — comment delimiters,
 * closing braces and some statements of the upstream s2io.c are missing.
 * Code below is left byte-identical to the listing; confirm against the
 * upstream driver before building.
 */
1253 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1255 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1256 register u64 val64 = 0, temp64 = 0;
1258 /* Top level interrupt classification */
1259 /* PIC Interrupts */
1260 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1261 /* Enable PIC Intrs in the general intr mask register */
1262 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1263 if (flag == ENABLE_INTRS) {
1264 temp64 = readq(&bar0->general_int_mask);
1265 temp64 &= ~((u64) val64);
1266 writeq(temp64, &bar0->general_int_mask);
1268 * Disabled all PCIX, Flash, MDIO, IIC and GPIO
1269 * interrupts for now.
1272 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1274 * No MSI Support is available presently, so TTI and
1275 * RTI interrupts are also disabled.
1277 } else if (flag == DISABLE_INTRS) {
1279 * Disable PIC Intrs in the general
1280 * intr mask register
1282 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1283 temp64 = readq(&bar0->general_int_mask);
/* NOTE(review): upstream merges the current mask here ("val64 |= temp64;")
 * before the writeq below — that line appears elided in this listing; the
 * same merge is elided in every DISABLE_INTRS path of this function.
 * Verify against upstream s2io.c. */
1285 writeq(val64, &bar0->general_int_mask);
1289 /* DMA Interrupts */
1290 /* Enabling/Disabling Tx DMA interrupts */
1291 if (mask & TX_DMA_INTR) {
1292 /* Enable TxDMA Intrs in the general intr mask register */
1293 val64 = TXDMA_INT_M;
1294 if (flag == ENABLE_INTRS) {
1295 temp64 = readq(&bar0->general_int_mask);
1296 temp64 &= ~((u64) val64);
1297 writeq(temp64, &bar0->general_int_mask);
1299 * Keep all interrupts other than PFC interrupt
1300 * and PCC interrupt disabled in DMA level.
1302 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1304 writeq(val64, &bar0->txdma_int_mask);
1306 * Enable only the MISC error 1 interrupt in PFC block
1308 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1309 writeq(val64, &bar0->pfc_err_mask);
1311 * Enable only the FB_ECC error interrupt in PCC block
1313 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1314 writeq(val64, &bar0->pcc_err_mask);
1315 } else if (flag == DISABLE_INTRS) {
1317 * Disable TxDMA Intrs in the general intr mask
1320 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1321 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1322 temp64 = readq(&bar0->general_int_mask);
1324 writeq(val64, &bar0->general_int_mask);
1328 /* Enabling/Disabling Rx DMA interrupts */
1329 if (mask & RX_DMA_INTR) {
1330 /* Enable RxDMA Intrs in the general intr mask register */
1331 val64 = RXDMA_INT_M;
1332 if (flag == ENABLE_INTRS) {
1333 temp64 = readq(&bar0->general_int_mask);
1334 temp64 &= ~((u64) val64);
1335 writeq(temp64, &bar0->general_int_mask);
1337 * All RxDMA block interrupts are disabled for now
1340 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1341 } else if (flag == DISABLE_INTRS) {
1343 * Disable RxDMA Intrs in the general intr mask
1346 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1347 temp64 = readq(&bar0->general_int_mask);
1349 writeq(val64, &bar0->general_int_mask);
1353 /* MAC Interrupts */
1354 /* Enabling/Disabling MAC interrupts */
1355 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1356 val64 = TXMAC_INT_M | RXMAC_INT_M;
1357 if (flag == ENABLE_INTRS) {
1358 temp64 = readq(&bar0->general_int_mask);
1359 temp64 &= ~((u64) val64);
1360 writeq(temp64, &bar0->general_int_mask);
1362 * All MAC block error interrupts are disabled for now
1363 * except the link status change interrupt.
1366 val64 = MAC_INT_STATUS_RMAC_INT;
1367 temp64 = readq(&bar0->mac_int_mask);
1368 temp64 &= ~((u64) val64);
1369 writeq(temp64, &bar0->mac_int_mask);
1371 val64 = readq(&bar0->mac_rmac_err_mask);
1372 val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1373 writeq(val64, &bar0->mac_rmac_err_mask);
1374 } else if (flag == DISABLE_INTRS) {
1376 * Disable MAC Intrs in the general intr mask register
1378 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1379 writeq(DISABLE_ALL_INTRS,
1380 &bar0->mac_rmac_err_mask);
1382 temp64 = readq(&bar0->general_int_mask);
1384 writeq(val64, &bar0->general_int_mask);
1388 /* XGXS Interrupts */
1389 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1390 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1391 if (flag == ENABLE_INTRS) {
1392 temp64 = readq(&bar0->general_int_mask);
1393 temp64 &= ~((u64) val64);
1394 writeq(temp64, &bar0->general_int_mask);
1396 * All XGXS block error interrupts are disabled for now
1399 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1400 } else if (flag == DISABLE_INTRS) {
1402 * Disable MC Intrs in the general intr mask register
1404 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1405 temp64 = readq(&bar0->general_int_mask);
1407 writeq(val64, &bar0->general_int_mask);
1411 /* Memory Controller(MC) interrupts */
1412 if (mask & MC_INTR) {
/* NOTE(review): no visible "val64 = MC_INT_M;" style assignment for this
 * block — it appears elided here, leaving val64 stale from the XGXS
 * block in this listing; confirm against upstream. */
1414 if (flag == ENABLE_INTRS) {
1415 temp64 = readq(&bar0->general_int_mask);
1416 temp64 &= ~((u64) val64);
1417 writeq(temp64, &bar0->general_int_mask);
1419 * Enable all MC Intrs.
1421 writeq(0x0, &bar0->mc_int_mask);
1422 writeq(0x0, &bar0->mc_err_mask);
1423 } else if (flag == DISABLE_INTRS) {
1425 * Disable MC Intrs in the general intr mask register
1427 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1428 temp64 = readq(&bar0->general_int_mask);
1430 writeq(val64, &bar0->general_int_mask);
1435 /* Tx traffic interrupts */
1436 if (mask & TX_TRAFFIC_INTR) {
1437 val64 = TXTRAFFIC_INT_M;
1438 if (flag == ENABLE_INTRS) {
1439 temp64 = readq(&bar0->general_int_mask);
1440 temp64 &= ~((u64) val64);
1441 writeq(temp64, &bar0->general_int_mask);
1443 * Enable all the Tx side interrupts
1444 * writing 0 Enables all 64 TX interrupt levels
1446 writeq(0x0, &bar0->tx_traffic_mask);
1447 } else if (flag == DISABLE_INTRS) {
1449 * Disable Tx Traffic Intrs in the general intr mask
1452 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1453 temp64 = readq(&bar0->general_int_mask);
1455 writeq(val64, &bar0->general_int_mask);
1459 /* Rx traffic interrupts */
1460 if (mask & RX_TRAFFIC_INTR) {
1461 val64 = RXTRAFFIC_INT_M;
1462 if (flag == ENABLE_INTRS) {
1463 temp64 = readq(&bar0->general_int_mask);
1464 temp64 &= ~((u64) val64);
1465 writeq(temp64, &bar0->general_int_mask);
1466 /* writing 0 Enables all 8 RX interrupt levels */
1467 writeq(0x0, &bar0->rx_traffic_mask);
1468 } else if (flag == DISABLE_INTRS) {
1470 * Disable Rx Traffic Intrs in the general intr mask
1473 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1474 temp64 = readq(&bar0->general_int_mask);
1476 writeq(val64, &bar0->general_int_mask);
/*
 * check_prc_pcc_state - helper for verify_xena_quiescence().  Inspects the
 * RMAC PCC idle bits and the RC PRC quiescent bits of the adapter-status
 * value @val64.  @flag records whether the adapter-enable bit was ever
 * written; @rev_id presumably selects between the single-PCC-idle and
 * PCC_FOUR_IDLE encodings per chip revision — the rev_id comparisons
 * themselves are elided in this listing (TODO confirm upstream).
 *
 * NOTE(review): "ret" assignments, return statement and closing braces are
 * elided in this listing; code left byte-identical.
 */
1481 static int check_prc_pcc_state(u64 val64, int flag, int rev_id)
1485 if (flag == FALSE) {
1487 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1488 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1489 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1493 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1494 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1495 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1501 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1502 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1503 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1504 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1505 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1509 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1510 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1511 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1512 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1513 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1522 * verify_xena_quiescence - Checks whether the H/W is ready
1523 * @val64 : Value read from adapter status register.
1524 * @flag : indicates if the adapter enable bit was ever written once
1526 * Description: Returns whether the H/W is ready to go or not. Depending
1527 * on whether adapter enable bit was written or not the comparison
1528 * differs and the calling function passes the input argument flag to
1530 * Return: 1 if Xena is quiescent
1531 * 0 if Xena is not quiescent
/*
 * verify_xena_quiescence - checks whether the hardware is ready/quiescent.
 * tmp64 holds the bitwise complement of the adapter status @val64 so that a
 * single AND against the required READY/QUIESCENT bit mask can detect any
 * bit still clear; PRC/PCC state is then cross-checked via
 * check_prc_pcc_state() using the chip revision.
 *
 * NOTE(review): the "ret" declaration, the "if (!(tmp64 & (...)))" opener
 * and the final return are elided in this listing; code left byte-identical.
 */
1534 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1537 u64 tmp64 = ~((u64) val64);
1538 int rev_id = get_xena_rev_id(sp->pdev);
1542 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1543 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1544 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1545 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1546 ADAPTER_STATUS_P_PLL_LOCK))) {
1547 ret = check_prc_pcc_state(val64, flag, rev_id);
1554 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1555 * @sp: Pointer to device specific structure
1557 * New procedure to clear mac address reading problems on Alpha platforms
/*
 * fix_mac_address - workaround for the MAC-address read problem on Alpha:
 * streams the fix_mac[] magic sequence into gpio_control until END_SIGN,
 * reading the register back after each write (presumably to flush the
 * posted write — TODO confirm).
 *
 * NOTE(review): declarations of "i"/"val64" and the braces are elided in
 * this listing; code left byte-identical.
 */
1561 void fix_mac_address(nic_t * sp)
1563 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1567 while (fix_mac[i] != END_SIGN) {
1568 writeq(fix_mac[i++], &bar0->gpio_control);
1570 val64 = readq(&bar0->gpio_control);
1575 * start_nic - Turns the device on
1576 * @nic : device private variable.
1578 * This function actually turns the device on. Before this function is
1579 * called,all Registers are configured from their reset states
1580 * and shared memory is allocated but the NIC is still quiescent. On
1581 * calling this function, the device interrupts are cleared and the NIC is
1582 * literally switched on by writing into the adapter control register.
1584 * SUCCESS on success and -1 on failure.
/*
 * start_nic - final bring-up of the adapter.  Programs each ring's PRC with
 * the DMA address of its first Rx block and enables the PRCs (3-descriptor
 * ring mode under CONFIG_2BUFF_MODE), enables MC-RLDRAM and waits ~100ms,
 * clears any stale RMAC link-change events, verifies quiescence, enables
 * the selected interrupt blocks, turns the laser on (ADAPTER_EOI_TX_ON),
 * applies the SXE-002 LED fixup for qualifying subsystem IDs, and schedules
 * set_link_task directly since some switches never raise the link-state
 * interrupt.  Returns SUCCESS on success, -1 on failure (per the kernel-doc
 * above; failure-path returns are elided in this listing).
 *
 * NOTE(review): #else/#endif lines and declarations of "i", "subid" and
 * "interruptible" are elided in this listing; code left byte-identical.
 */
1587 static int start_nic(struct s2io_nic *nic)
1589 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1590 struct net_device *dev = nic->dev;
1591 register u64 val64 = 0;
1594 mac_info_t *mac_control;
1595 struct config_param *config;
1597 mac_control = &nic->mac_control;
1598 config = &nic->config;
1600 /* PRC Initialization and configuration */
1601 for (i = 0; i < config->rx_ring_num; i++) {
1602 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1603 &bar0->prc_rxd0_n[i]);
1605 val64 = readq(&bar0->prc_ctrl_n[i]);
1606 #ifndef CONFIG_2BUFF_MODE
1607 val64 |= PRC_CTRL_RC_ENABLED;
1609 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1611 writeq(val64, &bar0->prc_ctrl_n[i]);
1614 #ifdef CONFIG_2BUFF_MODE
1615 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1616 val64 = readq(&bar0->rx_pa_cfg);
1617 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1618 writeq(val64, &bar0->rx_pa_cfg);
1622 * Enabling MC-RLDRAM. After enabling the device, we timeout
1623 * for around 100ms, which is approximately the time required
1624 * for the device to be ready for operation.
1626 val64 = readq(&bar0->mc_rldram_mrs);
1627 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1628 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1629 val64 = readq(&bar0->mc_rldram_mrs);
1631 msleep(100); /* Delay by around 100 ms. */
1633 /* Enabling ECC Protection. */
/* NOTE(review): the bit is CLEARED below despite the "Enabling" comment —
 * ADAPTER_ECC_EN appears to be active-low; confirm with the Xena
 * register specification. */
1634 val64 = readq(&bar0->adapter_control);
1635 val64 &= ~ADAPTER_ECC_EN;
1636 writeq(val64, &bar0->adapter_control);
1639 * Clearing any possible Link state change interrupts that
1640 * could have popped up just before Enabling the card.
1642 val64 = readq(&bar0->mac_rmac_err_reg);
1644 writeq(val64, &bar0->mac_rmac_err_reg);
1647 * Verify if the device is ready to be enabled, if so enable
1650 val64 = readq(&bar0->adapter_status);
1651 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1652 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1653 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1654 (unsigned long long) val64);
1658 /* Enable select interrupts */
1659 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1660 RX_MAC_INTR | MC_INTR;
1661 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1664 * With some switches, link might be already up at this point.
1665 * Because of this weird behavior, when we enable laser,
1666 * we may not get link. We need to handle this. We cannot
1667 * figure out which switch is misbehaving. So we are forced to
1668 * make a global change.
1671 /* Enabling Laser. */
1672 val64 = readq(&bar0->adapter_control);
1673 val64 |= ADAPTER_EOI_TX_ON;
1674 writeq(val64, &bar0->adapter_control);
1676 /* SXE-002: Initialize link and activity LED */
1677 subid = nic->pdev->subsystem_device;
1678 if ((subid & 0xFF) >= 0x07) {
1679 val64 = readq(&bar0->gpio_control);
1680 val64 |= 0x0000800000000000ULL;
1681 writeq(val64, &bar0->gpio_control);
1682 val64 = 0x0411040400000000ULL;
1683 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1687 * Don't see link state interrupts on certain switches, so
1688 * directly scheduling a link state task from here.
1690 schedule_work(&nic->set_link_task);
1696 * free_tx_buffers - Free all queued Tx buffers
1697 * @nic : device private variable.
1699 * Free all queued Tx buffers.
1700 * Return Value: void
/*
 * free_tx_buffers - forcibly reclaims every queued Tx skb.  Walks all
 * descriptors of every Tx FIFO, DMA-unmaps the linear part and each page
 * fragment, frees the skb (the free call itself is elided in this listing),
 * zeroes the TxD list and resets the FIFO get/put offsets to 0.
 *
 * NOTE(review): several statements (the skb NULL check, dev_kfree_skb
 * call, DBG_PRINT prologue, loop braces) are elided in this listing; code
 * left byte-identical.
 */
1703 static void free_tx_buffers(struct s2io_nic *nic)
1705 struct net_device *dev = nic->dev;
1706 struct sk_buff *skb;
1709 mac_info_t *mac_control;
1710 struct config_param *config;
1711 int cnt = 0, frg_cnt;
1713 mac_control = &nic->mac_control;
1714 config = &nic->config;
1716 for (i = 0; i < config->tx_fifo_num; i++) {
1717 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1718 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1721 (struct sk_buff *) ((unsigned long) txdp->
1724 memset(txdp, 0, sizeof(TxD_t) *
1728 frg_cnt = skb_shinfo(skb)->nr_frags;
1729 pci_unmap_single(nic->pdev, (dma_addr_t)
1730 txdp->Buffer_Pointer,
1731 skb->len - skb->data_len,
1737 for (j = 0; j < frg_cnt; j++, txdp++) {
1739 &skb_shinfo(skb)->frags[j];
1740 pci_unmap_page(nic->pdev,
1750 memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
1754 "%s:forcibly freeing %d skbs on FIFO%d\n",
1756 mac_control->fifos[i].tx_curr_get_info.offset = 0;
1757 mac_control->fifos[i].tx_curr_put_info.offset = 0;
1762 * stop_nic - To stop the nic
1763 * @nic : device private variable.
1765 * This function does exactly the opposite of what the start_nic()
1766 * function does. This function is called to stop the device.
/*
 * stop_nic - the opposite of start_nic().  Masks all interrupt blocks the
 * driver enabled (traffic, MAC and MC), then clears PRC_CTRL_RC_ENABLED on
 * every Rx ring's PRC control register so the receive engines stop
 * fetching descriptors.
 *
 * NOTE(review): the function's opening/closing braces are elided in this
 * listing; code left byte-identical.
 */
1771 static void stop_nic(struct s2io_nic *nic)
1773 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1774 register u64 val64 = 0;
1775 u16 interruptible, i;
1776 mac_info_t *mac_control;
1777 struct config_param *config;
1779 mac_control = &nic->mac_control;
1780 config = &nic->config;
1782 /* Disable all interrupts */
1783 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1784 RX_MAC_INTR | MC_INTR;
1785 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
1788 for (i = 0; i < config->rx_ring_num; i++) {
1789 val64 = readq(&bar0->prc_ctrl_n[i]);
1790 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
1791 writeq(val64, &bar0->prc_ctrl_n[i]);
1796 * fill_rx_buffers - Allocates the Rx side skbs
1797 * @nic: device private variable
1798 * @ring_no: ring number
1800 * The function allocates Rx side skbs and puts the physical
1801 * address of these buffers into the RxD buffer pointers, so that the NIC
1802 * can DMA the received frame into these locations.
1803 * The NIC supports 3 receive modes, viz
1805 * 2. three buffer and
1806 * 3. Five buffer modes.
1807 * Each mode defines how many fragments the received frame will be split
1808 * up into by the NIC. The frame is split into L3 header, L4 Header,
1809 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
1810 * is split into 3 fragments. As of now only single buffer mode is
1813 * SUCCESS on success or an appropriate -ve value on failure.
/*
 * fill_rx_buffers - replenishes ring @ring_no with freshly allocated skbs
 * and hands them to the NIC: each skb's DMA address is written into an RxD
 * buffer pointer and the descriptor is given to the hardware by setting
 * RXD_OWN_XENA.  Handles both the 1-buffer and 2-buffer descriptor layouts
 * (CONFIG_2BUFF_MODE), wraps the put pointer across Rx blocks, and — when
 * NAPI is not configured — publishes put_pos under put_lock for the
 * interrupt handler.  Returns SUCCESS, or -ENOMEM on allocation failure
 * (the return statements are elided in this listing).
 *
 * NOTE(review): numerous lines (#else/#endif lines, increments of
 * alloc_tab, "goto"/"break" statements, closing braces) are elided in this
 * listing; code left byte-identical.
 */
1816 int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1818 struct net_device *dev = nic->dev;
1819 struct sk_buff *skb;
1821 int off, off1, size, block_no, block_no1;
1822 int offset, offset1;
1825 mac_info_t *mac_control;
1826 struct config_param *config;
1827 #ifdef CONFIG_2BUFF_MODE
1832 dma_addr_t rxdpphys;
1834 #ifndef CONFIG_S2IO_NAPI
1835 unsigned long flags;
1838 mac_control = &nic->mac_control;
1839 config = &nic->config;
/* Number of buffers this ring is short of, i.e. capacity minus what the
 * host currently owns. */
1840 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
1841 atomic_read(&nic->rx_bufs_left[ring_no]);
1842 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
1843 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
1845 while (alloc_tab < alloc_cnt) {
1846 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1848 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
1850 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
1851 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
1852 #ifndef CONFIG_2BUFF_MODE
1853 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
1854 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
1856 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
1857 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
1860 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1861 block_virt_addr + off;
/* Stop when the put pointer has caught up with the get pointer and the
 * descriptor still holds an skb — the ring is full. */
1862 if ((offset == offset1) && (rxdp->Host_Control)) {
1863 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
1864 DBG_PRINT(INTR_DBG, " info equated\n");
1867 #ifndef CONFIG_2BUFF_MODE
1868 if (rxdp->Control_1 == END_OF_BLOCK) {
1869 mac_control->rings[ring_no].rx_curr_put_info.
1871 mac_control->rings[ring_no].rx_curr_put_info.
1872 block_index %= mac_control->rings[ring_no].block_count;
1873 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1876 off %= (MAX_RXDS_PER_BLOCK + 1);
1877 mac_control->rings[ring_no].rx_curr_put_info.offset =
1879 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
1880 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
1883 #ifndef CONFIG_S2IO_NAPI
1884 spin_lock_irqsave(&nic->put_lock, flags);
1885 mac_control->rings[ring_no].put_pos =
1886 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
1887 spin_unlock_irqrestore(&nic->put_lock, flags);
1890 if (rxdp->Host_Control == END_OF_BLOCK) {
1891 mac_control->rings[ring_no].rx_curr_put_info.
1893 mac_control->rings[ring_no].rx_curr_put_info.block_index
1894 %= mac_control->rings[ring_no].block_count;
1895 block_no = mac_control->rings[ring_no].rx_curr_put_info
1898 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
1899 dev->name, block_no,
1900 (unsigned long long) rxdp->Control_1);
1901 mac_control->rings[ring_no].rx_curr_put_info.offset =
1903 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1906 #ifndef CONFIG_S2IO_NAPI
1907 spin_lock_irqsave(&nic->put_lock, flags);
1908 mac_control->rings[ring_no].put_pos = (block_no *
1909 (MAX_RXDS_PER_BLOCK + 1)) + off;
1910 spin_unlock_irqrestore(&nic->put_lock, flags);
1914 #ifndef CONFIG_2BUFF_MODE
/* Skip descriptors the NIC still owns. */
1915 if (rxdp->Control_1 & RXD_OWN_XENA)
1917 if (rxdp->Control_2 & BIT(0))
1920 mac_control->rings[ring_no].rx_curr_put_info.
1924 #ifdef CONFIG_2BUFF_MODE
1926 * RxDs Spanning cache lines will be replenished only
1927 * if the succeeding RxD is also owned by Host. It
1928 * will always be the ((8*i)+3) and ((8*i)+6)
1929 * descriptors for the 48 byte descriptor. The offending
1930 * decsriptor is of-course the 3rd descriptor.
1932 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
1933 block_dma_addr + (off * sizeof(RxD_t));
1934 if (((u64) (rxdpphys)) % 128 > 80) {
1935 rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
1936 block_virt_addr + (off + 1);
1937 if (rxdpnext->Host_Control == END_OF_BLOCK) {
1938 nextblk = (block_no + 1) %
1939 (mac_control->rings[ring_no].block_count);
1940 rxdpnext = mac_control->rings[ring_no].rx_blocks
1941 [nextblk].block_virt_addr;
1943 if (rxdpnext->Control_2 & BIT(0))
1948 #ifndef CONFIG_2BUFF_MODE
1949 skb = dev_alloc_skb(size + NET_IP_ALIGN);
1951 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
1954 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
1955 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
1958 #ifndef CONFIG_2BUFF_MODE
1959 skb_reserve(skb, NET_IP_ALIGN);
1960 memset(rxdp, 0, sizeof(RxD_t));
1961 rxdp->Buffer0_ptr = pci_map_single
1962 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
1963 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
1964 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
1965 rxdp->Host_Control = (unsigned long) (skb);
/* Hand the descriptor to the NIC. */
1966 rxdp->Control_1 |= RXD_OWN_XENA;
1968 off %= (MAX_RXDS_PER_BLOCK + 1);
1969 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1971 ba = &mac_control->rings[ring_no].ba[block_no][off];
1972 skb_reserve(skb, BUF0_LEN);
1973 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
1975 skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
1977 memset(rxdp, 0, sizeof(RxD_t));
1978 rxdp->Buffer2_ptr = pci_map_single
1979 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
1980 PCI_DMA_FROMDEVICE);
1982 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
1983 PCI_DMA_FROMDEVICE);
1985 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
1986 PCI_DMA_FROMDEVICE);
1988 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
1989 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
1990 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
1991 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
1992 rxdp->Host_Control = (u64) ((unsigned long) (skb));
1993 rxdp->Control_1 |= RXD_OWN_XENA;
1995 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1997 rxdp->Control_2 |= SET_RXD_MARKER;
1999 atomic_inc(&nic->rx_bufs_left[ring_no]);
2008 * free_rx_buffers - Frees all Rx buffers
2009 * @sp: device private variable.
2011 * This function will free all Rx buffers allocated by host.
/*
 * free_rx_buffers - frees every host-allocated Rx buffer on all rings.
 * Walks each descriptor of each ring (skipping the per-block END_OF_BLOCK
 * link entries and any descriptor the NIC still owns), DMA-unmaps the
 * buffer(s) — one mapping in 1-buffer mode, three (ba_0, ba_1 and the skb
 * data) in 2-buffer mode — frees the skb (call elided in this listing),
 * zeroes the RxD, then resets the ring's get/put bookkeeping and the
 * rx_bufs_left counter.
 *
 * NOTE(review): the skb free call, #else lines and several braces are
 * elided in this listing; code left byte-identical.
 */
2016 static void free_rx_buffers(struct s2io_nic *sp)
2018 struct net_device *dev = sp->dev;
2019 int i, j, blk = 0, off, buf_cnt = 0;
2021 struct sk_buff *skb;
2022 mac_info_t *mac_control;
2023 struct config_param *config;
2024 #ifdef CONFIG_2BUFF_MODE
2028 mac_control = &sp->mac_control;
2029 config = &sp->config;
2031 for (i = 0; i < config->rx_ring_num; i++) {
2032 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
2033 off = j % (MAX_RXDS_PER_BLOCK + 1);
2034 rxdp = mac_control->rings[i].rx_blocks[blk].
2035 block_virt_addr + off;
2037 #ifndef CONFIG_2BUFF_MODE
2038 if (rxdp->Control_1 == END_OF_BLOCK) {
2040 (RxD_t *) ((unsigned long) rxdp->
2046 if (rxdp->Host_Control == END_OF_BLOCK) {
/* Descriptor never filled (or still owned by the NIC): just clear it. */
2052 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2053 memset(rxdp, 0, sizeof(RxD_t));
2058 (struct sk_buff *) ((unsigned long) rxdp->
2061 #ifndef CONFIG_2BUFF_MODE
2062 pci_unmap_single(sp->pdev, (dma_addr_t)
2065 HEADER_ETHERNET_II_802_3_SIZE
2066 + HEADER_802_2_SIZE +
2068 PCI_DMA_FROMDEVICE);
2070 ba = &mac_control->rings[i].ba[blk][off];
2071 pci_unmap_single(sp->pdev, (dma_addr_t)
2074 PCI_DMA_FROMDEVICE);
2075 pci_unmap_single(sp->pdev, (dma_addr_t)
2078 PCI_DMA_FROMDEVICE);
2079 pci_unmap_single(sp->pdev, (dma_addr_t)
2081 dev->mtu + BUF0_LEN + 4,
2082 PCI_DMA_FROMDEVICE);
2085 atomic_dec(&sp->rx_bufs_left[i]);
2088 memset(rxdp, 0, sizeof(RxD_t));
2090 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2091 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2092 mac_control->rings[i].rx_curr_put_info.offset = 0;
2093 mac_control->rings[i].rx_curr_get_info.offset = 0;
2094 atomic_set(&sp->rx_bufs_left[i], 0);
2095 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2096 dev->name, buf_cnt, i);
2101 * s2io_poll - Rx interrupt handler for NAPI support
2102 * @dev : pointer to the device structure.
2103 * @budget : The number of packets that were budgeted to be processed
2104 * during one pass through the 'Poll' function.
2106 * Comes into picture only if NAPI support has been incorporated. It does
2107 * the same thing that rx_intr_handler does, but not in an interrupt context;
2108 * also it will process only a given number of packets.
2110 * 0 on success and 1 if there are No Rx packets to be processed.
/*
 * s2io_poll - NAPI poll callback (compiled only with CONFIG_S2IO_NAPI).
 * Clamps the packet budget to dev->quota, acknowledges rx_traffic_int,
 * runs rx_intr_handler() over every ring until the quota is exhausted,
 * refills the rings, and — when all pending work finished — calls
 * netif_rx_complete() and re-enables Rx traffic interrupts.  The kernel-doc
 * above documents the 0 / 1 return convention; the return statements
 * themselves are elided in this listing.
 *
 * NOTE(review): declarations of "val64"/"i", the "#endif", "goto"
 * targets/returns and closing braces are elided; code left byte-identical.
 */
2113 #if defined(CONFIG_S2IO_NAPI)
2114 static int s2io_poll(struct net_device *dev, int *budget)
2116 nic_t *nic = dev->priv;
2117 int pkt_cnt = 0, org_pkts_to_process;
2118 mac_info_t *mac_control;
2119 struct config_param *config;
2120 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
2124 atomic_inc(&nic->isr_cnt);
2125 mac_control = &nic->mac_control;
2126 config = &nic->config;
2128 nic->pkts_to_process = *budget;
2129 if (nic->pkts_to_process > dev->quota)
2130 nic->pkts_to_process = dev->quota;
2131 org_pkts_to_process = nic->pkts_to_process;
/* Acknowledge (write-to-clear) pending Rx traffic interrupts. */
2133 val64 = readq(&bar0->rx_traffic_int);
2134 writeq(val64, &bar0->rx_traffic_int);
2136 for (i = 0; i < config->rx_ring_num; i++) {
2137 rx_intr_handler(&mac_control->rings[i]);
2138 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2139 if (!nic->pkts_to_process) {
2140 /* Quota for the current iteration has been met */
2147 dev->quota -= pkt_cnt;
2149 netif_rx_complete(dev);
2151 for (i = 0; i < config->rx_ring_num; i++) {
2152 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2153 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2154 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2158 /* Re enable the Rx interrupts. */
2159 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2160 atomic_dec(&nic->isr_cnt);
/* Quota-exhausted path: account the processed packets and refill, but do
 * not complete NAPI or re-enable interrupts. */
2164 dev->quota -= pkt_cnt;
2167 for (i = 0; i < config->rx_ring_num; i++) {
2168 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2169 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2170 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2174 atomic_dec(&nic->isr_cnt);
2180 * rx_intr_handler - Rx interrupt handler
2181 * @nic: device private variable.
2183 * If the interrupt is because of a received frame or if the
2184 * receive ring contains fresh as yet un-processed frames,this function is
2185 * called. It picks out the RxD at which place the last Rx processing had
2186 * stopped and sends the skb to the OSM's Rx handler and then increments
/*
 * rx_intr_handler - services received frames on one ring.  Under rx_lock
 * (bailing out early if the card is going down for reset), walks RxDs from
 * the current get position while they are up to date and the get pointer
 * has not caught up with the put pointer; each buffer is DMA-unmapped and
 * the descriptor passed to rx_osm_handler().  Advances the get bookkeeping
 * across block boundaries.  With NAPI it stops once nic->pkts_to_process
 * reaches zero; otherwise after indicate_max_pkts packets.
 *
 * NOTE(review): declarations ("pkt_cnt", "rxdp"), #else lines, returns and
 * closing braces are elided in this listing; code left byte-identical.
 */
2191 static void rx_intr_handler(ring_info_t *ring_data)
2193 nic_t *nic = ring_data->nic;
2194 struct net_device *dev = (struct net_device *) nic->dev;
2195 int get_block, get_offset, put_block, put_offset, ring_bufs;
2196 rx_curr_get_info_t get_info, put_info;
2198 struct sk_buff *skb;
2199 #ifndef CONFIG_S2IO_NAPI
2202 spin_lock(&nic->rx_lock);
2203 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2204 DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n",
2205 __FUNCTION__, dev->name);
2206 spin_unlock(&nic->rx_lock);
2209 get_info = ring_data->rx_curr_get_info;
2210 get_block = get_info.block_index;
2211 put_info = ring_data->rx_curr_put_info;
2212 put_block = put_info.block_index;
2213 ring_bufs = get_info.ring_len+1;
2214 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2216 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2218 #ifndef CONFIG_S2IO_NAPI
/* Without NAPI the fill side publishes put_pos under put_lock. */
2219 spin_lock(&nic->put_lock);
2220 put_offset = ring_data->put_pos;
2221 spin_unlock(&nic->put_lock);
2223 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
2226 while (RXD_IS_UP2DT(rxdp) &&
2227 (((get_offset + 1) % ring_bufs) != put_offset)) {
2228 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2230 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2232 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2233 spin_unlock(&nic->rx_lock);
2236 #ifndef CONFIG_2BUFF_MODE
2237 pci_unmap_single(nic->pdev, (dma_addr_t)
2240 HEADER_ETHERNET_II_802_3_SIZE +
2243 PCI_DMA_FROMDEVICE);
2245 pci_unmap_single(nic->pdev, (dma_addr_t)
2247 BUF0_LEN, PCI_DMA_FROMDEVICE);
2248 pci_unmap_single(nic->pdev, (dma_addr_t)
2250 BUF1_LEN, PCI_DMA_FROMDEVICE);
2251 pci_unmap_single(nic->pdev, (dma_addr_t)
2253 dev->mtu + BUF0_LEN + 4,
2254 PCI_DMA_FROMDEVICE);
2256 rx_osm_handler(ring_data, rxdp);
2258 ring_data->rx_curr_get_info.offset =
2260 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
/* End of an Rx block: wrap to the next block in the ring. */
2262 if (get_info.offset &&
2263 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2264 get_info.offset = 0;
2265 ring_data->rx_curr_get_info.offset
2268 get_block %= ring_data->block_count;
2269 ring_data->rx_curr_get_info.block_index
2271 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2274 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2276 #ifdef CONFIG_S2IO_NAPI
2277 nic->pkts_to_process -= 1;
2278 if (!nic->pkts_to_process)
2282 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2286 spin_unlock(&nic->rx_lock);
2290 * tx_intr_handler - Transmit interrupt handler
2291 * @nic : device private variable
2293 * If an interrupt was raised to indicate DMA complete of the
2294 * Tx packet, this function is called. It identifies the last TxD
2295 * whose buffer was freed and frees all skbs whose data have already
2296 * DMA'ed into the NICs internal memory.
/*
 * tx_intr_handler - reclaims Tx descriptors whose DMA has completed on one
 * FIFO.  Walks from tx_curr_get_info toward tx_curr_put_info while the TxD
 * list is no longer owned by the NIC and still holds an skb: reports
 * TXD_T_CODE errors, unmaps the linear buffer and every page fragment,
 * zeroes the descriptor list, accounts tx_bytes, frees the skb, and
 * finally wakes the netdev queue under tx_lock if it was stopped.
 *
 * NOTE(review): declarations ("txdlp", "j", "frg_cnt"), the memset of the
 * TxD list and closing braces are elided in this listing; code left
 * byte-identical.
 */
2301 static void tx_intr_handler(fifo_info_t *fifo_data)
2303 nic_t *nic = fifo_data->nic;
2304 struct net_device *dev = (struct net_device *) nic->dev;
2305 tx_curr_get_info_t get_info, put_info;
2306 struct sk_buff *skb;
2310 get_info = fifo_data->tx_curr_get_info;
2311 put_info = fifo_data->tx_curr_put_info;
2312 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2314 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2315 (get_info.offset != put_info.offset) &&
2316 (txdlp->Host_Control)) {
2317 /* Check for TxD errors */
2318 if (txdlp->Control_1 & TXD_T_CODE) {
2319 unsigned long long err;
2320 err = txdlp->Control_1 & TXD_T_CODE;
2321 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2325 skb = (struct sk_buff *) ((unsigned long)
2326 txdlp->Host_Control);
2328 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2330 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2334 frg_cnt = skb_shinfo(skb)->nr_frags;
2335 nic->tx_pkt_count++;
2337 pci_unmap_single(nic->pdev, (dma_addr_t)
2338 txdlp->Buffer_Pointer,
2339 skb->len - skb->data_len,
2345 for (j = 0; j < frg_cnt; j++, txdlp++) {
2347 &skb_shinfo(skb)->frags[j];
2348 pci_unmap_page(nic->pdev,
2358 (sizeof(TxD_t) * fifo_data->max_txds);
2360 /* Updating the statistics block */
2361 nic->stats.tx_bytes += skb->len;
2362 dev_kfree_skb_irq(skb);
2365 get_info.offset %= get_info.fifo_len + 1;
2366 txdlp = (TxD_t *) fifo_data->list_info
2367 [get_info.offset].list_virt_addr;
2368 fifo_data->tx_curr_get_info.offset =
2372 spin_lock(&nic->tx_lock);
2373 if (netif_queue_stopped(dev))
2374 netif_wake_queue(dev);
2375 spin_unlock(&nic->tx_lock);
2379 * alarm_intr_handler - Alarm Interrupt handler
2380 * @nic: device private variable
2381 * Description: If the interrupt was neither because of Rx packet or Tx
2382 * complete, this function is called. If the interrupt was to indicate
2383 * a loss of link, the OSM link status handler is invoked for any other
2384 * alarm interrupt the block that raised the interrupt is displayed
2385 * and a H/W reset is issued.
/*
 * alarm_intr_handler - service non-traffic (alarm) interrupts.
 * Each alarm register is read then written back with the same value,
 * which clears the write-1-to-clear cause bits.  Link changes are
 * deferred to set_link_task; fatal ECC/serr conditions stop the queue
 * and schedule rst_timer_task to reset the adapter.
 */
2390 static void alarm_intr_handler(struct s2io_nic *nic)
2392 struct net_device *dev = (struct net_device *) nic->dev;
2393 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2394 register u64 val64 = 0, err_reg = 0;
2396 /* Handling link status change error Intr */
2397 err_reg = readq(&bar0->mac_rmac_err_reg);
2398 writeq(err_reg, &bar0->mac_rmac_err_reg);
2399 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2400 schedule_work(&nic->set_link_task);
2403 /* Handling Ecc errors */
2404 val64 = readq(&bar0->mc_err_reg);
2405 writeq(val64, &bar0->mc_err_reg);
2406 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2407 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
/* Double-bit ECC is unrecoverable: count it, stop Tx, reset. */
2408 nic->mac_control.stats_info->sw_stat.
2410 DBG_PRINT(ERR_DBG, "%s: Device indicates ",
2412 DBG_PRINT(ERR_DBG, "double ECC error!!\n");
2413 netif_stop_queue(dev);
2414 schedule_work(&nic->rst_timer_task);
/* Single-bit ECC: only the soft-stat counter is bumped. */
2416 nic->mac_control.stats_info->sw_stat.
2421 /* In case of a serious error, the device will be Reset. */
2422 val64 = readq(&bar0->serr_source);
2423 if (val64 & SERR_SOURCE_ANY) {
2424 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2425 DBG_PRINT(ERR_DBG, "serious error!!\n");
2426 netif_stop_queue(dev);
2427 schedule_work(&nic->rst_timer_task);
2431 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2432 * Error occurs, the adapter will be recycled by disabling the
2433 * adapter enable bit and enabling it again after the device
2434 * becomes Quiescent.
2436 val64 = readq(&bar0->pcc_err_reg);
2437 writeq(val64, &bar0->pcc_err_reg);
2438 if (val64 & PCC_FB_ECC_DB_ERR) {
2439 u64 ac = readq(&bar0->adapter_control);
2440 ac &= ~(ADAPTER_CNTL_EN);
2441 writeq(ac, &bar0->adapter_control);
/* Read back to flush the write; re-enable happens in set_link_task. */
2442 ac = readq(&bar0->adapter_control);
2443 schedule_work(&nic->set_link_task);
2446 /* Other type of interrupts are not being handled now, TODO */
2450 * wait_for_cmd_complete - waits for a command to complete.
2451 * @sp : private member of the device structure, which is a pointer to the
2452 * s2io_nic structure.
2453 * Description: Function that waits for a command to Write into RMAC
2454 * ADDR DATA registers to be completed and returns either success or
2455 * error depending on whether the command was complete or not.
2457 * SUCCESS on success and FAILURE on failure.
/*
 * wait_for_cmd_complete - poll until an RMAC ADDR command finishes.
 * Spins on rmac_addr_cmd_mem until the STROBE_CMD_EXECUTING bit
 * clears, returning SUCCESS or FAILURE (retry/delay logic is on
 * lines not visible in this excerpt).
 */
2460 int wait_for_cmd_complete(nic_t * sp)
2462 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2463 int ret = FAILURE, cnt = 0;
2467 val64 = readq(&bar0->rmac_addr_cmd_mem);
2468 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2481 * s2io_reset - Resets the card.
2482 * @sp : private member of the device structure.
2483 * Description: Function to Reset the card. This function then also
2484 * restores the previously saved PCI configuration space registers as
2485 * the card reset also resets the configuration space.
/*
 * s2io_reset - software-reset the adapter and restore PCI state.
 * Writes SW_RESET_ALL, waits for the reset to take effect, restores
 * the saved PCI config space, re-programs the swapper, clears error
 * status bits and reapplies the SXE-002 LED workaround.
 */
2490 void s2io_reset(nic_t * sp)
2492 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2496 val64 = SW_RESET_ALL;
2497 writeq(val64, &bar0->sw_reset);
2500 * At this stage, if the PCI write is indeed completed, the
2501 * card is reset and so is the PCI Config space of the device.
2502 * So a read cannot be issued at this stage on any of the
2503 * registers to ensure the write into "sw_reset" register
2505 * Question: Is there any system call that will explicitly force
2506 * all the write commands still pending on the bus to be pushed
2508 * As of now I am just giving a 250ms delay and hoping that the
2509 * PCI write to sw_reset register is done by this time.
2513 /* Restore the PCI state saved during initialization. */
2514 pci_restore_state(sp->pdev);
2520 /* Set swapper to enable I/O register access */
2521 s2io_set_swapper(sp);
2523 /* Clear certain PCI/PCI-X fields after reset */
2524 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
2525 pci_cmd &= 0x7FFF; /* Clear parity err detect bit */
2526 pci_write_config_word(sp->pdev, PCI_COMMAND, pci_cmd);
2528 val64 = readq(&bar0->txpic_int_reg);
2529 val64 &= ~BIT(62); /* Clearing PCI_STATUS error reflected here */
2530 writeq(val64, &bar0->txpic_int_reg);
2532 /* Clearing PCIX Ecc status register */
2533 pci_write_config_dword(sp->pdev, 0x68, 0);
2535 /* Reset device statistics maintained by OS */
2536 memset(&sp->stats, 0, sizeof (struct net_device_stats));
2538 /* SXE-002: Configure link and activity LED to turn it off */
2539 subid = sp->pdev->subsystem_device;
2540 if ((subid & 0xFF) >= 0x07) {
2541 val64 = readq(&bar0->gpio_control);
2542 val64 |= 0x0000800000000000ULL;
2543 writeq(val64, &bar0->gpio_control);
2544 val64 = 0x0411040400000000ULL;
/* Raw offset 0x2700 is an undocumented register used by the LED
 * workaround -- value taken verbatim from the errata fix. */
2545 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
2548 sp->device_enabled_once = FALSE;
2552 * s2io_set_swapper - to set the swapper control on the card
2553 * @sp : private member of the device structure,
2554 * pointer to the s2io_nic structure.
2555 * Description: Function to set the swapper control on the card
2556 * correctly depending on the 'endianness' of the system.
2558 * SUCCESS on success and FAILURE on failure.
/*
 * s2io_set_swapper - program byte-swapper control for host endianness.
 * Tries candidate swapper values until the PIF feed-back register
 * reads the magic 0x0123456789ABCDEF, verifies writes via the
 * xmsi_address scratch register, then enables the final set of
 * FE/SE swap bits.  Returns SUCCESS/FAILURE (return statements are
 * on lines not visible in this excerpt).
 */
2561 int s2io_set_swapper(nic_t * sp)
2563 struct net_device *dev = sp->dev;
2564 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2565 u64 val64, valt, valr;
2568 * Set proper endian settings and verify the same by reading
2569 * the PIF Feed-back register.
2572 val64 = readq(&bar0->pif_rd_swapper_fb);
2573 if (val64 != 0x0123456789ABCDEFULL) {
/* Candidate read-path swapper settings, tried in order. */
2575 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2576 0x8100008181000081ULL, /* FE=1, SE=0 */
2577 0x4200004242000042ULL, /* FE=0, SE=1 */
2578 0}; /* FE=0, SE=0 */
2581 writeq(value[i], &bar0->swapper_ctrl);
2582 val64 = readq(&bar0->pif_rd_swapper_fb);
2583 if (val64 == 0x0123456789ABCDEFULL)
2588 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2590 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2591 (unsigned long long) val64);
/* Now validate the write path using xmsi_address as scratch. */
2596 valr = readq(&bar0->swapper_ctrl);
2599 valt = 0x0123456789ABCDEFULL;
2600 writeq(valt, &bar0->xmsi_address);
2601 val64 = readq(&bar0->xmsi_address);
2605 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2606 0x0081810000818100ULL, /* FE=1, SE=0 */
2607 0x0042420000424200ULL, /* FE=0, SE=1 */
2608 0}; /* FE=0, SE=0 */
2611 writeq((value[i] | valr), &bar0->swapper_ctrl);
2612 writeq(valt, &bar0->xmsi_address);
2613 val64 = readq(&bar0->xmsi_address);
2619 unsigned long long x = val64;
2620 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2621 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
/* Keep only the verified upper control bits before OR-ing in the
 * per-block swap enables below. */
2625 val64 = readq(&bar0->swapper_ctrl);
2626 val64 &= 0xFFFF000000000000ULL;
2630 * The device by default set to a big endian format, so a
2631 * big endian driver need not set anything.
2633 val64 |= (SWAPPER_CTRL_TXP_FE |
2634 SWAPPER_CTRL_TXP_SE |
2635 SWAPPER_CTRL_TXD_R_FE |
2636 SWAPPER_CTRL_TXD_W_FE |
2637 SWAPPER_CTRL_TXF_R_FE |
2638 SWAPPER_CTRL_RXD_R_FE |
2639 SWAPPER_CTRL_RXD_W_FE |
2640 SWAPPER_CTRL_RXF_W_FE |
2641 SWAPPER_CTRL_XMSI_FE |
2642 SWAPPER_CTRL_XMSI_SE |
2643 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2644 writeq(val64, &bar0->swapper_ctrl);
2647 * Initially we enable all bits to make it accessible by the
2648 * driver, then we selectively enable only those bits that
/* Little-endian host branch: SE (swap-enable) bits are also set. */
2651 val64 |= (SWAPPER_CTRL_TXP_FE |
2652 SWAPPER_CTRL_TXP_SE |
2653 SWAPPER_CTRL_TXD_R_FE |
2654 SWAPPER_CTRL_TXD_R_SE |
2655 SWAPPER_CTRL_TXD_W_FE |
2656 SWAPPER_CTRL_TXD_W_SE |
2657 SWAPPER_CTRL_TXF_R_FE |
2658 SWAPPER_CTRL_RXD_R_FE |
2659 SWAPPER_CTRL_RXD_R_SE |
2660 SWAPPER_CTRL_RXD_W_FE |
2661 SWAPPER_CTRL_RXD_W_SE |
2662 SWAPPER_CTRL_RXF_W_FE |
2663 SWAPPER_CTRL_XMSI_FE |
2664 SWAPPER_CTRL_XMSI_SE |
2665 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2666 writeq(val64, &bar0->swapper_ctrl);
2668 val64 = readq(&bar0->swapper_ctrl);
2671 * Verifying if endian settings are accurate by reading a
2672 * feedback register.
2674 val64 = readq(&bar0->pif_rd_swapper_fb);
2675 if (val64 != 0x0123456789ABCDEFULL) {
2676 /* Endian settings are incorrect, calls for another dekko. */
2677 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2679 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2680 (unsigned long long) val64);
2687 /* ********************************************************* *
2688 * Functions defined below concern the OS part of the driver *
2689 * ********************************************************* */
2692 * s2io_open - open entry point of the driver
2693 * @dev : pointer to the device structure.
2695 * This function is the open entry point of the driver. It mainly calls a
2696 * function to allocate Rx buffers and inserts them into the buffer
2697 * descriptors and then enables the Rx part of the NIC.
2699 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * s2io_open - net_device open entry point.
 * Brings the hardware up (s2io_card_up), registers the shared ISR,
 * programs the unicast MAC filter and starts the Tx queue.  Error
 * paths unwind in reverse order via the goto labels below.
 */
2703 int s2io_open(struct net_device *dev)
2705 nic_t *sp = dev->priv;
2709 * Make sure you have link off by default every time
2710 * Nic is initialized
2712 netif_carrier_off(dev);
2713 sp->last_link_state = 0; /* Unknown link state */
2715 /* Initialize H/W and enable interrupts */
2716 if (s2io_card_up(sp)) {
2717 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2720 goto hw_init_failed;
2723 /* After proper initialization of H/W, register ISR */
2724 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
2727 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2729 goto isr_registration_failed;
2732 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2733 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2735 goto setting_mac_address_failed;
2738 netif_start_queue(dev);
/* Unwind: drop the IRQ if the MAC address could not be set. */
2741 setting_mac_address_failed:
2742 free_irq(sp->pdev->irq, dev);
2743 isr_registration_failed:
2750 * s2io_close -close entry point of the driver
2751 * @dev : device pointer.
2753 * This is the stop entry point of the driver. It needs to undo exactly
2754 * whatever was done by the open entry point, thus it's usually referred to
2755 * as the close function. Among other things this function mainly stops the
2756 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2758 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * s2io_close - net_device stop entry point.
 * Flushes deferred work, stops the Tx queue, tears the card down
 * (reset/free of Tx & Rx resources happens on lines not visible in
 * this excerpt), releases the IRQ and marks the device closed.
 */
2762 int s2io_close(struct net_device *dev)
2764 nic_t *sp = dev->priv;
/* Ensure set_link_task / rst_timer_task are not still pending. */
2765 flush_scheduled_work();
2766 netif_stop_queue(dev);
2767 /* Reset card, kill tasklet and free Tx and Rx buffers. */
2770 free_irq(sp->pdev->irq, dev);
2771 sp->device_close_flag = TRUE; /* Device is shut down. */
2776 * s2io_xmit - Tx entry point of the driver
2777 * @skb : the socket buffer containing the Tx data.
2778 * @dev : device pointer.
2780 * This function is the Tx entry point of the driver. S2IO NIC supports
2781 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
2782 * NOTE: when device can't queue the pkt, just the trans_start variable will
2785 * 0 on success & 1 on failure.
/*
 * s2io_xmit - hard_start_xmit entry point.
 * Builds a TxD chain (linear part + page fragments) for the skb under
 * tx_lock, fills in LSO/checksum offload bits, hands the list to the
 * selected FIFO and advances the put pointer.  Stops the queue when
 * no free descriptors remain.
 */
2788 int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2790 nic_t *sp = dev->priv;
2791 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
2794 TxFIFO_element_t __iomem *tx_fifo;
2795 unsigned long flags;
2799 mac_info_t *mac_control;
2800 struct config_param *config;
2802 mac_control = &sp->mac_control;
2803 config = &sp->config;
2805 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
2806 spin_lock_irqsave(&sp->tx_lock, flags);
/* Refuse work while the card is being reset. */
2807 if (atomic_read(&sp->card_state) == CARD_DOWN) {
2808 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
2810 spin_unlock_irqrestore(&sp->tx_lock, flags);
2817 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
2818 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
2819 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
2822 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2823 /* Avoid "put" pointer going beyond "get" pointer */
2824 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
2825 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
2826 netif_stop_queue(dev);
2828 spin_unlock_irqrestore(&sp->tx_lock, flags);
/* LSO: program the MSS the hardware should segment with. */
2832 mss = skb_shinfo(skb)->tso_size;
2834 txdp->Control_1 |= TXD_TCP_LSO_EN;
2835 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
2839 frg_cnt = skb_shinfo(skb)->nr_frags;
2840 frg_len = skb->len - skb->data_len;
2842 txdp->Buffer_Pointer = pci_map_single
2843 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
/* Stash the skb so tx_intr_handler can free it on completion. */
2844 txdp->Host_Control = (unsigned long) skb;
2845 if (skb->ip_summed == CHECKSUM_HW) {
2847 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
2851 txdp->Control_2 |= config->tx_intr_type;
2853 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
2854 TXD_GATHER_CODE_FIRST);
2855 txdp->Control_1 |= TXD_LIST_OWN_XENA;
2857 /* For fragmented SKB. */
2858 for (i = 0; i < frg_cnt; i++) {
2859 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2861 txdp->Buffer_Pointer = (u64) pci_map_page
2862 (sp->pdev, frag->page, frag->page_offset,
2863 frag->size, PCI_DMA_TODEVICE);
2864 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
2866 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
/* Hand the completed descriptor list to the FIFO doorbell. */
2868 tx_fifo = mac_control->tx_FIFO_start[queue];
2869 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
2870 writeq(val64, &tx_fifo->TxDL_Pointer);
2874 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
2879 val64 |= TX_FIFO_SPECIAL_FUNC;
2881 writeq(val64, &tx_fifo->List_Control);
2884 put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2885 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
2887 /* Avoid "put" pointer going beyond "get" pointer */
2888 if (((put_off + 1) % queue_len) == get_off) {
2890 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
2892 netif_stop_queue(dev);
2895 dev->trans_start = jiffies;
2896 spin_unlock_irqrestore(&sp->tx_lock, flags);
2902 * s2io_isr - ISR handler of the device .
2903 * @irq: the irq of the device.
2904 * @dev_id: a void pointer to the dev structure of the NIC.
2905 * @pt_regs: pointer to the registers pushed on the stack.
2906 * Description: This function is the ISR handler of the device. It
2907 * identifies the reason for the interrupt and calls the relevant
2908 * service routines. As a contingency measure, this ISR allocates the
2909 * recv buffers, if their numbers are below the panic value which is
2910 * presently set to 25% of the original number of rcv buffers allocated.
2912 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
2913 * IRQ_NONE: will be returned if interrupt is not from our device
/*
 * s2io_isr - interrupt service routine.
 * Reads general_int_status to classify the interrupt (alarm, Rx
 * traffic, Tx traffic), dispatches the matching handler, and -- in
 * the non-NAPI build -- refills Rx buffers from the ISR when the
 * ring level has dropped to PANIC, or schedules a tasklet at LOW.
 * isr_cnt brackets the whole routine so teardown can wait for it.
 */
2915 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2917 struct net_device *dev = (struct net_device *) dev_id;
2918 nic_t *sp = dev->priv;
2919 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2921 u64 reason = 0, val64;
2922 mac_info_t *mac_control;
2923 struct config_param *config;
2925 atomic_inc(&sp->isr_cnt);
2926 mac_control = &sp->mac_control;
2927 config = &sp->config;
2930 * Identify the cause for interrupt and call the appropriate
2931 * interrupt handler. Causes for the interrupt could be;
2935 * 4. Error in any functional blocks of the NIC.
2937 reason = readq(&bar0->general_int_status);
2940 /* The interrupt was not raised by Xena. */
2941 atomic_dec(&sp->isr_cnt);
2945 if (reason & (GEN_ERROR_INTR))
2946 alarm_intr_handler(sp);
2948 #ifdef CONFIG_S2IO_NAPI
/* NAPI: mask Rx interrupts and let the poll routine do the work. */
2949 if (reason & GEN_INTR_RXTRAFFIC) {
2950 if (netif_rx_schedule_prep(dev)) {
2951 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
2953 __netif_rx_schedule(dev);
2957 /* If Intr is because of Rx Traffic */
2958 if (reason & GEN_INTR_RXTRAFFIC) {
2960 * rx_traffic_int reg is an R1 register, writing all 1's
2961 * will ensure that the actual interrupt causing bit gets
2962 * cleared and hence a read can be avoided.
2964 val64 = 0xFFFFFFFFFFFFFFFFULL;
2965 writeq(val64, &bar0->rx_traffic_int);
2966 for (i = 0; i < config->rx_ring_num; i++) {
2967 rx_intr_handler(&mac_control->rings[i]);
2972 /* If Intr is because of Tx Traffic */
2973 if (reason & GEN_INTR_TXTRAFFIC) {
2975 * tx_traffic_int reg is an R1 register, writing all 1's
2976 * will ensure that the actual interrupt causing bit gets
2977 * cleared and hence a read can be avoided.
2979 val64 = 0xFFFFFFFFFFFFFFFFULL;
2980 writeq(val64, &bar0->tx_traffic_int);
2982 for (i = 0; i < config->tx_fifo_num; i++)
2983 tx_intr_handler(&mac_control->fifos[i]);
2987 * If the Rx buffer count is below the panic threshold then
2988 * reallocate the buffers from the interrupt handler itself,
2989 * else schedule a tasklet to reallocate the buffers.
2991 #ifndef CONFIG_S2IO_NAPI
2992 for (i = 0; i < config->rx_ring_num; i++) {
2994 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
2995 int level = rx_buffer_level(sp, rxb_size, i);
2997 if ((level == PANIC) && (!TASKLET_IN_USE)) {
2998 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
2999 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3000 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3001 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3003 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3004 clear_bit(0, (&sp->tasklet_status));
3005 atomic_dec(&sp->isr_cnt);
3008 clear_bit(0, (&sp->tasklet_status));
3009 } else if (level == LOW) {
3010 tasklet_schedule(&sp->task);
3015 atomic_dec(&sp->isr_cnt);
/*
 * s2io_updt_stats - trigger a one-shot hardware statistics DMA.
 * Only acts when the card is up: writes a one-shot STAT_CFG request
 * and polls bit 0 until the update completes (loop/retry framing is
 * on lines not visible in this excerpt).
 */
3022 static void s2io_updt_stats(nic_t *sp)
3024 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3028 if (atomic_read(&sp->card_state) == CARD_UP) {
3029 /* Apprx 30us on a 133 MHz bus */
3030 val64 = SET_UPDT_CLICKS(10) |
3031 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3032 writeq(val64, &bar0->stat_cfg);
3035 val64 = readq(&bar0->stat_cfg);
3036 if (!(val64 & BIT(0)))
3040 break; /* Updt failed */
3046 * s2io_get_stats - Updates the device statistics structure.
3047 * @dev : pointer to the device structure.
3049 * This function updates the device statistics structure in the s2io_nic
3050 * structure and returns a pointer to the same.
3052 * pointer to the updated net_device_stats structure.
/*
 * s2io_get_stats - net_device get_stats entry point.
 * Forces an immediate hardware stats update, then copies the MAC
 * counters from the stats block into sp->stats and returns it.
 */
3055 struct net_device_stats *s2io_get_stats(struct net_device *dev)
3057 nic_t *sp = dev->priv;
3058 mac_info_t *mac_control;
3059 struct config_param *config;
3062 mac_control = &sp->mac_control;
3063 config = &sp->config;
3065 /* Configure Stats for immediate updt */
3066 s2io_updt_stats(sp);
/* Hardware stats block is little-endian; convert each counter. */
3068 sp->stats.tx_packets =
3069 le32_to_cpu(mac_control->stats_info->tmac_frms);
3070 sp->stats.tx_errors =
3071 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3072 sp->stats.rx_errors =
3073 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3074 sp->stats.multicast =
3075 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
3076 sp->stats.rx_length_errors =
3077 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
3079 return (&sp->stats);
3083 * s2io_set_multicast - entry point for multicast address enable/disable.
3084 * @dev : pointer to the device structure
3086 * This function is a driver entry point which gets called by the kernel
3087 * whenever multicast addresses must be enabled/disabled. This also gets
3088 * called to set/reset promiscuous mode. Depending on the device flag, we
3089 * determine, if multicast address must be enabled or if promiscuous mode
3090 * is to be disabled etc.
/*
 * s2io_set_multicast - net_device set_multicast_list entry point.
 * Programs the RMAC address-filter CAM according to dev->flags:
 * enables/disables the catch-all multicast entry (IFF_ALLMULTI),
 * toggles promiscuous mode (IFF_PROMISC), and rewrites the
 * individual multicast filter list from dev->mc_list.  Every CAM
 * write goes through rmac_addr_cmd_mem and is confirmed with
 * wait_for_cmd_complete().
 */
3095 static void s2io_set_multicast(struct net_device *dev)
3098 struct dev_mc_list *mclist;
3099 nic_t *sp = dev->priv;
3100 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3101 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3103 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3106 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3107 /* Enable all Multicast addresses */
3108 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3109 &bar0->rmac_addr_data0_mem);
3110 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3111 &bar0->rmac_addr_data1_mem);
3112 val64 = RMAC_ADDR_CMD_MEM_WE |
3113 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3114 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3115 writeq(val64, &bar0->rmac_addr_cmd_mem);
3116 /* Wait till command completes */
3117 wait_for_cmd_complete(sp);
3120 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
3121 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3122 /* Disable all Multicast addresses */
3123 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3124 &bar0->rmac_addr_data0_mem);
3125 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3126 &bar0->rmac_addr_data1_mem);
3127 val64 = RMAC_ADDR_CMD_MEM_WE |
3128 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3129 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3130 writeq(val64, &bar0->rmac_addr_cmd_mem);
3131 /* Wait till command completes */
3132 wait_for_cmd_complete(sp);
3135 sp->all_multi_pos = 0;
3138 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3139 /* Put the NIC into promiscuous mode */
3140 add = &bar0->mac_cfg;
3141 val64 = readq(&bar0->mac_cfg);
3142 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
/* mac_cfg is key-protected: write the cfg key before each 32-bit
 * half of the 64-bit value. */
3144 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3145 writel((u32) val64, add);
3146 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3147 writel((u32) (val64 >> 32), (add + 4));
3149 val64 = readq(&bar0->mac_cfg);
3150 sp->promisc_flg = 1;
3151 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
3153 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3154 /* Remove the NIC from promiscuous mode */
3155 add = &bar0->mac_cfg;
3156 val64 = readq(&bar0->mac_cfg);
3157 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3159 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3160 writel((u32) val64, add);
3161 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3162 writel((u32) (val64 >> 32), (add + 4));
3164 val64 = readq(&bar0->mac_cfg);
3165 sp->promisc_flg = 0;
3166 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
3170 /* Update individual M_CAST address list */
3171 if ((!sp->m_cast_flg) && dev->mc_count) {
3173 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3174 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3176 DBG_PRINT(ERR_DBG, "can be added, please enable ");
3177 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3181 prev_cnt = sp->mc_addr_count;
3182 sp->mc_addr_count = dev->mc_count;
3184 /* Clear out the previous list of Mc in the H/W. */
3185 for (i = 0; i < prev_cnt; i++) {
3186 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3187 &bar0->rmac_addr_data0_mem);
3188 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3189 &bar0->rmac_addr_data1_mem);
3190 val64 = RMAC_ADDR_CMD_MEM_WE |
3191 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3192 RMAC_ADDR_CMD_MEM_OFFSET
3193 (MAC_MC_ADDR_START_OFFSET + i);
3194 writeq(val64, &bar0->rmac_addr_cmd_mem);
3196 /* Wait for command completes */
3197 if (wait_for_cmd_complete(sp)) {
3198 DBG_PRINT(ERR_DBG, "%s: Adding ",
3200 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3205 /* Create the new Rx filter list and update the same in H/W. */
3206 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3207 i++, mclist = mclist->next) {
3208 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
/* Pack the 6-byte address into a u64 for the DATA0 register. */
3210 for (j = 0; j < ETH_ALEN; j++) {
3211 mac_addr |= mclist->dmi_addr[j];
3215 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3216 &bar0->rmac_addr_data0_mem);
3217 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3218 &bar0->rmac_addr_data1_mem);
3219 val64 = RMAC_ADDR_CMD_MEM_WE |
3220 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3221 RMAC_ADDR_CMD_MEM_OFFSET
3222 (i + MAC_MC_ADDR_START_OFFSET);
3223 writeq(val64, &bar0->rmac_addr_cmd_mem);
3225 /* Wait for command completes */
3226 if (wait_for_cmd_complete(sp)) {
3227 DBG_PRINT(ERR_DBG, "%s: Adding ",
3229 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3237 * s2io_set_mac_addr - Programs the Xframe mac address
3238 * @dev : pointer to the device structure.
3239 * @addr: a uchar pointer to the new mac address which is to be set.
3240 * Description : This procedure will program the Xframe to receive
3241 * frames with new Mac Address
3242 * Return value: SUCCESS on success and an appropriate (-)ve integer
3243 * as defined in errno.h file on failure.
/*
 * s2io_set_mac_addr - program a new unicast MAC filter (CAM entry 0).
 * Packs the 6-byte address into a u64, writes it to the RMAC address
 * CAM at offset 0 and waits for the command to complete.
 */
3246 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3248 nic_t *sp = dev->priv;
3249 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3250 register u64 val64, mac_addr = 0;
3254 * Set the new MAC address as the new unicast filter and reflect this
3255 * change on the device address registered with the OS. It will be
3258 for (i = 0; i < ETH_ALEN; i++) {
3260 mac_addr |= addr[i];
3263 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3264 &bar0->rmac_addr_data0_mem);
3267 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3268 RMAC_ADDR_CMD_MEM_OFFSET(0);
3269 writeq(val64, &bar0->rmac_addr_cmd_mem);
3270 /* Wait till command completes */
3271 if (wait_for_cmd_complete(sp)) {
3272 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3280 * s2io_ethtool_sset - Sets different link parameters.
3281 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
3282 * @info: pointer to the structure with parameters given by ethtool to set
3285 * The function sets different link parameters provided by the user onto
/*
 * s2io_ethtool_sset - ethtool set_settings handler.
 * The hardware only supports 10G full duplex with autoneg off, so
 * anything else is rejected; an accepted change restarts the
 * interface (close/open -- the reopen is on lines not visible here).
 */
3291 static int s2io_ethtool_sset(struct net_device *dev,
3292 struct ethtool_cmd *info)
3294 nic_t *sp = dev->priv;
3295 if ((info->autoneg == AUTONEG_ENABLE) ||
3296 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3299 s2io_close(sp->dev);
3307 * s2io_ethtool_gset - Return link specific information.
3308 * @sp : private member of the device structure, pointer to the
3309 * s2io_nic structure.
3310 * @info : pointer to the structure with parameters given by ethtool
3311 * to return link information.
3313 * Returns link specific information like speed, duplex etc.. to ethtool.
3315 * return 0 on success.
/*
 * s2io_ethtool_gset - ethtool get_settings handler.
 * Reports fixed 10GBase fibre capabilities; speed/duplex are filled
 * in only while the carrier is up, and autoneg is always disabled.
 */
3318 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3320 nic_t *sp = dev->priv;
3321 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3322 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3323 info->port = PORT_FIBRE;
3324 /* info->transceiver?? TODO */
3326 if (netif_carrier_ok(sp->dev)) {
3327 info->speed = 10000;
3328 info->duplex = DUPLEX_FULL;
3334 info->autoneg = AUTONEG_DISABLE;
3339 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3340 * @sp : private member of the device structure, which is a pointer to the
3341 * s2io_nic structure.
3342 * @info : pointer to the structure with parameters given by ethtool to
3343 * return driver information.
3345 * Returns driver specific information like name, version etc.. to ethtool.
/*
 * s2io_ethtool_gdrvinfo - ethtool get_drvinfo handler.
 * Fills in driver name/version, bus info and the sizes of the
 * register, EEPROM, self-test and statistics dumps.
 */
3350 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3351 struct ethtool_drvinfo *info)
3353 nic_t *sp = dev->priv;
3355 strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3356 strncpy(info->version, s2io_driver_version,
3357 sizeof(s2io_driver_version));
3358 strncpy(info->fw_version, "", 32);
3359 strncpy(info->bus_info, pci_name(sp->pdev), 32);
3360 info->regdump_len = XENA_REG_SPACE;
3361 info->eedump_len = XENA_EEPROM_SPACE;
3362 info->testinfo_len = S2IO_TEST_LEN;
3363 info->n_stats = S2IO_STAT_LEN;
3367 * s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
3368 * @sp: private member of the device structure, which is a pointer to the
3369 * s2io_nic structure.
3370 * @regs : pointer to the structure with parameters given by ethtool for
3371 * dumping the registers.
3372 * @reg_space: The input argument into which all the registers are dumped.
3374 * Dumps the entire register space of xFrame NIC into the user given
/*
 * s2io_ethtool_gregs - ethtool get_regs handler.
 * Copies the entire BAR0 register space, 8 bytes at a time, into the
 * caller-supplied buffer.
 * NOTE(review): the "®" on the memcpy line is extraction mojibake of
 * the original "&reg," -- restore before compiling.
 */
3380 static void s2io_ethtool_gregs(struct net_device *dev,
3381 struct ethtool_regs *regs, void *space)
3385 u8 *reg_space = (u8 *) space;
3386 nic_t *sp = dev->priv;
3388 regs->len = XENA_REG_SPACE;
3389 regs->version = sp->pdev->subsystem_device;
3391 for (i = 0; i < regs->len; i += 8) {
3392 reg = readq(sp->bar0 + i);
3393 memcpy((reg_space + i), ®, 8);
3398 * s2io_phy_id - timer function that alternates adapter LED.
3399 * @data : address of the private member of the device structure, which
3400 * is a pointer to the s2io_nic structure, provided as an u32.
3401 * Description: This is actually the timer function that alternates the
3402 * adapter LED bit of the adapter control bit to set/reset every time on
3403 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
3404 * once every second.
/*
 * s2io_phy_id - LED-blink timer callback for ethtool phys_id.
 * Toggles the identification LED (GPIO 0 on newer subsystem IDs,
 * the adapter-control LED bit otherwise) and re-arms itself every
 * half second.
 */
3406 static void s2io_phy_id(unsigned long data)
3408 nic_t *sp = (nic_t *) data;
3409 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3413 subid = sp->pdev->subsystem_device;
3414 if ((subid & 0xFF) >= 0x07) {
3415 val64 = readq(&bar0->gpio_control);
3416 val64 ^= GPIO_CTRL_GPIO_0;
3417 writeq(val64, &bar0->gpio_control);
3419 val64 = readq(&bar0->adapter_control);
3420 val64 ^= ADAPTER_LED_ON;
3421 writeq(val64, &bar0->adapter_control);
3424 mod_timer(&sp->id_timer, jiffies + HZ / 2);
3428 * s2io_ethtool_idnic - To physically identify the nic on the system.
3429 * @sp : private member of the device structure, which is a pointer to the
3430 * s2io_nic structure.
3431 * @id : pointer to the structure with identification parameters given by
3433 * Description: Used to physically identify the NIC on the system.
3434 * The Link LED will blink for a time specified by the user for
3436 * NOTE: The Link has to be Up to be able to blink the LED. Hence
3437 * identification is possible only if it's link is up.
3439 * int , returns 0 on success
/*
 * s2io_ethtool_idnic - ethtool phys_id handler.
 * Blinks the adapter LED for `data` seconds (or MAX_FLICKER_TIME if
 * data is 0) by arming the s2io_phy_id timer, then restores the saved
 * GPIO state for cards with faulty link indicators.
 */
3442 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3444 u64 val64 = 0, last_gpio_ctrl_val;
3445 nic_t *sp = dev->priv;
3446 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3449 subid = sp->pdev->subsystem_device;
3450 last_gpio_ctrl_val = readq(&bar0->gpio_control);
/* Older cards can only blink via adapter_control, which requires
 * the adapter (and hence the link) to be enabled. */
3451 if ((subid & 0xFF) < 0x07) {
3452 val64 = readq(&bar0->adapter_control);
3453 if (!(val64 & ADAPTER_CNTL_EN)) {
3455 "Adapter Link down, cannot blink LED\n");
3459 if (sp->id_timer.function == NULL) {
3460 init_timer(&sp->id_timer);
3461 sp->id_timer.function = s2io_phy_id;
3462 sp->id_timer.data = (unsigned long) sp;
3464 mod_timer(&sp->id_timer, jiffies);
3466 msleep_interruptible(data * HZ);
3468 msleep_interruptible(MAX_FLICKER_TIME);
3469 del_timer_sync(&sp->id_timer);
3471 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
3472 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3473 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3480 * s2io_ethtool_getpause_data - Pause frame generation and reception.
3481 * @sp : private member of the device structure, which is a pointer to the
3482 * s2io_nic structure.
3483 * @ep : pointer to the structure with pause parameters given by ethtool.
3485 * Returns the Pause frame generation and reception capability of the NIC.
/*
 * s2io_ethtool_getpause_data - ethtool get_pauseparam handler.
 * Reports Tx/Rx pause-frame capability straight from the RMAC pause
 * config register; pause autonegotiation is never supported.
 */
3489 static void s2io_ethtool_getpause_data(struct net_device *dev,
3490 struct ethtool_pauseparam *ep)
3493 nic_t *sp = dev->priv;
3494 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3496 val64 = readq(&bar0->rmac_pause_cfg);
3497 if (val64 & RMAC_PAUSE_GEN_ENABLE)
3498 ep->tx_pause = TRUE;
3499 if (val64 & RMAC_PAUSE_RX_ENABLE)
3500 ep->rx_pause = TRUE;
3501 ep->autoneg = FALSE;
3505 * s2io_ethtool_setpause_data - set/reset pause frame generation.
3506 * @sp : private member of the device structure, which is a pointer to the
3507 * s2io_nic structure.
3508 * @ep : pointer to the structure with pause parameters given by ethtool.
3510 * It can be used to set or reset Pause frame generation or reception
3511 * support of the NIC.
3513 * int, returns 0 on Success
/*
 * s2io_ethtool_setpause_data - ethtool set_pauseparam handler.
 * Sets or clears the Tx pause-generation and Rx pause-honor bits in
 * rmac_pause_cfg according to the requested parameters.
 */
3516 static int s2io_ethtool_setpause_data(struct net_device *dev,
3517 struct ethtool_pauseparam *ep)
3520 nic_t *sp = dev->priv;
3521 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3523 val64 = readq(&bar0->rmac_pause_cfg);
3525 val64 |= RMAC_PAUSE_GEN_ENABLE;
3527 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3529 val64 |= RMAC_PAUSE_RX_ENABLE;
3531 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3532 writeq(val64, &bar0->rmac_pause_cfg);
3537 * read_eeprom - reads 4 bytes of data from user given offset.
3538 * @sp : private member of the device structure, which is a pointer to the
3539 * s2io_nic structure.
3540 * @off : offset at which the data must be written
3541 * @data : Its an output parameter where the data read at the given
3544 * Will read 4 bytes of data from the user given offset and return the
3546 * NOTE: Will allow to read only part of the EEPROM visible through the
3549 * -1 on failure and 0 on success.
/* I2C device id of the on-board EEPROM. */
3552 #define S2IO_DEV_ID 5
/*
 * read_eeprom - read 4 bytes from the EEPROM via the I2C controller.
 * Issues a read command through i2c_control and polls (up to 5
 * attempts) for CNTL_END, extracting the data on completion.
 * Returns 0 on success, -1 on failure (return paths are on lines not
 * visible in this excerpt).
 */
3553 static int read_eeprom(nic_t * sp, int off, u32 * data)
3558 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3560 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3561 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3562 I2C_CONTROL_CNTL_START;
3563 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3565 while (exit_cnt < 5) {
3566 val64 = readq(&bar0->i2c_control);
3567 if (I2C_CONTROL_CNTL_END(val64)) {
3568 *data = I2C_CONTROL_GET_DATA(val64);
/*
 * write_eeprom - actually writes the relevant part of the data value.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @off : offset at which the data must be written
 * @data : The data that is to be written
 * @cnt : Number of bytes of the data that are actually to be written into
 * the Eeprom. (max of 3)
 * Description:
 * Writes the relevant part of the data value into the Eeprom through the
 * I2C bus, then polls for completion and checks for a NACK from the device.
 * Return value:
 * 0 on success, -1 on failure.
 */
3594 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3596 int exit_cnt = 0, ret = -1;
3598 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Program the write transaction (device id, offset, byte count, data) and start. */
3600 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3601 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3602 I2C_CONTROL_CNTL_START;
3603 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
/* Poll (bounded by exit_cnt) for completion; a NACK means the write failed. */
3605 while (exit_cnt < 5) {
3606 val64 = readq(&bar0->i2c_control);
3607 if (I2C_CONTROL_CNTL_END(val64)) {
3608 if (!(val64 & I2C_CONTROL_NACK))
/*
 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
 * @dev : device pointer (holds sp, the private s2io_nic structure).
 * @eeprom : pointer to the user level structure provided by ethtool,
 * containing all relevant information (offset, length, magic).
 * @data_buf : buffer into which the EEPROM contents are copied.
 * Description:
 * Reads the values stored in the Eeprom at the given offset for the
 * given length, 4 bytes at a time, and stores them in 'data_buf' to be
 * returned to the caller (ethtool).
 */
3632 static int s2io_ethtool_geeprom(struct net_device *dev,
3633 struct ethtool_eeprom *eeprom, u8 * data_buf)
3636 nic_t *sp = dev->priv;
/* Magic identifies this device: vendor id in low 16 bits, device id above. */
3638 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
/* Clamp the request so it never runs past the visible EEPROM space. */
3640 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3641 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
/* EEPROM is read in 4-byte units; each word is copied out via 'valid'. */
3643 for (i = 0; i < eeprom->len; i += 4) {
3644 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
3645 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
3649 memcpy((data_buf + i), &valid, 4);
/*
 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
 * @dev : device pointer (holds sp, the private s2io_nic structure).
 * @eeprom : pointer to the user level structure provided by ethtool,
 * containing all relevant information (offset, length, magic).
 * @data_buf : user defined values to be written into the Eeprom.
 * Description:
 * Validates the magic number, then writes the user provided bytes into
 * the Eeprom one byte at a time at the offset given by the user.
 * Return value:
 * 0 on success, -EFAULT on failure.
 */
3668 static int s2io_ethtool_seeprom(struct net_device *dev,
3669 struct ethtool_eeprom *eeprom,
3672 int len = eeprom->len, cnt = 0;
3673 u32 valid = 0, data;
3674 nic_t *sp = dev->priv;
/* Reject the write unless the caller echoed back this device's magic. */
3676 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
3678 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
3679 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
/* One byte per iteration: position it in the top byte for the I2C write. */
3685 data = (u32) data_buf[cnt] & 0x000000FF;
3687 valid = (u32) (data << 24);
3691 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
3693 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
3695 "write into the specified offset\n");
/*
 * s2io_register_test - reads and writes into all clock domains.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of the test conducted by
 * the driver.
 * Description:
 * Reads and writes registers in all clock domains.  The NIC has 3 clock
 * domains; known-constant registers from each region are read back and
 * compared against their expected reset values, then a scratch register
 * (xmsi_data) is written and read back with two complementary patterns.
 */
3718 static int s2io_register_test(nic_t * sp, uint64_t * data)
3720 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Read test 1: swapper feedback register has a fixed signature value. */
3724 val64 = readq(&bar0->pif_rd_swapper_fb);
3725 if (val64 != 0x123456789abcdefULL) {
3727 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
/* Read test 2: rmac_pause_cfg reset value. */
3730 val64 = readq(&bar0->rmac_pause_cfg);
3731 if (val64 != 0xc000ffff00000000ULL) {
3733 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
/* Read test 3: rx_queue_cfg reset value (8 queues, 8 blocks each). */
3736 val64 = readq(&bar0->rx_queue_cfg);
3737 if (val64 != 0x0808080808080808ULL) {
3739 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
/* Read test 4: xgxs_efifo_cfg reset value. */
3742 val64 = readq(&bar0->xgxs_efifo_cfg);
3743 if (val64 != 0x000000001923141EULL) {
3745 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
/* Write test 1: 0x5A pattern through the xmsi_data scratch register. */
3748 val64 = 0x5A5A5A5A5A5A5A5AULL;
3749 writeq(val64, &bar0->xmsi_data);
3750 val64 = readq(&bar0->xmsi_data);
3751 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
3753 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
/* Write test 2: complementary 0xA5 pattern. */
3756 val64 = 0xA5A5A5A5A5A5A5A5ULL;
3757 writeq(val64, &bar0->xmsi_data);
3758 val64 = readq(&bar0->xmsi_data);
3759 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
3761 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
/*
 * s2io_eeprom_test - verifies that the EEPROM in the xena can be programmed.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of the test conducted by
 * the driver.
 * Description:
 * Verifies that the EEPROM in the xena can be programmed using the
 * I2C_CONTROL register: writes to protected offsets must fail, writes to
 * writable offsets must succeed and read back the same value.
 */
3781 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
/* Offset 0 is write-protected: a successful write here is a failure. */
3786 /* Test Write Error at offset 0 */
3787 if (!write_eeprom(sp, 0, 0, 3))
/* Offset 0x4F0 is writable: write, read back and compare. */
3790 /* Test Write at offset 4f0 */
3791 if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
3793 if (read_eeprom(sp, 0x4F0, &ret_data))
3796 if (ret_data != 0x01234567)
3799 /* Restore the EEPROM data to FFFF */
3800 write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
3802 /* Test Write Request Error at offset 0x7c (protected) */
3803 if (!write_eeprom(sp, 0x07C, 0, 3))
3806 /* Test Write Request at offset 0x7fc (writable) */
3807 if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
3809 if (read_eeprom(sp, 0x7FC, &ret_data))
3812 if (ret_data != 0x01234567)
3815 /* Restore the EEPROM data to FFFF */
3816 write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
/* Remaining protected offsets: each write must be rejected. */
3818 /* Test Write Error at offset 0x80 */
3819 if (!write_eeprom(sp, 0x080, 0, 3))
3822 /* Test Write Error at offset 0xfc */
3823 if (!write_eeprom(sp, 0x0FC, 0, 3))
3826 /* Test Write Error at offset 0x100 */
3827 if (!write_eeprom(sp, 0x100, 0, 3))
3830 /* Test Write Error at offset 4ec */
3831 if (!write_eeprom(sp, 0x4EC, 0, 3))
/*
 * s2io_bist_test - invokes the MemBist test of the card.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of the test conducted by
 * the driver (the BIST completion code on success).
 * Description:
 * Invokes the MemBist self-test of the card via the PCI BIST register
 * and polls for completion (the hardware clears PCI_BIST_START when
 * done).  If the test does not complete within the polling window it is
 * considered failed.
 * Return value:
 * 0 on success and -1 on failure.
 */
3852 static int s2io_bist_test(nic_t * sp, uint64_t * data)
3855 int cnt = 0, ret = -1;
3857 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3858 bist |= PCI_BIST_START;
/* NOTE(review): PCI_BIST is a byte-wide config register and 'bist' is read
 * with pci_read_config_byte above, yet it is written back with a word-sized
 * access here, which also clobbers the adjacent register. This looks like it
 * should be pci_write_config_byte — confirm against the PCI spec. */
3859 pci_write_config_word(sp->pdev, PCI_BIST, bist);
/* Poll: hardware clears PCI_BIST_START on completion; low 4 bits hold
 * the completion code (0 == pass). */
3862 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3863 if (!(bist & PCI_BIST_START)) {
3864 *data = (bist & PCI_BIST_CODE_MASK);
/*
 * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data: variable that returns the result of the test conducted by
 * the driver.
 * Description:
 * Verifies the link state of the NIC by sampling the adapter status
 * register and updates the output argument 'data' accordingly.
 */
3888 static int s2io_link_test(nic_t * sp, uint64_t * data)
3890 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* A local fault on the RMAC means the link is down. */
3893 val64 = readq(&bar0->adapter_status);
3894 if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
/*
 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of the test conducted by
 * the driver.
 * Description:
 * Offline test of read and write access to the RldRam chip on the NIC.
 * ECC is disabled for the duration of the test; two iterations are run,
 * the second with the data patterns inverted, each writing three 64-bit
 * test words to the full address range and reading them back via the
 * memory controller's built-in test engine.
 */
3913 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
3915 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3917 int cnt, iteration = 0, test_pass = 0;
/* Disable ECC so the raw pattern comparison is not corrected in flight. */
3919 val64 = readq(&bar0->adapter_control);
3920 val64 &= ~ADAPTER_ECC_EN;
3921 writeq(val64, &bar0->adapter_control);
/* Put the memory controller into RLDRAM test mode. */
3923 val64 = readq(&bar0->mc_rldram_test_ctrl);
3924 val64 |= MC_RLDRAM_TEST_MODE;
3925 writeq(val64, &bar0->mc_rldram_test_ctrl);
/* Program the RLDRAM mode register (queue size, then MRS enable). */
3927 val64 = readq(&bar0->mc_rldram_mrs);
3928 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
3929 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3931 val64 |= MC_RLDRAM_MRS_ENABLE;
3932 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
/* Two passes: iteration 1 repeats with each pattern bit-inverted
 * (upper 48 bits) to exercise both cell polarities. */
3934 while (iteration < 2) {
3935 val64 = 0x55555555aaaa0000ULL;
3936 if (iteration == 1) {
3937 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3939 writeq(val64, &bar0->mc_rldram_test_d0);
3941 val64 = 0xaaaa5a5555550000ULL;
3942 if (iteration == 1) {
3943 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3945 writeq(val64, &bar0->mc_rldram_test_d1);
3947 val64 = 0x55aaaaaaaa5a0000ULL;
3948 if (iteration == 1) {
3949 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3951 writeq(val64, &bar0->mc_rldram_test_d2);
/* Test the full address range. */
3953 val64 = (u64) (0x0000003fffff0000ULL);
3954 writeq(val64, &bar0->mc_rldram_test_add);
/* Kick off the write phase and poll (bounded) for DONE. */
3957 val64 = MC_RLDRAM_TEST_MODE;
3958 writeq(val64, &bar0->mc_rldram_test_ctrl);
3961 MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
3963 writeq(val64, &bar0->mc_rldram_test_ctrl);
3965 for (cnt = 0; cnt < 5; cnt++) {
3966 val64 = readq(&bar0->mc_rldram_test_ctrl);
3967 if (val64 & MC_RLDRAM_TEST_DONE)
/* Kick off the read/compare phase and poll (bounded) for DONE. */
3975 val64 = MC_RLDRAM_TEST_MODE;
3976 writeq(val64, &bar0->mc_rldram_test_ctrl);
3978 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
3979 writeq(val64, &bar0->mc_rldram_test_ctrl);
3981 for (cnt = 0; cnt < 5; cnt++) {
3982 val64 = readq(&bar0->mc_rldram_test_ctrl);
3983 if (val64 & MC_RLDRAM_TEST_DONE)
/* PASS bit reflects the hardware comparison of the read-back data. */
3991 val64 = readq(&bar0->mc_rldram_test_ctrl);
3992 if (val64 & MC_RLDRAM_TEST_PASS)
/*
 * s2io_ethtool_test - conducts 6 tests to determine the health of the card.
 * @dev : device pointer (holds sp, the private s2io_nic structure).
 * @ethtest : pointer to an ethtool command specific structure that will be
 * returned to the user.
 * @data : variable that returns the result of each of the tests
 * conducted by the driver.
 * Description:
 * Conducts 6 tests (4 offline and 2 online) to determine the health of
 * the card.  The offline tests require the interface to be brought down
 * first; it is restored afterwards if it was originally up.
 */
4021 static void s2io_ethtool_test(struct net_device *dev,
4022 struct ethtool_test *ethtest,
4025 nic_t *sp = dev->priv;
/* Remember whether the interface was up so it can be restored later. */
4026 int orig_state = netif_running(sp->dev);
4028 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
4029 /* Offline tests: the device must be closed while they run. */
4031 s2io_close(sp->dev);
/* data[0] = register test, data[3] = RLDRAM, data[1] = EEPROM,
 * data[4] = BIST; any failure marks the whole run failed. */
4033 if (s2io_register_test(sp, &data[0]))
4034 ethtest->flags |= ETH_TEST_FL_FAILED;
4038 if (s2io_rldram_test(sp, &data[3]))
4039 ethtest->flags |= ETH_TEST_FL_FAILED;
4043 if (s2io_eeprom_test(sp, &data[1]))
4044 ethtest->flags |= ETH_TEST_FL_FAILED;
4046 if (s2io_bist_test(sp, &data[4]))
4047 ethtest->flags |= ETH_TEST_FL_FAILED;
/* Online tests require the interface to be up. */
4057 "%s: is not up, cannot run test\n",
/* data[2] = link test (online). */
4066 if (s2io_link_test(sp, &data[2]))
4067 ethtest->flags |= ETH_TEST_FL_FAILED;
/*
 * s2io_get_ethtool_stats - exports hardware MAC statistics to ethtool.
 * @dev : device pointer (holds sp, the private s2io_nic structure).
 * @estats : ethtool stats request descriptor (unused beyond the ABI).
 * @tmp_stats : output array filled with one u64 per statistic, in the
 * same order as ethtool_stats_keys.
 * Description:
 * Refreshes the DMA'd hardware statistics block and copies each counter
 * (byte-swapped from the NIC's little-endian layout) followed by the
 * driver's software counters into tmp_stats.
 */
4076 static void s2io_get_ethtool_stats(struct net_device *dev,
4077 struct ethtool_stats *estats,
4081 nic_t *sp = dev->priv;
4082 StatInfo_t *stat_info = sp->mac_control.stats_info;
/* Ask the hardware to refresh the statistics block before copying. */
4084 s2io_updt_stats(sp);
/* Tx MAC counters. */
4085 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms);
4086 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets);
4087 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
4088 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms);
4089 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms);
4090 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
4091 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms);
4092 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
4093 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip);
4094 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip);
4095 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp);
4096 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp);
4097 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
4098 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp);
/* Rx MAC counters. */
4099 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms);
4100 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets);
4101 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
4102 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
4103 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4104 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms);
4105 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
4106 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
4107 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
4108 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms);
4109 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms);
4110 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms);
4111 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms);
4112 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms);
4113 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip);
4114 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
4115 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
4116 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip);
4117 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp);
4118 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
4119 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp);
4120 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp);
4121 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt);
4122 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip);
4123 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
/* Driver-maintained software counters (host byte order, no swap). */
4125 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
4126 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
4129 int s2io_ethtool_get_regs_len(struct net_device *dev)
4131 return (XENA_REG_SPACE);
4135 u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
4137 nic_t *sp = dev->priv;
4139 return (sp->rx_csum);
/*
 * s2io_ethtool_set_rx_csum - enable/disable Rx checksum offload.
 * @dev : device pointer (holds sp, the private s2io_nic structure).
 * @data : non-zero to enable Rx checksum offload, zero to disable.
 * NOTE(review): the remainder of the body is not visible in this chunk;
 * presumably it stores the requested state into sp->rx_csum — confirm.
 */
4141 int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
4143 nic_t *sp = dev->priv;
4152 int s2io_get_eeprom_len(struct net_device *dev)
4154 return (XENA_EEPROM_SPACE);
4157 int s2io_ethtool_self_test_count(struct net_device *dev)
4159 return (S2IO_TEST_LEN);
4161 void s2io_ethtool_get_strings(struct net_device *dev,
4162 u32 stringset, u8 * data)
4164 switch (stringset) {
4166 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
4169 memcpy(data, ðtool_stats_keys,
4170 sizeof(ethtool_stats_keys));
4173 static int s2io_ethtool_get_stats_count(struct net_device *dev)
4175 return (S2IO_STAT_LEN);
4178 int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4181 dev->features |= NETIF_F_IP_CSUM;
4183 dev->features &= ~NETIF_F_IP_CSUM;
/* ethtool operations table: wires the generic ethtool ioctl entry points
 * to the driver-specific handlers above (plus a few kernel-provided
 * ethtool_op_* defaults for link/sg/tso/tx-csum queries). */
4189 static struct ethtool_ops netdev_ethtool_ops = {
4190 .get_settings = s2io_ethtool_gset,
4191 .set_settings = s2io_ethtool_sset,
4192 .get_drvinfo = s2io_ethtool_gdrvinfo,
4193 .get_regs_len = s2io_ethtool_get_regs_len,
4194 .get_regs = s2io_ethtool_gregs,
4195 .get_link = ethtool_op_get_link,
4196 .get_eeprom_len = s2io_get_eeprom_len,
4197 .get_eeprom = s2io_ethtool_geeprom,
4198 .set_eeprom = s2io_ethtool_seeprom,
4199 .get_pauseparam = s2io_ethtool_getpause_data,
4200 .set_pauseparam = s2io_ethtool_setpause_data,
4201 .get_rx_csum = s2io_ethtool_get_rx_csum,
4202 .set_rx_csum = s2io_ethtool_set_rx_csum,
4203 .get_tx_csum = ethtool_op_get_tx_csum,
4204 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4205 .get_sg = ethtool_op_get_sg,
4206 .set_sg = ethtool_op_set_sg,
4208 .get_tso = ethtool_op_get_tso,
4209 .set_tso = ethtool_op_set_tso,
4211 .self_test_count = s2io_ethtool_self_test_count,
4212 .self_test = s2io_ethtool_test,
4213 .get_strings = s2io_ethtool_get_strings,
4214 .phys_id = s2io_ethtool_idnic,
4215 .get_stats_count = s2io_ethtool_get_stats_count,
4216 .get_ethtool_stats = s2io_get_ethtool_stats
4220 * s2io_ioctl - Entry point for the Ioctl
4221 * @dev : Device pointer.
4222 * @ifr : An IOCTL specefic structure, that can contain a pointer to
4223 * a proprietary structure used to pass information to the driver.
4224 * @cmd : This is used to distinguish between the different commands that
4225 * can be passed to the IOCTL functions.
4227 * Currently there are no special functionality supported in IOCTL, hence
4228 * function always return EOPNOTSUPPORTED
4231 int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
/*
 * s2io_change_mtu - entry point to change MTU size for the device.
 * @dev : device pointer.
 * @new_mtu : the new MTU size for the device.
 * Description: A driver entry point to change MTU size for the device.
 * If the interface is running, the card is brought down and back up so
 * the new MTU takes effect; otherwise the MTU register is programmed
 * directly.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 */
4247 int s2io_change_mtu(struct net_device *dev, int new_mtu)
4249 nic_t *sp = dev->priv;
/* Reject MTUs outside the range the hardware supports. */
4251 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4252 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
/* Interface up: a full down/up cycle is needed for the new MTU. */
4258 if (netif_running(dev)) {
4260 netif_stop_queue(dev);
4261 if (s2io_card_up(sp)) {
4262 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4265 if (netif_queue_stopped(dev))
4266 netif_wake_queue(dev);
4267 } else { /* Device is down */
/* Interface down: just program the new max payload length register. */
4268 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4269 u64 val64 = new_mtu;
4271 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
/*
 * s2io_tasklet - Bottom half of the ISR.
 * @dev_addr : address of the net_device structure, cast to unsigned long.
 * Description:
 * This is the tasklet (bottom half) of the ISR, scheduled to run when
 * CPU load is low.  It is currently used only to replenish the Rx
 * buffers in the Rx buffer descriptors of every configured ring.
 */
4290 static void s2io_tasklet(unsigned long dev_addr)
4292 struct net_device *dev = (struct net_device *) dev_addr;
4293 nic_t *sp = dev->priv;
4295 mac_info_t *mac_control;
4296 struct config_param *config;
4298 mac_control = &sp->mac_control;
4299 config = &sp->config;
/* TASKLET_IN_USE guards against re-entry; refill every Rx ring. */
4301 if (!TASKLET_IN_USE) {
4302 for (i = 0; i < config->rx_ring_num; i++) {
4303 ret = fill_rx_buffers(sp, i);
4304 if (ret == -ENOMEM) {
4305 DBG_PRINT(ERR_DBG, "%s: Out of ",
4307 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
/* -EFILL just means the ring is already full — not an error. */
4309 } else if (ret == -EFILL) {
4311 "%s: Rx Ring %d is full\n",
/* Release the in-use flag taken by TASKLET_IN_USE. */
4316 clear_bit(0, (&sp->tasklet_status));
/*
 * s2io_set_link - Set the Link status
 * @data: long pointer to the device private structure (nic_t).
 * Description: Brings the link up or down on the adapter, enabling the
 * adapter and driving the link LED/GPIO as needed, then notifies the
 * network stack via s2io_link().
 */
4326 static void s2io_set_link(unsigned long data)
4328 nic_t *nic = (nic_t *) data;
4329 struct net_device *dev = nic->dev;
4330 XENA_dev_config_t __iomem *bar0 = nic->bar0;
/* link_state bit doubles as a "reset in progress" lock; bail if held. */
4334 if (test_and_set_bit(0, &(nic->link_state))) {
4335 /* The card is being reset, no point doing anything */
4339 subid = nic->pdev->subsystem_device;
4341 * Allow a small delay for the NICs self initiated
4342 * cleanup to complete.
/* Only act when the adapter is quiescent (safe to reconfigure). */
4346 val64 = readq(&bar0->adapter_status);
4347 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
4348 if (LINK_IS_UP(val64)) {
/* Link came up: enable the adapter ... */
4349 val64 = readq(&bar0->adapter_control);
4350 val64 |= ADAPTER_CNTL_EN;
4351 writeq(val64, &bar0->adapter_control);
/* ... and drive the link LED; some subsystems need GPIO 0 instead. */
4352 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4353 val64 = readq(&bar0->gpio_control);
4354 val64 |= GPIO_CTRL_GPIO_0;
4355 writeq(val64, &bar0->gpio_control);
4356 val64 = readq(&bar0->gpio_control);
4358 val64 |= ADAPTER_LED_ON;
4359 writeq(val64, &bar0->adapter_control);
/* Re-check: the link may have dropped again while enabling. */
4361 val64 = readq(&bar0->adapter_status);
4362 if (!LINK_IS_UP(val64)) {
4363 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4364 DBG_PRINT(ERR_DBG, " Link down");
4365 DBG_PRINT(ERR_DBG, "after ");
4366 DBG_PRINT(ERR_DBG, "enabling ");
4367 DBG_PRINT(ERR_DBG, "device \n");
4369 if (nic->device_enabled_once == FALSE) {
4370 nic->device_enabled_once = TRUE;
4372 s2io_link(nic, LINK_UP);
/* Link is down: turn the faulty-indicator GPIO off and report down. */
4374 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4375 val64 = readq(&bar0->gpio_control);
4376 val64 &= ~GPIO_CTRL_GPIO_0;
4377 writeq(val64, &bar0->gpio_control);
4378 val64 = readq(&bar0->gpio_control);
4380 s2io_link(nic, LINK_DOWN);
4382 } else { /* NIC is not Quiescent. */
4383 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4384 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4385 netif_stop_queue(dev);
/* Release the link_state lock taken at entry. */
4387 clear_bit(0, &(nic->link_state));
/*
 * s2io_card_down - brings the card down and frees all queued buffers.
 * @sp : pointer to the private s2io_nic structure.
 * Description:
 * Waits for any in-flight s2io_set_link work, marks the card DOWN,
 * stops traffic, kills the refill tasklet, resets the NIC once it is
 * quiescent, waits for ISR completion, and frees all Tx/Rx buffers
 * under their respective locks.
 */
4390 static void s2io_card_down(nic_t * sp)
4393 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4394 unsigned long flags;
4395 register u64 val64 = 0;
4397 /* If s2io_set_link task is executing, wait till it completes. */
4398 while (test_and_set_bit(0, &(sp->link_state))) {
4401 atomic_set(&sp->card_state, CARD_DOWN);
4403 /* disable Tx and Rx traffic on the NIC */
4407 tasklet_kill(&sp->task);
4409 /* Check if the device is Quiescent and then Reset the NIC */
4411 val64 = readq(&bar0->adapter_status);
4412 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
/* NOTE(review): "adaper" below is a typo in a runtime log string;
 * fixing it would change program output, so it is only flagged here. */
4420 "s2io_close:Device not Quiescent ");
4421 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
4422 (unsigned long long) val64);
4428 /* Waiting till all Interrupt handlers are complete */
4432 if (!atomic_read(&sp->isr_cnt))
/* Free Tx then Rx buffers, each under its own lock. */
4437 spin_lock_irqsave(&sp->tx_lock, flags);
4438 /* Free all Tx buffers */
4439 free_tx_buffers(sp);
4440 spin_unlock_irqrestore(&sp->tx_lock, flags);
4442 /* Free all Rx buffers */
4443 spin_lock_irqsave(&sp->rx_lock, flags);
4444 free_rx_buffers(sp);
4445 spin_unlock_irqrestore(&sp->rx_lock, flags);
/* Release the link_state lock taken at entry. */
4447 clear_bit(0, &(sp->link_state));
/*
 * s2io_card_up - initializes hardware and brings the card to a usable state.
 * @sp : pointer to the private s2io_nic structure.
 * Description:
 * Initializes the H/W registers, pre-fills every Rx ring with buffers,
 * programs the receive mode, arms the refill tasklet and finally starts
 * the NIC.  On any failure all partially acquired resources are released.
 * Return value:
 * 0 on success, negative on failure.
 */
4450 static int s2io_card_up(nic_t * sp)
4453 mac_info_t *mac_control;
4454 struct config_param *config;
4455 struct net_device *dev = (struct net_device *) sp->dev;
4457 /* Initialize the H/W I/O registers */
4458 if (init_nic(sp) != 0) {
4459 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4465 * Initializing the Rx buffers. For now we are considering only 1
4466 * Rx ring and initializing buffers into 30 Rx blocks
4468 mac_control = &sp->mac_control;
4469 config = &sp->config;
/* Pre-fill every configured Rx ring; unwind on allocation failure. */
4471 for (i = 0; i < config->rx_ring_num; i++) {
4472 if ((ret = fill_rx_buffers(sp, i))) {
4473 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
4476 free_rx_buffers(sp);
4479 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4480 atomic_read(&sp->rx_bufs_left[i]));
4483 /* Setting its receive mode */
4484 s2io_set_multicast(dev);
4486 /* Enable tasklet for the device */
4487 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4489 /* Enable Rx Traffic and interrupts on the NIC */
4490 if (start_nic(sp)) {
/* start_nic failed: tear down everything acquired so far. */
4491 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4492 tasklet_kill(&sp->task);
4494 free_irq(dev->irq, dev);
4495 free_rx_buffers(sp);
4499 atomic_set(&sp->card_state, CARD_UP);
/*
 * s2io_restart_nic - Resets the NIC.
 * @data : long pointer to the net_device structure.
 * Description:
 * Scheduled by s2io_tx_watchdog (via the rst_timer_task work item) to
 * reset the NIC outside the watchdog itself, keeping the watchdog's
 * lock-holding run time short.
 */
4513 static void s2io_restart_nic(unsigned long data)
4515 struct net_device *dev = (struct net_device *) data;
4516 nic_t *sp = dev->priv;
4519 if (s2io_card_up(sp)) {
4520 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
/* Card is back: let the stack transmit again. */
4523 netif_wake_queue(dev);
4524 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4530 * s2io_tx_watchdog - Watchdog for transmit side.
4531 * @dev : Pointer to net device structure
4533 * This function is triggered if the Tx Queue is stopped
4534 * for a pre-defined amount of time when the Interface is still up.
4535 * If the Interface is jammed in such a situation, the hardware is
4536 * reset (by s2io_close) and restarted again (by s2io_open) to
4537 * overcome any problem that might have been caused in the hardware.
4542 static void s2io_tx_watchdog(struct net_device *dev)
4544 nic_t *sp = dev->priv;
4546 if (netif_carrier_ok(dev)) {
4547 schedule_work(&sp->rst_timer_task);
/*
 * rx_osm_handler - To perform some OS related operations on SKB.
 * @ring_data : ring bookkeeping structure (holds sp and the ring number).
 * @rxdp : the Rx descriptor just completed by hardware.
 * Description:
 * Called by the Rx interrupt service routine to perform some OS related
 * operations on the SKB before passing it to the upper layers: checks
 * the T_CODE for hardware-reported errors, updates the statistics,
 * propagates the hardware checksum verdict, and hands the SKB up.
 * Return value:
 * SUCCESS on success and -1 on failure.
 */
4568 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
4570 nic_t *sp = ring_data->nic;
4571 struct net_device *dev = (struct net_device *) sp->dev;
/* The skb pointer was stashed in the descriptor's Host_Control field
 * when the buffer was posted. */
4572 struct sk_buff *skb = (struct sk_buff *)
4573 ((unsigned long) rxdp->Host_Control);
4574 int ring_no = ring_data->ring_no;
4575 u16 l3_csum, l4_csum;
4576 #ifdef CONFIG_2BUFF_MODE
/* Two-buffer mode: header (buf0) and payload (buf2) lengths come from
 * separate fields of Control_2. */
4577 int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4578 int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4579 int get_block = ring_data->rx_curr_get_info.block_index;
4580 int get_off = ring_data->rx_curr_get_info.offset;
4581 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
4582 unsigned char *buff;
/* NOTE(review): stray double semicolon at the end of the next line —
 * harmless (empty statement) but should be cleaned up. */
4584 u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);;
/* Non-zero T_CODE means the hardware flagged an Rx error: count it,
 * release the descriptor and bail. */
4587 if (rxdp->Control_1 & RXD_T_CODE) {
4588 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4589 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4592 sp->stats.rx_crc_errors++;
4593 atomic_dec(&sp->rx_bufs_left[ring_no]);
4594 rxdp->Host_Control = 0;
4598 /* Updating statistics */
4599 rxdp->Host_Control = 0;
4601 sp->stats.rx_packets++;
4602 #ifndef CONFIG_2BUFF_MODE
4603 sp->stats.rx_bytes += len;
4605 sp->stats.rx_bytes += buf0_len + buf2_len;
4608 #ifndef CONFIG_2BUFF_MODE
/* Two-buffer mode: prepend the separately-DMA'd header to the payload. */
4611 buff = skb_push(skb, buf0_len);
4612 memcpy(buff, ba->ba_0, buf0_len);
4613 skb_put(skb, buf2_len);
/* Propagate the hardware checksum verdict for TCP/UDP frames. */
4616 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
4618 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
4619 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
4620 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
4622 * NIC verifies if the Checksum of the received
4623 * frame is Ok or not and accordingly returns
4624 * a flag in the RxD.
4626 skb->ip_summed = CHECKSUM_UNNECESSARY;
4629 * Packet with erroneous checksum, let the
4630 * upper layers deal with it.
4632 skb->ip_summed = CHECKSUM_NONE;
4635 skb->ip_summed = CHECKSUM_NONE;
/* Hand the frame to the stack (NAPI path uses netif_receive_skb). */
4638 skb->protocol = eth_type_trans(skb, dev);
4639 #ifdef CONFIG_S2IO_NAPI
4640 netif_receive_skb(skb);
4644 dev->last_rx = jiffies;
4645 atomic_dec(&sp->rx_bufs_left[ring_no]);
4650 * s2io_link - stops/starts the Tx queue.
4651 * @sp : private member of the device structure, which is a pointer to the
4652 * s2io_nic structure.
4653 * @link : inidicates whether link is UP/DOWN.
4655 * This function stops/starts the Tx queue depending on whether the link
4656 * status of the NIC is is down or up. This is called by the Alarm
4657 * interrupt handler whenever a link change interrupt comes up.
4662 void s2io_link(nic_t * sp, int link)
4664 struct net_device *dev = (struct net_device *) sp->dev;
4666 if (link != sp->last_link_state) {
4667 if (link == LINK_DOWN) {
4668 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
4669 netif_carrier_off(dev);
4671 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
4672 netif_carrier_on(dev);
4675 sp->last_link_state = link;
/*
 * get_xena_rev_id - to identify revision ID of xena.
 * @pdev : PCI device structure of the adapter.
 * Description:
 * Reads the PCI revision ID from config space.
 * Return value:
 * the revision ID of the device.
 */
4687 int get_xena_rev_id(struct pci_dev *pdev)
/* NOTE(review): reading a byte into an int via a (u8 *) cast only fills
 * the low byte; the rest of 'id' depends on its (unseen) initialization. */
4691 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
/*
 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * Description:
 * Initializes a few of the PCI and PCI-X configuration registers with
 * recommended values: enables data parity error recovery, sets the PErr
 * response bit, and forcibly disables relaxed ordering.
 * Return value:
 * void.
 */
4706 static void s2io_init_pci(nic_t * sp)
4708 u16 pci_cmd = 0, pcix_cmd = 0;
4710 /* Enable Data Parity Error Recovery in PCI-X command register. */
4711 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4713 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
/* Read back to post/verify the write. */
4715 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4718 /* Set the PErr Response bit in PCI command register. */
4719 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd)
4720 pci_write_config_word(sp->pdev, PCI_COMMAND,
4721 (pci_cmd | PCI_COMMAND_PARITY));
4722 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4724 /* Forcibly disabling relaxed ordering capability of the card. */
4726 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4728 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
/* Module metadata and loadable parameters (see the file header for the
 * meaning of each parameter).  Third argument of module_param is the
 * sysfs permission mode; 0 means no sysfs entry. */
4732 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
4733 MODULE_LICENSE("GPL");
4734 module_param(tx_fifo_num, int, 0);
4735 module_param(rx_ring_num, int, 0);
4736 module_param_array(tx_fifo_len, uint, NULL, 0);
4737 module_param_array(rx_ring_sz, uint, NULL, 0);
4738 module_param_array(rts_frm_len, uint, NULL, 0);
/* NOTE(review): the perm argument '1' below is not a valid octal sysfs
 * mode (valid values are 0 or modes like 0444/0644) — likely meant 0. */
4739 module_param(use_continuous_tx_intrs, int, 1);
4740 module_param(rmac_pause_time, int, 0);
4741 module_param(mc_pause_threshold_q0q3, int, 0);
4742 module_param(mc_pause_threshold_q4q7, int, 0);
4743 module_param(shared_splits, int, 0);
4744 module_param(tmac_util_period, int, 0);
4745 module_param(rmac_util_period, int, 0);
/* Max packets per poll only applies in the non-NAPI build. */
4746 #ifndef CONFIG_S2IO_NAPI
4747 module_param(indicate_max_pkts, int, 0);
4751 * s2io_init_nic - Initialization of the adapter .
4752 * @pdev : structure containing the PCI related information of the device.
4753 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
4755 * The function initializes an adapter identified by the pci_dec structure.
4756 * All OS related initialization including memory and device structure and
4757 * initlaization of the device private variable is done. Also the swapper
4758 * control register is initialized to enable read and write into the I/O
4759 * registers of the device.
4761 * returns 0 on success and negative on failure.
4764 static int __devinit
4765 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4768 struct net_device *dev;
4770 int dma_flag = FALSE;
4771 u32 mac_up, mac_down;
4772 u64 val64 = 0, tmp64 = 0;
4773 XENA_dev_config_t __iomem *bar0 = NULL;
4775 mac_info_t *mac_control;
4776 struct config_param *config;
/* NAPI builds announce polled-Rx support at probe time. */
4778 #ifdef CONFIG_S2IO_NAPI
4779 DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
/* Wake the device; nothing may touch BARs before this succeeds. */
4782 if ((ret = pci_enable_device(pdev))) {
4784 "s2io_init_nic: pci_enable_device failed\n");
/* Prefer a 64-bit DMA mask (PCI-X DAC); fall back to 32-bit, else bail out. */
4788 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
4789 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
/* Streaming mask succeeded but the coherent mask must match, or we abort. */
4791 if (pci_set_consistent_dma_mask
4792 (pdev, DMA_64BIT_MASK)) {
4794 "Unable to obtain 64bit DMA for \
4795 consistent allocations\n");
4796 pci_disable_device(pdev);
4799 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
4800 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
4802 pci_disable_device(pdev);
4806 if (pci_request_regions(pdev, s2io_driver_name)) {
/* NOTE(review): trailing ',' makes this one comma-operator statement with the
 * next line — behavior is the same here, but it was almost certainly meant
 * to be ';'. */
4807 DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
4808 pci_disable_device(pdev);
/* Allocate the net_device with a nic_t private area appended. */
4812 dev = alloc_etherdev(sizeof(nic_t));
4814 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
4815 pci_disable_device(pdev);
4816 pci_release_regions(pdev);
4820 pci_set_master(pdev);
4821 pci_set_drvdata(pdev, dev);
4822 SET_MODULE_OWNER(dev);
4823 SET_NETDEV_DEV(dev, &pdev->dev);
4825 /* Private member variable initialized to s2io NIC structure */
/* NOTE(review): 'sp' assignment elided in this listing — presumably
 * sp = dev->priv; confirm against the full source. */
4827 memset(sp, 0, sizeof(nic_t));
4830 sp->high_dma_flag = dma_flag;
4831 sp->device_enabled_once = FALSE;
4833 /* Initialize some PCI/PCI-X fields of the NIC. */
4837 * Setting the device configuration parameters.
4838 * Most of these parameters can be specified by the user during
4839 * module insertion as they are module loadable parameters. If
4840 * these parameters are not specified during load time, they
4841 * are initialized with default values.
4843 mac_control = &sp->mac_control;
4844 config = &sp->config;
4846 /* Tx side parameters. */
4847 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
4848 config->tx_fifo_num = tx_fifo_num;
4849 for (i = 0; i < MAX_TX_FIFOS; i++) {
4850 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
4851 config->tx_cfg[i].fifo_priority = i;
4854 /* mapping the QoS priority to the configured fifos */
4855 for (i = 0; i < MAX_TX_FIFOS; i++)
4856 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
4858 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
4859 for (i = 0; i < config->tx_fifo_num; i++) {
4860 config->tx_cfg[i].f_no_snoop =
4861 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
/* Any FIFO shorter than 65 descriptors forces per-list Tx interrupts
 * for the whole device, overriding the utilization-based default above. */
4862 if (config->tx_cfg[i].fifo_len < 65) {
4863 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
4867 config->max_txds = MAX_SKB_FRAGS;
4869 /* Rx side parameters. */
4870 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
4871 config->rx_ring_num = rx_ring_num;
4872 for (i = 0; i < MAX_RX_RINGS; i++) {
4873 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
4874 (MAX_RXDS_PER_BLOCK + 1);
4875 config->rx_cfg[i].ring_priority = i;
4878 for (i = 0; i < rx_ring_num; i++) {
4879 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
4880 config->rx_cfg[i].f_no_snoop =
4881 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
4884 /* Setting Mac Control parameters */
4885 mac_control->rmac_pause_time = rmac_pause_time;
4886 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
4887 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
4890 /* Initialize Ring buffer parameters. */
4891 for (i = 0; i < config->rx_ring_num; i++)
4892 atomic_set(&sp->rx_bufs_left[i], 0);
4894 /* Initialize the number of ISRs currently running */
4895 atomic_set(&sp->isr_cnt, 0);
4897 /* initialize the shared memory used by the NIC and the host */
4898 if (init_shared_mem(sp)) {
4899 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
4902 goto mem_alloc_failed;
/* Map BAR0 (device registers) and BAR2 (Tx FIFO doorbell region, kept in
 * sp->bar1) into kernel virtual space. */
4905 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
4906 pci_resource_len(pdev, 0));
4908 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
4911 goto bar0_remap_failed;
4914 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
4915 pci_resource_len(pdev, 2));
4917 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
4920 goto bar1_remap_failed;
4923 dev->irq = pdev->irq;
4924 dev->base_addr = (unsigned long) sp->bar0;
4926 /* Initializing the BAR1 address as the start of the FIFO pointer. */
/* Each FIFO's doorbell window is 128KB (0x20000) apart within BAR1. */
4927 for (j = 0; j < MAX_TX_FIFOS; j++) {
4928 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
4929 (sp->bar1 + (j * 0x00020000));
4932 /* Driver entry points */
4933 dev->open = &s2io_open;
4934 dev->stop = &s2io_close;
4935 dev->hard_start_xmit = &s2io_xmit;
4936 dev->get_stats = &s2io_get_stats;
4937 dev->set_multicast_list = &s2io_set_multicast;
4938 dev->do_ioctl = &s2io_ioctl;
4939 dev->change_mtu = &s2io_change_mtu;
4940 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
4943 * will use eth_mac_addr() for dev->set_mac_address
4944 * mac address will be set every time dev->open() is called
4946 #if defined(CONFIG_S2IO_NAPI)
4947 dev->poll = s2io_poll;
/* Offload capabilities: scatter/gather, IP checksum, high-mem DMA (if the
 * 64-bit mask was accepted), and TSO. */
4951 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
4952 if (sp->high_dma_flag == TRUE)
4953 dev->features |= NETIF_F_HIGHDMA;
4955 dev->features |= NETIF_F_TSO;
4958 dev->tx_timeout = &s2io_tx_watchdog;
4959 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
/* Deferred-work handlers: full restart after Tx watchdog, and link-state
 * servicing outside interrupt context. */
4960 INIT_WORK(&sp->rst_timer_task,
4961 (void (*)(void *)) s2io_restart_nic, dev);
4962 INIT_WORK(&sp->set_link_task,
4963 (void (*)(void *)) s2io_set_link, sp);
4965 pci_save_state(sp->pdev);
4967 /* Setting swapper control on the NIC, for proper reset operation */
4968 if (s2io_set_swapper(sp)) {
4969 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
4972 goto set_swap_failed;
4976 * Fix for all "FFs" MAC address problems observed on
4979 fix_mac_address(sp);
4983 * MAC address initialization.
4984 * For now only one mac address will be read and used.
/* Issue an RMAC address-memory read command and wait for completion before
 * reading the result registers. */
4987 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4988 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
4989 writeq(val64, &bar0->rmac_addr_cmd_mem);
4990 wait_for_cmd_complete(sp);
4992 tmp64 = readq(&bar0->rmac_addr_data0_mem);
4993 mac_down = (u32) tmp64;
4994 mac_up = (u32) (tmp64 >> 32);
/* NOTE(review): sizeof(ETH_ALEN) is sizeof(int) == 4, not ETH_ALEN (6) —
 * only 4 bytes are zeroed. Almost certainly meant plain ETH_ALEN; harmless
 * here only because all 6 bytes are assigned below (bytes [0..5] across the
 * visible and elided lines). */
4996 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
4998 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
4999 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
5000 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
5001 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
5002 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
5003 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
5006 "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
5007 sp->def_mac_addr[0].mac_addr[0],
5008 sp->def_mac_addr[0].mac_addr[1],
5009 sp->def_mac_addr[0].mac_addr[2],
5010 sp->def_mac_addr[0].mac_addr[3],
5011 sp->def_mac_addr[0].mac_addr[4],
5012 sp->def_mac_addr[0].mac_addr[5]);
5014 /* Set the factory defined MAC address initially */
5015 dev->addr_len = ETH_ALEN;
5016 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
5019 * Initialize the tasklet status and link state flags
5020 * and the card state parameter
5022 atomic_set(&(sp->card_state), 0);
5023 sp->tasklet_status = 0;
5026 /* Initialize spinlocks */
5027 spin_lock_init(&sp->tx_lock);
/* put_lock guards the Rx put pointer only in the non-NAPI receive path. */
5028 #ifndef CONFIG_S2IO_NAPI
5029 spin_lock_init(&sp->put_lock);
5031 spin_lock_init(&sp->rx_lock);
5034 * SXE-002: Configure link and activity LED to init state
/* Hardware-errata workaround, applied only to subsystem revisions >= 0x07;
 * final readq presumably flushes the posted writes — confirm against the
 * full source. */
5037 subid = sp->pdev->subsystem_device;
5038 if ((subid & 0xFF) >= 0x07) {
5039 val64 = readq(&bar0->gpio_control);
5040 val64 |= 0x0000800000000000ULL;
5041 writeq(val64, &bar0->gpio_control);
5042 val64 = 0x0411040400000000ULL;
5043 writeq(val64, (void __iomem *) bar0 + 0x2700);
5044 val64 = readq(&bar0->gpio_control);
5047 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
/* Register with the network stack; on failure unwind via the error labels. */
5049 if (register_netdev(dev)) {
5050 DBG_PRINT(ERR_DBG, "Device registration failed\n");
5052 goto register_failed;
5055 /* Initialize device name */
5056 strcpy(sp->name, dev->name);
5057 strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
5060 * Make Link state as off at this point, when the Link change
5061 * interrupt comes the state will be automatically changed to
5064 netif_carrier_off(dev);
/* Error-unwind tail (labels elided in this listing): release resources in
 * reverse order of acquisition. */
5075 free_shared_mem(sp);
5076 pci_disable_device(pdev);
5077 pci_release_regions(pdev);
5078 pci_set_drvdata(pdev, NULL);
5085 * s2io_rem_nic - Free the PCI device
5086 * @pdev: structure containing the PCI related information of the device.
5087 * Description: This function is called by the PCI subsystem to release a
5088 * PCI device and free up all resources held by the device. This could
5089 * be in response to a Hot plug event or when the driver is to be removed
5093 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
/* Recover the net_device stashed in drvdata by s2io_init_nic(). */
5095 struct net_device *dev =
5096 (struct net_device *) pci_get_drvdata(pdev);
/* Guard against a remove call with no probe data attached. */
5100 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
/* Tear down in reverse order of initialization: unregister from the stack,
 * free NIC/host shared memory, then release PCI resources.
 * NOTE(review): 'sp' declaration elided here — presumably dev->priv. */
5105 unregister_netdev(dev);
5107 free_shared_mem(sp);
5110 pci_disable_device(pdev);
5111 pci_release_regions(pdev);
5112 pci_set_drvdata(pdev, NULL);
5117 * s2io_starter - Entry point for the driver
5118 * Description: This function is the entry point for the driver. It verifies
5119 * the module loadable parameters and initializes PCI configuration space.
5122 int __init s2io_starter(void)
/* Register s2io_driver with the PCI core; pci_module_init() is the legacy
 * 2.6-era wrapper around pci_register_driver(). Returns 0 or a negative errno. */
5124 return pci_module_init(&s2io_driver);
5128 * s2io_closer - Cleanup routine for the driver
5129 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
5132 void s2io_closer(void)
/* Unregister from the PCI core; this triggers s2io_rem_nic() for every
 * adapter still bound to the driver. */
5134 pci_unregister_driver(&s2io_driver);
5135 DBG_PRINT(INIT_DBG, "cleanup done\n");
/* Module entry/exit hooks wiring the driver into insmod/rmmod. */
5138 module_init(s2io_starter);
5139 module_exit(s2io_closer);