1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used
31 * rx_ring_len: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO.
36 ************************************************************************/
38 #include <linux/config.h>
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/errno.h>
42 #include <linux/ioport.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
59 #include <asm/system.h>
60 #include <asm/uaccess.h>
65 #include "s2io-regs.h"
67 /* S2io Driver name & version. */
/* Driver identity strings reported to the kernel/ethtool. */
68 static char s2io_driver_name[] = "Neterion";
69 static char s2io_driver_version[] = "Version 1.7.7";
71 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
/*
 * Returns non-zero when this Rx descriptor has been handed back to the
 * host: the XENA ownership bit is clear AND the marker field no longer
 * carries the driver's RxD marker value.
 * NOTE(review): the braces/declaration/return of this function are
 * elided in this dump; only the predicate computation is visible.
 */
75 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
76 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
82 * Cards with following subsystem_id have a link state indication
83 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
84 * macro below identifies these cards given the subsystem_id.
/*
 * Evaluates to 1 when @subid identifies a card with the faulty link
 * state indication (subsystem ids 600B-600D and 640B-640D), else 0.
 * The whole expansion and each use of the argument are parenthesized
 * so that expressions like !CARDS_WITH_FAULTY_LINK_INDICATORS(x) or
 * comparisons against the result bind as expected (CERT PRE01-C/PRE02-C).
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
	(((((subid) >= 0x600B) && ((subid) <= 0x600D)) || \
	  (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0)
90 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
91 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
92 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/*
 * rx_buffer_level - classify how depleted ring @ring's Rx buffers are.
 * @sp: device private structure; @rxb_size: count of currently posted
 * buffers; @ring: ring index.
 * Compares (pkt_cnt - rxb_size) against thresholds (16, and one RxD
 * block worth) to decide the refill urgency.
 * NOTE(review): the return statements and branch bodies are elided in
 * this dump; return-value semantics cannot be confirmed from here.
 */
95 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
98 mac_info_t *mac_control;
100 mac_control = &sp->mac_control;
101 if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
103 if ((mac_control->rings[ring].pkt_cnt - rxb_size) <
104 MAX_RXDS_PER_BLOCK) {
112 /* Ethtool related variables and Macros. */
113 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
/* Names of the ethtool self-test entries (offline/online annotated).
 * NOTE(review): the closing "};" of this initializer is elided in
 * this dump. */
114 "Register test\t(offline)",
115 "Eeprom test\t(offline)",
116 "Link test\t(online)",
117 "RLDRAM test\t(offline)",
118 "BIST Test\t(offline)"
121 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
/* Key names for ethtool -S statistics: TMAC/RMAC hardware counters
 * followed by driver-maintained counters (after the banner entry).
 * NOTE(review): many entries and the closing "};" are elided in this
 * dump; order must match the stats-filling routine elsewhere. */
123 {"tmac_data_octets"},
127 {"tmac_pause_ctrl_frms"},
128 {"tmac_any_err_frms"},
129 {"tmac_vld_ip_octets"},
137 {"rmac_data_octets"},
138 {"rmac_fcs_err_frms"},
140 {"rmac_vld_mcst_frms"},
141 {"rmac_vld_bcst_frms"},
142 {"rmac_in_rng_len_err_frms"},
144 {"rmac_pause_ctrl_frms"},
145 {"rmac_discarded_frms"},
146 {"rmac_usized_frms"},
147 {"rmac_osized_frms"},
149 {"rmac_jabber_frms"},
157 {"rmac_err_drp_udp"},
159 {"rmac_accepted_ip"},
161 {"\n DRIVER STATISTICS"},
162 {"single_bit_ecc_errs"},
163 {"double_bit_ecc_errs"},
/*
 * Derived ethtool sizing constants. Each expansion is fully
 * parenthesized so the macros compose safely inside larger
 * expressions (the originals expanded to bare a/b and a*b terms,
 * which mis-bind under surrounding operators — CERT PRE02-C).
 */
#define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)
#define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
173 * Constants to be programmed into the Xena's registers, to configure
177 #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
/* MDIO programming sequence written to bar0->mdio_control by init_nic()
 * (values until END_SIGN; SWITCH_SIGN entries switch target register).
 * NOTE(review): the END_SIGN terminator and closing "};" are elided in
 * this dump. */
180 static u64 default_mdio_cfg[] = {
182 0xC001010000000000ULL, 0xC0010100000000E0ULL,
183 0xC0010100008000E4ULL,
184 /* Remove Reset from PMA PLL */
185 0xC001010000000000ULL, 0xC0010100000000E0ULL,
186 0xC0010100000000E4ULL,
/* DTX (XAUI) programming sequence written to bar0->dtx_control by
 * init_nic(). NOTE(review): the terminator and closing "};" are elided
 * in this dump. */
190 static u64 default_dtx_cfg[] = {
191 0x8000051500000000ULL, 0x80000515000000E0ULL,
192 0x80000515D93500E4ULL, 0x8001051500000000ULL,
193 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
194 0x8002051500000000ULL, 0x80020515000000E0ULL,
195 0x80020515F21000E4ULL,
196 /* Set PADLOOPBACKN */
197 0x8002051500000000ULL, 0x80020515000000E0ULL,
198 0x80020515B20000E4ULL, 0x8003051500000000ULL,
199 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
200 0x8004051500000000ULL, 0x80040515000000E0ULL,
201 0x80040515B20000E4ULL, 0x8005051500000000ULL,
202 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
204 /* Remove PADLOOPBACKN */
205 0x8002051500000000ULL, 0x80020515000000E0ULL,
206 0x80020515F20000E4ULL, 0x8003051500000000ULL,
207 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
208 0x8004051500000000ULL, 0x80040515000000E0ULL,
209 0x80040515F20000E4ULL, 0x8005051500000000ULL,
210 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
215 * Constants for Fixing the MacAddress problem seen mostly on
/* Register write sequence that works around the MAC-address read
 * problem mentioned in the comment above. NOTE(review): trailing
 * entries and the closing "};" are elided in this dump. */
218 static u64 fix_mac[] = {
219 0x0060000000000000ULL, 0x0060600000000000ULL,
220 0x0040600000000000ULL, 0x0000600000000000ULL,
221 0x0020600000000000ULL, 0x0060600000000000ULL,
222 0x0020600000000000ULL, 0x0060600000000000ULL,
223 0x0020600000000000ULL, 0x0060600000000000ULL,
224 0x0020600000000000ULL, 0x0060600000000000ULL,
225 0x0020600000000000ULL, 0x0060600000000000ULL,
226 0x0020600000000000ULL, 0x0060600000000000ULL,
227 0x0020600000000000ULL, 0x0060600000000000ULL,
228 0x0020600000000000ULL, 0x0060600000000000ULL,
229 0x0020600000000000ULL, 0x0060600000000000ULL,
230 0x0020600000000000ULL, 0x0060600000000000ULL,
231 0x0020600000000000ULL, 0x0000600000000000ULL,
232 0x0040600000000000ULL, 0x0060600000000000ULL,
236 /* Module Loadable parameters. */
/* Defaults for the module-loadable parameters described in the file
 * header. Zero array entries mean "use the driver's built-in size". */
237 static unsigned int tx_fifo_num = 1;
238 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
239 {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
240 static unsigned int rx_ring_num = 1;
241 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
242 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
/* Per-ring frame-length steering; 0 = not specified (see init_nic). */
243 static unsigned int rts_frm_len[MAX_RX_RINGS] =
244 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
245 static unsigned int use_continuous_tx_intrs = 1;
/* Pause-frame time value; programmed via RMAC_PAUSE_HG_PTIME. */
246 static unsigned int rmac_pause_time = 65535;
247 static unsigned int mc_pause_threshold_q0q3 = 187;
248 static unsigned int mc_pause_threshold_q4q7 = 187;
249 static unsigned int shared_splits;
/* Link-utilization sampling periods (see bar0->mac_link_util setup). */
250 static unsigned int tmac_util_period = 5;
251 static unsigned int rmac_util_period = 5;
252 #ifndef CONFIG_S2IO_NAPI
253 static unsigned int indicate_max_pkts;
258 * This table lists all the devices that this driver supports.
/* PCI IDs claimed by this driver: Xena (WIN/UNI) and Herc (WIN/UNI).
 * NOTE(review): the terminating {0,} sentinel and closing "};" are
 * elided in this dump. */
260 static struct pci_device_id s2io_tbl[] __devinitdata = {
261 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
262 PCI_ANY_ID, PCI_ANY_ID},
263 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
264 PCI_ANY_ID, PCI_ANY_ID},
265 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
266 PCI_ANY_ID, PCI_ANY_ID},
267 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
268 PCI_ANY_ID, PCI_ANY_ID},
272 MODULE_DEVICE_TABLE(pci, s2io_tbl);
/* PCI driver glue. NOTE(review): the .name member (likely line 275)
 * and the closing "};" are elided in this dump. */
274 static struct pci_driver s2io_driver = {
276 .id_table = s2io_tbl,
277 .probe = s2io_init_nic,
278 .remove = __devexit_p(s2io_rem_nic),
281 /* A simplifier macro used both by init and free shared_mem Fns(). */
/* Pages needed to hold @len items at @per_each items per page
 * (ceiling division). Arguments are parenthesized so expression
 * arguments such as (a + b) expand correctly (CERT PRE01-C). */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
285 * init_shared_mem - Allocation and Initialization of Memory
286 * @nic: Device private variable.
287 * Description: The function allocates all the memory areas shared
288 * between the NIC and the driver. This includes Tx descriptors,
289 * Rx descriptors and the statistics block.
292 static int init_shared_mem(struct s2io_nic *nic)
/*
 * Allocates every memory region shared between host and NIC:
 *   1) TxDL pages (one descriptor list per Tx FIFO entry),
 *   2) RxD blocks for each ring, linked into a circular chain,
 *   3) (2-buffer mode) aligned buffer-address bookkeeping,
 *   4) the DMA-coherent statistics block.
 * NOTE(review): many interior lines (error returns, loop bodies,
 * closing braces) are elided in this dump; failure paths appear to
 * rely on a later free_shared_mem() call to release partial
 * allocations (see the in-code comments below).
 */
295 void *tmp_v_addr, *tmp_v_addr_next;
296 dma_addr_t tmp_p_addr, tmp_p_addr_next;
297 RxD_block_t *pre_rxd_blk = NULL;
298 int i, j, blk_cnt, rx_sz, tx_sz;
299 int lst_size, lst_per_page;
300 struct net_device *dev = nic->dev;
301 #ifdef CONFIG_2BUFF_MODE
306 mac_info_t *mac_control;
307 struct config_param *config;
309 mac_control = &nic->mac_control;
310 config = &nic->config;
313 /* Allocation and initialization of TXDLs in FIFOs */
315 for (i = 0; i < config->tx_fifo_num; i++) {
316 size += config->tx_cfg[i].fifo_len;
/* Reject configurations whose summed FIFO lengths exceed the HW cap. */
318 if (size > MAX_AVAILABLE_TXDS) {
319 DBG_PRINT(ERR_DBG, "%s: Total number of Tx FIFOs ",
321 DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
322 DBG_PRINT(ERR_DBG, "that can be used\n");
/* One TxDL is max_txds descriptors; lst_per_page lists fit per page. */
326 lst_size = (sizeof(TxD_t) * config->max_txds);
327 tx_sz = lst_size * size;
328 lst_per_page = PAGE_SIZE / lst_size;
330 for (i = 0; i < config->tx_fifo_num; i++) {
331 int fifo_len = config->tx_cfg[i].fifo_len;
332 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
333 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
335 if (!mac_control->fifos[i].list_info) {
337 "Malloc failed for list_info\n");
340 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
/* Second pass: per-FIFO bookkeeping plus DMA-coherent TxDL pages. */
342 for (i = 0; i < config->tx_fifo_num; i++) {
343 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
345 mac_control->fifos[i].tx_curr_put_info.offset = 0;
346 mac_control->fifos[i].tx_curr_put_info.fifo_len =
347 config->tx_cfg[i].fifo_len - 1;
348 mac_control->fifos[i].tx_curr_get_info.offset = 0;
349 mac_control->fifos[i].tx_curr_get_info.fifo_len =
350 config->tx_cfg[i].fifo_len - 1;
351 mac_control->fifos[i].fifo_no = i;
352 mac_control->fifos[i].nic = nic;
353 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;
355 for (j = 0; j < page_num; j++) {
359 tmp_v = pci_alloc_consistent(nic->pdev,
363 "pci_alloc_consistent ");
364 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
/* Carve each page into lst_per_page descriptor lists. */
367 while (k < lst_per_page) {
368 int l = (j * lst_per_page) + k;
369 if (l == config->tx_cfg[i].fifo_len)
371 mac_control->fifos[i].list_info[l].list_virt_addr =
372 tmp_v + (k * lst_size);
373 mac_control->fifos[i].list_info[l].list_phy_addr =
374 tmp_p + (k * lst_size);
380 /* Allocation and initialization of RXDs in Rings */
382 for (i = 0; i < config->rx_ring_num; i++) {
/* Ring size must be a whole number of RxD blocks. */
383 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
384 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
385 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
387 DBG_PRINT(ERR_DBG, "RxDs per Block");
390 size += config->rx_cfg[i].num_rxd;
391 mac_control->rings[i].block_count =
392 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
/* One RxD per block is consumed as the link entry, not a packet. */
393 mac_control->rings[i].pkt_cnt =
394 config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
396 size = (size * (sizeof(RxD_t)));
399 for (i = 0; i < config->rx_ring_num; i++) {
400 mac_control->rings[i].rx_curr_get_info.block_index = 0;
401 mac_control->rings[i].rx_curr_get_info.offset = 0;
402 mac_control->rings[i].rx_curr_get_info.ring_len =
403 config->rx_cfg[i].num_rxd - 1;
404 mac_control->rings[i].rx_curr_put_info.block_index = 0;
405 mac_control->rings[i].rx_curr_put_info.offset = 0;
406 mac_control->rings[i].rx_curr_put_info.ring_len =
407 config->rx_cfg[i].num_rxd - 1;
408 mac_control->rings[i].nic = nic;
409 mac_control->rings[i].ring_no = i;
412 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
413 /* Allocating all the Rx blocks */
414 for (j = 0; j < blk_cnt; j++) {
415 #ifndef CONFIG_2BUFF_MODE
416 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
418 size = SIZE_OF_BLOCK;
420 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
422 if (tmp_v_addr == NULL) {
424 * In case of failure, free_shared_mem()
425 * is called, which should free any
426 * memory that was alloced till the
429 mac_control->rings[i].rx_blocks[j].block_virt_addr =
433 memset(tmp_v_addr, 0, size);
434 mac_control->rings[i].rx_blocks[j].block_virt_addr =
436 mac_control->rings[i].rx_blocks[j].block_dma_addr =
439 /* Interlinking all Rx Blocks */
440 for (j = 0; j < blk_cnt; j++) {
/* (j + 1) % blk_cnt closes the chain into a ring. */
442 mac_control->rings[i].rx_blocks[j].block_virt_addr;
444 mac_control->rings[i].rx_blocks[(j + 1) %
445 blk_cnt].block_virt_addr;
447 mac_control->rings[i].rx_blocks[j].block_dma_addr;
449 mac_control->rings[i].rx_blocks[(j + 1) %
450 blk_cnt].block_dma_addr;
452 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
453 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
456 #ifndef CONFIG_2BUFF_MODE
457 pre_rxd_blk->reserved_2_pNext_RxD_block =
458 (unsigned long) tmp_v_addr_next;
460 pre_rxd_blk->pNext_RxD_Blk_physical =
461 (u64) tmp_p_addr_next;
465 #ifdef CONFIG_2BUFF_MODE
467 * Allocation of Storages for buffer addresses in 2BUFF mode
468 * and the buffers as well.
470 for (i = 0; i < config->rx_ring_num; i++) {
472 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
473 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
475 if (!mac_control->rings[i].ba)
477 for (j = 0; j < blk_cnt; j++) {
479 mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
480 (MAX_RXDS_PER_BLOCK + 1)),
482 if (!mac_control->rings[i].ba[j])
484 while (k != MAX_RXDS_PER_BLOCK) {
485 ba = &mac_control->rings[i].ba[j][k];
/* Over-allocate by ALIGN_SIZE, then round the pointer down so
 * ba_0/ba_1 are ALIGN_SIZE-aligned while ba_*_org stays freeable. */
487 ba->ba_0_org = (void *) kmalloc
488 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
491 tmp = (u64) ba->ba_0_org;
493 tmp &= ~((u64) ALIGN_SIZE);
494 ba->ba_0 = (void *) tmp;
496 ba->ba_1_org = (void *) kmalloc
497 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
500 tmp = (u64) ba->ba_1_org;
502 tmp &= ~((u64) ALIGN_SIZE);
503 ba->ba_1 = (void *) tmp;
510 /* Allocation and initialization of Statistics block */
511 size = sizeof(StatInfo_t);
512 mac_control->stats_mem = pci_alloc_consistent
513 (nic->pdev, size, &mac_control->stats_mem_phy);
515 if (!mac_control->stats_mem) {
517 * In case of failure, free_shared_mem() is called, which
518 * should free any memory that was alloced till the
523 mac_control->stats_mem_sz = size;
525 tmp_v_addr = mac_control->stats_mem;
526 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
527 memset(tmp_v_addr, 0, size);
528 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
529 (unsigned long long) tmp_p_addr);
535 * free_shared_mem - Free the allocated Memory
536 * @nic: Device private variable.
537 * Description: This function is to free all memory locations allocated by
538 * the init_shared_mem() function and return it to the kernel.
541 static void free_shared_mem(struct s2io_nic *nic)
/*
 * Releases everything init_shared_mem() allocated, in the same order:
 * TxDL pages and list_info arrays, RxD blocks per ring, 2-buffer-mode
 * storage, and finally the statistics block. Tolerates partially
 * completed allocations (NULL checks before each free) so it can be
 * used as the error-unwind path of init_shared_mem().
 * NOTE(review): interior lines (early-exit breaks, closing braces)
 * are elided in this dump.
 */
543 int i, j, blk_cnt, size;
545 dma_addr_t tmp_p_addr;
546 mac_info_t *mac_control;
547 struct config_param *config;
548 int lst_size, lst_per_page;
554 mac_control = &nic->mac_control;
555 config = &nic->config;
557 lst_size = (sizeof(TxD_t) * config->max_txds);
558 lst_per_page = PAGE_SIZE / lst_size;
560 for (i = 0; i < config->tx_fifo_num; i++) {
561 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
563 for (j = 0; j < page_num; j++) {
/* First list of each page holds the page's base address. */
564 int mem_blks = (j * lst_per_page);
565 if (!mac_control->fifos[i].list_info[mem_blks].
568 pci_free_consistent(nic->pdev, PAGE_SIZE,
569 mac_control->fifos[i].
572 mac_control->fifos[i].
576 kfree(mac_control->fifos[i].list_info);
579 #ifndef CONFIG_2BUFF_MODE
580 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
582 size = SIZE_OF_BLOCK;
584 for (i = 0; i < config->rx_ring_num; i++) {
585 blk_cnt = mac_control->rings[i].block_count;
586 for (j = 0; j < blk_cnt; j++) {
587 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
589 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
591 if (tmp_v_addr == NULL)
593 pci_free_consistent(nic->pdev, size,
594 tmp_v_addr, tmp_p_addr);
598 #ifdef CONFIG_2BUFF_MODE
599 /* Freeing buffer storage addresses in 2BUFF mode. */
600 for (i = 0; i < config->rx_ring_num; i++) {
602 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
603 for (j = 0; j < blk_cnt; j++) {
605 if (!mac_control->rings[i].ba[j])
607 while (k != MAX_RXDS_PER_BLOCK) {
608 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
613 kfree(mac_control->rings[i].ba[j]);
/* NOTE(review): the NULL guard below is redundant — kfree(NULL)
 * is a no-op. */
615 if (mac_control->rings[i].ba)
616 kfree(mac_control->rings[i].ba);
620 if (mac_control->stats_mem) {
621 pci_free_consistent(nic->pdev,
622 mac_control->stats_mem_sz,
623 mac_control->stats_mem,
624 mac_control->stats_mem_phy);
629 * init_nic - Initialization of hardware
630 * @nic: device private variable
631 * Description: The function sequentially configures every block
632 * of the H/W from their reset values.
633 * Return Value: SUCCESS on success and
634 * '-1' on failure (endian settings incorrect).
637 static int init_nic(struct s2io_nic *nic)
/*
 * Programs the adapter from reset defaults: swapper control, XGXS/XAUI
 * bring-up (DTX + MDIO sequences), Tx FIFO partitioning, Rx queue
 * priorities and memory shares, Tx/Rx round-robin weights, frame-length
 * steering, statistics DMA address, link-utilization sampling, TTI/RTI
 * interrupt moderation, pause parameters and PIC shared splits.
 * NOTE(review): many interior lines (returns, waits, closing braces)
 * are elided in this dump; per the kernel-doc above, returns SUCCESS
 * or -1 on failure.
 */
639 XENA_dev_config_t __iomem *bar0 = nic->bar0;
640 struct net_device *dev = nic->dev;
641 register u64 val64 = 0;
645 mac_info_t *mac_control;
646 struct config_param *config;
647 int mdio_cnt = 0, dtx_cnt = 0;
648 unsigned long long mem_share;
651 mac_control = &nic->mac_control;
652 config = &nic->config;
654 /* to set the swapper control on the card */
655 if(s2io_set_swapper(nic)) {
656 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
660 /* Remove XGXS from reset state */
662 writeq(val64, &bar0->sw_reset);
/* Read back to flush the posted write before continuing. */
664 val64 = readq(&bar0->sw_reset);
666 /* Enable Receiving broadcasts */
667 add = &bar0->mac_cfg;
668 val64 = readq(&bar0->mac_cfg);
669 val64 |= MAC_RMAC_BCAST_ENABLE;
/* mac_cfg is key-protected: rewrite rmac_cfg_key before each 32-bit
 * half of the 64-bit value. */
670 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
671 writel((u32) val64, add);
672 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
673 writel((u32) (val64 >> 32), (add + 4));
675 /* Read registers in all blocks */
676 val64 = readq(&bar0->mac_int_mask);
677 val64 = readq(&bar0->mc_int_mask);
678 val64 = readq(&bar0->xgxs_int_mask);
682 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
685 * Configuring the XAUI Interface of Xena.
686 * ***************************************
687 * To Configure the Xena's XAUI, one has to write a series
688 * of 64 bit values into two registers in a particular
689 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
690 * which will be defined in the array of configuration values
691 * (default_dtx_cfg & default_mdio_cfg) at appropriate places
692 * to switch writing from one register to another. We continue
693 * writing these values until we encounter the 'END_SIGN' macro.
694 * For example, After making a series of 21 writes into
695 * dtx_control register the 'SWITCH_SIGN' appears and hence we
696 * start writing into mdio_control until we encounter END_SIGN.
700 while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
701 if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
705 SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
706 &bar0->dtx_control, UF);
707 val64 = readq(&bar0->dtx_control);
711 while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
712 if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
716 SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
717 &bar0->mdio_control, UF);
718 val64 = readq(&bar0->mdio_control);
/* Done only when both sequences have reached their END_SIGN. */
721 if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
722 (default_mdio_cfg[mdio_cnt] == END_SIGN)) {
729 /* Tx DMA Initialization */
731 writeq(val64, &bar0->tx_fifo_partition_0);
732 writeq(val64, &bar0->tx_fifo_partition_1);
733 writeq(val64, &bar0->tx_fifo_partition_2);
734 writeq(val64, &bar0->tx_fifo_partition_3);
/* Pack each FIFO's length and priority into the partition registers,
 * four FIFOs per 64-bit register (j counts within a register). */
737 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
739 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
740 13) | vBIT(config->tx_cfg[i].fifo_priority,
743 if (i == (config->tx_fifo_num - 1)) {
750 writeq(val64, &bar0->tx_fifo_partition_0);
754 writeq(val64, &bar0->tx_fifo_partition_1);
758 writeq(val64, &bar0->tx_fifo_partition_2);
762 writeq(val64, &bar0->tx_fifo_partition_3);
767 /* Enable Tx FIFO partition 0. */
768 val64 = readq(&bar0->tx_fifo_partition_0);
769 val64 |= BIT(0); /* To enable the FIFO partition. */
770 writeq(val64, &bar0->tx_fifo_partition_0);
773 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
774 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
776 if (get_xena_rev_id(nic->pdev) < 4)
777 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
779 val64 = readq(&bar0->tx_fifo_partition_0);
780 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
781 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
784 * Initialization of Tx_PA_CONFIG register to ignore packet
785 * integrity checking.
787 val64 = readq(&bar0->tx_pa_cfg);
788 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
789 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
790 writeq(val64, &bar0->tx_pa_cfg);
792 /* Rx DMA initialization. */
794 for (i = 0; i < config->rx_ring_num; i++) {
796 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
799 writeq(val64, &bar0->rx_queue_priority);
802 * Allocating equal share of memory to all the
/* Queue 0 additionally absorbs the remainder of the division so the
 * whole memory is accounted for. */
807 for (i = 0; i < config->rx_ring_num; i++) {
810 mem_share = (mem_size / config->rx_ring_num +
811 mem_size % config->rx_ring_num);
812 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
815 mem_share = (mem_size / config->rx_ring_num);
816 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
819 mem_share = (mem_size / config->rx_ring_num);
820 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
823 mem_share = (mem_size / config->rx_ring_num);
824 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
827 mem_share = (mem_size / config->rx_ring_num);
828 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
831 mem_share = (mem_size / config->rx_ring_num);
832 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
835 mem_share = (mem_size / config->rx_ring_num);
836 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
839 mem_share = (mem_size / config->rx_ring_num);
840 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
844 writeq(val64, &bar0->rx_queue_cfg);
847 * Filling Tx round robin registers
848 * as per the number of FIFOs
/* Each byte of the five round-robin registers names the FIFO served in
 * that arbitration slot; patterns below weight FIFO 0 most heavily. */
850 switch (config->tx_fifo_num) {
852 val64 = 0x0000000000000000ULL;
853 writeq(val64, &bar0->tx_w_round_robin_0);
854 writeq(val64, &bar0->tx_w_round_robin_1);
855 writeq(val64, &bar0->tx_w_round_robin_2);
856 writeq(val64, &bar0->tx_w_round_robin_3);
857 writeq(val64, &bar0->tx_w_round_robin_4);
860 val64 = 0x0000010000010000ULL;
861 writeq(val64, &bar0->tx_w_round_robin_0);
862 val64 = 0x0100000100000100ULL;
863 writeq(val64, &bar0->tx_w_round_robin_1);
864 val64 = 0x0001000001000001ULL;
865 writeq(val64, &bar0->tx_w_round_robin_2);
866 val64 = 0x0000010000010000ULL;
867 writeq(val64, &bar0->tx_w_round_robin_3);
868 val64 = 0x0100000000000000ULL;
869 writeq(val64, &bar0->tx_w_round_robin_4);
872 val64 = 0x0001000102000001ULL;
873 writeq(val64, &bar0->tx_w_round_robin_0);
874 val64 = 0x0001020000010001ULL;
875 writeq(val64, &bar0->tx_w_round_robin_1);
876 val64 = 0x0200000100010200ULL;
877 writeq(val64, &bar0->tx_w_round_robin_2);
878 val64 = 0x0001000102000001ULL;
879 writeq(val64, &bar0->tx_w_round_robin_3);
880 val64 = 0x0001020000000000ULL;
881 writeq(val64, &bar0->tx_w_round_robin_4);
884 val64 = 0x0001020300010200ULL;
885 writeq(val64, &bar0->tx_w_round_robin_0);
886 val64 = 0x0100000102030001ULL;
887 writeq(val64, &bar0->tx_w_round_robin_1);
888 val64 = 0x0200010000010203ULL;
889 writeq(val64, &bar0->tx_w_round_robin_2);
890 val64 = 0x0001020001000001ULL;
891 writeq(val64, &bar0->tx_w_round_robin_3);
892 val64 = 0x0203000100000000ULL;
893 writeq(val64, &bar0->tx_w_round_robin_4);
896 val64 = 0x0001000203000102ULL;
897 writeq(val64, &bar0->tx_w_round_robin_0);
898 val64 = 0x0001020001030004ULL;
899 writeq(val64, &bar0->tx_w_round_robin_1);
900 val64 = 0x0001000203000102ULL;
901 writeq(val64, &bar0->tx_w_round_robin_2);
902 val64 = 0x0001020001030004ULL;
903 writeq(val64, &bar0->tx_w_round_robin_3);
904 val64 = 0x0001000000000000ULL;
905 writeq(val64, &bar0->tx_w_round_robin_4);
908 val64 = 0x0001020304000102ULL;
909 writeq(val64, &bar0->tx_w_round_robin_0);
910 val64 = 0x0304050001020001ULL;
911 writeq(val64, &bar0->tx_w_round_robin_1);
912 val64 = 0x0203000100000102ULL;
913 writeq(val64, &bar0->tx_w_round_robin_2);
914 val64 = 0x0304000102030405ULL;
915 writeq(val64, &bar0->tx_w_round_robin_3);
916 val64 = 0x0001000200000000ULL;
917 writeq(val64, &bar0->tx_w_round_robin_4);
920 val64 = 0x0001020001020300ULL;
921 writeq(val64, &bar0->tx_w_round_robin_0);
922 val64 = 0x0102030400010203ULL;
923 writeq(val64, &bar0->tx_w_round_robin_1);
924 val64 = 0x0405060001020001ULL;
925 writeq(val64, &bar0->tx_w_round_robin_2);
926 val64 = 0x0304050000010200ULL;
927 writeq(val64, &bar0->tx_w_round_robin_3);
928 val64 = 0x0102030000000000ULL;
929 writeq(val64, &bar0->tx_w_round_robin_4);
932 val64 = 0x0001020300040105ULL;
933 writeq(val64, &bar0->tx_w_round_robin_0);
934 val64 = 0x0200030106000204ULL;
935 writeq(val64, &bar0->tx_w_round_robin_1);
936 val64 = 0x0103000502010007ULL;
937 writeq(val64, &bar0->tx_w_round_robin_2);
938 val64 = 0x0304010002060500ULL;
939 writeq(val64, &bar0->tx_w_round_robin_3);
940 val64 = 0x0103020400000000ULL;
941 writeq(val64, &bar0->tx_w_round_robin_4);
945 /* Filling the Rx round robin registers as per the
946 * number of Rings and steering based on QoS.
948 switch (config->rx_ring_num) {
950 val64 = 0x8080808080808080ULL;
951 writeq(val64, &bar0->rts_qos_steering);
954 val64 = 0x0000010000010000ULL;
955 writeq(val64, &bar0->rx_w_round_robin_0);
956 val64 = 0x0100000100000100ULL;
957 writeq(val64, &bar0->rx_w_round_robin_1);
958 val64 = 0x0001000001000001ULL;
959 writeq(val64, &bar0->rx_w_round_robin_2);
960 val64 = 0x0000010000010000ULL;
961 writeq(val64, &bar0->rx_w_round_robin_3);
962 val64 = 0x0100000000000000ULL;
963 writeq(val64, &bar0->rx_w_round_robin_4);
965 val64 = 0x8080808040404040ULL;
966 writeq(val64, &bar0->rts_qos_steering);
969 val64 = 0x0001000102000001ULL;
970 writeq(val64, &bar0->rx_w_round_robin_0);
971 val64 = 0x0001020000010001ULL;
972 writeq(val64, &bar0->rx_w_round_robin_1);
973 val64 = 0x0200000100010200ULL;
974 writeq(val64, &bar0->rx_w_round_robin_2);
975 val64 = 0x0001000102000001ULL;
976 writeq(val64, &bar0->rx_w_round_robin_3);
977 val64 = 0x0001020000000000ULL;
978 writeq(val64, &bar0->rx_w_round_robin_4);
980 val64 = 0x8080804040402020ULL;
981 writeq(val64, &bar0->rts_qos_steering);
984 val64 = 0x0001020300010200ULL;
985 writeq(val64, &bar0->rx_w_round_robin_0);
986 val64 = 0x0100000102030001ULL;
987 writeq(val64, &bar0->rx_w_round_robin_1);
988 val64 = 0x0200010000010203ULL;
989 writeq(val64, &bar0->rx_w_round_robin_2);
990 val64 = 0x0001020001000001ULL;
991 writeq(val64, &bar0->rx_w_round_robin_3);
992 val64 = 0x0203000100000000ULL;
993 writeq(val64, &bar0->rx_w_round_robin_4);
995 val64 = 0x8080404020201010ULL;
996 writeq(val64, &bar0->rts_qos_steering);
999 val64 = 0x0001000203000102ULL;
1000 writeq(val64, &bar0->rx_w_round_robin_0);
1001 val64 = 0x0001020001030004ULL;
1002 writeq(val64, &bar0->rx_w_round_robin_1);
1003 val64 = 0x0001000203000102ULL;
1004 writeq(val64, &bar0->rx_w_round_robin_2);
1005 val64 = 0x0001020001030004ULL;
1006 writeq(val64, &bar0->rx_w_round_robin_3);
1007 val64 = 0x0001000000000000ULL;
1008 writeq(val64, &bar0->rx_w_round_robin_4);
1010 val64 = 0x8080404020201008ULL;
1011 writeq(val64, &bar0->rts_qos_steering);
1014 val64 = 0x0001020304000102ULL;
1015 writeq(val64, &bar0->rx_w_round_robin_0);
1016 val64 = 0x0304050001020001ULL;
1017 writeq(val64, &bar0->rx_w_round_robin_1);
1018 val64 = 0x0203000100000102ULL;
1019 writeq(val64, &bar0->rx_w_round_robin_2);
1020 val64 = 0x0304000102030405ULL;
1021 writeq(val64, &bar0->rx_w_round_robin_3);
1022 val64 = 0x0001000200000000ULL;
1023 writeq(val64, &bar0->rx_w_round_robin_4);
1025 val64 = 0x8080404020100804ULL;
1026 writeq(val64, &bar0->rts_qos_steering);
1029 val64 = 0x0001020001020300ULL;
1030 writeq(val64, &bar0->rx_w_round_robin_0);
1031 val64 = 0x0102030400010203ULL;
1032 writeq(val64, &bar0->rx_w_round_robin_1);
1033 val64 = 0x0405060001020001ULL;
1034 writeq(val64, &bar0->rx_w_round_robin_2);
1035 val64 = 0x0304050000010200ULL;
1036 writeq(val64, &bar0->rx_w_round_robin_3);
1037 val64 = 0x0102030000000000ULL;
1038 writeq(val64, &bar0->rx_w_round_robin_4);
1040 val64 = 0x8080402010080402ULL;
1041 writeq(val64, &bar0->rts_qos_steering);
1044 val64 = 0x0001020300040105ULL;
1045 writeq(val64, &bar0->rx_w_round_robin_0);
1046 val64 = 0x0200030106000204ULL;
1047 writeq(val64, &bar0->rx_w_round_robin_1);
1048 val64 = 0x0103000502010007ULL;
1049 writeq(val64, &bar0->rx_w_round_robin_2);
1050 val64 = 0x0304010002060500ULL;
1051 writeq(val64, &bar0->rx_w_round_robin_3);
1052 val64 = 0x0103020400000000ULL;
1053 writeq(val64, &bar0->rx_w_round_robin_4);
1055 val64 = 0x8040201008040201ULL;
1056 writeq(val64, &bar0->rts_qos_steering);
1062 for (i = 0; i < 8; i++)
1063 writeq(val64, &bar0->rts_frm_len_n[i]);
1065 /* Set the default rts frame length for the rings configured */
/* mtu + 22: Ethernet header (14) + FCS (4) + VLAN tag (4), presumably
 * — TODO confirm against the Xena register spec. */
1066 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1067 for (i = 0 ; i < config->rx_ring_num ; i++)
1068 writeq(val64, &bar0->rts_frm_len_n[i]);
1070 /* Set the frame length for the configured rings
1071 * desired by the user
1073 for (i = 0; i < config->rx_ring_num; i++) {
1074 /* If rts_frm_len[i] == 0 then it is assumed that user not
1075 * specified frame length steering.
1076 * If the user provides the frame length then program
1077 * the rts_frm_len register for those values or else
1078 * leave it as it is.
1080 if (rts_frm_len[i] != 0) {
1081 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1082 &bar0->rts_frm_len_n[i]);
1086 /* Program statistics memory */
1087 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1090 * Initializing the sampling rate for the device to calculate the
1091 * bandwidth utilization.
1093 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1094 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1095 writeq(val64, &bar0->mac_link_util);
1099 * Initializing the Transmit and Receive Traffic Interrupt
1103 * TTI Initialization. Default Tx timer gets us about
1104 * 250 interrupts per sec. Continuous interrupts are enabled
1107 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) |
1108 TTI_DATA1_MEM_TX_URNG_A(0xA) |
1109 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1110 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1111 if (use_continuous_tx_intrs)
1112 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1113 writeq(val64, &bar0->tti_data1_mem);
1115 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1116 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1117 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1118 writeq(val64, &bar0->tti_data2_mem);
1120 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1121 writeq(val64, &bar0->tti_command_mem);
1124 * Once the operation completes, the Strobe bit of the command
1125 * register will be reset. We poll for this particular condition
1126 * We wait for a maximum of 500ms for the operation to complete,
1127 * if it's not complete by then we return error.
1131 val64 = readq(&bar0->tti_command_mem);
1132 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1136 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1144 /* RTI Initialization */
1145 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) |
1146 RTI_DATA1_MEM_RX_URNG_A(0xA) |
1147 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1148 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1150 writeq(val64, &bar0->rti_data1_mem);
1152 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1153 RTI_DATA2_MEM_RX_UFC_B(0x2) |
1154 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1155 writeq(val64, &bar0->rti_data2_mem);
1157 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
1158 writeq(val64, &bar0->rti_command_mem);
1161 * Once the operation completes, the Strobe bit of the
1162 * command register will be reset. We poll for this
1163 * particular condition. We wait for a maximum of 500ms
1164 * for the operation to complete, if it's not complete
1165 * by then we return error.
1169 val64 = readq(&bar0->rti_command_mem);
1170 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1174 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1183 * Initializing proper values as Pause threshold into all
1184 * the 8 Queues on Rx side.
1186 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1187 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1189 /* Disable RMAC PAD STRIPPING */
1190 add = (void *) &bar0->mac_cfg;
1191 val64 = readq(&bar0->mac_cfg);
1192 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
/* Same key-protected two-halves write protocol as above. */
1193 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1194 writel((u32) (val64), add);
1195 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1196 writel((u32) (val64 >> 32), (add + 4));
1197 val64 = readq(&bar0->mac_cfg);
1200 * Set the time value to be inserted in the pause frame
1201 * generated by xena.
1203 val64 = readq(&bar0->rmac_pause_cfg);
1204 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1205 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1206 writeq(val64, &bar0->rmac_pause_cfg);
1209 * Set the Threshold Limit for Generating the pause frame
1210 * If the amount of data in any Queue exceeds ratio of
1211 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1212 * pause frame is generated
1215 for (i = 0; i < 4; i++) {
1217 (((u64) 0xFF00 | nic->mac_control.
1218 mc_pause_threshold_q0q3)
1221 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1224 for (i = 0; i < 4; i++) {
1226 (((u64) 0xFF00 | nic->mac_control.
1227 mc_pause_threshold_q4q7)
1230 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1233 * TxDMA will stop Read request if the number of read split has
1234 * exceeded the limit pointed by shared_splits
1236 val64 = readq(&bar0->pic_control);
1237 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1238 writeq(val64, &bar0->pic_control);
1244 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1245 * @nic: device private variable,
1246 * @mask: A mask indicating which Intr block must be modified and,
1247 * @flag: A flag indicating whether to enable or disable the Intrs.
1248 * Description: This function will either disable or enable the interrupts
1249 * depending on the flag argument. The mask argument can be used to
1250 * enable/disable any Intr block.
1251 * Return Value: NONE.
/*
 * en_dis_able_nic_intrs - enable or disable selected interrupt blocks.
 * @nic:  device private structure (provides the bar0 register map).
 * @mask: bitmask of interrupt blocks (TX/RX PIC, DMA, MAC, XGXS, MC,
 *        TX/RX traffic) to be modified.
 * @flag: ENABLE_INTRS or DISABLE_INTRS.
 *
 * For each block selected in @mask, the block's bit(s) in general_int_mask
 * are cleared on enable (set on disable), and the block-local mask register
 * is programmed. Even on enable, most block-local error sources stay masked
 * (DISABLE_ALL_INTRS); only a few are unmasked: PFC misc err 1, PCC FB ECC,
 * RMAC link-state change, all MC sources, and the Tx/Rx traffic levels.
 * Return Value: NONE.
 *
 * NOTE(review): this extract is missing lines relative to the full driver
 * source (closing braces and the "temp64 |= val64" updates in the disable
 * paths appear absent) — compare against the complete file before relying
 * on the disable-path writes of val64 into general_int_mask.
 */
1254 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1256 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1257 register u64 val64 = 0, temp64 = 0;
1259 /* Top level interrupt classification */
1260 /* PIC Interrupts */
1261 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1262 /* Enable PIC Intrs in the general intr mask register */
1263 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1264 if (flag == ENABLE_INTRS) {
1265 temp64 = readq(&bar0->general_int_mask);
1266 temp64 &= ~((u64) val64);
1267 writeq(temp64, &bar0->general_int_mask);
1269 * Disable all PCIX, Flash, MDIO, IIC and GPIO
1270 * interrupts for now.
1273 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1275 * No MSI Support is available presently, so TTI and
1276 * RTI interrupts are also disabled.
1278 } else if (flag == DISABLE_INTRS) {
1280 * Disable PIC Intrs in the general
1281 * intr mask register
1283 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1284 temp64 = readq(&bar0->general_int_mask);
1286 writeq(val64, &bar0->general_int_mask);
1290 /* DMA Interrupts */
1291 /* Enabling/Disabling Tx DMA interrupts */
1292 if (mask & TX_DMA_INTR) {
1293 /* Enable TxDMA Intrs in the general intr mask register */
1294 val64 = TXDMA_INT_M;
1295 if (flag == ENABLE_INTRS) {
1296 temp64 = readq(&bar0->general_int_mask);
1297 temp64 &= ~((u64) val64);
1298 writeq(temp64, &bar0->general_int_mask);
1300 * Keep all interrupts other than PFC interrupt
1301 * and PCC interrupt disabled in DMA level.
1303 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1305 writeq(val64, &bar0->txdma_int_mask);
1307 * Enable only the MISC error 1 interrupt in PFC block
1309 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1310 writeq(val64, &bar0->pfc_err_mask);
1312 * Enable only the FB_ECC error interrupt in PCC block
1314 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1315 writeq(val64, &bar0->pcc_err_mask);
1316 } else if (flag == DISABLE_INTRS) {
1318 * Disable TxDMA Intrs in the general intr mask
1321 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1322 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1323 temp64 = readq(&bar0->general_int_mask);
1325 writeq(val64, &bar0->general_int_mask);
1329 /* Enabling/Disabling Rx DMA interrupts */
1330 if (mask & RX_DMA_INTR) {
1331 /* Enable RxDMA Intrs in the general intr mask register */
1332 val64 = RXDMA_INT_M;
1333 if (flag == ENABLE_INTRS) {
1334 temp64 = readq(&bar0->general_int_mask);
1335 temp64 &= ~((u64) val64);
1336 writeq(temp64, &bar0->general_int_mask);
1338 * All RxDMA block interrupts are disabled for now
1341 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1342 } else if (flag == DISABLE_INTRS) {
1344 * Disable RxDMA Intrs in the general intr mask
1347 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1348 temp64 = readq(&bar0->general_int_mask);
1350 writeq(val64, &bar0->general_int_mask);
1354 /* MAC Interrupts */
1355 /* Enabling/Disabling MAC interrupts */
1356 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1357 val64 = TXMAC_INT_M | RXMAC_INT_M;
1358 if (flag == ENABLE_INTRS) {
1359 temp64 = readq(&bar0->general_int_mask);
1360 temp64 &= ~((u64) val64);
1361 writeq(temp64, &bar0->general_int_mask);
1363 * All MAC block error interrupts are disabled for now
1364 * except the link status change interrupt.
1367 val64 = MAC_INT_STATUS_RMAC_INT;
1368 temp64 = readq(&bar0->mac_int_mask);
1369 temp64 &= ~((u64) val64);
1370 writeq(temp64, &bar0->mac_int_mask);
1372 val64 = readq(&bar0->mac_rmac_err_mask);
1373 val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1374 writeq(val64, &bar0->mac_rmac_err_mask);
1375 } else if (flag == DISABLE_INTRS) {
1377 * Disable MAC Intrs in the general intr mask register
1379 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1380 writeq(DISABLE_ALL_INTRS,
1381 &bar0->mac_rmac_err_mask);
1383 temp64 = readq(&bar0->general_int_mask);
1385 writeq(val64, &bar0->general_int_mask);
1389 /* XGXS Interrupts */
1390 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1391 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1392 if (flag == ENABLE_INTRS) {
1393 temp64 = readq(&bar0->general_int_mask);
1394 temp64 &= ~((u64) val64);
1395 writeq(temp64, &bar0->general_int_mask);
1397 * All XGXS block error interrupts are disabled for now
1400 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1401 } else if (flag == DISABLE_INTRS) {
1403 * Disable XGXS Intrs in the general intr mask register
1405 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1406 temp64 = readq(&bar0->general_int_mask);
1408 writeq(val64, &bar0->general_int_mask);
1412 /* Memory Controller(MC) interrupts */
1413 if (mask & MC_INTR) {
1415 if (flag == ENABLE_INTRS) {
1416 temp64 = readq(&bar0->general_int_mask);
1417 temp64 &= ~((u64) val64);
1418 writeq(temp64, &bar0->general_int_mask);
1420 * Enable all MC Intrs.
1422 writeq(0x0, &bar0->mc_int_mask);
1423 writeq(0x0, &bar0->mc_err_mask);
1424 } else if (flag == DISABLE_INTRS) {
1426 * Disable MC Intrs in the general intr mask register
1428 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1429 temp64 = readq(&bar0->general_int_mask);
1431 writeq(val64, &bar0->general_int_mask);
1436 /* Tx traffic interrupts */
1437 if (mask & TX_TRAFFIC_INTR) {
1438 val64 = TXTRAFFIC_INT_M;
1439 if (flag == ENABLE_INTRS) {
1440 temp64 = readq(&bar0->general_int_mask);
1441 temp64 &= ~((u64) val64);
1442 writeq(temp64, &bar0->general_int_mask);
1444 * Enable all the Tx side interrupts
1445 * writing 0 Enables all 64 TX interrupt levels
1447 writeq(0x0, &bar0->tx_traffic_mask);
1448 } else if (flag == DISABLE_INTRS) {
1450 * Disable Tx Traffic Intrs in the general intr mask
1453 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1454 temp64 = readq(&bar0->general_int_mask);
1456 writeq(val64, &bar0->general_int_mask);
1460 /* Rx traffic interrupts */
1461 if (mask & RX_TRAFFIC_INTR) {
1462 val64 = RXTRAFFIC_INT_M;
1463 if (flag == ENABLE_INTRS) {
1464 temp64 = readq(&bar0->general_int_mask);
1465 temp64 &= ~((u64) val64);
1466 writeq(temp64, &bar0->general_int_mask);
1467 /* writing 0 Enables all 8 RX interrupt levels */
1468 writeq(0x0, &bar0->rx_traffic_mask);
1469 } else if (flag == DISABLE_INTRS) {
1471 * Disable Rx Traffic Intrs in the general intr mask
1474 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1475 temp64 = readq(&bar0->general_int_mask);
1477 writeq(val64, &bar0->general_int_mask);
/*
 * check_prc_pcc_state - check PRC/PCC quiescence in the adapter status.
 * @val64:  value read from the adapter_status register.
 * @flag:   FALSE if the adapter-enable bit has never been written
 *          (expectations about the PCC idle bits differ in that case).
 * @rev_id: Xena silicon revision; revisions below 4 expose only a single
 *          PCC_IDLE bit, later ones expose PCC_FOUR_IDLE.
 *
 * Description: helper for verify_xena_quiescence(); decides whether the
 * Rx PRC rings and RMAC PCC blocks are in the state expected for the
 * given enable history and silicon revision.
 * NOTE(review): the rev_id comparisons, return statements and closing
 * braces are missing from this extract — consult the full source for the
 * complete branch structure.
 */
1482 static int check_prc_pcc_state(u64 val64, int flag, int rev_id)
1486 if (flag == FALSE) {
1488 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1489 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1490 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1494 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1495 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1496 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1502 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1503 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1504 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1505 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1506 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1510 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1511 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1512 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1513 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1514 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1523 * verify_xena_quiescence - Checks whether the H/W is ready
1524 * @val64 : Value read from adapter status register.
1525 * @flag : indicates if the adapter enable bit was ever written once
1527 * Description: Returns whether the H/W is ready to go or not. Depending
1528 * on whether adapter enable bit was written or not the comparison
1529 * differs and the calling function passes the input argument flag to
1531 * Return: 1 If xena is quiescence
1532 * 0 If Xena is not quiescence
/*
 * verify_xena_quiescence - check whether the H/W is ready to be enabled.
 * @sp:    device private structure (used only to obtain the PCI rev id).
 * @val64: value read from the adapter_status register.
 * @flag:  whether the adapter-enable bit was ever written; forwarded to
 *         check_prc_pcc_state().
 *
 * All the READY/QUIESCENT/PLL-lock bits listed below must be set in
 * @val64 (tested via the complemented tmp64); if so, the remaining
 * PRC/PCC state is delegated to check_prc_pcc_state().
 * Return: 1 if Xena is quiescent, 0 otherwise (per the header comment
 * above this function in the file).
 */
1535 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1538 u64 tmp64 = ~((u64) val64);
1539 int rev_id = get_xena_rev_id(sp->pdev);
1543 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1544 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1545 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1546 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1547 ADAPTER_STATUS_P_PLL_LOCK))) {
1548 ret = check_prc_pcc_state(val64, flag, rev_id);
1555 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1556 * @sp: Pointer to device specific structure
1558 * New procedure to clear mac address reading problems on Alpha platforms
/*
 * fix_mac_address - work around MAC-address read problems on Alpha.
 * @sp: device private structure.
 *
 * Replays the fix_mac[] sequence into the gpio_control register until the
 * END_SIGN sentinel, reading the register back after each write (the
 * read-back forces the posted write to complete; its value is discarded).
 */
1562 void fix_mac_address(nic_t * sp)
1564 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1568 while (fix_mac[i] != END_SIGN) {
1569 writeq(fix_mac[i++], &bar0->gpio_control);
1571 val64 = readq(&bar0->gpio_control);
1576 * start_nic - Turns the device on
1577 * @nic : device private variable.
1579 * This function actually turns the device on. Before this function is
1580 * called,all Registers are configured from their reset states
1581 * and shared memory is allocated but the NIC is still quiescent. On
1582 * calling this function, the device interrupts are cleared and the NIC is
1583 * literally switched on by writing into the adapter control register.
1585 * SUCCESS on success and -1 on failure.
/*
 * start_nic - turn the device on.
 * @nic: device private structure.
 *
 * Programs the PRC ring base addresses and enables each receive ring,
 * enables MC-RLDRAM, clears stale link-state interrupts, verifies the
 * adapter is quiescent, enables the selected interrupt blocks, turns the
 * laser on (ADAPTER_EOI_TX_ON) and applies the SXE-002 LED workaround
 * before scheduling the link-state task.
 * Return: SUCCESS on success and -1 on failure (per the header comment
 * preceding this function in the file).
 */
1588 static int start_nic(struct s2io_nic *nic)
1590 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1591 struct net_device *dev = nic->dev;
1592 register u64 val64 = 0;
1595 mac_info_t *mac_control;
1596 struct config_param *config;
1598 mac_control = &nic->mac_control;
1599 config = &nic->config;
1601 /* PRC Initialization and configuration */
1602 for (i = 0; i < config->rx_ring_num; i++) {
1603 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1604 &bar0->prc_rxd0_n[i]);
1606 val64 = readq(&bar0->prc_ctrl_n[i]);
1607 #ifndef CONFIG_2BUFF_MODE
1608 val64 |= PRC_CTRL_RC_ENABLED;
1610 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1612 writeq(val64, &bar0->prc_ctrl_n[i]);
1615 #ifdef CONFIG_2BUFF_MODE
1616 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1617 val64 = readq(&bar0->rx_pa_cfg);
1618 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1619 writeq(val64, &bar0->rx_pa_cfg);
1623 * Enabling MC-RLDRAM. After enabling the device, we timeout
1624 * for around 100ms, which is approximately the time required
1625 * for the device to be ready for operation.
1627 val64 = readq(&bar0->mc_rldram_mrs);
1628 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1629 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1630 val64 = readq(&bar0->mc_rldram_mrs);
1632 msleep(100); /* Delay by around 100 ms. */
1634 /* Enabling ECC Protection. */
/*
 * NOTE(review): the line below CLEARS ADAPTER_ECC_EN despite the
 * "Enabling" comment — either the bit is active-low/mis-named or the
 * comment is wrong; confirm against the Xena register specification.
 */
1635 val64 = readq(&bar0->adapter_control);
1636 val64 &= ~ADAPTER_ECC_EN;
1637 writeq(val64, &bar0->adapter_control);
1640 * Clearing any possible Link state change interrupts that
1641 * could have popped up just before Enabling the card.
1643 val64 = readq(&bar0->mac_rmac_err_reg);
1645 writeq(val64, &bar0->mac_rmac_err_reg);
1648 * Verify if the device is ready to be enabled, if so enable
1651 val64 = readq(&bar0->adapter_status);
1652 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1653 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1654 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1655 (unsigned long long) val64);
1659 /* Enable select interrupts */
1660 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1661 RX_MAC_INTR | MC_INTR;
1662 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1665 * With some switches, link might be already up at this point.
1666 * Because of this weird behavior, when we enable laser,
1667 * we may not get link. We need to handle this. We cannot
1668 * figure out which switch is misbehaving. So we are forced to
1669 * make a global change.
1672 /* Enabling Laser. */
1673 val64 = readq(&bar0->adapter_control);
1674 val64 |= ADAPTER_EOI_TX_ON;
1675 writeq(val64, &bar0->adapter_control);
1677 /* SXE-002: Initialize link and activity LED */
1678 subid = nic->pdev->subsystem_device;
1679 if ((subid & 0xFF) >= 0x07) {
1680 val64 = readq(&bar0->gpio_control);
1681 val64 |= 0x0000800000000000ULL;
1682 writeq(val64, &bar0->gpio_control);
1683 val64 = 0x0411040400000000ULL;
/* Magic LED init value poked at register offset 0x2700 (SXE-002). */
1684 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1688 * Don't see link state interrupts on certain switches, so
1689 * directly scheduling a link state task from here.
1691 schedule_work(&nic->set_link_task);
1697 * free_tx_buffers - Free all queued Tx buffers
1698 * @nic : device private variable.
1700 * Free all queued Tx buffers.
1701 * Return Value: void
/*
 * free_tx_buffers - forcibly free all queued Tx skbs.
 * @nic: device private structure.
 *
 * Walks every descriptor of every Tx FIFO, unmaps the DMA buffer (and
 * each fragment page) of any skb still owned by a descriptor, zeroes the
 * descriptor list and resets the FIFO get/put offsets. Called on teardown
 * when in-flight transmits must be discarded.
 * Return Value: void.
 */
1704 static void free_tx_buffers(struct s2io_nic *nic)
1706 struct net_device *dev = nic->dev;
1707 struct sk_buff *skb;
1710 mac_info_t *mac_control;
1711 struct config_param *config;
1712 int cnt = 0, frg_cnt;
1714 mac_control = &nic->mac_control;
1715 config = &nic->config;
1717 for (i = 0; i < config->tx_fifo_num; i++) {
1718 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1719 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1722 (struct sk_buff *) ((unsigned long) txdp->
1725 memset(txdp, 0, sizeof(TxD_t) *
1729 frg_cnt = skb_shinfo(skb)->nr_frags;
/* Unmap the linear part; skb->len - skb->data_len is the linear length. */
1730 pci_unmap_single(nic->pdev, (dma_addr_t)
1731 txdp->Buffer_Pointer,
1732 skb->len - skb->data_len,
1738 for (j = 0; j < frg_cnt; j++, txdp++) {
1740 &skb_shinfo(skb)->frags[j];
1741 pci_unmap_page(nic->pdev,
1751 memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
1755 "%s:forcibly freeing %d skbs on FIFO%d\n",
1757 mac_control->fifos[i].tx_curr_get_info.offset = 0;
1758 mac_control->fifos[i].tx_curr_put_info.offset = 0;
1763 * stop_nic - To stop the nic
1764 * @nic : device private variable.
1766 * This function does exactly the opposite of what the start_nic()
1767 * function does. This function is called to stop the device.
/*
 * stop_nic - turn the device off.
 * @nic: device private structure.
 *
 * Mirrors start_nic(): disables the same interrupt blocks that
 * start_nic() enabled, then clears PRC_CTRL_RC_ENABLED on every
 * receive ring to stop the PRC engines.
 */
1772 static void stop_nic(struct s2io_nic *nic)
1774 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1775 register u64 val64 = 0;
1776 u16 interruptible, i;
1777 mac_info_t *mac_control;
1778 struct config_param *config;
1780 mac_control = &nic->mac_control;
1781 config = &nic->config;
1783 /* Disable all interrupts */
1784 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1785 RX_MAC_INTR | MC_INTR;
1786 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
1789 for (i = 0; i < config->rx_ring_num; i++) {
1790 val64 = readq(&bar0->prc_ctrl_n[i]);
1791 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
1792 writeq(val64, &bar0->prc_ctrl_n[i]);
1797 * fill_rx_buffers - Allocates the Rx side skbs
1798 * @nic: device private variable
1799 * @ring_no: ring number
1801 * The function allocates Rx side skbs and puts the physical
1802 * address of these buffers into the RxD buffer pointers, so that the NIC
1803 * can DMA the received frame into these locations.
1804 * The NIC supports 3 receive modes, viz
1806 * 2. three buffer and
1807 * 3. Five buffer modes.
1808 * Each mode defines how many fragments the received frame will be split
1809 * up into by the NIC. The frame is split into L3 header, L4 Header,
1810 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
1811 * is split into 3 fragments. As of now only single buffer mode is
1814 * SUCCESS on success or an appropriate -ve value on failure.
/*
 * fill_rx_buffers - allocate and post Rx skbs for one ring.
 * @nic:     device private structure.
 * @ring_no: index of the ring to replenish.
 *
 * Replenishes the ring up to its pkt_cnt budget: allocates an skb per
 * free RxD, DMA-maps it, programs the buffer pointer/size fields and
 * hands ownership to the NIC (RXD_OWN_XENA). Handles end-of-block
 * descriptors to follow the link to the next block, in both single-buffer
 * and (under CONFIG_2BUFF_MODE) two-buffer descriptor layouts.
 * Return: SUCCESS or a negative errno (-ENOMEM on allocation failure, as
 * checked by the callers in s2io_poll).
 */
1817 int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1819 struct net_device *dev = nic->dev;
1820 struct sk_buff *skb;
1822 int off, off1, size, block_no, block_no1;
1823 int offset, offset1;
1826 mac_info_t *mac_control;
1827 struct config_param *config;
1828 #ifdef CONFIG_2BUFF_MODE
1833 dma_addr_t rxdpphys;
1835 #ifndef CONFIG_S2IO_NAPI
1836 unsigned long flags;
1839 mac_control = &nic->mac_control;
1840 config = &nic->config;
/* Number of buffers still missing from the ring. */
1841 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
1842 atomic_read(&nic->rx_bufs_left[ring_no]);
/* Worst-case frame size: MTU plus L2/LLC/SNAP headers. */
1843 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
1844 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
1846 while (alloc_tab < alloc_cnt) {
1847 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1849 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
1851 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
1852 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
1853 #ifndef CONFIG_2BUFF_MODE
1854 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
1855 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
1857 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
1858 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
1861 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1862 block_virt_addr + off;
/* Ring full: put pointer caught up with get pointer on a used RxD. */
1863 if ((offset == offset1) && (rxdp->Host_Control)) {
1864 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
1865 DBG_PRINT(INTR_DBG, " info equated\n");
1868 #ifndef CONFIG_2BUFF_MODE
/* End-of-block RxD: advance to the next block in the ring. */
1869 if (rxdp->Control_1 == END_OF_BLOCK) {
1870 mac_control->rings[ring_no].rx_curr_put_info.
1872 mac_control->rings[ring_no].rx_curr_put_info.
1873 block_index %= mac_control->rings[ring_no].block_count;
1874 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1877 off %= (MAX_RXDS_PER_BLOCK + 1);
1878 mac_control->rings[ring_no].rx_curr_put_info.offset =
1880 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
1881 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
1884 #ifndef CONFIG_S2IO_NAPI
1885 spin_lock_irqsave(&nic->put_lock, flags);
1886 mac_control->rings[ring_no].put_pos =
1887 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
1888 spin_unlock_irqrestore(&nic->put_lock, flags);
1891 if (rxdp->Host_Control == END_OF_BLOCK) {
1892 mac_control->rings[ring_no].rx_curr_put_info.
1894 mac_control->rings[ring_no].rx_curr_put_info.block_index
1895 %= mac_control->rings[ring_no].block_count;
1896 block_no = mac_control->rings[ring_no].rx_curr_put_info
1899 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
1900 dev->name, block_no,
1901 (unsigned long long) rxdp->Control_1);
1902 mac_control->rings[ring_no].rx_curr_put_info.offset =
1904 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1907 #ifndef CONFIG_S2IO_NAPI
1908 spin_lock_irqsave(&nic->put_lock, flags);
1909 mac_control->rings[ring_no].put_pos = (block_no *
1910 (MAX_RXDS_PER_BLOCK + 1)) + off;
1911 spin_unlock_irqrestore(&nic->put_lock, flags);
1915 #ifndef CONFIG_2BUFF_MODE
/* Skip descriptors still owned by the NIC or marked buffer-empty. */
1916 if (rxdp->Control_1 & RXD_OWN_XENA)
1918 if (rxdp->Control_2 & BIT(0))
1921 mac_control->rings[ring_no].rx_curr_put_info.
1925 #ifdef CONFIG_2BUFF_MODE
1927 * RxDs Spanning cache lines will be replenished only
1928 * if the succeeding RxD is also owned by Host. It
1929 * will always be the ((8*i)+3) and ((8*i)+6)
1930 * descriptors for the 48 byte descriptor. The offending
1931 * descriptor is of course the 3rd descriptor.
1933 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
1934 block_dma_addr + (off * sizeof(RxD_t));
1935 if (((u64) (rxdpphys)) % 128 > 80) {
1936 rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
1937 block_virt_addr + (off + 1);
1938 if (rxdpnext->Host_Control == END_OF_BLOCK) {
1939 nextblk = (block_no + 1) %
1940 (mac_control->rings[ring_no].block_count);
1941 rxdpnext = mac_control->rings[ring_no].rx_blocks
1942 [nextblk].block_virt_addr;
1944 if (rxdpnext->Control_2 & BIT(0))
1949 #ifndef CONFIG_2BUFF_MODE
1950 skb = dev_alloc_skb(size + NET_IP_ALIGN);
1952 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
1955 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
1956 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
1959 #ifndef CONFIG_2BUFF_MODE
1960 skb_reserve(skb, NET_IP_ALIGN);
1961 memset(rxdp, 0, sizeof(RxD_t));
1962 rxdp->Buffer0_ptr = pci_map_single
1963 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
1964 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
1965 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
/* Stash the skb pointer so the Rx handler can recover it. */
1966 rxdp->Host_Control = (unsigned long) (skb);
1967 rxdp->Control_1 |= RXD_OWN_XENA;
1969 off %= (MAX_RXDS_PER_BLOCK + 1);
1970 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1972 ba = &mac_control->rings[ring_no].ba[block_no][off];
1973 skb_reserve(skb, BUF0_LEN);
/* Align skb->data past ALIGN_SIZE boundary for buffer 2. */
1974 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
1976 skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
1978 memset(rxdp, 0, sizeof(RxD_t));
1979 rxdp->Buffer2_ptr = pci_map_single
1980 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
1981 PCI_DMA_FROMDEVICE);
1983 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
1984 PCI_DMA_FROMDEVICE);
1986 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
1987 PCI_DMA_FROMDEVICE);
1989 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
1990 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
1991 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
1992 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
1993 rxdp->Host_Control = (u64) ((unsigned long) (skb));
1994 rxdp->Control_1 |= RXD_OWN_XENA;
1996 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1998 rxdp->Control_2 |= SET_RXD_MARKER;
2000 atomic_inc(&nic->rx_bufs_left[ring_no]);
2009 * free_rx_buffers - Frees all Rx buffers
2010 * @sp: device private variable.
2012 * This function will free all Rx buffers allocated by host.
/*
 * free_rx_buffers - free every Rx buffer posted by the host.
 * @sp: device private structure.
 *
 * Walks all RxDs of all rings; for each descriptor not owned by the NIC
 * that still holds an skb, unmaps its DMA buffer(s) (one buffer in
 * single-buffer mode, three under CONFIG_2BUFF_MODE), zeroes the
 * descriptor and updates the rx_bufs_left count. Finally resets the
 * get/put block index and offset for each ring.
 */
2017 static void free_rx_buffers(struct s2io_nic *sp)
2019 struct net_device *dev = sp->dev;
2020 int i, j, blk = 0, off, buf_cnt = 0;
2022 struct sk_buff *skb;
2023 mac_info_t *mac_control;
2024 struct config_param *config;
2025 #ifdef CONFIG_2BUFF_MODE
2029 mac_control = &sp->mac_control;
2030 config = &sp->config;
2032 for (i = 0; i < config->rx_ring_num; i++) {
2033 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
2034 off = j % (MAX_RXDS_PER_BLOCK + 1);
2035 rxdp = mac_control->rings[i].rx_blocks[blk].
2036 block_virt_addr + off;
2038 #ifndef CONFIG_2BUFF_MODE
/* End-of-block marker: chase the link into the next block. */
2039 if (rxdp->Control_1 == END_OF_BLOCK) {
2041 (RxD_t *) ((unsigned long) rxdp->
2047 if (rxdp->Host_Control == END_OF_BLOCK) {
2053 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2054 memset(rxdp, 0, sizeof(RxD_t));
2059 (struct sk_buff *) ((unsigned long) rxdp->
2062 #ifndef CONFIG_2BUFF_MODE
2063 pci_unmap_single(sp->pdev, (dma_addr_t)
2066 HEADER_ETHERNET_II_802_3_SIZE
2067 + HEADER_802_2_SIZE +
2069 PCI_DMA_FROMDEVICE);
2071 ba = &mac_control->rings[i].ba[blk][off];
2072 pci_unmap_single(sp->pdev, (dma_addr_t)
2075 PCI_DMA_FROMDEVICE);
2076 pci_unmap_single(sp->pdev, (dma_addr_t)
2079 PCI_DMA_FROMDEVICE);
2080 pci_unmap_single(sp->pdev, (dma_addr_t)
2082 dev->mtu + BUF0_LEN + 4,
2083 PCI_DMA_FROMDEVICE);
2086 atomic_dec(&sp->rx_bufs_left[i]);
2089 memset(rxdp, 0, sizeof(RxD_t));
2091 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2092 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2093 mac_control->rings[i].rx_curr_put_info.offset = 0;
2094 mac_control->rings[i].rx_curr_get_info.offset = 0;
2095 atomic_set(&sp->rx_bufs_left[i], 0);
2096 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2097 dev->name, buf_cnt, i);
2102 * s2io_poll - Rx interrupt handler for NAPI support
2103 * @dev : pointer to the device structure.
2104 * @budget : The number of packets that were budgeted to be processed
2105 * during one pass through the "Poll" function.
2107 * Comes into picture only if NAPI support has been incorporated. It does
2108 * the same thing that rx_intr_handler does, but not in a interrupt context
2109 * also It will process only a given number of packets.
2111 * 0 on success and 1 if there are No Rx packets to be processed.
#if defined(CONFIG_S2IO_NAPI)
/*
 * s2io_poll - NAPI poll callback.
 * @dev:    net device.
 * @budget: in/out packet budget for this poll pass.
 *
 * Clamps the budget to dev->quota, acknowledges rx_traffic_int (R1
 * register: write back the value read to clear it), runs
 * rx_intr_handler() over every ring until the quota is consumed,
 * refills the Rx buffers, and — on the completion path — calls
 * netif_rx_complete() and re-enables Rx traffic interrupts.
 * Return: 0 when all work is done, 1 when the quota was exhausted (per
 * the header comment preceding this function in the file).
 *
 * NOTE(review): bar0 is cast without the __iomem qualifier here, unlike
 * the other functions in this file — a sparse annotation inconsistency.
 */
2115 static int s2io_poll(struct net_device *dev, int *budget)
2117 nic_t *nic = dev->priv;
2118 int pkt_cnt = 0, org_pkts_to_process;
2119 mac_info_t *mac_control;
2120 struct config_param *config;
2121 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
2125 atomic_inc(&nic->isr_cnt);
2126 mac_control = &nic->mac_control;
2127 config = &nic->config;
2129 nic->pkts_to_process = *budget;
2130 if (nic->pkts_to_process > dev->quota)
2131 nic->pkts_to_process = dev->quota;
2132 org_pkts_to_process = nic->pkts_to_process;
2134 val64 = readq(&bar0->rx_traffic_int);
2135 writeq(val64, &bar0->rx_traffic_int);
2137 for (i = 0; i < config->rx_ring_num; i++) {
2138 rx_intr_handler(&mac_control->rings[i]);
2139 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2140 if (!nic->pkts_to_process) {
2141 /* Quota for the current iteration has been met */
2148 dev->quota -= pkt_cnt;
2150 netif_rx_complete(dev);
2152 for (i = 0; i < config->rx_ring_num; i++) {
2153 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2154 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2155 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2159 /* Re enable the Rx interrupts. */
2160 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2161 atomic_dec(&nic->isr_cnt);
2165 dev->quota -= pkt_cnt;
2168 for (i = 0; i < config->rx_ring_num; i++) {
2169 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2170 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2171 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2175 atomic_dec(&nic->isr_cnt);
2181 * rx_intr_handler - Rx interrupt handler
2182 * @nic: device private variable.
2184 * If the interrupt is because of a received frame or if the
2185 * receive ring contains fresh as yet un-processed frames,this function is
2186 * called. It picks out the RxD at which place the last Rx processing had
2187 * stopped and sends the skb to the OSM's Rx handler and then increments
/*
 * rx_intr_handler - process received frames on one ring.
 * @ring_data: per-ring state (descriptor blocks, get/put pointers).
 *
 * Called when a frame has been received or the ring holds unprocessed
 * frames. Resumes from the last get position, unmaps each up-to-date
 * RxD's buffer(s) and passes it to rx_osm_handler(), advancing the get
 * pointer (wrapping across blocks). Under NAPI the loop also decrements
 * nic->pkts_to_process and stops when the quota is consumed.
 *
 * BUGFIX: this function previously read/wrote bar0->tx_traffic_int to
 * acknowledge the interrupt, although its own comment — and the Rx
 * acknowledge in s2io_poll() — use rx_traffic_int (tx_traffic_int is
 * acknowledged by tx_intr_handler()). Acking the Tx R1 register here
 * would silently discard pending Tx-completion interrupts; corrected to
 * rx_traffic_int below.
 */
2192 static void rx_intr_handler(ring_info_t *ring_data)
2194 nic_t *nic = ring_data->nic;
2195 struct net_device *dev = (struct net_device *) nic->dev;
2196 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2197 int get_block, get_offset, put_block, put_offset, ring_bufs;
2198 rx_curr_get_info_t get_info, put_info;
2200 struct sk_buff *skb;
2201 #ifndef CONFIG_S2IO_NAPI
2206 spin_lock(&nic->rx_lock);
2207 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2208 DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n",
2209 __FUNCTION__, dev->name);
2210 spin_unlock(&nic->rx_lock);
2214 * rx_traffic_int reg is an R1 register, hence we read and write
2215 * back the same value in the register to clear it
2217 val64 = readq(&bar0->rx_traffic_int);
2218 writeq(val64, &bar0->rx_traffic_int);
2220 get_info = ring_data->rx_curr_get_info;
2221 get_block = get_info.block_index;
2222 put_info = ring_data->rx_curr_put_info;
2223 put_block = put_info.block_index;
2224 ring_bufs = get_info.ring_len+1;
2225 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2227 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2229 #ifndef CONFIG_S2IO_NAPI
/* put_pos is updated by fill_rx_buffers() under put_lock. */
2230 spin_lock(&nic->put_lock);
2231 put_offset = ring_data->put_pos;
2232 spin_unlock(&nic->put_lock);
2234 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
2237 while (RXD_IS_UP2DT(rxdp) &&
2238 (((get_offset + 1) % ring_bufs) != put_offset)) {
2239 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2241 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2243 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2244 spin_unlock(&nic->rx_lock);
2247 #ifndef CONFIG_2BUFF_MODE
2248 pci_unmap_single(nic->pdev, (dma_addr_t)
2251 HEADER_ETHERNET_II_802_3_SIZE +
2254 PCI_DMA_FROMDEVICE);
2256 pci_unmap_single(nic->pdev, (dma_addr_t)
2258 BUF0_LEN, PCI_DMA_FROMDEVICE);
2259 pci_unmap_single(nic->pdev, (dma_addr_t)
2261 BUF1_LEN, PCI_DMA_FROMDEVICE);
2262 pci_unmap_single(nic->pdev, (dma_addr_t)
2264 dev->mtu + BUF0_LEN + 4,
2265 PCI_DMA_FROMDEVICE);
2267 rx_osm_handler(ring_data, rxdp);
2269 ring_data->rx_curr_get_info.offset =
2271 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
/* Crossed a block boundary: wrap offset and advance the block index. */
2273 if (get_info.offset &&
2274 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2275 get_info.offset = 0;
2276 ring_data->rx_curr_get_info.offset
2279 get_block %= ring_data->block_count;
2280 ring_data->rx_curr_get_info.block_index
2282 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2285 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2287 #ifdef CONFIG_S2IO_NAPI
2288 nic->pkts_to_process -= 1;
2289 if (!nic->pkts_to_process)
2293 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2297 spin_unlock(&nic->rx_lock);
2301 * tx_intr_handler - Transmit interrupt handler
2302 * @nic : device private variable
2304 * If an interrupt was raised to indicate DMA complete of the
2305 * Tx packet, this function is called. It identifies the last TxD
2306 * whose buffer was freed and frees all skbs whose data have already
2307 * DMA'ed into the NICs internal memory.
/*
 * tx_intr_handler - reclaim completed Tx descriptors for one FIFO.
 * @fifo_data: per-FIFO state (descriptor list, get/put pointers).
 *
 * Acknowledges tx_traffic_int (R1 register: write back the value read to
 * clear it), then walks descriptors from the get pointer until it meets
 * the put pointer or a descriptor still owned by the NIC: reports any
 * TxD error T_CODE, unmaps the linear buffer and all fragment pages,
 * zeroes the descriptor list, accounts tx_bytes and frees the skb with
 * dev_kfree_skb_irq(). Finally wakes the queue (under tx_lock) if it
 * was stopped.
 */
2312 static void tx_intr_handler(fifo_info_t *fifo_data)
2314 nic_t *nic = fifo_data->nic;
2315 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2316 struct net_device *dev = (struct net_device *) nic->dev;
2317 tx_curr_get_info_t get_info, put_info;
2318 struct sk_buff *skb;
2321 register u64 val64 = 0;
2324 * tx_traffic_int reg is an R1 register, hence we read and write
2325 * back the same value in the register to clear it
2327 val64 = readq(&bar0->tx_traffic_int);
2328 writeq(val64, &bar0->tx_traffic_int);
2330 get_info = fifo_data->tx_curr_get_info;
2331 put_info = fifo_data->tx_curr_put_info;
2332 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2334 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2335 (get_info.offset != put_info.offset) &&
2336 (txdlp->Host_Control)) {
2337 /* Check for TxD errors */
2338 if (txdlp->Control_1 & TXD_T_CODE) {
2339 unsigned long long err;
2340 err = txdlp->Control_1 & TXD_T_CODE;
2341 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2345 skb = (struct sk_buff *) ((unsigned long)
2346 txdlp->Host_Control);
2348 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2350 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2354 frg_cnt = skb_shinfo(skb)->nr_frags;
2355 nic->tx_pkt_count++;
/* Unmap the linear part; skb->len - skb->data_len is the linear length. */
2357 pci_unmap_single(nic->pdev, (dma_addr_t)
2358 txdlp->Buffer_Pointer,
2359 skb->len - skb->data_len,
2365 for (j = 0; j < frg_cnt; j++, txdlp++) {
2367 &skb_shinfo(skb)->frags[j];
2368 pci_unmap_page(nic->pdev,
2378 (sizeof(TxD_t) * fifo_data->max_txds));
2380 /* Updating the statistics block */
2381 nic->stats.tx_bytes += skb->len;
2382 dev_kfree_skb_irq(skb);
2385 get_info.offset %= get_info.fifo_len + 1;
2386 txdlp = (TxD_t *) fifo_data->list_info
2387 [get_info.offset].list_virt_addr;
2388 fifo_data->tx_curr_get_info.offset =
2392 spin_lock(&nic->tx_lock);
2393 if (netif_queue_stopped(dev))
2394 netif_wake_queue(dev);
2395 spin_unlock(&nic->tx_lock);
2399 * alarm_intr_handler - Alarm Interrrupt handler
2400 * @nic: device private variable
2401 * Description: If the interrupt was neither because of Rx packet or Tx
2402 * complete, this function is called. If the interrupt was to indicate
2403 * a loss of link, the OSM link status handler is invoked for any other
2404 * alarm interrupt the block that raised the interrupt is displayed
2405 * and a H/W reset is issued.
/*
 * NOTE(review): interior lines (closing braces, some statements) are
 * elided in this extract.
 * Handles alarm (error) interrupts: link-state change, memory-controller
 * ECC errors, serious-error sources, and the PCC_FB_ECC errata
 * workaround. Error registers are write-1-to-clear, hence each
 * read/write-back pair.
 */
2410 static void alarm_intr_handler(struct s2io_nic *nic)
2412 struct net_device *dev = (struct net_device *) nic->dev;
2413 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2414 register u64 val64 = 0, err_reg = 0;
2416 /* Handling link status change error Intr */
2417 err_reg = readq(&bar0->mac_rmac_err_reg);
2418 writeq(err_reg, &bar0->mac_rmac_err_reg);
2419 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
/* Defer link handling to process context via the set_link work item. */
2420 schedule_work(&nic->set_link_task);
2423 /* Handling Ecc errors */
2424 val64 = readq(&bar0->mc_err_reg);
2425 writeq(val64, &bar0->mc_err_reg);
2426 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2427 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
/* Double-bit ECC is unrecoverable: stop Tx and schedule a reset. */
2428 nic->mac_control.stats_info->sw_stat.
2430 DBG_PRINT(ERR_DBG, "%s: Device indicates ",
2432 DBG_PRINT(ERR_DBG, "double ECC error!!\n");
2433 netif_stop_queue(dev);
2434 schedule_work(&nic->rst_timer_task);
/* Single-bit ECC: only counted in software stats (counter name elided). */
2436 nic->mac_control.stats_info->sw_stat.
2441 /* In case of a serious error, the device will be Reset. */
2442 val64 = readq(&bar0->serr_source);
2443 if (val64 & SERR_SOURCE_ANY) {
2444 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2445 DBG_PRINT(ERR_DBG, "serious error!!\n");
2446 netif_stop_queue(dev);
2447 schedule_work(&nic->rst_timer_task);
2451 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2452 * Error occurs, the adapter will be recycled by disabling the
2453 * adapter enable bit and enabling it again after the device
2454 * becomes Quiescent.
2456 val64 = readq(&bar0->pcc_err_reg);
2457 writeq(val64, &bar0->pcc_err_reg);
2458 if (val64 & PCC_FB_ECC_DB_ERR) {
2459 u64 ac = readq(&bar0->adapter_control);
2460 ac &= ~(ADAPTER_CNTL_EN);
2461 writeq(ac, &bar0->adapter_control);
/* Read back to flush the disable before re-enable via set_link_task. */
2462 ac = readq(&bar0->adapter_control);
2463 schedule_work(&nic->set_link_task);
2466 /* Other type of interrupts are not being handled now, TODO */
2470 * wait_for_cmd_complete - waits for a command to complete.
2471 * @sp : private member of the device structure, which is a pointer to the
2472 * s2io_nic structure.
2473 * Description: Function that waits for a command to Write into RMAC
2474 * ADDR DATA registers to be completed and returns either success or
2475 * error depending on whether the command was complete or not.
2477 * SUCCESS on success and FAILURE on failure.
/*
 * NOTE(review): the polling loop body (delay, retry counter bump, and
 * the SUCCESS path) is elided in this extract — only the loop head and
 * the strobe check are visible.
 * Polls rmac_addr_cmd_mem until the STROBE_CMD_EXECUTING bit clears,
 * returning SUCCESS or FAILURE (timeout behavior not visible here).
 */
2480 int wait_for_cmd_complete(nic_t * sp)
2482 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2483 int ret = FAILURE, cnt = 0;
2487 val64 = readq(&bar0->rmac_addr_cmd_mem);
2488 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2501 * s2io_reset - Resets the card.
2502 * @sp : private member of the device structure.
2503 * Description: Function to Reset the card. This function then also
2504 * restores the previously saved PCI configuration space registers as
2505 * the card reset also resets the configuration space.
/*
 * NOTE(review): some interior lines (local declarations for pci_cmd /
 * subid, the delay call, closing braces) are elided in this extract.
 * Performs a software reset of the card, then restores PCI config
 * space, re-programs the byte swapper, clears sticky PCI/PCI-X error
 * bits, zeroes the OS-side statistics, and applies the SXE-002 LED
 * workaround for the affected subsystem IDs.
 */
2510 void s2io_reset(nic_t * sp)
2512 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2516 val64 = SW_RESET_ALL;
2517 writeq(val64, &bar0->sw_reset);
2520 * At this stage, if the PCI write is indeed completed, the
2521 * card is reset and so is the PCI Config space of the device.
2522 * So a read cannot be issued at this stage on any of the
2523 * registers to ensure the write into "sw_reset" register
2525 * Question: Is there any system call that will explicitly force
2526 * all the write commands still pending on the bus to be pushed
2528 * As of now I'am just giving a 250ms delay and hoping that the
2529 * PCI write to sw_reset register is done by this time.
2533 /* Restore the PCI state saved during initializarion. */
2534 pci_restore_state(sp->pdev);
2540 /* Set swapper to enable I/O register access */
2541 s2io_set_swapper(sp);
2543 /* Clear certain PCI/PCI-X fields after reset */
2544 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
2545 pci_cmd &= 0x7FFF; /* Clear parity err detect bit */
2546 pci_write_config_word(sp->pdev, PCI_COMMAND, pci_cmd);
2548 val64 = readq(&bar0->txpic_int_reg);
2549 val64 &= ~BIT(62); /* Clearing PCI_STATUS error reflected here */
2550 writeq(val64, &bar0->txpic_int_reg);
2552 /* Clearing PCIX Ecc status register */
2553 pci_write_config_dword(sp->pdev, 0x68, 0);
2555 /* Reset device statistics maintained by OS */
2556 memset(&sp->stats, 0, sizeof (struct net_device_stats));
2558 /* SXE-002: Configure link and activity LED to turn it off */
2559 subid = sp->pdev->subsystem_device;
2560 if ((subid & 0xFF) >= 0x07) {
2561 val64 = readq(&bar0->gpio_control);
2562 val64 |= 0x0000800000000000ULL;
2563 writeq(val64, &bar0->gpio_control);
/* Magic value/offset 0x2700: undocumented LED register — per errata. */
2564 val64 = 0x0411040400000000ULL;
2565 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
/* Force the one-time device-enable path to run again after reset. */
2568 sp->device_enabled_once = FALSE;
2572 * s2io_set_swapper - to set the swapper controle on the card
2573 * @sp : private member of the device structure,
2574 * pointer to the s2io_nic structure.
2575 * Description: Function to set the swapper control on the card
2576 * correctly depending on the 'endianness' of the system.
2578 * SUCCESS on success and FAILURE on failure.
/*
 * NOTE(review): loop heads, the big/little-endian #ifdef branches, and
 * several return statements are elided in this extract.
 * Programs the byte-swapper so 64-bit register accesses are correct
 * for the host's endianness. Correctness is verified by reading the
 * pif_rd_swapper_fb feedback register (expected 0x0123456789ABCDEF)
 * and by a write/read-back of xmsi_address.
 */
2581 int s2io_set_swapper(nic_t * sp)
2583 struct net_device *dev = sp->dev;
2584 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2585 u64 val64, valt, valr;
2588 * Set proper endian settings and verify the same by reading
2589 * the PIF Feed-back register.
2592 val64 = readq(&bar0->pif_rd_swapper_fb);
2593 if (val64 != 0x0123456789ABCDEFULL) {
/* Try each swapper FE/SE combination until the feedback reads back. */
2595 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2596 0x8100008181000081ULL, /* FE=1, SE=0 */
2597 0x4200004242000042ULL, /* FE=0, SE=1 */
2598 0}; /* FE=0, SE=0 */
2601 writeq(value[i], &bar0->swapper_ctrl);
2602 val64 = readq(&bar0->pif_rd_swapper_fb);
2603 if (val64 == 0x0123456789ABCDEFULL)
2608 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2610 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2611 (unsigned long long) val64);
2616 valr = readq(&bar0->swapper_ctrl);
/* Second probe: verify write-path swapping via xmsi_address. */
2619 valt = 0x0123456789ABCDEFULL;
2620 writeq(valt, &bar0->xmsi_address);
2621 val64 = readq(&bar0->xmsi_address);
2625 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2626 0x0081810000818100ULL, /* FE=1, SE=0 */
2627 0x0042420000424200ULL, /* FE=0, SE=1 */
2628 0}; /* FE=0, SE=0 */
2631 writeq((value[i] | valr), &bar0->swapper_ctrl);
2632 writeq(valt, &bar0->xmsi_address);
2633 val64 = readq(&bar0->xmsi_address);
2639 unsigned long long x = val64;
2640 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2641 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
/* Keep only the probed FE/SE bits; rebuild the rest below. */
2645 val64 = readq(&bar0->swapper_ctrl);
2646 val64 &= 0xFFFF000000000000ULL;
2650 * The device by default set to a big endian format, so a
2651 * big endian driver need not set anything.
2653 val64 |= (SWAPPER_CTRL_TXP_FE |
2654 SWAPPER_CTRL_TXP_SE |
2655 SWAPPER_CTRL_TXD_R_FE |
2656 SWAPPER_CTRL_TXD_W_FE |
2657 SWAPPER_CTRL_TXF_R_FE |
2658 SWAPPER_CTRL_RXD_R_FE |
2659 SWAPPER_CTRL_RXD_W_FE |
2660 SWAPPER_CTRL_RXF_W_FE |
2661 SWAPPER_CTRL_XMSI_FE |
2662 SWAPPER_CTRL_XMSI_SE |
2663 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2664 writeq(val64, &bar0->swapper_ctrl);
2667 * Initially we enable all bits to make it accessible by the
2668 * driver, then we selectively enable only those bits that
/* Little-endian variant: also set the SE bits on TXD/RXD paths.
 * NOTE(review): the #ifdef separating the two variants is elided. */
2671 val64 |= (SWAPPER_CTRL_TXP_FE |
2672 SWAPPER_CTRL_TXP_SE |
2673 SWAPPER_CTRL_TXD_R_FE |
2674 SWAPPER_CTRL_TXD_R_SE |
2675 SWAPPER_CTRL_TXD_W_FE |
2676 SWAPPER_CTRL_TXD_W_SE |
2677 SWAPPER_CTRL_TXF_R_FE |
2678 SWAPPER_CTRL_RXD_R_FE |
2679 SWAPPER_CTRL_RXD_R_SE |
2680 SWAPPER_CTRL_RXD_W_FE |
2681 SWAPPER_CTRL_RXD_W_SE |
2682 SWAPPER_CTRL_RXF_W_FE |
2683 SWAPPER_CTRL_XMSI_FE |
2684 SWAPPER_CTRL_XMSI_SE |
2685 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2686 writeq(val64, &bar0->swapper_ctrl);
2688 val64 = readq(&bar0->swapper_ctrl);
2691 * Verifying if endian settings are accurate by reading a
2692 * feedback register.
2694 val64 = readq(&bar0->pif_rd_swapper_fb);
2695 if (val64 != 0x0123456789ABCDEFULL) {
2696 /* Endian settings are incorrect, calls for another dekko. */
2697 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2699 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2700 (unsigned long long) val64);
2707 /* ********************************************************* *
2708 * Functions defined below concern the OS part of the driver *
2709 * ********************************************************* */
2712 * s2io_open - open entry point of the driver
2713 * @dev : pointer to the device structure.
2715 * This function is the open entry point of the driver. It mainly calls a
2716 * function to allocate Rx buffers and inserts them into the buffer
2717 * descriptors and then enables the Rx part of the NIC.
2719 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * NOTE(review): error-code assignments, the success return, and the
 * final hw_init_failed label/cleanup are elided in this extract.
 * Open entry point: brings the H/W up, registers the shared ISR, sets
 * the MAC filter, and starts the Tx queue. On failure, unwinds via the
 * goto-label cleanup chain at the bottom.
 */
2723 int s2io_open(struct net_device *dev)
2725 nic_t *sp = dev->priv;
2729 * Make sure you have link off by default every time
2730 * Nic is initialized
2732 netif_carrier_off(dev);
2733 sp->last_link_state = 0; /* Unkown link state */
2735 /* Initialize H/W and enable interrupts */
2736 if (s2io_card_up(sp)) {
2737 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2740 goto hw_init_failed;
2743 /* After proper initialization of H/W, register ISR */
/* SA_SHIRQ: the IRQ line may be shared with other devices. */
2744 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
2747 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2749 goto isr_registration_failed;
2752 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2753 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2755 goto setting_mac_address_failed;
2758 netif_start_queue(dev);
/* Unwind in reverse order of acquisition. */
2761 setting_mac_address_failed:
2762 free_irq(sp->pdev->irq, dev);
2763 isr_registration_failed:
2770 * s2io_close -close entry point of the driver
2771 * @dev : device pointer.
2773 * This is the stop entry point of the driver. It needs to undo exactly
2774 * whatever was done by the open entry point,thus it's usually referred to
2775 * as the close function.Among other things this function mainly stops the
2776 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2778 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * NOTE(review): the s2io_card_down() call and the return statement are
 * elided in this extract.
 * Close entry point: flushes pending work items, stops the Tx queue,
 * shuts the card down, releases the IRQ, and marks the device closed.
 */
2782 int s2io_close(struct net_device *dev)
2784 nic_t *sp = dev->priv;
/* Ensure set_link/reset work items finish before teardown. */
2785 flush_scheduled_work();
2786 netif_stop_queue(dev);
2787 /* Reset card, kill tasklet and free Tx and Rx buffers. */
2790 free_irq(sp->pdev->irq, dev);
2791 sp->device_close_flag = TRUE; /* Device is shut down. */
2796 * s2io_xmit - Tx entry point of te driver
2797 * @skb : the socket buffer containing the Tx data.
2798 * @dev : device pointer.
2800 * This function is the Tx entry point of the driver. S2IO NIC supports
2801 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
2802 * NOTE: when device cant queue the pkt,just the trans_start variable will
2805 * 0 on success & 1 on failure.
/*
 * NOTE(review): several interior lines are elided in this extract
 * (txdp/mss declarations, queue selection, return statements, some
 * closing braces).
 * Hard-start-xmit: builds a TxD list for the skb (linear part plus page
 * fragments) under tx_lock, applies LSO/checksum-offload flags, hands
 * the list to the chosen FIFO, and advances/wraps the put pointer.
 * Returns 0 on success, non-zero when the packet cannot be queued.
 */
2808 int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2810 nic_t *sp = dev->priv;
2811 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
2814 TxFIFO_element_t __iomem *tx_fifo;
2815 unsigned long flags;
2819 mac_info_t *mac_control;
2820 struct config_param *config;
2821 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2823 mac_control = &sp->mac_control;
2824 config = &sp->config;
2826 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
2827 spin_lock_irqsave(&sp->tx_lock, flags);
/* Refuse to queue while the card is being reset. */
2828 if (atomic_read(&sp->card_state) == CARD_DOWN) {
2829 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
2831 spin_unlock_irqrestore(&sp->tx_lock, flags);
2838 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
2839 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
2840 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
2843 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2844 /* Avoid "put" pointer going beyond "get" pointer */
2845 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
2846 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
2847 netif_stop_queue(dev);
2849 spin_unlock_irqrestore(&sp->tx_lock, flags);
/* TCP segmentation offload: pass the MSS to the NIC when set. */
2853 mss = skb_shinfo(skb)->tso_size;
2855 txdp->Control_1 |= TXD_TCP_LSO_EN;
2856 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
2860 frg_cnt = skb_shinfo(skb)->nr_frags;
2861 frg_len = skb->len - skb->data_len;
/* Map and describe the linear (non-paged) part of the skb first. */
2863 txdp->Buffer_Pointer = pci_map_single
2864 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
/* Stash the skb pointer for tx_intr_handler() to free later. */
2865 txdp->Host_Control = (unsigned long) skb;
2866 if (skb->ip_summed == CHECKSUM_HW) {
2868 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
2872 txdp->Control_2 |= config->tx_intr_type;
2874 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
2875 TXD_GATHER_CODE_FIRST);
/* Ownership transfers to the NIC once this bit is set. */
2876 txdp->Control_1 |= TXD_LIST_OWN_XENA;
2878 /* For fragmented SKB. */
2879 for (i = 0; i < frg_cnt; i++) {
2880 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2882 txdp->Buffer_Pointer = (u64) pci_map_page
2883 (sp->pdev, frag->page, frag->page_offset,
2884 frag->size, PCI_DMA_TODEVICE);
2885 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
2887 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
/* Kick the FIFO: write the list's physical address, then control. */
2889 tx_fifo = mac_control->tx_FIFO_start[queue];
2890 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
2891 writeq(val64, &tx_fifo->TxDL_Pointer);
2893 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
2898 val64 |= TX_FIFO_SPECIAL_FUNC;
2900 writeq(val64, &tx_fifo->List_Control);
2902 /* Perform a PCI read to flush previous writes */
2903 val64 = readq(&bar0->general_int_status);
2906 put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2907 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
2909 /* Avoid "put" pointer going beyond "get" pointer */
2910 if (((put_off + 1) % queue_len) == get_off) {
2912 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
2914 netif_stop_queue(dev);
2917 dev->trans_start = jiffies;
2918 spin_unlock_irqrestore(&sp->tx_lock, flags);
2924 * s2io_isr - ISR handler of the device .
2925 * @irq: the irq of the device.
2926 * @dev_id: a void pointer to the dev structure of the NIC.
2927 * @pt_regs: pointer to the registers pushed on the stack.
2928 * Description: This function is the ISR handler of the device. It
2929 * identifies the reason for the interrupt and calls the relevant
2930 * service routines. As a contongency measure, this ISR allocates the
2931 * recv buffers, if their numbers are below the panic value which is
2932 * presently set to 25% of the original number of rcv buffers allocated.
2934 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
2935 * IRQ_NONE: will be returned if interrupt is not from our device
/*
 * NOTE(review): local declarations (reason, i, ret), #endif markers,
 * return statements, and some closing braces are elided in this
 * extract.
 * Shared interrupt handler: reads general_int_status to classify the
 * interrupt (alarm / Rx / Tx), dispatches to the per-ring and per-FIFO
 * handlers (or schedules NAPI for Rx), and in non-NAPI builds tops up
 * Rx buffers from the ISR when they fall to panic level.
 */
2937 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2939 struct net_device *dev = (struct net_device *) dev_id;
2940 nic_t *sp = dev->priv;
2941 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2944 mac_info_t *mac_control;
2945 struct config_param *config;
/* isr_cnt lets other paths detect an ISR in flight. */
2947 atomic_inc(&sp->isr_cnt);
2948 mac_control = &sp->mac_control;
2949 config = &sp->config;
2952 * Identify the cause for interrupt and call the appropriate
2953 * interrupt handler. Causes for the interrupt could be;
2957 * 4. Error in any functional blocks of the NIC.
2959 reason = readq(&bar0->general_int_status);
2962 /* The interrupt was not raised by Xena. */
2963 atomic_dec(&sp->isr_cnt);
2967 if (reason & (GEN_ERROR_INTR))
2968 alarm_intr_handler(sp);
2970 #ifdef CONFIG_S2IO_NAPI
/* NAPI build: mask Rx interrupts and let the poll routine do the work. */
2971 if (reason & GEN_INTR_RXTRAFFIC) {
2972 if (netif_rx_schedule_prep(dev)) {
2973 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
2975 __netif_rx_schedule(dev);
2979 /* If Intr is because of Rx Traffic */
2980 if (reason & GEN_INTR_RXTRAFFIC) {
2981 for (i = 0; i < config->rx_ring_num; i++) {
2982 rx_intr_handler(&mac_control->rings[i]);
2987 /* If Intr is because of Tx Traffic */
2988 if (reason & GEN_INTR_TXTRAFFIC) {
2989 for (i = 0; i < config->tx_fifo_num; i++)
2990 tx_intr_handler(&mac_control->fifos[i]);
2994 * If the Rx buffer count is below the panic threshold then
2995 * reallocate the buffers from the interrupt handler itself,
2996 * else schedule a tasklet to reallocate the buffers.
2998 #ifndef CONFIG_S2IO_NAPI
2999 for (i = 0; i < config->rx_ring_num; i++) {
3001 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3002 int level = rx_buffer_level(sp, rxb_size, i);
/* tasklet_status bit 0 doubles as a "refill in progress" flag. */
3004 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3005 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
3006 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3007 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3008 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3010 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3011 clear_bit(0, (&sp->tasklet_status));
3012 atomic_dec(&sp->isr_cnt);
3015 clear_bit(0, (&sp->tasklet_status));
3016 } else if (level == LOW) {
3017 tasklet_schedule(&sp->task);
3022 atomic_dec(&sp->isr_cnt);
/*
 * NOTE(review): the retry-loop head, counter increment, and closing
 * braces are elided in this extract.
 * Triggers a one-shot hardware statistics DMA into the stats block and
 * polls stat_cfg bit 0 until the update completes (or gives up).
 * Only acts when the card is up.
 */
3029 static void s2io_updt_stats(nic_t *sp)
3031 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3035 if (atomic_read(&sp->card_state) == CARD_UP) {
3036 /* Apprx 30us on a 133 MHz bus */
3037 val64 = SET_UPDT_CLICKS(10) |
3038 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3039 writeq(val64, &bar0->stat_cfg);
/* Bit 0 clears when the hardware has finished the one-shot update. */
3042 val64 = readq(&bar0->stat_cfg);
3043 if (!(val64 & BIT(0)))
3047 break; /* Updt failed */
3053 * s2io_get_stats - Updates the device statistics structure.
3054 * @dev : pointer to the device structure.
3056 * This function updates the device statistics structure in the s2io_nic
3057 * structure and returns a pointer to the same.
3059 * pointer to the updated net_device_stats structure.
/*
 * Get-stats entry point: forces an immediate hardware statistics
 * update, copies selected MAC counters (little-endian in the stats
 * block) into the OS net_device_stats structure, and returns it.
 */
3062 struct net_device_stats *s2io_get_stats(struct net_device *dev)
3064 nic_t *sp = dev->priv;
3065 mac_info_t *mac_control;
3066 struct config_param *config;
3069 mac_control = &sp->mac_control;
3070 config = &sp->config;
3072 /* Configure Stats for immediate updt */
3073 s2io_updt_stats(sp);
/* Hardware stats block is little-endian; convert each counter. */
3075 sp->stats.tx_packets =
3076 le32_to_cpu(mac_control->stats_info->tmac_frms);
3077 sp->stats.tx_errors =
3078 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3079 sp->stats.rx_errors =
3080 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3081 sp->stats.multicast =
3082 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
3083 sp->stats.rx_length_errors =
3084 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
3086 return (&sp->stats);
3090 * s2io_set_multicast - entry point for multicast address enable/disable.
3091 * @dev : pointer to the device structure
3093 * This function is a driver entry point which gets called by the kernel
3094 * whenever multicast addresses must be enabled/disabled. This also gets
3095 * called to set/reset promiscuous mode. Depending on the deivce flag, we
3096 * determine, if multicast address must be enabled or if promiscuous mode
3097 * is to be disabled etc.
/*
 * NOTE(review): local declarations (val64 parts, add, i, j, prev_cnt),
 * m_cast_flg updates, several returns and closing braces are elided in
 * this extract.
 * Programs the RMAC receive filters to reflect dev->flags and the
 * multicast list: all-multi enable/disable, promiscuous mode on/off
 * (via the key-protected mac_cfg register), and the per-address
 * multicast CAM entries (clear old list, then write the new one).
 */
3102 static void s2io_set_multicast(struct net_device *dev)
3105 struct dev_mc_list *mclist;
3106 nic_t *sp = dev->priv;
3107 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3108 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3110 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3113 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3114 /* Enable all Multicast addresses */
3115 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3116 &bar0->rmac_addr_data0_mem);
3117 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3118 &bar0->rmac_addr_data1_mem);
3119 val64 = RMAC_ADDR_CMD_MEM_WE |
3120 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3121 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3122 writeq(val64, &bar0->rmac_addr_cmd_mem);
3123 /* Wait till command completes */
3124 wait_for_cmd_complete(sp);
3127 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
3128 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3129 /* Disable all Multicast addresses */
/* dis_addr (all-ones MAC) with a zero mask disables the CAM entry. */
3130 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3131 &bar0->rmac_addr_data0_mem);
3132 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3133 &bar0->rmac_addr_data1_mem);
3134 val64 = RMAC_ADDR_CMD_MEM_WE |
3135 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3136 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3137 writeq(val64, &bar0->rmac_addr_cmd_mem);
3138 /* Wait till command completes */
3139 wait_for_cmd_complete(sp);
3142 sp->all_multi_pos = 0;
3145 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3146 /* Put the NIC into promiscuous mode */
3147 add = &bar0->mac_cfg;
3148 val64 = readq(&bar0->mac_cfg);
3149 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
/* mac_cfg is key-protected: unlock with 0x4C0D before each 32-bit
 * half-write. */
3151 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3152 writel((u32) val64, add);
3153 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3154 writel((u32) (val64 >> 32), (add + 4));
3156 val64 = readq(&bar0->mac_cfg);
3157 sp->promisc_flg = 1;
3158 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
3160 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3161 /* Remove the NIC from promiscuous mode */
3162 add = &bar0->mac_cfg;
3163 val64 = readq(&bar0->mac_cfg);
3164 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3166 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3167 writel((u32) val64, add);
3168 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3169 writel((u32) (val64 >> 32), (add + 4));
3171 val64 = readq(&bar0->mac_cfg);
3172 sp->promisc_flg = 0;
3173 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
3177 /* Update individual M_CAST address list */
3178 if ((!sp->m_cast_flg) && dev->mc_count) {
3180 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3181 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3183 DBG_PRINT(ERR_DBG, "can be added, please enable ");
3184 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3188 prev_cnt = sp->mc_addr_count;
3189 sp->mc_addr_count = dev->mc_count;
3191 /* Clear out the previous list of Mc in the H/W. */
3192 for (i = 0; i < prev_cnt; i++) {
3193 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3194 &bar0->rmac_addr_data0_mem);
3195 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3196 &bar0->rmac_addr_data1_mem);
3197 val64 = RMAC_ADDR_CMD_MEM_WE |
3198 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3199 RMAC_ADDR_CMD_MEM_OFFSET
3200 (MAC_MC_ADDR_START_OFFSET + i);
3201 writeq(val64, &bar0->rmac_addr_cmd_mem);
3203 /* Wait for command completes */
3204 if (wait_for_cmd_complete(sp)) {
3205 DBG_PRINT(ERR_DBG, "%s: Adding ",
3207 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3212 /* Create the new Rx filter list and update the same in H/W. */
3213 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3214 i++, mclist = mclist->next) {
3215 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
/* Pack the 6 address bytes into the low 48 bits of mac_addr.
 * NOTE(review): the shift between iterations is elided here. */
3217 for (j = 0; j < ETH_ALEN; j++) {
3218 mac_addr |= mclist->dmi_addr[j];
3222 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3223 &bar0->rmac_addr_data0_mem);
3224 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3225 &bar0->rmac_addr_data1_mem);
3226 val64 = RMAC_ADDR_CMD_MEM_WE |
3227 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3228 RMAC_ADDR_CMD_MEM_OFFSET
3229 (i + MAC_MC_ADDR_START_OFFSET);
3230 writeq(val64, &bar0->rmac_addr_cmd_mem);
3232 /* Wait for command completes */
3233 if (wait_for_cmd_complete(sp)) {
3234 DBG_PRINT(ERR_DBG, "%s: Adding ",
3236 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3244 * s2io_set_mac_addr - Programs the Xframe mac address
3245 * @dev : pointer to the device structure.
3246 * @addr: a uchar pointer to the new mac address which is to be set.
3247 * Description : This procedure will program the Xframe to receive
3248 * frames with new Mac Address
3249 * Return value: SUCCESS on success and an appropriate (-)ve integer
3250 * as defined in errno.h file on failure.
/*
 * NOTE(review): the per-byte shift in the packing loop, the val64
 * assignment head, and the return statements are elided here.
 * Programs the given MAC address into unicast filter slot 0 of the
 * RMAC address CAM. Returns SUCCESS/FAILURE per wait_for_cmd_complete.
 */
3253 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3255 nic_t *sp = dev->priv;
3256 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3257 register u64 val64, mac_addr = 0;
3261 * Set the new MAC address as the new unicast filter and reflect this
3262 * change on the device address registered with the OS. It will be
/* Pack the 6 address bytes into the low 48 bits of mac_addr. */
3265 for (i = 0; i < ETH_ALEN; i++) {
3267 mac_addr |= addr[i];
3270 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3271 &bar0->rmac_addr_data0_mem);
/* Offset 0 is the primary unicast filter entry. */
3274 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3275 RMAC_ADDR_CMD_MEM_OFFSET(0);
3276 writeq(val64, &bar0->rmac_addr_cmd_mem);
3277 /* Wait till command completes */
3278 if (wait_for_cmd_complete(sp)) {
3279 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3287 * s2io_ethtool_sset - Sets different link parameters.
3288 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
3289 * @info: pointer to the structure with parameters given by ethtool to set
3292 * The function sets different link parameters provided by the user onto
/*
 * NOTE(review): the tail of this function (the error return for the
 * rejected settings, the reopen call, and the final return) is elided.
 * ethtool set_settings: the link is fixed at 10G full-duplex with no
 * autoneg, so any other request is rejected; otherwise the interface
 * is bounced (close, then presumably reopen — tail not visible).
 */
3298 static int s2io_ethtool_sset(struct net_device *dev,
3299 struct ethtool_cmd *info)
3301 nic_t *sp = dev->priv;
3302 if ((info->autoneg == AUTONEG_ENABLE) ||
3303 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3306 s2io_close(sp->dev);
3314 * s2io_ethtol_gset - Return link specific information.
3315 * @sp : private member of the device structure, pointer to the
3316 * s2io_nic structure.
3317 * @info : pointer to the structure with parameters given by ethtool
3318 * to return link information.
3320 * Returns link specific information like speed, duplex etc.. to ethtool.
3322 * return 0 on success.
/*
 * NOTE(review): the else-branch for a down link and the return are
 * elided in this extract.
 * ethtool get_settings: reports a fixed 10GBase fibre link; speed and
 * duplex are filled in only when the carrier is up; autoneg disabled.
 */
3325 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3327 nic_t *sp = dev->priv;
3328 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3329 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3330 info->port = PORT_FIBRE;
3331 /* info->transceiver?? TODO */
3333 if (netif_carrier_ok(sp->dev)) {
3334 info->speed = 10000;
3335 info->duplex = DUPLEX_FULL;
3341 info->autoneg = AUTONEG_DISABLE;
3346 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3347 * @sp : private member of the device structure, which is a pointer to the
3348 * s2io_nic structure.
3349 * @info : pointer to the structure with parameters given by ethtool to
3350 * return driver information.
3352 * Returns driver specefic information like name, version etc.. to ethtool.
/*
 * ethtool get_drvinfo: fills in driver name/version, bus info, and the
 * sizes of the register dump, EEPROM dump, self-test and stats arrays.
 */
3357 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3358 struct ethtool_drvinfo *info)
3360 nic_t *sp = dev->priv;
/* NOTE(review): strncpy bound uses the source size, not the
 * destination field size — relies on the fields being large enough. */
3362 strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3363 strncpy(info->version, s2io_driver_version,
3364 sizeof(s2io_driver_version));
3365 strncpy(info->fw_version, "", 32);
3366 strncpy(info->bus_info, pci_name(sp->pdev), 32);
3367 info->regdump_len = XENA_REG_SPACE;
3368 info->eedump_len = XENA_EEPROM_SPACE;
3369 info->testinfo_len = S2IO_TEST_LEN;
3370 info->n_stats = S2IO_STAT_LEN;
3374 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
3375 * @sp: private member of the device structure, which is a pointer to the
3376 * s2io_nic structure.
3377 * @regs : pointer to the structure with parameters given by ethtool for
3378 * dumping the registers.
3379 * @reg_space: The input argumnet into which all the registers are dumped.
3381 * Dumps the entire register space of xFrame NIC into the user given
/*
 * NOTE(review): local declarations (i, reg) are elided in this
 * extract.
 * ethtool get_regs: copies the entire BAR0 register space into the
 * user buffer, 8 bytes at a time via readq.
 */
3387 static void s2io_ethtool_gregs(struct net_device *dev,
3388 struct ethtool_regs *regs, void *space)
3392 u8 *reg_space = (u8 *) space;
3393 nic_t *sp = dev->priv;
3395 regs->len = XENA_REG_SPACE;
3396 regs->version = sp->pdev->subsystem_device;
3398 for (i = 0; i < regs->len; i += 8) {
3399 reg = readq(sp->bar0 + i);
3400 memcpy((reg_space + i), &reg, 8);
3405 * s2io_phy_id - timer function that alternates adapter LED.
3406 * @data : address of the private member of the device structure, which
3407 * is a pointer to the s2io_nic structure, provided as an u32.
3408 * Description: This is actually the timer function that alternates the
3409 * adapter LED bit of the adapter control bit to set/reset every time on
3410 * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
3411 * once every second.
/*
 * Timer callback for LED identification: toggles the link LED (via
 * gpio_control on newer subsystem IDs, adapter_control otherwise) and
 * re-arms itself every HZ/2 so the LED blinks about once per second.
 */
3413 static void s2io_phy_id(unsigned long data)
3415 nic_t *sp = (nic_t *) data;
3416 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3420 subid = sp->pdev->subsystem_device;
/* Subsystem IDs with low byte >= 0x07 drive the LED through GPIO. */
3421 if ((subid & 0xFF) >= 0x07) {
3422 val64 = readq(&bar0->gpio_control);
3423 val64 ^= GPIO_CTRL_GPIO_0;
3424 writeq(val64, &bar0->gpio_control);
3426 val64 = readq(&bar0->adapter_control);
3427 val64 ^= ADAPTER_LED_ON;
3428 writeq(val64, &bar0->adapter_control);
3431 mod_timer(&sp->id_timer, jiffies + HZ / 2);
3435 * s2io_ethtool_idnic - To physically identify the nic on the system.
3436 * @sp : private member of the device structure, which is a pointer to the
3437 * s2io_nic structure.
3438 * @id : pointer to the structure with identification parameters given by
3440 * Description: Used to physically identify the NIC on the system.
3441 * The Link LED will blink for a time specified by the user for
3443 * NOTE: The Link has to be Up to be able to blink the LED. Hence
3444 * identification is possible only if it's link is up.
3446 * int , returns 0 on success
/*
 * NOTE(review): local declarations (subid), an error return, and the
 * final return are elided in this extract.
 * ethtool phys_id: blinks the adapter LED for `data` seconds (or a
 * default maximum when data is 0) using the s2io_phy_id timer, then
 * restores the saved gpio_control value on cards with faulty link
 * indicators. Requires the adapter to be enabled on older cards.
 */
3449 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3451 u64 val64 = 0, last_gpio_ctrl_val;
3452 nic_t *sp = dev->priv;
3453 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3456 subid = sp->pdev->subsystem_device;
/* Save LED state so it can be restored after the blink loop. */
3457 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3458 if ((subid & 0xFF) < 0x07) {
3459 val64 = readq(&bar0->adapter_control);
3460 if (!(val64 & ADAPTER_CNTL_EN)) {
3462 "Adapter Link down, cannot blink LED\n");
3466 if (sp->id_timer.function == NULL) {
/* Lazily initialize the identify timer on first use. */
3467 init_timer(&sp->id_timer);
3468 sp->id_timer.function = s2io_phy_id;
3469 sp->id_timer.data = (unsigned long) sp;
3471 mod_timer(&sp->id_timer, jiffies);
/* Sleep for the requested duration (or the default when data == 0). */
3473 msleep_interruptible(data * HZ);
3475 msleep_interruptible(MAX_FLICKER_TIME);
3476 del_timer_sync(&sp->id_timer);
3478 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
3479 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3480 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3487 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
3488 * @sp : private member of the device structure, which is a pointer to the
3489 * s2io_nic structure.
3490 * @ep : pointer to the structure with pause parameters given by ethtool.
3492 * Returns the Pause frame generation and reception capability of the NIC.
/*
 * ethtool get_pauseparam: reports pause-frame generation (tx_pause)
 * and reception (rx_pause) capability from rmac_pause_cfg; pause
 * autonegotiation is never used on this device.
 */
3496 static void s2io_ethtool_getpause_data(struct net_device *dev,
3497 struct ethtool_pauseparam *ep)
3500 nic_t *sp = dev->priv;
3501 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3503 val64 = readq(&bar0->rmac_pause_cfg);
3504 if (val64 & RMAC_PAUSE_GEN_ENABLE)
3505 ep->tx_pause = TRUE;
3506 if (val64 & RMAC_PAUSE_RX_ENABLE)
3507 ep->rx_pause = TRUE;
3508 ep->autoneg = FALSE;
3512 * s2io_ethtool_setpause_data - set/reset pause frame generation.
3513 * @sp : private member of the device structure, which is a pointer to the
3514 * s2io_nic structure.
3515 * @ep : pointer to the structure with pause parameters given by ethtool.
3517 * It can be used to set or reset Pause frame generation or reception
3518 * support of the NIC.
3520 * int, returns 0 on Success
/*
 * NOTE(review): the if/else heads testing ep->tx_pause / ep->rx_pause
 * and the return are elided in this extract.
 * ethtool set_pauseparam: sets or clears pause-frame generation and
 * reception enable bits in rmac_pause_cfg per the requested values.
 */
3523 static int s2io_ethtool_setpause_data(struct net_device *dev,
3524 struct ethtool_pauseparam *ep)
3527 nic_t *sp = dev->priv;
3528 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3530 val64 = readq(&bar0->rmac_pause_cfg);
3532 val64 |= RMAC_PAUSE_GEN_ENABLE;
3534 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3536 val64 |= RMAC_PAUSE_RX_ENABLE;
3538 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3539 writeq(val64, &bar0->rmac_pause_cfg);
3544 * read_eeprom - reads 4 bytes of data from user given offset.
3545 * @sp : private member of the device structure, which is a pointer to the
3546 * s2io_nic structure.
3547 * @off : offset at which the data must be read
3548 * @data : It's an output parameter where the data read at the given
3551 * Will read 4 bytes of data from the user given offset and return the
3553 * NOTE: Will allow to read only part of the EEPROM visible through the
3556 * -1 on failure and 0 on success.
3559 #define S2IO_DEV_ID 5
3560 static int read_eeprom(nic_t * sp, int off, u32 * data)
3565 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Kick off a 4-byte I2C read transaction to the serial EEPROM. */
3567 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3568 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3569 I2C_CONTROL_CNTL_START;
3570 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
/* Poll (bounded to 5 attempts) for transaction completion; the delay
 * between polls is elided in this extract. */
3572 while (exit_cnt < 5) {
3573 val64 = readq(&bar0->i2c_control);
3574 if (I2C_CONTROL_CNTL_END(val64)) {
3575 *data = I2C_CONTROL_GET_DATA(val64);
3587 * write_eeprom - actually writes the relevant part of the data value.
3588 * @sp : private member of the device structure, which is a pointer to the
3589 * s2io_nic structure.
3590 * @off : offset at which the data must be written
3591 * @data : The data that is to be written
3592 * @cnt : Number of bytes of the data that are actually to be written into
3593 * the Eeprom. (max of 3)
3595 * Actually writes the relevant part of the data value into the Eeprom
3596 * through the I2C bus.
3598 * 0 on success, -1 on failure.
3601 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3603 int exit_cnt = 0, ret = -1;
3605 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Program the I2C controller with device id, offset, byte count and
 * payload, then start the write transaction. */
3607 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3608 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3609 I2C_CONTROL_CNTL_START;
3610 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
/* Poll (bounded to 5 attempts) for completion; success only if the
 * device did not NACK the write. */
3612 while (exit_cnt < 5) {
3613 val64 = readq(&bar0->i2c_control);
3614 if (I2C_CONTROL_CNTL_END(val64)) {
3615 if (!(val64 & I2C_CONTROL_NACK))
3627 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
3628 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3629 * @eeprom : pointer to the user level structure provided by ethtool,
3630 * containing all relevant information.
3631 * @data_buf : buffer into which the EEPROM contents read are returned.
3632 * Description: Reads the values stored in the Eeprom at given offset
3633 * for a given length. Stores these values into the input argument data
3634 * buffer 'data_buf' and returns these to the caller (ethtool.)
3639 static int s2io_ethtool_geeprom(struct net_device *dev,
3640 struct ethtool_eeprom *eeprom, u8 * data_buf)
3643 nic_t *sp = dev->priv;
/* ethtool convention: magic identifies the device (vendor/device id). */
3645 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
/* Clamp the request so it never reads past the visible EEPROM space. */
3647 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3648 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
/* Read 4 bytes per iteration via the I2C helper. */
3650 for (i = 0; i < eeprom->len; i += 4) {
3651 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
3652 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
/* NOTE(review): the conversion from 'data' to 'valid' is elided here —
 * confirm byte ordering against the full source. */
3656 memcpy((data_buf + i), &valid, 4);
3662 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3663 * @sp : private member of the device structure, which is a pointer to the
3664 * s2io_nic structure.
3665 * @eeprom : pointer to the user level structure provided by ethtool,
3666 * containing all relevant information.
3667 * @data_buf : user defined value to be written into Eeprom.
3669 * Tries to write the user provided value in the Eeprom, at the offset
3670 * given by the user.
3672 * 0 on success, -EFAULT on failure.
3675 static int s2io_ethtool_seeprom(struct net_device *dev,
3676 struct ethtool_eeprom *eeprom,
3679 int len = eeprom->len, cnt = 0;
3680 u32 valid = 0, data;
3681 nic_t *sp = dev->priv;
/* Reject requests whose magic does not match this device's
 * vendor/device id (mirrors what geeprom reported). */
3683 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
3685 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
/* NOTE(review): "Its" in the message below is a typo in a runtime
 * string; left untouched here (would change emitted output). */
3686 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
/* Write one byte per iteration, positioned in the top byte of the
 * 32-bit word handed to write_eeprom. */
3692 data = (u32) data_buf[cnt] & 0x000000FF;
3694 valid = (u32) (data << 24);
3698 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
3700 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
3702 "write into the specified offset\n");
3713 * s2io_register_test - reads and writes into all clock domains.
3714 * @sp : private member of the device structure, which is a pointer to the
3715 * s2io_nic structure.
3716 * @data : variable that returns the result of each of the test conducted by
3719 * Read and write into all clock domains. The NIC has 3 clock domains,
3720 * see that registers in all the three regions are accessible.
3725 static int s2io_register_test(nic_t * sp, uint64_t * data)
3727 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Read tests: each register below has a known power-on value; a
 * mismatch means that clock domain is not accessible. */
3731 val64 = readq(&bar0->pif_rd_swapper_fb);
3732 if (val64 != 0x123456789abcdefULL) {
3734 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
3737 val64 = readq(&bar0->rmac_pause_cfg);
3738 if (val64 != 0xc000ffff00000000ULL) {
3740 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
3743 val64 = readq(&bar0->rx_queue_cfg);
3744 if (val64 != 0x0808080808080808ULL) {
3746 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
3749 val64 = readq(&bar0->xgxs_efifo_cfg);
3750 if (val64 != 0x000000001923141EULL) {
3752 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
/* Write tests: xmsi_data must read back the two complementary
 * bit patterns written to it. */
3755 val64 = 0x5A5A5A5A5A5A5A5AULL;
3756 writeq(val64, &bar0->xmsi_data);
3757 val64 = readq(&bar0->xmsi_data);
3758 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
3760 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
3763 val64 = 0xA5A5A5A5A5A5A5A5ULL;
3764 writeq(val64, &bar0->xmsi_data);
3765 val64 = readq(&bar0->xmsi_data);
3766 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
3768 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
3776 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
3777 * @sp : private member of the device structure, which is a pointer to the
3778 * s2io_nic structure.
3779 * @data:variable that returns the result of each of the test conducted by
3782 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
3788 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
/* Offsets expected to be write-protected must FAIL to write (hence the
 * inverted '!' checks); writable offsets must round-trip a pattern. */
3793 /* Test Write Error at offset 0 */
3794 if (!write_eeprom(sp, 0, 0, 3))
3797 /* Test Write at offset 4f0 */
3798 if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
3800 if (read_eeprom(sp, 0x4F0, &ret_data))
3803 if (ret_data != 0x01234567)
3806 /* Reset the EEPROM data to FFFF */
3807 write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
3809 /* Test Write Request Error at offset 0x7c */
3810 if (!write_eeprom(sp, 0x07C, 0, 3))
3813 /* Test Write Request at offset 0x7fc */
3814 if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
3816 if (read_eeprom(sp, 0x7FC, &ret_data))
3819 if (ret_data != 0x01234567)
3822 /* Reset the EEPROM data to FFFF */
3823 write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
3825 /* Test Write Error at offset 0x80 */
3826 if (!write_eeprom(sp, 0x080, 0, 3))
3829 /* Test Write Error at offset 0xfc */
3830 if (!write_eeprom(sp, 0x0FC, 0, 3))
3833 /* Test Write Error at offset 0x100 */
3834 if (!write_eeprom(sp, 0x100, 0, 3))
3837 /* Test Write Error at offset 4ec */
3838 if (!write_eeprom(sp, 0x4EC, 0, 3))
3846 * s2io_bist_test - invokes the MemBist test of the card .
3847 * @sp : private member of the device structure, which is a pointer to the
3848 * s2io_nic structure.
3849 * @data:variable that returns the result of each of the test conducted by
3852 * This invokes the MemBist test of the card. We give around
3853 * 2 secs time for the Test to complete. If it's still not complete
3854 * within this period, we consider that the test failed.
3856 * 0 on success and -1 on failure.
3859 static int s2io_bist_test(nic_t * sp, uint64_t * data)
3862 int cnt = 0, ret = -1;
/* Start BIST by setting PCI_BIST_START in PCI config space.
 * NOTE(review): a byte is read but a *word* is written back
 * (pci_read_config_byte vs pci_write_config_word) — the PCI_BIST
 * register is one byte; confirm this mismatch is intentional. */
3864 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3865 bist |= PCI_BIST_START;
3866 pci_write_config_word(sp->pdev, PCI_BIST, bist);
/* Poll until hardware clears PCI_BIST_START; completion code is
 * returned to the caller via *data. */
3869 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3870 if (!(bist & PCI_BIST_START)) {
3871 *data = (bist & PCI_BIST_CODE_MASK);
3883 * s2io_link_test - verifies the link state of the nic
3884 * @sp : private member of the device structure, which is a pointer to the
3885 * s2io_nic structure.
3886 * @data: variable that returns the result of each of the test conducted by
3889 * The function verifies the link state of the NIC and updates the input
3890 * argument 'data' appropriately.
3895 static int s2io_link_test(nic_t * sp, uint64_t * data)
3897 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* A set RMAC_LOCAL_FAULT bit in adapter_status means the link is down. */
3900 val64 = readq(&bar0->adapter_status);
3901 if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
3908 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
3909 * @sp : private member of the device structure, which is a pointer to the
3910 * s2io_nic structure.
3911 * @data : variable that returns the result of each of the test
3912 * conducted by the driver.
3914 * This is one of the offline tests that tests the read and write
3915 * access to the RldRam chip on the NIC.
3920 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
3922 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3924 int cnt, iteration = 0, test_pass = 0;
/* ECC must be off while test patterns are driven into the RLDRAM. */
3926 val64 = readq(&bar0->adapter_control);
3927 val64 &= ~ADAPTER_ECC_EN;
3928 writeq(val64, &bar0->adapter_control);
3930 val64 = readq(&bar0->mc_rldram_test_ctrl);
3931 val64 |= MC_RLDRAM_TEST_MODE;
3932 writeq(val64, &bar0->mc_rldram_test_ctrl);
3934 val64 = readq(&bar0->mc_rldram_mrs);
3935 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
3936 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3938 val64 |= MC_RLDRAM_MRS_ENABLE;
3939 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
/* Two iterations: the second inverts the upper 48 bits of each test
 * pattern so every data bit is exercised both ways. */
3941 while (iteration < 2) {
3942 val64 = 0x55555555aaaa0000ULL;
3943 if (iteration == 1) {
3944 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3946 writeq(val64, &bar0->mc_rldram_test_d0);
3948 val64 = 0xaaaa5a5555550000ULL;
3949 if (iteration == 1) {
3950 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3952 writeq(val64, &bar0->mc_rldram_test_d1);
3954 val64 = 0x55aaaaaaaa5a0000ULL;
3955 if (iteration == 1) {
3956 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3958 writeq(val64, &bar0->mc_rldram_test_d2);
3960 val64 = (u64) (0x0000003fffff0000ULL);
3961 writeq(val64, &bar0->mc_rldram_test_add);
/* Write phase: start the test engine and poll (bounded) for DONE. */
3964 val64 = MC_RLDRAM_TEST_MODE;
3965 writeq(val64, &bar0->mc_rldram_test_ctrl);
3968 MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
3970 writeq(val64, &bar0->mc_rldram_test_ctrl);
3972 for (cnt = 0; cnt < 5; cnt++) {
3973 val64 = readq(&bar0->mc_rldram_test_ctrl);
3974 if (val64 & MC_RLDRAM_TEST_DONE)
/* Read-back phase: re-run the engine in read mode and poll again. */
3982 val64 = MC_RLDRAM_TEST_MODE;
3983 writeq(val64, &bar0->mc_rldram_test_ctrl);
3985 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
3986 writeq(val64, &bar0->mc_rldram_test_ctrl);
3988 for (cnt = 0; cnt < 5; cnt++) {
3989 val64 = readq(&bar0->mc_rldram_test_ctrl);
3990 if (val64 & MC_RLDRAM_TEST_DONE)
/* Final verdict from the PASS bit. */
3998 val64 = readq(&bar0->mc_rldram_test_ctrl);
3999 if (val64 & MC_RLDRAM_TEST_PASS)
4014 * s2io_ethtool_test - conducts 6 tests to determine the health of card.
4015 * @sp : private member of the device structure, which is a pointer to the
4016 * s2io_nic structure.
4017 * @ethtest : pointer to a ethtool command specific structure that will be
4018 * returned to the user.
4019 * @data : variable that returns the result of each of the test
4020 * conducted by the driver.
4022 * This function conducts 6 tests ( 4 offline and 2 online) to determine
4023 * the health of the card.
4028 static void s2io_ethtool_test(struct net_device *dev,
4029 struct ethtool_test *ethtest,
4032 nic_t *sp = dev->priv;
/* Remember whether the interface was up so it can be restored later
 * (restore path is elided in this extract). */
4033 int orig_state = netif_running(sp->dev);
4035 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
4036 /* Offline Tests. */
/* Offline tests require the device closed first. */
4038 s2io_close(sp->dev);
4040 if (s2io_register_test(sp, &data[0]))
4041 ethtest->flags |= ETH_TEST_FL_FAILED;
4045 if (s2io_rldram_test(sp, &data[3]))
4046 ethtest->flags |= ETH_TEST_FL_FAILED;
4050 if (s2io_eeprom_test(sp, &data[1]))
4051 ethtest->flags |= ETH_TEST_FL_FAILED;
4053 if (s2io_bist_test(sp, &data[4]))
4054 ethtest->flags |= ETH_TEST_FL_FAILED;
/* Online path: the link test needs the interface up. */
4064 "%s: is not up, cannot run test\n",
4073 if (s2io_link_test(sp, &data[2]))
4074 ethtest->flags |= ETH_TEST_FL_FAILED;
/*
 * s2io_get_ethtool_stats - fills the ethtool statistics array.
 * Copies the hardware MAC statistics block (little-endian, mixed
 * 32/64-bit counters) plus driver software stats into tmp_stats, in
 * the same order as the ethtool_stats_keys string table.
 */
4083 static void s2io_get_ethtool_stats(struct net_device *dev,
4084 struct ethtool_stats *estats,
4088 nic_t *sp = dev->priv;
4089 StatInfo_t *stat_info = sp->mac_control.stats_info;
/* Refresh the DMA'd statistics block before copying it out. */
4091 s2io_updt_stats(sp);
/* Tx MAC counters. NOTE(review): per-field le32 vs le64 widths are
 * presumably dictated by the hardware stats layout — confirm against
 * the StatInfo_t definition. */
4092 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms);
4093 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets);
4094 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
4095 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms);
4096 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms);
4097 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
4098 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms);
4099 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
4100 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip);
4101 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip);
4102 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp);
4103 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp);
4104 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
4105 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp);
/* Rx MAC counters. */
4106 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms);
4107 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets);
4108 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
4109 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
4110 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4111 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms);
4112 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
4113 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
4114 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
4115 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms);
4116 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms);
4117 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms);
4118 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms);
4119 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms);
4120 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip);
4121 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
4122 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
4123 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip);
4124 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp);
4125 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
4126 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp);
4127 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp);
4128 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt);
4129 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip);
4130 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
/* Driver-maintained (host byte order) software counters. */
4132 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
4133 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
/* Size in bytes of the register dump returned by get_regs. */
4136 int s2io_ethtool_get_regs_len(struct net_device *dev)
4138 return (XENA_REG_SPACE);
/* Report whether Rx checksum offload is currently enabled. */
4142 u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
4144 nic_t *sp = dev->priv;
4146 return (sp->rx_csum);
/* Enable/disable Rx checksum offload (body largely elided in this
 * extract; presumably stores 'data' into sp->rx_csum — confirm). */
4148 int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
4150 nic_t *sp = dev->priv;
/* Size in bytes of the EEPROM space visible to ethtool. */
4159 int s2io_get_eeprom_len(struct net_device *dev)
4161 return (XENA_EEPROM_SPACE);
/* Number of self-test results returned by s2io_ethtool_test. */
4164 int s2io_ethtool_self_test_count(struct net_device *dev)
4166 return (S2IO_TEST_LEN);
/*
 * s2io_ethtool_get_strings - return the string table for a stringset.
 * Copies either the self-test names or the statistics key names into
 * the caller-supplied buffer, selected by 'stringset'.
 * NOTE(review): the 'case' labels of the switch are elided in this
 * extract; only the copy statements are visible.
 *
 * Fix: the second memcpy source had a mojibake byte — 'ð' was a
 * mis-encoded '&e', corrupting '&ethtool_stats_keys'.
 */
4168 void s2io_ethtool_get_strings(struct net_device *dev,
4169 u32 stringset, u8 * data)
4171 switch (stringset) {
4173 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
4176 memcpy(data, &ethtool_stats_keys,
4177 sizeof(ethtool_stats_keys));
/* Number of statistics entries returned by get_ethtool_stats. */
4180 static int s2io_ethtool_get_stats_count(struct net_device *dev)
4182 return (S2IO_STAT_LEN);
/* Toggle the NETIF_F_IP_CSUM feature flag to enable/disable Tx
 * checksum offload (the 'if (data)' guard is elided in this extract). */
4185 int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4188 dev->features |= NETIF_F_IP_CSUM;
4190 dev->features &= ~NETIF_F_IP_CSUM;
/* ethtool entry points for the s2io driver; wired into the netdev
 * via SET_ETHTOOL_OPS at probe time. */
4196 static struct ethtool_ops netdev_ethtool_ops = {
4197 .get_settings = s2io_ethtool_gset,
4198 .set_settings = s2io_ethtool_sset,
4199 .get_drvinfo = s2io_ethtool_gdrvinfo,
4200 .get_regs_len = s2io_ethtool_get_regs_len,
4201 .get_regs = s2io_ethtool_gregs,
4202 .get_link = ethtool_op_get_link,
4203 .get_eeprom_len = s2io_get_eeprom_len,
4204 .get_eeprom = s2io_ethtool_geeprom,
4205 .set_eeprom = s2io_ethtool_seeprom,
4206 .get_pauseparam = s2io_ethtool_getpause_data,
4207 .set_pauseparam = s2io_ethtool_setpause_data,
4208 .get_rx_csum = s2io_ethtool_get_rx_csum,
4209 .set_rx_csum = s2io_ethtool_set_rx_csum,
4210 .get_tx_csum = ethtool_op_get_tx_csum,
4211 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4212 .get_sg = ethtool_op_get_sg,
4213 .set_sg = ethtool_op_set_sg,
4215 .get_tso = ethtool_op_get_tso,
4216 .set_tso = ethtool_op_set_tso,
4218 .self_test_count = s2io_ethtool_self_test_count,
4219 .self_test = s2io_ethtool_test,
4220 .get_strings = s2io_ethtool_get_strings,
4221 .phys_id = s2io_ethtool_idnic,
4222 .get_stats_count = s2io_ethtool_get_stats_count,
4223 .get_ethtool_stats = s2io_get_ethtool_stats
4227 * s2io_ioctl - Entry point for the Ioctl
4228 * @dev : Device pointer.
4229 * @ifr : An IOCTL specific structure, that can contain a pointer to
4230 * a proprietary structure used to pass information to the driver.
4231 * @cmd : This is used to distinguish between the different commands that
4232 * can be passed to the IOCTL functions.
4234 * Currently there are no special functionality supported in IOCTL, hence
4235 * this function always returns EOPNOTSUPPORTED
4238 int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4244 * s2io_change_mtu - entry point to change MTU size for the device.
4245 * @dev : device pointer.
4246 * @new_mtu : the new MTU size for the device.
4247 * Description: A driver entry point to change MTU size for the device.
4248 * Before changing the MTU the device must be stopped.
4250 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4254 int s2io_change_mtu(struct net_device *dev, int new_mtu)
4256 nic_t *sp = dev->priv;
4257 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Refuse the change while the interface is running. */
4260 if (netif_running(dev)) {
4261 DBG_PRINT(ERR_DBG, "%s: Must be stopped to ", dev->name);
4262 DBG_PRINT(ERR_DBG, "change its MTU\n");
/* Range-check against the hardware limits. */
4266 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4267 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
4272 /* Set the new MTU into the PYLD register of the NIC */
4274 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4282 * s2io_tasklet - Bottom half of the ISR.
4283 * @dev_adr : address of the device structure in dma_addr_t format.
4285 * This is the tasklet or the bottom half of the ISR. This is
4286 * an extension of the ISR which is scheduled by the scheduler to be run
4287 * when the load on the CPU is low. All low priority tasks of the ISR can
4288 * be pushed into the tasklet. For now the tasklet is used only to
4289 * replenish the Rx buffers in the Rx buffer descriptors.
4294 static void s2io_tasklet(unsigned long dev_addr)
4296 struct net_device *dev = (struct net_device *) dev_addr;
4297 nic_t *sp = dev->priv;
4299 mac_info_t *mac_control;
4300 struct config_param *config;
4302 mac_control = &sp->mac_control;
4303 config = &sp->config;
/* TASKLET_IN_USE guards against re-entry; refill every Rx ring. */
4305 if (!TASKLET_IN_USE) {
4306 for (i = 0; i < config->rx_ring_num; i++) {
4307 ret = fill_rx_buffers(sp, i);
4308 if (ret == -ENOMEM) {
4309 DBG_PRINT(ERR_DBG, "%s: Out of ",
4311 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
4313 } else if (ret == -EFILL) {
4315 "%s: Rx Ring %d is full\n",
/* Release the in-use flag set by TASKLET_IN_USE. */
4320 clear_bit(0, (&sp->tasklet_status));
4325 * s2io_set_link - Set the Link status
4326 * @data: long pointer to device private structure
4327 * Description: Sets the link status for the adapter
4330 static void s2io_set_link(unsigned long data)
4332 nic_t *nic = (nic_t *) data;
4333 struct net_device *dev = nic->dev;
4334 XENA_dev_config_t __iomem *bar0 = nic->bar0;
/* link_state bit 0 doubles as a mutex against a concurrent reset. */
4338 if (test_and_set_bit(0, &(nic->link_state))) {
4339 /* The card is being reset, no point doing anything */
4343 subid = nic->pdev->subsystem_device;
4345 * Allow a small delay for the NICs self initiated
4346 * cleanup to complete.
4350 val64 = readq(&bar0->adapter_status);
4351 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
4352 if (LINK_IS_UP(val64)) {
/* Link up: enable the adapter and drive the LED/GPIO as the
 * board variant requires. */
4353 val64 = readq(&bar0->adapter_control);
4354 val64 |= ADAPTER_CNTL_EN;
4355 writeq(val64, &bar0->adapter_control);
4356 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4357 val64 = readq(&bar0->gpio_control);
4358 val64 |= GPIO_CTRL_GPIO_0;
4359 writeq(val64, &bar0->gpio_control);
/* Read back to post the GPIO write. */
4360 val64 = readq(&bar0->gpio_control);
4362 val64 |= ADAPTER_LED_ON;
4363 writeq(val64, &bar0->adapter_control);
/* Re-check the link after enabling the device. */
4365 val64 = readq(&bar0->adapter_status);
4366 if (!LINK_IS_UP(val64)) {
4367 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4368 DBG_PRINT(ERR_DBG, " Link down");
4369 DBG_PRINT(ERR_DBG, "after ");
4370 DBG_PRINT(ERR_DBG, "enabling ");
4371 DBG_PRINT(ERR_DBG, "device \n");
4373 if (nic->device_enabled_once == FALSE) {
4374 nic->device_enabled_once = TRUE;
4376 s2io_link(nic, LINK_UP);
/* Link down: clear the fault-indicator GPIO and report down. */
4378 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4379 val64 = readq(&bar0->gpio_control);
4380 val64 &= ~GPIO_CTRL_GPIO_0;
4381 writeq(val64, &bar0->gpio_control);
4382 val64 = readq(&bar0->gpio_control);
4384 s2io_link(nic, LINK_DOWN);
4386 } else { /* NIC is not Quiescent. */
4387 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4388 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4389 netif_stop_queue(dev);
/* Drop the link_state "mutex" taken at entry. */
4391 clear_bit(0, &(nic->link_state));
/*
 * s2io_card_down - brings the NIC down: stops traffic, kills the
 * tasklet, waits for quiescence / pending ISRs, then frees all Tx and
 * Rx buffers under their respective locks.
 */
4394 static void s2io_card_down(nic_t * sp)
4397 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4398 unsigned long flags;
4399 register u64 val64 = 0;
4401 /* If s2io_set_link task is executing, wait till it completes. */
4402 while (test_and_set_bit(0, &(sp->link_state))) {
4405 atomic_set(&sp->card_state, CARD_DOWN);
4407 /* disable Tx and Rx traffic on the NIC */
4411 tasklet_kill(&sp->task);
4413 /* Check if the device is Quiescent and then Reset the NIC */
4415 val64 = readq(&bar0->adapter_status);
4416 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
/* NOTE(review): "adaper" below is a typo in a runtime log string;
 * left as-is since changing it would alter emitted output. */
4424 "s2io_close:Device not Quiescent ");
4425 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
4426 (unsigned long long) val64);
4432 /* Waiting till all Interrupt handlers are complete */
4436 if (!atomic_read(&sp->isr_cnt))
4441 spin_lock_irqsave(&sp->tx_lock, flags);
4442 /* Free all Tx buffers */
4443 free_tx_buffers(sp);
4444 spin_unlock_irqrestore(&sp->tx_lock, flags);
4446 /* Free all Rx buffers */
4447 spin_lock_irqsave(&sp->rx_lock, flags);
4448 free_rx_buffers(sp);
4449 spin_unlock_irqrestore(&sp->rx_lock, flags);
/* Release the link_state "mutex" so s2io_set_link may run again. */
4451 clear_bit(0, &(sp->link_state));
/*
 * s2io_card_up - brings the NIC up: programs the H/W registers, fills
 * all Rx rings, sets the receive mode, arms the replenish tasklet and
 * starts the NIC.  Returns 0 on success; on failure everything
 * acquired so far is rolled back.
 */
4454 static int s2io_card_up(nic_t * sp)
4457 mac_info_t *mac_control;
4458 struct config_param *config;
4459 struct net_device *dev = (struct net_device *) sp->dev;
4461 /* Initialize the H/W I/O registers */
4462 if (init_nic(sp) != 0) {
4463 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4469 * Initializing the Rx buffers. For now we are considering only 1
4470 * Rx ring and initializing buffers into 30 Rx blocks
4472 mac_control = &sp->mac_control;
4473 config = &sp->config;
4475 for (i = 0; i < config->rx_ring_num; i++) {
4476 if ((ret = fill_rx_buffers(sp, i))) {
4477 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
/* Roll back buffers filled for earlier rings. */
4480 free_rx_buffers(sp);
4483 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4484 atomic_read(&sp->rx_bufs_left[i]));
4487 /* Setting its receive mode */
4488 s2io_set_multicast(dev);
4490 /* Enable tasklet for the device */
4491 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4493 /* Enable Rx Traffic and interrupts on the NIC */
4494 if (start_nic(sp)) {
4495 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4496 tasklet_kill(&sp->task);
4498 free_irq(dev->irq, dev);
4499 free_rx_buffers(sp);
4503 atomic_set(&sp->card_state, CARD_UP);
4508 * s2io_restart_nic - Resets the NIC.
4509 * @data : long pointer to the device private structure
4511 * This function is scheduled to be run by the s2io_tx_watchdog
4512 * function after 0.5 secs to reset the NIC. The idea is to reduce
4513 * the run time of the watch dog routine which is run holding a
4517 static void s2io_restart_nic(unsigned long data)
4519 struct net_device *dev = (struct net_device *) data;
4520 nic_t *sp = dev->priv;
/* Bring the card back up (the card_down call preceding this is
 * elided in this extract) and resume the Tx queue. */
4523 if (s2io_card_up(sp)) {
4524 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4527 netif_wake_queue(dev);
4528 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4534 * s2io_tx_watchdog - Watchdog for transmit side.
4535 * @dev : Pointer to net device structure
4537 * This function is triggered if the Tx Queue is stopped
4538 * for a pre-defined amount of time when the Interface is still up.
4539 * If the Interface is jammed in such a situation, the hardware is
4540 * reset (by s2io_close) and restarted again (by s2io_open) to
4541 * overcome any problem that might have been caused in the hardware.
4546 static void s2io_tx_watchdog(struct net_device *dev)
4548 nic_t *sp = dev->priv;
/* Defer the actual reset to s2io_restart_nic via the work queue so
 * the watchdog itself stays short. */
4550 if (netif_carrier_ok(dev)) {
4551 schedule_work(&sp->rst_timer_task);
4556 * rx_osm_handler - To perform some OS related operations on SKB.
4557 * @sp: private member of the device structure,pointer to s2io_nic structure.
4558 * @skb : the socket buffer pointer.
4559 * @len : length of the packet
4560 * @cksum : FCS checksum of the frame.
4561 * @ring_no : the ring from which this RxD was extracted.
4563 * This function is called by the Rx interrupt service routine to perform
4564 * some OS related operations on the SKB before passing it to the upper
4565 * layers. It mainly checks if the checksum is OK, if so adds it to the
4566 * SKBs cksum variable, increments the Rx packet count and passes the SKB
4567 * to the upper layer. If the checksum is wrong, it increments the Rx
4568 * packet error count, frees the SKB and returns error.
4570 * SUCCESS on success and -1 on failure.
4572 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
4574 nic_t *sp = ring_data->nic;
4575 struct net_device *dev = (struct net_device *) sp->dev;
/* The SKB pointer was stashed in the descriptor's Host_Control when
 * the buffer was posted. */
4576 struct sk_buff *skb = (struct sk_buff *)
4577 ((unsigned long) rxdp->Host_Control);
4578 int ring_no = ring_data->ring_no;
4579 u16 l3_csum, l4_csum;
4580 #ifdef CONFIG_2BUFF_MODE
4581 int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4582 int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4583 int get_block = ring_data->rx_curr_get_info.block_index;
4584 int get_off = ring_data->rx_curr_get_info.offset;
4585 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
4586 unsigned char *buff;
/* NOTE(review): stray double semicolon at end of the next line —
 * harmless but should be cleaned up in the full source. */
4588 u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);;
/* A non-zero T_CODE means the hardware flagged a receive error. */
4591 if (rxdp->Control_1 & RXD_T_CODE) {
4592 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4593 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4596 sp->stats.rx_crc_errors++;
4597 atomic_dec(&sp->rx_bufs_left[ring_no]);
4598 rxdp->Host_Control = 0;
4602 /* Updating statistics */
4603 rxdp->Host_Control = 0;
4605 sp->stats.rx_packets++;
4606 #ifndef CONFIG_2BUFF_MODE
4607 sp->stats.rx_bytes += len;
4609 sp->stats.rx_bytes += buf0_len + buf2_len;
4612 #ifndef CONFIG_2BUFF_MODE
/* 2-buffer mode: prepend the header buffer (ba_0) to the payload. */
4615 buff = skb_push(skb, buf0_len);
4616 memcpy(buff, ba->ba_0, buf0_len);
4617 skb_put(skb, buf2_len);
/* Honour hardware checksum verification only for TCP/UDP frames. */
4620 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
4622 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
4623 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
4624 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
4626 * NIC verifies if the Checksum of the received
4627 * frame is Ok or not and accordingly returns
4628 * a flag in the RxD.
4630 skb->ip_summed = CHECKSUM_UNNECESSARY;
4633 * Packet with erroneous checksum, let the
4634 * upper layers deal with it.
4636 skb->ip_summed = CHECKSUM_NONE;
4639 skb->ip_summed = CHECKSUM_NONE;
4642 skb->protocol = eth_type_trans(skb, dev);
4643 #ifdef CONFIG_S2IO_NAPI
4644 netif_receive_skb(skb);
4648 dev->last_rx = jiffies;
4649 atomic_dec(&sp->rx_bufs_left[ring_no]);
4654 * s2io_link - stops/starts the Tx queue.
4655 * @sp : private member of the device structure, which is a pointer to the
4656 * s2io_nic structure.
4657 * @link : indicates whether link is UP/DOWN.
4659 * This function stops/starts the Tx queue depending on whether the link
4660 * status of the NIC is down or up. This is called by the Alarm
4661 * interrupt handler whenever a link change interrupt comes up.
4666 void s2io_link(nic_t * sp, int link)
4668 struct net_device *dev = (struct net_device *) sp->dev;
/* Only act (and log) on actual transitions. */
4670 if (link != sp->last_link_state) {
4671 if (link == LINK_DOWN) {
4672 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
4673 netif_carrier_off(dev);
4675 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
4676 netif_carrier_on(dev);
4679 sp->last_link_state = link;
4683 * get_xena_rev_id - to identify revision ID of xena.
4684 * @pdev : PCI Dev structure
4686 * Function to identify the Revision ID of xena.
4688 * returns the revision ID of the device.
4691 int get_xena_rev_id(struct pci_dev *pdev)
/* Revision ID lives in PCI config space at PCI_REVISION_ID. */
4695 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
4700 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
4701 * @sp : private member of the device structure, which is a pointer to the
4702 * s2io_nic structure.
4704 * This function initializes a few of the PCI and PCI-X configuration registers
4705 * with recommended values.
4710 static void s2io_init_pci(nic_t * sp)
4712 u16 pci_cmd = 0, pcix_cmd = 0;
4714 /* Enable Data Parity Error Recovery in PCI-X command register. */
4715 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4717 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
/* Re-read to post the write and refresh the cached value. */
4719 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4722 /* Set the PErr Response bit in PCI command register. */
4723 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd)
4724 pci_write_config_word(sp->pdev, PCI_COMMAND,
4725 (pci_cmd | PCI_COMMAND_PARITY));
4726 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4728 /* Forcibly disabling relaxed ordering capability of the card. */
4730 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4732 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
/* Module metadata and loadable parameters (see the header comment at
 * the top of the file for a description of each knob). */
4736 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
4737 MODULE_LICENSE("GPL");
4738 module_param(tx_fifo_num, int, 0);
4739 module_param(rx_ring_num, int, 0);
4740 module_param_array(tx_fifo_len, uint, NULL, 0);
4741 module_param_array(rx_ring_sz, uint, NULL, 0);
4742 module_param_array(rts_frm_len, uint, NULL, 0);
/* NOTE(review): sysfs permission argument of 1 below is unusual —
 * every other parameter uses 0; confirm this is intentional. */
4743 module_param(use_continuous_tx_intrs, int, 1);
4744 module_param(rmac_pause_time, int, 0);
4745 module_param(mc_pause_threshold_q0q3, int, 0);
4746 module_param(mc_pause_threshold_q4q7, int, 0);
4747 module_param(shared_splits, int, 0);
4748 module_param(tmac_util_period, int, 0);
4749 module_param(rmac_util_period, int, 0);
4750 #ifndef CONFIG_S2IO_NAPI
4751 module_param(indicate_max_pkts, int, 0);
4755 * s2io_init_nic - Initialization of the adapter .
4756 * @pdev : structure containing the PCI related information of the device.
4757 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
4759 * The function initializes an adapter identified by the pci_dev structure.
4760 * All OS related initialization including memory and device structure and
4761 * initialization of the device private variable is done. Also the swapper
4762 * control register is initialized to enable read and write into the I/O
4763 * registers of the device.
4765 * returns 0 on success and negative on failure.
4768 static int __devinit
4769 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4772 struct net_device *dev;
4774 int dma_flag = FALSE;
4775 u32 mac_up, mac_down;
4776 u64 val64 = 0, tmp64 = 0;
4777 XENA_dev_config_t __iomem *bar0 = NULL;
4779 mac_info_t *mac_control;
4780 struct config_param *config;
4782 #ifdef CONFIG_S2IO_NAPI
4783 DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
/* Wake the device; nothing else is touched until this succeeds. */
4786 if ((ret = pci_enable_device(pdev))) {
4788 "s2io_init_nic: pci_enable_device failed\n");
/*
 * DMA mask negotiation: prefer 64-bit, fall back to 32-bit,
 * and bail out (disabling the device) if neither is usable.
 */
4792 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
4793 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
4795 if (pci_set_consistent_dma_mask
4796 (pdev, DMA_64BIT_MASK)) {
/*
 * NOTE(review): the backslash line-continuation inside this string
 * literal embeds the next line's leading whitespace into the logged
 * message; splitting into adjacent string literals would avoid that.
 */
4798 "Unable to obtain 64bit DMA for \
4799 consistent allocations\n");
4800 pci_disable_device(pdev);
4803 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
4804 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
4806 pci_disable_device(pdev);
/* Claim the BAR regions before mapping them. */
4810 if (pci_request_regions(pdev, s2io_driver_name)) {
/*
 * NOTE(review): trailing ',' chains into the next statement via the
 * comma operator — harmless here, but almost certainly meant to be ';'.
 */
4811 DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
4812 pci_disable_device(pdev);
/* Allocate netdev with nic_t as its private area. */
4816 dev = alloc_etherdev(sizeof(nic_t));
4818 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
4819 pci_disable_device(pdev);
4820 pci_release_regions(pdev);
4824 pci_set_master(pdev);
4825 pci_set_drvdata(pdev, dev);
4826 SET_MODULE_OWNER(dev);
4827 SET_NETDEV_DEV(dev, &pdev->dev);
4829 /* Private member variable initialized to s2io NIC structure */
4831 memset(sp, 0, sizeof(nic_t));
4834 sp->high_dma_flag = dma_flag;
4835 sp->device_enabled_once = FALSE;
4837 /* Initialize some PCI/PCI-X fields of the NIC. */
4841 * Setting the device configuration parameters.
4842 * Most of these parameters can be specified by the user during
4843 * module insertion as they are module loadable parameters. If
4844 * these parameters are not specified during load time, they
4845 * are initialized with default values.
4847 mac_control = &sp->mac_control;
4848 config = &sp->config;
4850 /* Tx side parameters. */
4851 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
4852 config->tx_fifo_num = tx_fifo_num;
4853 for (i = 0; i < MAX_TX_FIFOS; i++) {
4854 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
4855 config->tx_cfg[i].fifo_priority = i;
4858 /* mapping the QoS priority to the configured fifos */
4859 for (i = 0; i < MAX_TX_FIFOS; i++)
4860 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
/*
 * Utilization-based Tx interrupts by default; any FIFO shorter than
 * 65 descriptors forces per-list interrupts for the whole adapter.
 */
4862 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
4863 for (i = 0; i < config->tx_fifo_num; i++) {
4864 config->tx_cfg[i].f_no_snoop =
4865 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
4866 if (config->tx_cfg[i].fifo_len < 65) {
4867 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
4871 config->max_txds = MAX_SKB_FRAGS;
4873 /* Rx side parameters. */
4874 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
4875 config->rx_ring_num = rx_ring_num;
4876 for (i = 0; i < MAX_RX_RINGS; i++) {
4877 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
4878 (MAX_RXDS_PER_BLOCK + 1);
4879 config->rx_cfg[i].ring_priority = i;
4882 for (i = 0; i < rx_ring_num; i++) {
4883 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
4884 config->rx_cfg[i].f_no_snoop =
4885 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
4888 /* Setting Mac Control parameters */
4889 mac_control->rmac_pause_time = rmac_pause_time;
4890 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
4891 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
4894 /* Initialize Ring buffer parameters. */
4895 for (i = 0; i < config->rx_ring_num; i++)
4896 atomic_set(&sp->rx_bufs_left[i], 0);
4898 /* Initialize the number of ISRs currently running */
4899 atomic_set(&sp->isr_cnt, 0);
4901 /* initialize the shared memory used by the NIC and the host */
4902 if (init_shared_mem(sp)) {
4903 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
4906 goto mem_alloc_failed;
/* Map BAR0 (register space) and BAR1 (Tx FIFO doorbell space). */
4909 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
4910 pci_resource_len(pdev, 0));
4912 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
4915 goto bar0_remap_failed;
4918 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
4919 pci_resource_len(pdev, 2));
4921 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
4924 goto bar1_remap_failed;
4927 dev->irq = pdev->irq;
4928 dev->base_addr = (unsigned long) sp->bar0;
4930 /* Initializing the BAR1 address as the start of the FIFO pointer. */
/* Each FIFO's doorbell window is 128KB (0x20000) apart within BAR1. */
4931 for (j = 0; j < MAX_TX_FIFOS; j++) {
4932 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
4933 (sp->bar1 + (j * 0x00020000));
4936 /* Driver entry points */
4937 dev->open = &s2io_open;
4938 dev->stop = &s2io_close;
4939 dev->hard_start_xmit = &s2io_xmit;
4940 dev->get_stats = &s2io_get_stats;
4941 dev->set_multicast_list = &s2io_set_multicast;
4942 dev->do_ioctl = &s2io_ioctl;
4943 dev->change_mtu = &s2io_change_mtu;
4944 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
4947 * will use eth_mac_addr() for dev->set_mac_address
4948 * mac address will be set every time dev->open() is called
4950 #if defined(CONFIG_S2IO_NAPI)
4951 dev->poll = s2io_poll;
4955 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
4956 if (sp->high_dma_flag == TRUE)
4957 dev->features |= NETIF_F_HIGHDMA;
4959 dev->features |= NETIF_F_TSO;
4962 dev->tx_timeout = &s2io_tx_watchdog;
4963 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
/* Deferred-work handlers: NIC restart and link-state change. */
4964 INIT_WORK(&sp->rst_timer_task,
4965 (void (*)(void *)) s2io_restart_nic, dev);
4966 INIT_WORK(&sp->set_link_task,
4967 (void (*)(void *)) s2io_set_link, sp);
4969 pci_save_state(sp->pdev);
4971 /* Setting swapper control on the NIC, for proper reset operation */
4972 if (s2io_set_swapper(sp)) {
4973 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
4976 goto set_swap_failed;
4980 * Fix for all "FFs" MAC address problems observed on
4983 fix_mac_address(sp);
4987 * MAC address initialization.
4988 * For now only one mac address will be read and used.
/* Issue a read command for MAC address slot 0 and wait for completion. */
4991 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4992 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
4993 writeq(val64, &bar0->rmac_addr_cmd_mem);
4994 wait_for_cmd_complete(sp);
4996 tmp64 = readq(&bar0->rmac_addr_data0_mem);
4997 mac_down = (u32) tmp64;
4998 mac_up = (u32) (tmp64 >> 32);
/*
 * NOTE(review): sizeof(ETH_ALEN) is sizeof(int) (4), not ETH_ALEN (6);
 * plain ETH_ALEN was almost certainly intended here.
 */
5000 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
/* Unpack the 48-bit address byte-by-byte from the two register halves. */
5002 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
5003 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
5004 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
5005 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
5006 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
5007 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
5010 "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
5011 sp->def_mac_addr[0].mac_addr[0],
5012 sp->def_mac_addr[0].mac_addr[1],
5013 sp->def_mac_addr[0].mac_addr[2],
5014 sp->def_mac_addr[0].mac_addr[3],
5015 sp->def_mac_addr[0].mac_addr[4],
5016 sp->def_mac_addr[0].mac_addr[5]);
5018 /* Set the factory defined MAC address initially */
5019 dev->addr_len = ETH_ALEN;
5020 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
5023 * Initialize the tasklet status and link state flags
5024 * and the card state parameter
5026 atomic_set(&(sp->card_state), 0);
5027 sp->tasklet_status = 0;
5030 /* Initialize spinlocks */
5031 spin_lock_init(&sp->tx_lock);
5032 #ifndef CONFIG_S2IO_NAPI
5033 spin_lock_init(&sp->put_lock);
5035 spin_lock_init(&sp->rx_lock);
5038 * SXE-002: Configure link and activity LED to init state
5041 subid = sp->pdev->subsystem_device;
5042 if ((subid & 0xFF) >= 0x07) {
5043 val64 = readq(&bar0->gpio_control);
5044 val64 |= 0x0000800000000000ULL;
5045 writeq(val64, &bar0->gpio_control);
5046 val64 = 0x0411040400000000ULL;
/* Magic LED-setup register at offset 0x2700; not in XENA_dev_config_t. */
5047 writeq(val64, (void __iomem *) bar0 + 0x2700);
5048 val64 = readq(&bar0->gpio_control);
5051 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
5053 if (register_netdev(dev)) {
5054 DBG_PRINT(ERR_DBG, "Device registration failed\n");
5056 goto register_failed;
5059 /* Initialize device name */
5060 strcpy(sp->name, dev->name);
5061 strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
5064 * Make Link state as off at this point, when the Link change
5065 * interrupt comes the state will be automatically changed to
5068 netif_carrier_off(dev);
/* Error-unwind path: release everything acquired above, in reverse. */
5079 free_shared_mem(sp);
5080 pci_disable_device(pdev);
5081 pci_release_regions(pdev);
5082 pci_set_drvdata(pdev, NULL);
5089 * s2io_rem_nic - Free the PCI device
5090 * @pdev: structure containing the PCI related information of the device.
5091 * Description: This function is called by the PCI subsystem to release a
5092 * PCI device and free up all resources held by the device. This could
5093 * be in response to a Hot plug event or when the driver is to be removed
/*
 * PCI remove callback: tear down the netdev and release all resources
 * acquired in s2io_init_nic(), in reverse order of acquisition.
 */
5097 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
5099 struct net_device *dev =
5100 (struct net_device *) pci_get_drvdata(pdev);
/* drvdata should have been set at probe time; bail out if it wasn't. */
5104 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
5109 unregister_netdev(dev);
5111 free_shared_mem(sp);
5114 pci_disable_device(pdev);
5115 pci_release_regions(pdev);
5116 pci_set_drvdata(pdev, NULL);
5121 * s2io_starter - Entry point for the driver
5122 * Description: This function is the entry point for the driver. It verifies
5123 * the module loadable parameters and initializes PCI configuration space.
/* Module entry point: register the PCI driver with the PCI core. */
5126 int __init s2io_starter(void)
5128 return pci_module_init(&s2io_driver);
5132 * s2io_closer - Cleanup routine for the driver
5133 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
/* Module exit point: unregister the PCI driver and log completion. */
5136 void s2io_closer(void)
5138 pci_unregister_driver(&s2io_driver);
5139 DBG_PRINT(INIT_DBG, "cleanup done\n");
/* Wire the above entry/exit points into the module load/unload hooks. */
5142 module_init(s2io_starter);
5143 module_exit(s2io_closer);