1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used
31 * rx_ring_sz: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO.
36 ************************************************************************/
38 #include <linux/config.h>
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/errno.h>
42 #include <linux/ioport.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
58 #include <linux/if_vlan.h>
60 #include <asm/system.h>
61 #include <asm/uaccess.h>
66 #include "s2io-regs.h"
68 #define DRV_VERSION "Version 2.0.9.1"
70 /* S2io Driver name & version. */
71 static char s2io_driver_name[] = "Neterion";
72 static char s2io_driver_version[] = DRV_VERSION;
74 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
78 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
79 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
85 * Cards with following subsystem_id have a link state indication
86 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
87 * macro below identifies these cards given the subsystem_id.
/*
 * Cards with the following subsystem IDs have a link-state indication
 * problem: 600B, 600C, 600D, 640B, 640C and 640D (Xframe I only).
 * Evaluates to 1 when (dev_type, subid) identifies such a card, else 0.
 * The expansion and both arguments are fully parenthesized so the macro
 * is safe inside larger expressions (e.g. under '!' or '*') and with
 * expression arguments.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
	(((dev_type) == XFRAME_I_DEVICE) ? \
	 (((((subid) >= 0x600B) && ((subid) <= 0x600D)) || \
	   (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)
/*
 * Adapter link state: the link is up when neither the remote nor the
 * local RMAC fault bit is set in the adapter-status value.  The
 * argument is parenthesized so expression arguments expand correctly.
 */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically marks the tasklet busy; non-zero when it was already in use. */
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
99 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
102 mac_info_t *mac_control;
104 mac_control = &sp->mac_control;
105 if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
107 if (rxb_size <= MAX_RXDS_PER_BLOCK) {
115 /* Ethtool related variables and Macros. */
116 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
117 "Register test\t(offline)",
118 "Eeprom test\t(offline)",
119 "Link test\t(online)",
120 "RLDRAM test\t(offline)",
121 "BIST Test\t(offline)"
124 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
126 {"tmac_data_octets"},
130 {"tmac_pause_ctrl_frms"},
131 {"tmac_any_err_frms"},
132 {"tmac_vld_ip_octets"},
140 {"rmac_data_octets"},
141 {"rmac_fcs_err_frms"},
143 {"rmac_vld_mcst_frms"},
144 {"rmac_vld_bcst_frms"},
145 {"rmac_in_rng_len_err_frms"},
147 {"rmac_pause_ctrl_frms"},
148 {"rmac_discarded_frms"},
149 {"rmac_usized_frms"},
150 {"rmac_osized_frms"},
152 {"rmac_jabber_frms"},
160 {"rmac_err_drp_udp"},
162 {"rmac_accepted_ip"},
164 {"\n DRIVER STATISTICS"},
165 {"single_bit_ecc_errs"},
166 {"double_bit_ecc_errs"},
/* Number of entries in the ethtool statistics-key table. */
#define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
/* Total byte length of all statistics key strings. */
#define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)
/* Number of self-test names, and their total byte length.  All four
 * expansions are parenthesized so they compose safely in expressions. */
#define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
/*
 * Initialise @timer with handler @handle and argument @arg, and arm it
 * to fire @exp jiffies from now.  Wrapped in do { } while (0) so the
 * macro behaves as a single statement and is safe in unbraced
 * if/else bodies (the original multi-statement form was not).
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	do {						\
		init_timer(&timer);			\
		timer.function = handle;		\
		timer.data = (unsigned long) arg;	\
		mod_timer(&timer, (jiffies + exp));	\
	} while (0)
182 static void s2io_vlan_rx_register(struct net_device *dev,
183 struct vlan_group *grp)
185 nic_t *nic = dev->priv;
188 spin_lock_irqsave(&nic->tx_lock, flags);
190 spin_unlock_irqrestore(&nic->tx_lock, flags);
193 /* Unregister the vlan */
194 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
196 nic_t *nic = dev->priv;
199 spin_lock_irqsave(&nic->tx_lock, flags);
201 nic->vlgrp->vlan_devices[vid] = NULL;
202 spin_unlock_irqrestore(&nic->tx_lock, flags);
206 * Constants to be programmed into the Xena's registers, to configure
210 #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
213 static u64 herc_act_dtx_cfg[] = {
215 0x8000051536750000ULL, 0x80000515367500E0ULL,
217 0x8000051536750004ULL, 0x80000515367500E4ULL,
219 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
221 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
223 0x801205150D440000ULL, 0x801205150D4400E0ULL,
225 0x801205150D440004ULL, 0x801205150D4400E4ULL,
227 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
229 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
234 static u64 xena_mdio_cfg[] = {
236 0xC001010000000000ULL, 0xC0010100000000E0ULL,
237 0xC0010100008000E4ULL,
238 /* Remove Reset from PMA PLL */
239 0xC001010000000000ULL, 0xC0010100000000E0ULL,
240 0xC0010100000000E4ULL,
244 static u64 xena_dtx_cfg[] = {
245 0x8000051500000000ULL, 0x80000515000000E0ULL,
246 0x80000515D93500E4ULL, 0x8001051500000000ULL,
247 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
248 0x8002051500000000ULL, 0x80020515000000E0ULL,
249 0x80020515F21000E4ULL,
250 /* Set PADLOOPBACKN */
251 0x8002051500000000ULL, 0x80020515000000E0ULL,
252 0x80020515B20000E4ULL, 0x8003051500000000ULL,
253 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
254 0x8004051500000000ULL, 0x80040515000000E0ULL,
255 0x80040515B20000E4ULL, 0x8005051500000000ULL,
256 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
258 /* Remove PADLOOPBACKN */
259 0x8002051500000000ULL, 0x80020515000000E0ULL,
260 0x80020515F20000E4ULL, 0x8003051500000000ULL,
261 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
262 0x8004051500000000ULL, 0x80040515000000E0ULL,
263 0x80040515F20000E4ULL, 0x8005051500000000ULL,
264 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
269 * Constants for Fixing the MacAddress problem seen mostly on
272 static u64 fix_mac[] = {
273 0x0060000000000000ULL, 0x0060600000000000ULL,
274 0x0040600000000000ULL, 0x0000600000000000ULL,
275 0x0020600000000000ULL, 0x0060600000000000ULL,
276 0x0020600000000000ULL, 0x0060600000000000ULL,
277 0x0020600000000000ULL, 0x0060600000000000ULL,
278 0x0020600000000000ULL, 0x0060600000000000ULL,
279 0x0020600000000000ULL, 0x0060600000000000ULL,
280 0x0020600000000000ULL, 0x0060600000000000ULL,
281 0x0020600000000000ULL, 0x0060600000000000ULL,
282 0x0020600000000000ULL, 0x0060600000000000ULL,
283 0x0020600000000000ULL, 0x0060600000000000ULL,
284 0x0020600000000000ULL, 0x0060600000000000ULL,
285 0x0020600000000000ULL, 0x0000600000000000ULL,
286 0x0040600000000000ULL, 0x0060600000000000ULL,
290 /* Module Loadable parameters. */
291 static unsigned int tx_fifo_num = 1;
292 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
293 {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
294 static unsigned int rx_ring_num = 1;
295 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
296 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
297 static unsigned int rts_frm_len[MAX_RX_RINGS] =
298 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
299 static unsigned int use_continuous_tx_intrs = 1;
300 static unsigned int rmac_pause_time = 65535;
301 static unsigned int mc_pause_threshold_q0q3 = 187;
302 static unsigned int mc_pause_threshold_q4q7 = 187;
303 static unsigned int shared_splits;
304 static unsigned int tmac_util_period = 5;
305 static unsigned int rmac_util_period = 5;
306 static unsigned int bimodal = 0;
307 #ifndef CONFIG_S2IO_NAPI
308 static unsigned int indicate_max_pkts;
310 /* Frequency of Rx desc syncs expressed as power of 2 */
311 static unsigned int rxsync_frequency = 3;
312 /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
313 static unsigned int intr_type = 0;
317 * This table lists all the devices that this driver supports.
319 static struct pci_device_id s2io_tbl[] __devinitdata = {
320 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
321 PCI_ANY_ID, PCI_ANY_ID},
322 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
323 PCI_ANY_ID, PCI_ANY_ID},
324 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
325 PCI_ANY_ID, PCI_ANY_ID},
326 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
327 PCI_ANY_ID, PCI_ANY_ID},
331 MODULE_DEVICE_TABLE(pci, s2io_tbl);
333 static struct pci_driver s2io_driver = {
335 .id_table = s2io_tbl,
336 .probe = s2io_init_nic,
337 .remove = __devexit_p(s2io_rem_nic),
340 /* A simplifier macro used both by init and free shared_mem Fns(). */
/* Pages needed to hold @len TxD lists when @per_each lists fit per page
 * (ceiling division).  Used by both init and free shared_mem functions.
 * Arguments are parenthesized so expression arguments expand correctly. */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
344 * init_shared_mem - Allocation and Initialization of Memory
345 * @nic: Device private variable.
346 * Description: The function allocates all the memory areas shared
347 * between the NIC and the driver. This includes Tx descriptors,
348 * Rx descriptors and the statistics block.
351 static int init_shared_mem(struct s2io_nic *nic)
354 void *tmp_v_addr, *tmp_v_addr_next;
355 dma_addr_t tmp_p_addr, tmp_p_addr_next;
356 RxD_block_t *pre_rxd_blk = NULL;
357 int i, j, blk_cnt, rx_sz, tx_sz;
358 int lst_size, lst_per_page;
359 struct net_device *dev = nic->dev;
360 #ifdef CONFIG_2BUFF_MODE
365 mac_info_t *mac_control;
366 struct config_param *config;
368 mac_control = &nic->mac_control;
369 config = &nic->config;
372 /* Allocation and initialization of TXDLs in FIOFs */
374 for (i = 0; i < config->tx_fifo_num; i++) {
375 size += config->tx_cfg[i].fifo_len;
377 if (size > MAX_AVAILABLE_TXDS) {
378 DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
380 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
384 lst_size = (sizeof(TxD_t) * config->max_txds);
385 tx_sz = lst_size * size;
386 lst_per_page = PAGE_SIZE / lst_size;
388 for (i = 0; i < config->tx_fifo_num; i++) {
389 int fifo_len = config->tx_cfg[i].fifo_len;
390 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
391 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
393 if (!mac_control->fifos[i].list_info) {
395 "Malloc failed for list_info\n");
398 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
400 for (i = 0; i < config->tx_fifo_num; i++) {
401 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
403 mac_control->fifos[i].tx_curr_put_info.offset = 0;
404 mac_control->fifos[i].tx_curr_put_info.fifo_len =
405 config->tx_cfg[i].fifo_len - 1;
406 mac_control->fifos[i].tx_curr_get_info.offset = 0;
407 mac_control->fifos[i].tx_curr_get_info.fifo_len =
408 config->tx_cfg[i].fifo_len - 1;
409 mac_control->fifos[i].fifo_no = i;
410 mac_control->fifos[i].nic = nic;
411 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 1;
413 for (j = 0; j < page_num; j++) {
417 tmp_v = pci_alloc_consistent(nic->pdev,
421 "pci_alloc_consistent ");
422 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
425 /* If we got a zero DMA address(can happen on
426 * certain platforms like PPC), reallocate.
427 * Store virtual address of page we don't want,
431 mac_control->zerodma_virt_addr = tmp_v;
433 "%s: Zero DMA address for TxDL. ", dev->name);
435 "Virtual address %p\n", tmp_v);
436 tmp_v = pci_alloc_consistent(nic->pdev,
440 "pci_alloc_consistent ");
441 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
445 while (k < lst_per_page) {
446 int l = (j * lst_per_page) + k;
447 if (l == config->tx_cfg[i].fifo_len)
449 mac_control->fifos[i].list_info[l].list_virt_addr =
450 tmp_v + (k * lst_size);
451 mac_control->fifos[i].list_info[l].list_phy_addr =
452 tmp_p + (k * lst_size);
458 /* Allocation and initialization of RXDs in Rings */
460 for (i = 0; i < config->rx_ring_num; i++) {
461 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
462 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
463 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
465 DBG_PRINT(ERR_DBG, "RxDs per Block");
468 size += config->rx_cfg[i].num_rxd;
469 mac_control->rings[i].block_count =
470 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
471 mac_control->rings[i].pkt_cnt =
472 config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
474 size = (size * (sizeof(RxD_t)));
477 for (i = 0; i < config->rx_ring_num; i++) {
478 mac_control->rings[i].rx_curr_get_info.block_index = 0;
479 mac_control->rings[i].rx_curr_get_info.offset = 0;
480 mac_control->rings[i].rx_curr_get_info.ring_len =
481 config->rx_cfg[i].num_rxd - 1;
482 mac_control->rings[i].rx_curr_put_info.block_index = 0;
483 mac_control->rings[i].rx_curr_put_info.offset = 0;
484 mac_control->rings[i].rx_curr_put_info.ring_len =
485 config->rx_cfg[i].num_rxd - 1;
486 mac_control->rings[i].nic = nic;
487 mac_control->rings[i].ring_no = i;
490 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
491 /* Allocating all the Rx blocks */
492 for (j = 0; j < blk_cnt; j++) {
493 #ifndef CONFIG_2BUFF_MODE
494 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
496 size = SIZE_OF_BLOCK;
498 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
500 if (tmp_v_addr == NULL) {
502 * In case of failure, free_shared_mem()
503 * is called, which should free any
504 * memory that was alloced till the
507 mac_control->rings[i].rx_blocks[j].block_virt_addr =
511 memset(tmp_v_addr, 0, size);
512 mac_control->rings[i].rx_blocks[j].block_virt_addr =
514 mac_control->rings[i].rx_blocks[j].block_dma_addr =
517 /* Interlinking all Rx Blocks */
518 for (j = 0; j < blk_cnt; j++) {
520 mac_control->rings[i].rx_blocks[j].block_virt_addr;
522 mac_control->rings[i].rx_blocks[(j + 1) %
523 blk_cnt].block_virt_addr;
525 mac_control->rings[i].rx_blocks[j].block_dma_addr;
527 mac_control->rings[i].rx_blocks[(j + 1) %
528 blk_cnt].block_dma_addr;
530 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
531 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
534 #ifndef CONFIG_2BUFF_MODE
535 pre_rxd_blk->reserved_2_pNext_RxD_block =
536 (unsigned long) tmp_v_addr_next;
538 pre_rxd_blk->pNext_RxD_Blk_physical =
539 (u64) tmp_p_addr_next;
543 #ifdef CONFIG_2BUFF_MODE
545 * Allocation of Storages for buffer addresses in 2BUFF mode
546 * and the buffers as well.
548 for (i = 0; i < config->rx_ring_num; i++) {
550 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
551 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
553 if (!mac_control->rings[i].ba)
555 for (j = 0; j < blk_cnt; j++) {
557 mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
558 (MAX_RXDS_PER_BLOCK + 1)),
560 if (!mac_control->rings[i].ba[j])
562 while (k != MAX_RXDS_PER_BLOCK) {
563 ba = &mac_control->rings[i].ba[j][k];
565 ba->ba_0_org = (void *) kmalloc
566 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
569 tmp = (unsigned long) ba->ba_0_org;
571 tmp &= ~((unsigned long) ALIGN_SIZE);
572 ba->ba_0 = (void *) tmp;
574 ba->ba_1_org = (void *) kmalloc
575 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
578 tmp = (unsigned long) ba->ba_1_org;
580 tmp &= ~((unsigned long) ALIGN_SIZE);
581 ba->ba_1 = (void *) tmp;
588 /* Allocation and initialization of Statistics block */
589 size = sizeof(StatInfo_t);
590 mac_control->stats_mem = pci_alloc_consistent
591 (nic->pdev, size, &mac_control->stats_mem_phy);
593 if (!mac_control->stats_mem) {
595 * In case of failure, free_shared_mem() is called, which
596 * should free any memory that was alloced till the
601 mac_control->stats_mem_sz = size;
603 tmp_v_addr = mac_control->stats_mem;
604 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
605 memset(tmp_v_addr, 0, size);
606 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
607 (unsigned long long) tmp_p_addr);
613 * free_shared_mem - Free the allocated Memory
614 * @nic: Device private variable.
615 * Description: This function is to free all memory locations allocated by
616 * the init_shared_mem() function and return it to the kernel.
619 static void free_shared_mem(struct s2io_nic *nic)
621 int i, j, blk_cnt, size;
623 dma_addr_t tmp_p_addr;
624 mac_info_t *mac_control;
625 struct config_param *config;
626 int lst_size, lst_per_page;
627 struct net_device *dev = nic->dev;
632 mac_control = &nic->mac_control;
633 config = &nic->config;
635 lst_size = (sizeof(TxD_t) * config->max_txds);
636 lst_per_page = PAGE_SIZE / lst_size;
638 for (i = 0; i < config->tx_fifo_num; i++) {
639 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
641 for (j = 0; j < page_num; j++) {
642 int mem_blks = (j * lst_per_page);
643 if (!mac_control->fifos[i].list_info)
645 if (!mac_control->fifos[i].list_info[mem_blks].
648 pci_free_consistent(nic->pdev, PAGE_SIZE,
649 mac_control->fifos[i].
652 mac_control->fifos[i].
656 /* If we got a zero DMA address during allocation,
659 if (mac_control->zerodma_virt_addr) {
660 pci_free_consistent(nic->pdev, PAGE_SIZE,
661 mac_control->zerodma_virt_addr,
664 "%s: Freeing TxDL with zero DMA addr. ",
666 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
667 mac_control->zerodma_virt_addr);
669 kfree(mac_control->fifos[i].list_info);
672 #ifndef CONFIG_2BUFF_MODE
673 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
675 size = SIZE_OF_BLOCK;
677 for (i = 0; i < config->rx_ring_num; i++) {
678 blk_cnt = mac_control->rings[i].block_count;
679 for (j = 0; j < blk_cnt; j++) {
680 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
682 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
684 if (tmp_v_addr == NULL)
686 pci_free_consistent(nic->pdev, size,
687 tmp_v_addr, tmp_p_addr);
691 #ifdef CONFIG_2BUFF_MODE
692 /* Freeing buffer storage addresses in 2BUFF mode. */
693 for (i = 0; i < config->rx_ring_num; i++) {
695 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
696 for (j = 0; j < blk_cnt; j++) {
698 if (!mac_control->rings[i].ba[j])
700 while (k != MAX_RXDS_PER_BLOCK) {
701 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
706 kfree(mac_control->rings[i].ba[j]);
708 kfree(mac_control->rings[i].ba);
712 if (mac_control->stats_mem) {
713 pci_free_consistent(nic->pdev,
714 mac_control->stats_mem_sz,
715 mac_control->stats_mem,
716 mac_control->stats_mem_phy);
721 * s2io_verify_pci_mode -
724 static int s2io_verify_pci_mode(nic_t *nic)
726 XENA_dev_config_t __iomem *bar0 = nic->bar0;
727 register u64 val64 = 0;
730 val64 = readq(&bar0->pci_mode);
731 mode = (u8)GET_PCI_MODE(val64);
733 if ( val64 & PCI_MODE_UNKNOWN_MODE)
734 return -1; /* Unknown PCI mode */
740 * s2io_print_pci_mode -
742 static int s2io_print_pci_mode(nic_t *nic)
744 XENA_dev_config_t __iomem *bar0 = nic->bar0;
745 register u64 val64 = 0;
747 struct config_param *config = &nic->config;
749 val64 = readq(&bar0->pci_mode);
750 mode = (u8)GET_PCI_MODE(val64);
752 if ( val64 & PCI_MODE_UNKNOWN_MODE)
753 return -1; /* Unknown PCI mode */
755 if (val64 & PCI_MODE_32_BITS) {
756 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
758 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
762 case PCI_MODE_PCI_33:
763 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
764 config->bus_speed = 33;
766 case PCI_MODE_PCI_66:
767 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
768 config->bus_speed = 133;
770 case PCI_MODE_PCIX_M1_66:
771 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
772 config->bus_speed = 133; /* Herc doubles the clock rate */
774 case PCI_MODE_PCIX_M1_100:
775 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
776 config->bus_speed = 200;
778 case PCI_MODE_PCIX_M1_133:
779 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
780 config->bus_speed = 266;
782 case PCI_MODE_PCIX_M2_66:
783 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
784 config->bus_speed = 133;
786 case PCI_MODE_PCIX_M2_100:
787 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
788 config->bus_speed = 200;
790 case PCI_MODE_PCIX_M2_133:
791 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
792 config->bus_speed = 266;
795 return -1; /* Unsupported bus speed */
802 * init_nic - Initialization of hardware
 * @nic: device private variable
804 * Description: The function sequentially configures every block
805 * of the H/W from their reset values.
806 * Return Value: SUCCESS on success and
807 * '-1' on failure (endian settings incorrect).
810 static int init_nic(struct s2io_nic *nic)
812 XENA_dev_config_t __iomem *bar0 = nic->bar0;
813 struct net_device *dev = nic->dev;
814 register u64 val64 = 0;
818 mac_info_t *mac_control;
819 struct config_param *config;
820 int mdio_cnt = 0, dtx_cnt = 0;
821 unsigned long long mem_share;
824 mac_control = &nic->mac_control;
825 config = &nic->config;
827 /* to set the swapper controle on the card */
828 if(s2io_set_swapper(nic)) {
829 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
834 * Herc requires EOI to be removed from reset before XGXS, so..
836 if (nic->device_type & XFRAME_II_DEVICE) {
837 val64 = 0xA500000000ULL;
838 writeq(val64, &bar0->sw_reset);
840 val64 = readq(&bar0->sw_reset);
843 /* Remove XGXS from reset state */
845 writeq(val64, &bar0->sw_reset);
847 val64 = readq(&bar0->sw_reset);
849 /* Enable Receiving broadcasts */
850 add = &bar0->mac_cfg;
851 val64 = readq(&bar0->mac_cfg);
852 val64 |= MAC_RMAC_BCAST_ENABLE;
853 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
854 writel((u32) val64, add);
855 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
856 writel((u32) (val64 >> 32), (add + 4));
858 /* Read registers in all blocks */
859 val64 = readq(&bar0->mac_int_mask);
860 val64 = readq(&bar0->mc_int_mask);
861 val64 = readq(&bar0->xgxs_int_mask);
865 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
868 * Configuring the XAUI Interface of Xena.
869 * ***************************************
870 * To Configure the Xena's XAUI, one has to write a series
871 * of 64 bit values into two registers in a particular
872 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
873 * which will be defined in the array of configuration values
874 * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
875 * to switch writing from one regsiter to another. We continue
876 * writing these values until we encounter the 'END_SIGN' macro.
877 * For example, After making a series of 21 writes into
878 * dtx_control register the 'SWITCH_SIGN' appears and hence we
879 * start writing into mdio_control until we encounter END_SIGN.
881 if (nic->device_type & XFRAME_II_DEVICE) {
882 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
883 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
884 &bar0->dtx_control, UF);
886 msleep(1); /* Necessary!! */
892 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
893 if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
897 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
898 &bar0->dtx_control, UF);
899 val64 = readq(&bar0->dtx_control);
903 while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
904 if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
908 SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
909 &bar0->mdio_control, UF);
910 val64 = readq(&bar0->mdio_control);
913 if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
914 (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
922 /* Tx DMA Initialization */
924 writeq(val64, &bar0->tx_fifo_partition_0);
925 writeq(val64, &bar0->tx_fifo_partition_1);
926 writeq(val64, &bar0->tx_fifo_partition_2);
927 writeq(val64, &bar0->tx_fifo_partition_3);
930 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
932 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
933 13) | vBIT(config->tx_cfg[i].fifo_priority,
936 if (i == (config->tx_fifo_num - 1)) {
943 writeq(val64, &bar0->tx_fifo_partition_0);
947 writeq(val64, &bar0->tx_fifo_partition_1);
951 writeq(val64, &bar0->tx_fifo_partition_2);
955 writeq(val64, &bar0->tx_fifo_partition_3);
960 /* Enable Tx FIFO partition 0. */
961 val64 = readq(&bar0->tx_fifo_partition_0);
962 val64 |= BIT(0); /* To enable the FIFO partition. */
963 writeq(val64, &bar0->tx_fifo_partition_0);
966 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
967 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
969 if ((nic->device_type == XFRAME_I_DEVICE) &&
970 (get_xena_rev_id(nic->pdev) < 4))
971 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
973 val64 = readq(&bar0->tx_fifo_partition_0);
974 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
975 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
978 * Initialization of Tx_PA_CONFIG register to ignore packet
979 * integrity checking.
981 val64 = readq(&bar0->tx_pa_cfg);
982 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
983 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
984 writeq(val64, &bar0->tx_pa_cfg);
986 /* Rx DMA intialization. */
988 for (i = 0; i < config->rx_ring_num; i++) {
990 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
993 writeq(val64, &bar0->rx_queue_priority);
996 * Allocating equal share of memory to all the
1000 if (nic->device_type & XFRAME_II_DEVICE)
1005 for (i = 0; i < config->rx_ring_num; i++) {
1008 mem_share = (mem_size / config->rx_ring_num +
1009 mem_size % config->rx_ring_num);
1010 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1013 mem_share = (mem_size / config->rx_ring_num);
1014 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1017 mem_share = (mem_size / config->rx_ring_num);
1018 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1021 mem_share = (mem_size / config->rx_ring_num);
1022 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1025 mem_share = (mem_size / config->rx_ring_num);
1026 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1029 mem_share = (mem_size / config->rx_ring_num);
1030 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1033 mem_share = (mem_size / config->rx_ring_num);
1034 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1037 mem_share = (mem_size / config->rx_ring_num);
1038 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1042 writeq(val64, &bar0->rx_queue_cfg);
1045 * Filling Tx round robin registers
1046 * as per the number of FIFOs
1048 switch (config->tx_fifo_num) {
1050 val64 = 0x0000000000000000ULL;
1051 writeq(val64, &bar0->tx_w_round_robin_0);
1052 writeq(val64, &bar0->tx_w_round_robin_1);
1053 writeq(val64, &bar0->tx_w_round_robin_2);
1054 writeq(val64, &bar0->tx_w_round_robin_3);
1055 writeq(val64, &bar0->tx_w_round_robin_4);
1058 val64 = 0x0000010000010000ULL;
1059 writeq(val64, &bar0->tx_w_round_robin_0);
1060 val64 = 0x0100000100000100ULL;
1061 writeq(val64, &bar0->tx_w_round_robin_1);
1062 val64 = 0x0001000001000001ULL;
1063 writeq(val64, &bar0->tx_w_round_robin_2);
1064 val64 = 0x0000010000010000ULL;
1065 writeq(val64, &bar0->tx_w_round_robin_3);
1066 val64 = 0x0100000000000000ULL;
1067 writeq(val64, &bar0->tx_w_round_robin_4);
1070 val64 = 0x0001000102000001ULL;
1071 writeq(val64, &bar0->tx_w_round_robin_0);
1072 val64 = 0x0001020000010001ULL;
1073 writeq(val64, &bar0->tx_w_round_robin_1);
1074 val64 = 0x0200000100010200ULL;
1075 writeq(val64, &bar0->tx_w_round_robin_2);
1076 val64 = 0x0001000102000001ULL;
1077 writeq(val64, &bar0->tx_w_round_robin_3);
1078 val64 = 0x0001020000000000ULL;
1079 writeq(val64, &bar0->tx_w_round_robin_4);
1082 val64 = 0x0001020300010200ULL;
1083 writeq(val64, &bar0->tx_w_round_robin_0);
1084 val64 = 0x0100000102030001ULL;
1085 writeq(val64, &bar0->tx_w_round_robin_1);
1086 val64 = 0x0200010000010203ULL;
1087 writeq(val64, &bar0->tx_w_round_robin_2);
1088 val64 = 0x0001020001000001ULL;
1089 writeq(val64, &bar0->tx_w_round_robin_3);
1090 val64 = 0x0203000100000000ULL;
1091 writeq(val64, &bar0->tx_w_round_robin_4);
1094 val64 = 0x0001000203000102ULL;
1095 writeq(val64, &bar0->tx_w_round_robin_0);
1096 val64 = 0x0001020001030004ULL;
1097 writeq(val64, &bar0->tx_w_round_robin_1);
1098 val64 = 0x0001000203000102ULL;
1099 writeq(val64, &bar0->tx_w_round_robin_2);
1100 val64 = 0x0001020001030004ULL;
1101 writeq(val64, &bar0->tx_w_round_robin_3);
1102 val64 = 0x0001000000000000ULL;
1103 writeq(val64, &bar0->tx_w_round_robin_4);
1106 val64 = 0x0001020304000102ULL;
1107 writeq(val64, &bar0->tx_w_round_robin_0);
1108 val64 = 0x0304050001020001ULL;
1109 writeq(val64, &bar0->tx_w_round_robin_1);
1110 val64 = 0x0203000100000102ULL;
1111 writeq(val64, &bar0->tx_w_round_robin_2);
1112 val64 = 0x0304000102030405ULL;
1113 writeq(val64, &bar0->tx_w_round_robin_3);
1114 val64 = 0x0001000200000000ULL;
1115 writeq(val64, &bar0->tx_w_round_robin_4);
1118 val64 = 0x0001020001020300ULL;
1119 writeq(val64, &bar0->tx_w_round_robin_0);
1120 val64 = 0x0102030400010203ULL;
1121 writeq(val64, &bar0->tx_w_round_robin_1);
1122 val64 = 0x0405060001020001ULL;
1123 writeq(val64, &bar0->tx_w_round_robin_2);
1124 val64 = 0x0304050000010200ULL;
1125 writeq(val64, &bar0->tx_w_round_robin_3);
1126 val64 = 0x0102030000000000ULL;
1127 writeq(val64, &bar0->tx_w_round_robin_4);
1130 val64 = 0x0001020300040105ULL;
1131 writeq(val64, &bar0->tx_w_round_robin_0);
1132 val64 = 0x0200030106000204ULL;
1133 writeq(val64, &bar0->tx_w_round_robin_1);
1134 val64 = 0x0103000502010007ULL;
1135 writeq(val64, &bar0->tx_w_round_robin_2);
1136 val64 = 0x0304010002060500ULL;
1137 writeq(val64, &bar0->tx_w_round_robin_3);
1138 val64 = 0x0103020400000000ULL;
1139 writeq(val64, &bar0->tx_w_round_robin_4);
1143 /* Filling the Rx round robin registers as per the
1144 * number of Rings and steering based on QoS.
1146 switch (config->rx_ring_num) {
1148 val64 = 0x8080808080808080ULL;
1149 writeq(val64, &bar0->rts_qos_steering);
1152 val64 = 0x0000010000010000ULL;
1153 writeq(val64, &bar0->rx_w_round_robin_0);
1154 val64 = 0x0100000100000100ULL;
1155 writeq(val64, &bar0->rx_w_round_robin_1);
1156 val64 = 0x0001000001000001ULL;
1157 writeq(val64, &bar0->rx_w_round_robin_2);
1158 val64 = 0x0000010000010000ULL;
1159 writeq(val64, &bar0->rx_w_round_robin_3);
1160 val64 = 0x0100000000000000ULL;
1161 writeq(val64, &bar0->rx_w_round_robin_4);
1163 val64 = 0x8080808040404040ULL;
1164 writeq(val64, &bar0->rts_qos_steering);
1167 val64 = 0x0001000102000001ULL;
1168 writeq(val64, &bar0->rx_w_round_robin_0);
1169 val64 = 0x0001020000010001ULL;
1170 writeq(val64, &bar0->rx_w_round_robin_1);
1171 val64 = 0x0200000100010200ULL;
1172 writeq(val64, &bar0->rx_w_round_robin_2);
1173 val64 = 0x0001000102000001ULL;
1174 writeq(val64, &bar0->rx_w_round_robin_3);
1175 val64 = 0x0001020000000000ULL;
1176 writeq(val64, &bar0->rx_w_round_robin_4);
1178 val64 = 0x8080804040402020ULL;
1179 writeq(val64, &bar0->rts_qos_steering);
1182 val64 = 0x0001020300010200ULL;
1183 writeq(val64, &bar0->rx_w_round_robin_0);
1184 val64 = 0x0100000102030001ULL;
1185 writeq(val64, &bar0->rx_w_round_robin_1);
1186 val64 = 0x0200010000010203ULL;
1187 writeq(val64, &bar0->rx_w_round_robin_2);
1188 val64 = 0x0001020001000001ULL;
1189 writeq(val64, &bar0->rx_w_round_robin_3);
1190 val64 = 0x0203000100000000ULL;
1191 writeq(val64, &bar0->rx_w_round_robin_4);
1193 val64 = 0x8080404020201010ULL;
1194 writeq(val64, &bar0->rts_qos_steering);
1197 val64 = 0x0001000203000102ULL;
1198 writeq(val64, &bar0->rx_w_round_robin_0);
1199 val64 = 0x0001020001030004ULL;
1200 writeq(val64, &bar0->rx_w_round_robin_1);
1201 val64 = 0x0001000203000102ULL;
1202 writeq(val64, &bar0->rx_w_round_robin_2);
1203 val64 = 0x0001020001030004ULL;
1204 writeq(val64, &bar0->rx_w_round_robin_3);
1205 val64 = 0x0001000000000000ULL;
1206 writeq(val64, &bar0->rx_w_round_robin_4);
1208 val64 = 0x8080404020201008ULL;
1209 writeq(val64, &bar0->rts_qos_steering);
1212 val64 = 0x0001020304000102ULL;
1213 writeq(val64, &bar0->rx_w_round_robin_0);
1214 val64 = 0x0304050001020001ULL;
1215 writeq(val64, &bar0->rx_w_round_robin_1);
1216 val64 = 0x0203000100000102ULL;
1217 writeq(val64, &bar0->rx_w_round_robin_2);
1218 val64 = 0x0304000102030405ULL;
1219 writeq(val64, &bar0->rx_w_round_robin_3);
1220 val64 = 0x0001000200000000ULL;
1221 writeq(val64, &bar0->rx_w_round_robin_4);
1223 val64 = 0x8080404020100804ULL;
1224 writeq(val64, &bar0->rts_qos_steering);
1227 val64 = 0x0001020001020300ULL;
1228 writeq(val64, &bar0->rx_w_round_robin_0);
1229 val64 = 0x0102030400010203ULL;
1230 writeq(val64, &bar0->rx_w_round_robin_1);
1231 val64 = 0x0405060001020001ULL;
1232 writeq(val64, &bar0->rx_w_round_robin_2);
1233 val64 = 0x0304050000010200ULL;
1234 writeq(val64, &bar0->rx_w_round_robin_3);
1235 val64 = 0x0102030000000000ULL;
1236 writeq(val64, &bar0->rx_w_round_robin_4);
1238 val64 = 0x8080402010080402ULL;
1239 writeq(val64, &bar0->rts_qos_steering);
1242 val64 = 0x0001020300040105ULL;
1243 writeq(val64, &bar0->rx_w_round_robin_0);
1244 val64 = 0x0200030106000204ULL;
1245 writeq(val64, &bar0->rx_w_round_robin_1);
1246 val64 = 0x0103000502010007ULL;
1247 writeq(val64, &bar0->rx_w_round_robin_2);
1248 val64 = 0x0304010002060500ULL;
1249 writeq(val64, &bar0->rx_w_round_robin_3);
1250 val64 = 0x0103020400000000ULL;
1251 writeq(val64, &bar0->rx_w_round_robin_4);
1253 val64 = 0x8040201008040201ULL;
1254 writeq(val64, &bar0->rts_qos_steering);
1260 for (i = 0; i < 8; i++)
1261 writeq(val64, &bar0->rts_frm_len_n[i]);
1263 /* Set the default rts frame length for the rings configured */
1264 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1265 for (i = 0 ; i < config->rx_ring_num ; i++)
1266 writeq(val64, &bar0->rts_frm_len_n[i]);
1268 /* Set the frame length for the configured rings
1269 * desired by the user
1271 for (i = 0; i < config->rx_ring_num; i++) {
1272 /* If rts_frm_len[i] == 0 then it is assumed that user not
1273 * specified frame length steering.
1274 * If the user provides the frame length then program
1275 * the rts_frm_len register for those values or else
1276 * leave it as it is.
1278 if (rts_frm_len[i] != 0) {
1279 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1280 &bar0->rts_frm_len_n[i]);
1284 /* Program statistics memory */
1285 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1287 if (nic->device_type == XFRAME_II_DEVICE) {
1288 val64 = STAT_BC(0x320);
1289 writeq(val64, &bar0->stat_byte_cnt);
1293 * Initializing the sampling rate for the device to calculate the
1294 * bandwidth utilization.
1296 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1297 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1298 writeq(val64, &bar0->mac_link_util);
1302 * Initializing the Transmit and Receive Traffic Interrupt
1306 * TTI Initialization. Default Tx timer gets us about
1307 * 250 interrupts per sec. Continuous interrupts are enabled
1310 if (nic->device_type == XFRAME_II_DEVICE) {
1311 int count = (nic->config.bus_speed * 125)/2;
1312 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1315 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1317 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1318 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1319 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1320 if (use_continuous_tx_intrs)
1321 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1322 writeq(val64, &bar0->tti_data1_mem);
1324 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1325 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1326 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1327 writeq(val64, &bar0->tti_data2_mem);
1329 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1330 writeq(val64, &bar0->tti_command_mem);
1333 * Once the operation completes, the Strobe bit of the command
1334 * register will be reset. We poll for this particular condition
1335 * We wait for a maximum of 500ms for the operation to complete,
1336 * if it's not complete by then we return error.
1340 val64 = readq(&bar0->tti_command_mem);
1341 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1345 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1353 if (nic->config.bimodal) {
1355 for (k = 0; k < config->rx_ring_num; k++) {
1356 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1357 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1358 writeq(val64, &bar0->tti_command_mem);
1361 * Once the operation completes, the Strobe bit of the command
1362 * register will be reset. We poll for this particular condition
1363 * We wait for a maximum of 500ms for the operation to complete,
1364 * if it's not complete by then we return error.
1368 val64 = readq(&bar0->tti_command_mem);
1369 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1374 "%s: TTI init Failed\n",
1384 /* RTI Initialization */
1385 if (nic->device_type == XFRAME_II_DEVICE) {
1387 * Programmed to generate Apprx 500 Intrs per
1390 int count = (nic->config.bus_speed * 125)/4;
1391 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1393 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1395 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1396 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1397 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1399 writeq(val64, &bar0->rti_data1_mem);
1401 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1402 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1403 if (nic->intr_type == MSI_X)
1404 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1405 RTI_DATA2_MEM_RX_UFC_D(0x40));
1407 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1408 RTI_DATA2_MEM_RX_UFC_D(0x80));
1409 writeq(val64, &bar0->rti_data2_mem);
1411 for (i = 0; i < config->rx_ring_num; i++) {
1412 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1413 | RTI_CMD_MEM_OFFSET(i);
1414 writeq(val64, &bar0->rti_command_mem);
1417 * Once the operation completes, the Strobe bit of the
1418 * command register will be reset. We poll for this
1419 * particular condition. We wait for a maximum of 500ms
1420 * for the operation to complete, if it's not complete
1421 * by then we return error.
1425 val64 = readq(&bar0->rti_command_mem);
1426 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1430 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1441 * Initializing proper values as Pause threshold into all
1442 * the 8 Queues on Rx side.
1444 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1445 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1447 /* Disable RMAC PAD STRIPPING */
1448 add = &bar0->mac_cfg;
1449 val64 = readq(&bar0->mac_cfg);
1450 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1451 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1452 writel((u32) (val64), add);
1453 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1454 writel((u32) (val64 >> 32), (add + 4));
1455 val64 = readq(&bar0->mac_cfg);
1458 * Set the time value to be inserted in the pause frame
1459 * generated by xena.
1461 val64 = readq(&bar0->rmac_pause_cfg);
1462 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1463 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1464 writeq(val64, &bar0->rmac_pause_cfg);
1467 * Set the Threshold Limit for Generating the pause frame
1468 * If the amount of data in any Queue exceeds ratio of
1469 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1470 * pause frame is generated
1473 for (i = 0; i < 4; i++) {
1475 (((u64) 0xFF00 | nic->mac_control.
1476 mc_pause_threshold_q0q3)
1479 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1482 for (i = 0; i < 4; i++) {
1484 (((u64) 0xFF00 | nic->mac_control.
1485 mc_pause_threshold_q4q7)
1488 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1491 * TxDMA will stop Read request if the number of read split has
1492 * exceeded the limit pointed by shared_splits
1494 val64 = readq(&bar0->pic_control);
1495 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1496 writeq(val64, &bar0->pic_control);
1499 * Programming the Herc to split every write transaction
1500 * that does not start on an ADB to reduce disconnects.
1502 if (nic->device_type == XFRAME_II_DEVICE) {
1503 val64 = WREQ_SPLIT_MASK_SET_MASK(255);
1504 writeq(val64, &bar0->wreq_split_mask);
1507 /* Setting Link stability period to 64 ms */
1508 if (nic->device_type == XFRAME_II_DEVICE) {
1509 val64 = MISC_LINK_STABILITY_PRD(3);
1510 writeq(val64, &bar0->misc_control);
1515 #define LINK_UP_DOWN_INTERRUPT 1
1516 #define MAC_RMAC_ERR_TIMER 2
/*
 * s2io_link_fault_indication - Choose the link-fault detection scheme.
 * @nic: device private structure.
 * Return: LINK_UP_DOWN_INTERRUPT (GPIO link interrupt) only for a
 * Xframe-II adapter running with legacy INTA interrupts; every other
 * combination falls back to MAC_RMAC_ERR_TIMER based polling.
 * NOTE(review): braces are elided in this listing; only comments added.
 */
1518 int s2io_link_fault_indication(nic_t *nic)
1520 if (nic->intr_type != INTA)
1521 return MAC_RMAC_ERR_TIMER; /* MSI/MSI-X: no GPIO link interrupt path */
1522 if (nic->device_type == XFRAME_II_DEVICE)
1523 return LINK_UP_DOWN_INTERRUPT; /* Herc + INTA: use GPIO interrupt */
1525 return MAC_RMAC_ERR_TIMER; /* Xframe-I: poll via the RMAC error timer */
1529 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1530 * @nic: device private variable,
1531 * @mask: A mask indicating which Intr block must be modified and,
1532 * @flag: A flag indicating whether to enable or disable the Intrs.
1533 * Description: This function will either disable or enable the interrupts
1534 * depending on the flag argument. The mask argument can be used to
1535 * enable/disable any Intr block.
1536 * Return Value: NONE.
1539 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
/* NOTE(review): interior lines are elided in this listing (embedded line
 * numbers jump); comments only were added, the code text is untouched.
 * Pattern per block: clear the block's bit(s) in general_int_mask to
 * enable, then program the block-local mask register via readq/writeq. */
1541 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1542 register u64 val64 = 0, temp64 = 0;
1544 /* Top level interrupt classification */
1545 /* PIC Interrupts */
1546 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1547 /* Enable PIC Intrs in the general intr mask register */
1548 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1549 if (flag == ENABLE_INTRS) {
1550 temp64 = readq(&bar0->general_int_mask);
1551 temp64 &= ~((u64) val64);
1552 writeq(temp64, &bar0->general_int_mask);
1554 * If Hercules adapter enable GPIO otherwise
1555 * disabled all PCIX, Flash, MDIO, IIC and GPIO
1556 * interrupts for now.
/* Unmask the GPIO link-state interrupt only when the link-fault scheme
 * selected by s2io_link_fault_indication() is interrupt driven. */
1559 if (s2io_link_fault_indication(nic) ==
1560 LINK_UP_DOWN_INTERRUPT ) {
1561 temp64 = readq(&bar0->pic_int_mask);
1562 temp64 &= ~((u64) PIC_INT_GPIO);
1563 writeq(temp64, &bar0->pic_int_mask);
1564 temp64 = readq(&bar0->gpio_int_mask);
1565 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1566 writeq(temp64, &bar0->gpio_int_mask);
1568 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1571 * No MSI Support is available presently, so TTI and
1572 * RTI interrupts are also disabled.
1574 } else if (flag == DISABLE_INTRS) {
1576 * Disable PIC Intrs in the general
1577 * intr mask register
1579 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1580 temp64 = readq(&bar0->general_int_mask);
/* NOTE(review): a line merging temp64 into val64 appears elided here
 * (and at the matching spots below) -- verify against the full source. */
1582 writeq(val64, &bar0->general_int_mask);
1586 /* DMA Interrupts */
1587 /* Enabling/Disabling Tx DMA interrupts */
1588 if (mask & TX_DMA_INTR) {
1589 /* Enable TxDMA Intrs in the general intr mask register */
1590 val64 = TXDMA_INT_M;
1591 if (flag == ENABLE_INTRS) {
1592 temp64 = readq(&bar0->general_int_mask);
1593 temp64 &= ~((u64) val64);
1594 writeq(temp64, &bar0->general_int_mask);
1596 * Keep all interrupts other than PFC interrupt
1597 * and PCC interrupt disabled in DMA level.
1599 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1601 writeq(val64, &bar0->txdma_int_mask);
1603 * Enable only the MISC error 1 interrupt in PFC block
1605 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1606 writeq(val64, &bar0->pfc_err_mask);
1608 * Enable only the FB_ECC error interrupt in PCC block
1610 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1611 writeq(val64, &bar0->pcc_err_mask);
1612 } else if (flag == DISABLE_INTRS) {
1614 * Disable TxDMA Intrs in the general intr mask
1617 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1618 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1619 temp64 = readq(&bar0->general_int_mask);
1621 writeq(val64, &bar0->general_int_mask);
1625 /* Enabling/Disabling Rx DMA interrupts */
1626 if (mask & RX_DMA_INTR) {
1627 /* Enable RxDMA Intrs in the general intr mask register */
1628 val64 = RXDMA_INT_M;
1629 if (flag == ENABLE_INTRS) {
1630 temp64 = readq(&bar0->general_int_mask);
1631 temp64 &= ~((u64) val64);
1632 writeq(temp64, &bar0->general_int_mask);
1634 * All RxDMA block interrupts are disabled for now
1637 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1638 } else if (flag == DISABLE_INTRS) {
1640 * Disable RxDMA Intrs in the general intr mask
1643 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1644 temp64 = readq(&bar0->general_int_mask);
1646 writeq(val64, &bar0->general_int_mask);
1650 /* MAC Interrupts */
1651 /* Enabling/Disabling MAC interrupts */
1652 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1653 val64 = TXMAC_INT_M | RXMAC_INT_M;
1654 if (flag == ENABLE_INTRS) {
1655 temp64 = readq(&bar0->general_int_mask);
1656 temp64 &= ~((u64) val64);
1657 writeq(temp64, &bar0->general_int_mask);
1659 * All MAC block error interrupts are disabled for now
1662 } else if (flag == DISABLE_INTRS) {
1664 * Disable MAC Intrs in the general intr mask register
1666 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1667 writeq(DISABLE_ALL_INTRS,
1668 &bar0->mac_rmac_err_mask);
1670 temp64 = readq(&bar0->general_int_mask);
1672 writeq(val64, &bar0->general_int_mask);
1676 /* XGXS Interrupts */
1677 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1678 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1679 if (flag == ENABLE_INTRS) {
1680 temp64 = readq(&bar0->general_int_mask);
1681 temp64 &= ~((u64) val64);
1682 writeq(temp64, &bar0->general_int_mask);
1684 * All XGXS block error interrupts are disabled for now
1687 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1688 } else if (flag == DISABLE_INTRS) {
1690 * Disable MC Intrs in the general intr mask register
1692 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1693 temp64 = readq(&bar0->general_int_mask);
1695 writeq(val64, &bar0->general_int_mask);
1699 /* Memory Controller(MC) interrupts */
1700 if (mask & MC_INTR) {
/* NOTE(review): the val64 assignment for the MC block (presumably the
 * MC bit in general_int_mask) is elided in this listing -- confirm. */
1702 if (flag == ENABLE_INTRS) {
1703 temp64 = readq(&bar0->general_int_mask);
1704 temp64 &= ~((u64) val64);
1705 writeq(temp64, &bar0->general_int_mask);
1707 * Enable all MC Intrs.
1709 writeq(0x0, &bar0->mc_int_mask);
1710 writeq(0x0, &bar0->mc_err_mask);
1711 } else if (flag == DISABLE_INTRS) {
1713 * Disable MC Intrs in the general intr mask register
1715 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1716 temp64 = readq(&bar0->general_int_mask);
1718 writeq(val64, &bar0->general_int_mask);
1723 /* Tx traffic interrupts */
1724 if (mask & TX_TRAFFIC_INTR) {
1725 val64 = TXTRAFFIC_INT_M;
1726 if (flag == ENABLE_INTRS) {
1727 temp64 = readq(&bar0->general_int_mask);
1728 temp64 &= ~((u64) val64);
1729 writeq(temp64, &bar0->general_int_mask);
1731 * Enable all the Tx side interrupts
1732 * writing 0 Enables all 64 TX interrupt levels
1734 writeq(0x0, &bar0->tx_traffic_mask);
1735 } else if (flag == DISABLE_INTRS) {
1737 * Disable Tx Traffic Intrs in the general intr mask
1740 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1741 temp64 = readq(&bar0->general_int_mask);
1743 writeq(val64, &bar0->general_int_mask);
1747 /* Rx traffic interrupts */
1748 if (mask & RX_TRAFFIC_INTR) {
1749 val64 = RXTRAFFIC_INT_M;
1750 if (flag == ENABLE_INTRS) {
1751 temp64 = readq(&bar0->general_int_mask);
1752 temp64 &= ~((u64) val64);
1753 writeq(temp64, &bar0->general_int_mask);
1754 /* writing 0 Enables all 8 RX interrupt levels */
1755 writeq(0x0, &bar0->rx_traffic_mask);
1756 } else if (flag == DISABLE_INTRS) {
1758 * Disable Rx Traffic Intrs in the general intr mask
1761 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1762 temp64 = readq(&bar0->general_int_mask);
1764 writeq(val64, &bar0->general_int_mask);
/*
 * check_prc_pcc_state - Test PRC/PCC quiescence bits in adapter status.
 * @val64: value read from the adapter_status register.
 * @flag: FALSE before the adapter-enable bit was ever written, TRUE
 *        after; the sense of the RMAC_PCC idle comparison flips.
 * @rev_id: Xframe-I revision id; revisions >= 4 (and all Herc) use the
 *          single RMAC_PCC_IDLE bit, older revisions use the
 *          RMAC_PCC_FOUR_IDLE field.
 * @herc: non-zero for Xframe-II (Hercules) adapters.
 * NOTE(review): the return statements and enclosing braces are elided
 * in this listing; only comments were added, code is untouched.
 */
1769 static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1773 if (flag == FALSE) {
1774 if ((!herc && (rev_id >= 4)) || herc) {
1775 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1776 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1777 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1781 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1782 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1783 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
/* After the adapter was enabled once, PCC must read back as idle and
 * the PRC quiescent bits are accepted either fully set or fully clear. */
1788 if ((!herc && (rev_id >= 4)) || herc) {
1789 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1790 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1791 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1792 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1793 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1797 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1798 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1799 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1800 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1801 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1810 * verify_xena_quiescence - Checks whether the H/W is ready
1811 * @val64 : Value read from adapter status register.
1812 * @flag : indicates if the adapter enable bit was ever written once
1814 * Description: Returns whether the H/W is ready to go or not. Depending
1815 * on whether adapter enable bit was written or not the comparison
1816 * differs and the calling function passes the input argument flag to
1818 * Return: 1 if Xena is quiescent
1819 * 0 if Xena is not quiescent
1822 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
/* NOTE(review): several lines (declarations of herc/ret, the masked
 * comparison of tmp64, the final return) are elided in this listing. */
1825 u64 tmp64 = ~((u64) val64); /* complement: a clear bit below means the READY/LOCK bit was set */
1826 int rev_id = get_xena_rev_id(sp->pdev);
1828 herc = (sp->device_type == XFRAME_II_DEVICE);
/* All of these readiness/PLL-lock bits must be set in adapter status
 * before PRC/PCC state is even worth checking. */
1831 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1832 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1833 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1834 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1835 ADAPTER_STATUS_P_PLL_LOCK))) {
1836 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1843 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1844 * @sp: Pointer to device specific structure
1846 * New procedure to clear mac address reading problems on Alpha platforms
1850 void fix_mac_address(nic_t * sp)
1852 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Replay the fix_mac[] magic sequence into gpio_control until the
 * END_SIGN sentinel; the readq after each write presumably flushes the
 * posted MMIO write -- confirm against the full source. */
1856 while (fix_mac[i] != END_SIGN) {
1857 writeq(fix_mac[i++], &bar0->gpio_control);
1859 val64 = readq(&bar0->gpio_control);
1864 * start_nic - Turns the device on
1865 * @nic : device private variable.
1867 * This function actually turns the device on. Before this function is
1868 * called,all Registers are configured from their reset states
1869 * and shared memory is allocated but the NIC is still quiescent. On
1870 * calling this function, the device interrupts are cleared and the NIC is
1871 * literally switched on by writing into the adapter control register.
1873 * SUCCESS on success and -1 on failure.
1876 static int start_nic(struct s2io_nic *nic)
/* NOTE(review): interior lines (declarations of i/subid/interruptible,
 * #else/#endif lines, some returns) are elided in this listing;
 * comments only were added, code text is untouched. */
1878 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1879 struct net_device *dev = nic->dev;
1880 register u64 val64 = 0;
1883 mac_info_t *mac_control;
1884 struct config_param *config;
1886 mac_control = &nic->mac_control;
1887 config = &nic->config;
1889 /* PRC Initialization and configuration */
1890 for (i = 0; i < config->rx_ring_num; i++) {
/* Point each Rx ring's PRC at the DMA address of its first RxD block. */
1891 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1892 &bar0->prc_rxd0_n[i]);
1894 val64 = readq(&bar0->prc_ctrl_n[i]);
1895 if (nic->config.bimodal)
1896 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
1897 #ifndef CONFIG_2BUFF_MODE
1898 val64 |= PRC_CTRL_RC_ENABLED;
1900 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1902 writeq(val64, &bar0->prc_ctrl_n[i]);
1905 #ifdef CONFIG_2BUFF_MODE
1906 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1907 val64 = readq(&bar0->rx_pa_cfg);
1908 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1909 writeq(val64, &bar0->rx_pa_cfg);
1913 * Enabling MC-RLDRAM. After enabling the device, we timeout
1914 * for around 100ms, which is approximately the time required
1915 * for the device to be ready for operation.
1917 val64 = readq(&bar0->mc_rldram_mrs);
1918 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1919 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1920 val64 = readq(&bar0->mc_rldram_mrs); /* read back after the special write */
1922 msleep(100); /* Delay by around 100 ms. */
1924 /* Enabling ECC Protection. */
1925 val64 = readq(&bar0->adapter_control);
1926 val64 &= ~ADAPTER_ECC_EN; /* NOTE(review): clearing ADAPTER_ECC_EN despite the comment above -- verify bit polarity in full source */
1927 writeq(val64, &bar0->adapter_control);
1930 * Clearing any possible Link state change interrupts that
1931 * could have popped up just before Enabling the card.
1933 val64 = readq(&bar0->mac_rmac_err_reg);
1935 writeq(val64, &bar0->mac_rmac_err_reg); /* write-1-to-clear latched bits */
1938 * Verify if the device is ready to be enabled, if so enable
1941 val64 = readq(&bar0->adapter_status);
1942 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1943 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1944 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1945 (unsigned long long) val64);
1949 /* Enable select interrupts */
1950 if (nic->intr_type != INTA)
1951 en_dis_able_nic_intrs(nic, ENA_ALL_INTRS, DISABLE_INTRS);
1953 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
1954 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
1955 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
1956 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1960 * With some switches, link might be already up at this point.
1961 * Because of this weird behavior, when we enable laser,
1962 * we may not get link. We need to handle this. We cannot
1963 * figure out which switch is misbehaving. So we are forced to
1964 * make a global change.
1967 /* Enabling Laser. */
1968 val64 = readq(&bar0->adapter_control);
1969 val64 |= ADAPTER_EOI_TX_ON;
1970 writeq(val64, &bar0->adapter_control);
1972 /* SXE-002: Initialize link and activity LED */
1973 subid = nic->pdev->subsystem_device;
1974 if (((subid & 0xFF) >= 0x07) &&
1975 (nic->device_type == XFRAME_I_DEVICE)) {
1976 val64 = readq(&bar0->gpio_control);
1977 val64 |= 0x0000800000000000ULL;
1978 writeq(val64, &bar0->gpio_control);
1979 val64 = 0x0411040400000000ULL;
1980 writeq(val64, (void __iomem *)bar0 + 0x2700); /* LED register outside the named bar0 struct layout */
1984 * Don't see link state interrupts on certain switches, so
1985 * directly scheduling a link state task from here.
1987 schedule_work(&nic->set_link_task);
1993 * free_tx_buffers - Free all queued Tx buffers
1994 * @nic : device private variable.
1996 * Free all queued Tx buffers.
1997 * Return Value: void
2000 static void free_tx_buffers(struct s2io_nic *nic)
/* Walk every Tx FIFO, unmap and free any skb still attached to a TxD,
 * then reset the FIFO get/put offsets.
 * NOTE(review): many lines (skb NULL check, dev_kfree_skb, the DBG_PRINT
 * format's leading arguments, loop-variable declarations) are elided in
 * this listing; comments only were added, code text is untouched. */
2002 struct net_device *dev = nic->dev;
2003 struct sk_buff *skb;
2006 mac_info_t *mac_control;
2007 struct config_param *config;
2008 int cnt = 0, frg_cnt;
2010 mac_control = &nic->mac_control;
2011 config = &nic->config;
2013 for (i = 0; i < config->tx_fifo_num; i++) {
2014 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2015 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
2018 (struct sk_buff *) ((unsigned long) txdp->
2021 memset(txdp, 0, sizeof(TxD_t) *
2025 frg_cnt = skb_shinfo(skb)->nr_frags;
/* Unmap the linear part of the skb first ... */
2026 pci_unmap_single(nic->pdev, (dma_addr_t)
2027 txdp->Buffer_Pointer,
2028 skb->len - skb->data_len,
/* ... then each page fragment.
 * NOTE(review): this inner loop reuses the outer descriptor index `j`,
 * clobbering it -- looks like a latent bug; confirm against full source. */
2034 for (j = 0; j < frg_cnt; j++, txdp++) {
2036 &skb_shinfo(skb)->frags[j];
2037 pci_unmap_page(nic->pdev,
2047 memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
2051 "%s:forcibly freeing %d skbs on FIFO%d\n",
2053 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2054 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2059 * stop_nic - To stop the nic
2060 * @nic : device private variable.
2062 * This function does exactly the opposite of what the start_nic()
2063 * function does. This function is called to stop the device.
2068 static void stop_nic(struct s2io_nic *nic)
2070 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2071 register u64 val64 = 0;
2072 u16 interruptible, i;
2073 mac_info_t *mac_control;
2074 struct config_param *config;
2076 mac_control = &nic->mac_control;
2077 config = &nic->config;
2079 /* Disable all interrupts */
/* Same interrupt set that start_nic() enabled, now masked off. */
2080 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2081 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2082 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2083 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
/* Stop every receive ring by clearing the PRC enable bit. */
2086 for (i = 0; i < config->rx_ring_num; i++) {
2087 val64 = readq(&bar0->prc_ctrl_n[i]);
2088 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
2089 writeq(val64, &bar0->prc_ctrl_n[i]);
2094 * fill_rx_buffers - Allocates the Rx side skbs
2095 * @nic: device private variable
2096 * @ring_no: ring number
2098 * The function allocates Rx side skbs and puts the physical
2099 * address of these buffers into the RxD buffer pointers, so that the NIC
2100 * can DMA the received frame into these locations.
2101 * The NIC supports 3 receive modes, viz
2103 * 2. three buffer and
2104 * 3. Five buffer modes.
2105 * Each mode defines how many fragments the received frame will be split
2106 * up into by the NIC. The frame is split into L3 header, L4 Header,
2107 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2108 * is split into 3 fragments. As of now only single buffer mode is
2111 * SUCCESS on success or an appropriate -ve value on failure.
2114 int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
/* NOTE(review): interior lines are elided in this listing (declarations
 * of rxdp/alloc_tab/alloc_cnt/ba/tmp, #else/#endif lines, break/continue
 * statements, wmb() calls); comments only were added, code untouched. */
2116 struct net_device *dev = nic->dev;
2117 struct sk_buff *skb;
2119 int off, off1, size, block_no, block_no1;
2120 int offset, offset1;
2123 mac_info_t *mac_control;
2124 struct config_param *config;
2125 #ifdef CONFIG_2BUFF_MODE
2130 dma_addr_t rxdpphys;
2132 #ifndef CONFIG_S2IO_NAPI
2133 unsigned long flags;
2135 RxD_t *first_rxdp = NULL;
2137 mac_control = &nic->mac_control;
2138 config = &nic->config;
/* Number of descriptors to replenish = ring capacity minus buffers
 * currently owned by the host. */
2139 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2140 atomic_read(&nic->rx_bufs_left[ring_no]);
2141 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2142 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2144 while (alloc_tab < alloc_cnt) {
2145 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2147 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
2149 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2150 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2151 #ifndef CONFIG_2BUFF_MODE
2152 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
2153 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
2155 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
2156 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
2159 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
2160 block_virt_addr + off;
/* Put pointer caught up with get pointer while the RxD is still in
 * use: the ring is effectively full, stop replenishing. */
2161 if ((offset == offset1) && (rxdp->Host_Control)) {
2162 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
2163 DBG_PRINT(INTR_DBG, " info equated\n");
2166 #ifndef CONFIG_2BUFF_MODE
/* 1-buffer mode: the last RxD of a block is a link descriptor whose
 * Control_2 holds the virtual address of the next block. */
2167 if (rxdp->Control_1 == END_OF_BLOCK) {
2168 mac_control->rings[ring_no].rx_curr_put_info.
2170 mac_control->rings[ring_no].rx_curr_put_info.
2171 block_index %= mac_control->rings[ring_no].block_count;
2172 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2175 off %= (MAX_RXDS_PER_BLOCK + 1);
2176 mac_control->rings[ring_no].rx_curr_put_info.offset =
2178 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
2179 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2182 #ifndef CONFIG_S2IO_NAPI
/* put_pos is shared with the Rx interrupt path, hence put_lock. */
2183 spin_lock_irqsave(&nic->put_lock, flags);
2184 mac_control->rings[ring_no].put_pos =
2185 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
2186 spin_unlock_irqrestore(&nic->put_lock, flags);
/* 2-buffer mode: end-of-block is flagged in Host_Control instead. */
2189 if (rxdp->Host_Control == END_OF_BLOCK) {
2190 mac_control->rings[ring_no].rx_curr_put_info.
2192 mac_control->rings[ring_no].rx_curr_put_info.block_index
2193 %= mac_control->rings[ring_no].block_count;
2194 block_no = mac_control->rings[ring_no].rx_curr_put_info
2197 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
2198 dev->name, block_no,
2199 (unsigned long long) rxdp->Control_1);
2200 mac_control->rings[ring_no].rx_curr_put_info.offset =
2202 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
2205 #ifndef CONFIG_S2IO_NAPI
2206 spin_lock_irqsave(&nic->put_lock, flags);
2207 mac_control->rings[ring_no].put_pos = (block_no *
2208 (MAX_RXDS_PER_BLOCK + 1)) + off;
2209 spin_unlock_irqrestore(&nic->put_lock, flags);
2213 #ifndef CONFIG_2BUFF_MODE
/* Descriptor still owned by the adapter: nothing to do here. */
2214 if (rxdp->Control_1 & RXD_OWN_XENA)
2216 if (rxdp->Control_2 & BIT(0))
2219 mac_control->rings[ring_no].rx_curr_put_info.
2223 #ifdef CONFIG_2BUFF_MODE
2225 * RxDs Spanning cache lines will be replenished only
2226 * if the succeeding RxD is also owned by Host. It
2227 * will always be the ((8*i)+3) and ((8*i)+6)
2228 * descriptors for the 48 byte descriptor. The offending
2229 * descriptor is of course the 3rd descriptor.
2231 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
2232 block_dma_addr + (off * sizeof(RxD_t));
2233 if (((u64) (rxdpphys)) % 128 > 80) {
2234 rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
2235 block_virt_addr + (off + 1);
2236 if (rxdpnext->Host_Control == END_OF_BLOCK) {
2237 nextblk = (block_no + 1) %
2238 (mac_control->rings[ring_no].block_count);
2239 rxdpnext = mac_control->rings[ring_no].rx_blocks
2240 [nextblk].block_virt_addr;
2242 if (rxdpnext->Control_2 & BIT(0))
2247 #ifndef CONFIG_2BUFF_MODE
2248 skb = dev_alloc_skb(size + NET_IP_ALIGN);
2250 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
/* Allocation failed: hand the first replenished RxD back to the
 * adapter before bailing out so earlier work is not lost. */
2253 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2254 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2257 first_rxdp->Control_1 |= RXD_OWN_XENA;
2261 #ifndef CONFIG_2BUFF_MODE
2262 skb_reserve(skb, NET_IP_ALIGN);
2263 memset(rxdp, 0, sizeof(RxD_t));
2264 rxdp->Buffer0_ptr = pci_map_single
2265 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
2266 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
2267 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
2268 rxdp->Host_Control = (unsigned long) (skb);
/* Ownership is handed over in batches of 2^rxsync_frequency; the
 * batch's first descriptor (first_rxdp) is flipped last, below. */
2269 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2270 rxdp->Control_1 |= RXD_OWN_XENA;
2272 off %= (MAX_RXDS_PER_BLOCK + 1);
2273 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2275 ba = &mac_control->rings[ring_no].ba[block_no][off];
2276 skb_reserve(skb, BUF0_LEN);
2277 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
2279 skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
2281 memset(rxdp, 0, sizeof(RxD_t));
2282 rxdp->Buffer2_ptr = pci_map_single
2283 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
2284 PCI_DMA_FROMDEVICE);
2286 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2287 PCI_DMA_FROMDEVICE);
2289 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2290 PCI_DMA_FROMDEVICE);
2292 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
2293 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
2294 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
2295 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
2296 rxdp->Host_Control = (u64) ((unsigned long) (skb));
2297 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2298 rxdp->Control_1 |= RXD_OWN_XENA;
2300 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2302 rxdp->Control_2 |= SET_RXD_MARKER;
2304 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2307 first_rxdp->Control_1 |= RXD_OWN_XENA;
2311 atomic_inc(&nic->rx_bufs_left[ring_no]);
2316 /* Transfer ownership of first descriptor to adapter just before
2317 * exiting. Before that, use memory barrier so that ownership
2318 * and other fields are seen by adapter correctly.
2322 first_rxdp->Control_1 |= RXD_OWN_XENA;
2329 * free_rx_buffers - Frees all Rx buffers
2330 * @sp: device private variable.
2332 * This function will free all Rx buffers allocated by host.
2337 static void free_rx_buffers(struct s2io_nic *sp)
/* Unmap and free every host-owned Rx skb on every ring, zero all RxDs
 * and reset the ring get/put bookkeeping.
 * NOTE(review): interior lines (RxD_t declarations, blk advancement at
 * end-of-block, dev_kfree_skb calls, unmap size arguments, #else/#endif)
 * are elided in this listing; comments only were added, code untouched. */
2339 struct net_device *dev = sp->dev;
2340 int i, j, blk = 0, off, buf_cnt = 0;
2342 struct sk_buff *skb;
2343 mac_info_t *mac_control;
2344 struct config_param *config;
2345 #ifdef CONFIG_2BUFF_MODE
2349 mac_control = &sp->mac_control;
2350 config = &sp->config;
2352 for (i = 0; i < config->rx_ring_num; i++) {
2353 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
2354 off = j % (MAX_RXDS_PER_BLOCK + 1);
2355 rxdp = mac_control->rings[i].rx_blocks[blk].
2356 block_virt_addr + off;
2358 #ifndef CONFIG_2BUFF_MODE
/* 1-buffer mode: the trailing link descriptor marks end-of-block. */
2359 if (rxdp->Control_1 == END_OF_BLOCK) {
2361 (RxD_t *) ((unsigned long) rxdp->
2367 if (rxdp->Host_Control == END_OF_BLOCK) {
/* Adapter-owned descriptors carry no host skb: just clear them. */
2373 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2374 memset(rxdp, 0, sizeof(RxD_t));
2379 (struct sk_buff *) ((unsigned long) rxdp->
2382 #ifndef CONFIG_2BUFF_MODE
2383 pci_unmap_single(sp->pdev, (dma_addr_t)
2386 HEADER_ETHERNET_II_802_3_SIZE
2387 + HEADER_802_2_SIZE +
2389 PCI_DMA_FROMDEVICE);
/* 2-buffer mode: three mappings per RxD (ba_0, ba_1, payload). */
2391 ba = &mac_control->rings[i].ba[blk][off];
2392 pci_unmap_single(sp->pdev, (dma_addr_t)
2395 PCI_DMA_FROMDEVICE);
2396 pci_unmap_single(sp->pdev, (dma_addr_t)
2399 PCI_DMA_FROMDEVICE);
2400 pci_unmap_single(sp->pdev, (dma_addr_t)
2402 dev->mtu + BUF0_LEN + 4,
2403 PCI_DMA_FROMDEVICE);
2406 atomic_dec(&sp->rx_bufs_left[i]);
2409 memset(rxdp, 0, sizeof(RxD_t));
2411 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2412 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2413 mac_control->rings[i].rx_curr_put_info.offset = 0;
2414 mac_control->rings[i].rx_curr_get_info.offset = 0;
2415 atomic_set(&sp->rx_bufs_left[i], 0);
2416 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2417 dev->name, buf_cnt, i);
2422 * s2io_poll - Rx interrupt handler for NAPI support
2423 * @dev : pointer to the device structure.
2424 * @budget : The number of packets that were budgeted to be processed
2425 * during one pass through the 'Poll" function.
2427 * Comes into picture only if NAPI support has been incorporated. It does
2428 * the same thing that rx_intr_handler does, but not in a interrupt context
2429 * also It will process only a given number of packets.
2431 * 0 on success and 1 if there are No Rx packets to be processed.
2434 #if defined(CONFIG_S2IO_NAPI)
/*
 * s2io_poll() - NAPI poll routine: clears pending Rx interrupt bits,
 * processes up to min(*budget, dev->quota) packets across all rings,
 * replenishes Rx buffers, and re-enables Rx interrupts once done.
 * NOTE(review): listing is elided; the early-exit path vs. the
 * quota-exhausted path is only partially visible below.
 */
2435 static int s2io_poll(struct net_device *dev, int *budget)
2437 nic_t *nic = dev->priv;
2438 int pkt_cnt = 0, org_pkts_to_process;
2439 mac_info_t *mac_control;
2440 struct config_param *config;
2441 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2445 atomic_inc(&nic->isr_cnt);
2446 mac_control = &nic->mac_control;
2447 config = &nic->config;
/* Clamp the work we will do to the smaller of budget and quota. */
2449 nic->pkts_to_process = *budget;
2450 if (nic->pkts_to_process > dev->quota)
2451 nic->pkts_to_process = dev->quota;
2452 org_pkts_to_process = nic->pkts_to_process;
/* Read-then-writeback acknowledges the pending Rx traffic interrupt. */
2454 val64 = readq(&bar0->rx_traffic_int);
2455 writeq(val64, &bar0->rx_traffic_int);
2457 for (i = 0; i < config->rx_ring_num; i++) {
2458 rx_intr_handler(&mac_control->rings[i]);
2459 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2460 if (!nic->pkts_to_process) {
2461 /* Quota for the current iteration has been met */
2468 dev->quota -= pkt_cnt;
2470 netif_rx_complete(dev);
/* All work done: refill Rx rings before re-enabling interrupts. */
2472 for (i = 0; i < config->rx_ring_num; i++) {
2473 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2474 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2475 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2479 /* Re enable the Rx interrupts. */
2480 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2481 atomic_dec(&nic->isr_cnt);
/* Quota exhausted: refill what we can and stay on the poll list. */
2485 dev->quota -= pkt_cnt;
2488 for (i = 0; i < config->rx_ring_num; i++) {
2489 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2490 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2491 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2495 atomic_dec(&nic->isr_cnt);
2501 * rx_intr_handler - Rx interrupt handler
2502 * @nic: device private variable.
2504 * If the interrupt is because of a received frame or if the
2505 * receive ring contains fresh as yet un-processed frames,this function is
2506 * called. It picks out the RxD at which place the last Rx processing had
2507 * stopped and sends the skb to the OSM's Rx handler and then increments
/*
 * rx_intr_handler() - consume completed RxDs between the get and put
 * pointers: for each up-to-date descriptor, unmap its DMA buffer(s),
 * hand the skb to rx_osm_handler(), and advance the get pointer with
 * block wrap-around. Runs under nic->rx_lock.
 * NOTE(review): listing is elided; loop-exit and wrap arithmetic are
 * only partially visible below.
 */
2512 static void rx_intr_handler(ring_info_t *ring_data)
2514 nic_t *nic = ring_data->nic;
2515 struct net_device *dev = (struct net_device *) nic->dev;
2516 int get_block, get_offset, put_block, put_offset, ring_bufs;
2517 rx_curr_get_info_t get_info, put_info;
2519 struct sk_buff *skb;
2520 #ifndef CONFIG_S2IO_NAPI
2523 spin_lock(&nic->rx_lock);
/* Bail out early if the adapter is being reset. */
2524 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2525 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2526 __FUNCTION__, dev->name);
2527 spin_unlock(&nic->rx_lock);
2531 get_info = ring_data->rx_curr_get_info;
2532 get_block = get_info.block_index;
2533 put_info = ring_data->rx_curr_put_info;
2534 put_block = put_info.block_index;
2535 ring_bufs = get_info.ring_len+1;
2536 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2538 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2540 #ifndef CONFIG_S2IO_NAPI
/* Non-NAPI: put_pos is shared with fill_rx_buffers, so lock it. */
2541 spin_lock(&nic->put_lock);
2542 put_offset = ring_data->put_pos;
2543 spin_unlock(&nic->put_lock);
2545 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
/* Process descriptors until we catch up with the put pointer. */
2548 while (RXD_IS_UP2DT(rxdp) &&
2549 (((get_offset + 1) % ring_bufs) != put_offset)) {
2550 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2552 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2554 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2555 spin_unlock(&nic->rx_lock);
2558 #ifndef CONFIG_2BUFF_MODE
2559 pci_unmap_single(nic->pdev, (dma_addr_t)
2562 HEADER_ETHERNET_II_802_3_SIZE +
2565 PCI_DMA_FROMDEVICE);
/* Two-buffer mode: buffer-0, buffer-1 and data buffer mappings. */
2567 pci_unmap_single(nic->pdev, (dma_addr_t)
2569 BUF0_LEN, PCI_DMA_FROMDEVICE);
2570 pci_unmap_single(nic->pdev, (dma_addr_t)
2572 BUF1_LEN, PCI_DMA_FROMDEVICE);
2573 pci_unmap_single(nic->pdev, (dma_addr_t)
2575 dev->mtu + BUF0_LEN + 4,
2576 PCI_DMA_FROMDEVICE);
2578 rx_osm_handler(ring_data, rxdp);
2580 ring_data->rx_curr_get_info.offset =
2582 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
/* Crossed a block boundary: advance to the next block (with wrap). */
2584 if (get_info.offset &&
2585 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2586 get_info.offset = 0;
2587 ring_data->rx_curr_get_info.offset
2590 get_block %= ring_data->block_count;
2591 ring_data->rx_curr_get_info.block_index
2593 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2596 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2598 #ifdef CONFIG_S2IO_NAPI
/* NAPI accounting: stop once the poll quota is consumed. */
2599 nic->pkts_to_process -= 1;
2600 if (!nic->pkts_to_process)
2604 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2608 spin_unlock(&nic->rx_lock);
2612 * tx_intr_handler - Transmit interrupt handler
2613 * @nic : device private variable
2615 * If an interrupt was raised to indicate DMA complete of the
2616 * Tx packet, this function is called. It identifies the last TxD
2617 * whose buffer was freed and frees all skbs whose data have already
2618 * DMA'ed into the NICs internal memory.
/*
 * tx_intr_handler() - reclaim completed Tx descriptors: walk the TxD
 * lists between get and put, unmap the DMA buffers (head + page frags),
 * free the skbs, update stats, and wake the queue if it was stopped.
 * NOTE(review): listing is elided; error-path details and the unmap of
 * fragment pages are only partially visible below.
 */
2623 static void tx_intr_handler(fifo_info_t *fifo_data)
2625 nic_t *nic = fifo_data->nic;
2626 struct net_device *dev = (struct net_device *) nic->dev;
2627 tx_curr_get_info_t get_info, put_info;
2628 struct sk_buff *skb;
2632 get_info = fifo_data->tx_curr_get_info;
2633 put_info = fifo_data->tx_curr_put_info;
2634 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
/* Loop while the descriptor is host-owned, carries an skb, and we
 * have not caught up with the put pointer. */
2636 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2637 (get_info.offset != put_info.offset) &&
2638 (txdlp->Host_Control)) {
2639 /* Check for TxD errors */
2640 if (txdlp->Control_1 & TXD_T_CODE) {
2641 unsigned long long err;
2642 err = txdlp->Control_1 & TXD_T_CODE;
/* T_CODE 0xA: frame returned because the link went down. */
2643 if ((err >> 48) == 0xA) {
2644 DBG_PRINT(TX_DBG, "TxD returned due \
2645 to loss of link\n");
2648 DBG_PRINT(ERR_DBG, "***TxD error \
2653 skb = (struct sk_buff *) ((unsigned long)
2654 txdlp->Host_Control);
2656 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2658 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2662 frg_cnt = skb_shinfo(skb)->nr_frags;
2663 nic->tx_pkt_count++;
/* Unmap the linear part of the skb first. */
2665 pci_unmap_single(nic->pdev, (dma_addr_t)
2666 txdlp->Buffer_Pointer,
2667 skb->len - skb->data_len,
/* Then unmap each page fragment's TxD. */
2673 for (j = 0; j < frg_cnt; j++, txdlp++) {
2675 &skb_shinfo(skb)->frags[j];
2676 if (!txdlp->Buffer_Pointer)
2678 pci_unmap_page(nic->pdev,
2688 (sizeof(TxD_t) * fifo_data->max_txds));
2690 /* Updating the statistics block */
2691 nic->stats.tx_bytes += skb->len;
/* Freed from interrupt context, hence the _irq variant. */
2692 dev_kfree_skb_irq(skb);
2695 get_info.offset %= get_info.fifo_len + 1;
2696 txdlp = (TxD_t *) fifo_data->list_info
2697 [get_info.offset].list_virt_addr;
2698 fifo_data->tx_curr_get_info.offset =
2702 spin_lock(&nic->tx_lock);
2703 if (netif_queue_stopped(dev))
2704 netif_wake_queue(dev);
2705 spin_unlock(&nic->tx_lock);
2709  * alarm_intr_handler - Alarm Interrupt handler
2710 * @nic: device private variable
2711 * Description: If the interrupt was neither because of Rx packet or Tx
2712 * complete, this function is called. If the interrupt was to indicate
2713 * a loss of link, the OSM link status handler is invoked for any other
2714 * alarm interrupt the block that raised the interrupt is displayed
2715 * and a H/W reset is issued.
/*
 * alarm_intr_handler() - service alarm-class interrupts: link state
 * changes, memory-controller ECC errors, serious (SERR) conditions and
 * PCC_FB_ECC double-bit errors. Serious errors schedule a device reset;
 * the PCC_FB_ECC workaround recycles the adapter-enable bit.
 * NOTE(review): listing is elided between some of the lines below.
 */
2720 static void alarm_intr_handler(struct s2io_nic *nic)
2722 struct net_device *dev = (struct net_device *) nic->dev;
2723 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2724 register u64 val64 = 0, err_reg = 0;
2726 /* Handling link status change error Intr */
2727 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
/* Read-then-writeback clears the latched RMAC error bits. */
2728 err_reg = readq(&bar0->mac_rmac_err_reg);
2729 writeq(err_reg, &bar0->mac_rmac_err_reg);
2730 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2731 schedule_work(&nic->set_link_task);
2735 /* Handling Ecc errors */
2736 val64 = readq(&bar0->mc_err_reg);
2737 writeq(val64, &bar0->mc_err_reg);
2738 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2739 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
2740 nic->mac_control.stats_info->sw_stat.
2742 DBG_PRINT(INIT_DBG, "%s: Device indicates ",
2744 DBG_PRINT(INIT_DBG, "double ECC error!!\n");
2745 if (nic->device_type != XFRAME_II_DEVICE) {
2746 /* Reset XframeI only if critical error */
2747 if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
2748 MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
2749 netif_stop_queue(dev);
2750 schedule_work(&nic->rst_timer_task);
2754 nic->mac_control.stats_info->sw_stat.
2759 /* In case of a serious error, the device will be Reset. */
2760 val64 = readq(&bar0->serr_source);
2761 if (val64 & SERR_SOURCE_ANY) {
2762 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2763 DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
2764 (unsigned long long)val64);
2765 netif_stop_queue(dev);
2766 schedule_work(&nic->rst_timer_task);
2770 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2771 * Error occurs, the adapter will be recycled by disabling the
2772 * adapter enable bit and enabling it again after the device
2773 * becomes Quiescent.
2775 val64 = readq(&bar0->pcc_err_reg);
2776 writeq(val64, &bar0->pcc_err_reg);
2777 if (val64 & PCC_FB_ECC_DB_ERR) {
/* Drop ADAPTER_CNTL_EN; set_link_task re-enables it later. */
2778 u64 ac = readq(&bar0->adapter_control);
2779 ac &= ~(ADAPTER_CNTL_EN);
2780 writeq(ac, &bar0->adapter_control);
2781 ac = readq(&bar0->adapter_control);
2782 schedule_work(&nic->set_link_task);
2785 /* Other type of interrupts are not being handled now, TODO */
2789 * wait_for_cmd_complete - waits for a command to complete.
2790 * @sp : private member of the device structure, which is a pointer to the
2791 * s2io_nic structure.
2792 * Description: Function that waits for a command to Write into RMAC
2793 * ADDR DATA registers to be completed and returns either success or
2794 * error depending on whether the command was complete or not.
2796 * SUCCESS on success and FAILURE on failure.
/*
 * wait_for_cmd_complete() - poll rmac_addr_cmd_mem until the STROBE
 * "executing" bit clears; returns SUCCESS or FAILURE.
 * NOTE(review): the polling-loop tail (sleep/retry and return) is
 * elided from this listing.
 */
2799 int wait_for_cmd_complete(nic_t * sp)
2801 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2802 int ret = FAILURE, cnt = 0;
2806 val64 = readq(&bar0->rmac_addr_cmd_mem);
2807 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2820 * s2io_reset - Resets the card.
2821 * @sp : private member of the device structure.
2822 * Description: Function to Reset the card. This function then also
2823 * restores the previously saved PCI configuration space registers as
2824 * the card reset also resets the configuration space.
/*
 * s2io_reset() - issue a software reset via the sw_reset register,
 * then restore PCI config space, re-program the swapper control,
 * restore MSI-X table entries, clear stale PCI/ECC error status, and
 * apply the SXE-002 LED workaround for affected Xframe-I boards.
 * NOTE(review): listing is elided between some of the lines below.
 */
2829 void s2io_reset(nic_t * sp)
2831 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2835 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
2836 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
2838 val64 = SW_RESET_ALL;
2839 writeq(val64, &bar0->sw_reset);
2842 * At this stage, if the PCI write is indeed completed, the
2843 * card is reset and so is the PCI Config space of the device.
2844 * So a read cannot be issued at this stage on any of the
2845 * registers to ensure the write into "sw_reset" register
2847 * Question: Is there any system call that will explicitly force
2848 * all the write commands still pending on the bus to be pushed
2850 * As of now I'am just giving a 250ms delay and hoping that the
2851 * PCI write to sw_reset register is done by this time.
2855 /* Restore the PCI state saved during initialization. */
2856 pci_restore_state(sp->pdev);
2857 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
2863 /* Set swapper to enable I/O register access */
2864 s2io_set_swapper(sp);
2866 /* Restore the MSIX table entries from local variables */
2867 restore_xmsi_data(sp);
2869 /* Clear certain PCI/PCI-X fields after reset */
2870 if (sp->device_type == XFRAME_II_DEVICE) {
2871 /* Clear parity err detect bit */
2872 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
2874 /* Clearing PCIX Ecc status register */
2875 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
2877 /* Clearing PCI_STATUS error reflected here */
2878 writeq(BIT(62), &bar0->txpic_int_reg);
2881 /* Reset device statistics maintained by OS */
2882 memset(&sp->stats, 0, sizeof (struct net_device_stats));
2884 /* SXE-002: Configure link and activity LED to turn it off */
2885 subid = sp->pdev->subsystem_device;
2886 if (((subid & 0xFF) >= 0x07) &&
2887 (sp->device_type == XFRAME_I_DEVICE)) {
2888 val64 = readq(&bar0->gpio_control);
2889 val64 |= 0x0000800000000000ULL;
2890 writeq(val64, &bar0->gpio_control);
/* Magic write at BAR offset 0x2700 per the SXE-002 errata. */
2891 val64 = 0x0411040400000000ULL;
2892 writeq(val64, (void __iomem *)bar0 + 0x2700);
2896 * Clear spurious ECC interrupts that would have occured on
2897 * XFRAME II cards after reset.
2899 if (sp->device_type == XFRAME_II_DEVICE) {
2900 val64 = readq(&bar0->pcc_err_reg);
2901 writeq(val64, &bar0->pcc_err_reg);
2904 sp->device_enabled_once = FALSE;
2908  * s2io_set_swapper - to set the swapper control on the card
2909 * @sp : private member of the device structure,
2910 * pointer to the s2io_nic structure.
2911 * Description: Function to set the swapper control on the card
2912 * correctly depending on the 'endianness' of the system.
2914 * SUCCESS on success and FAILURE on failure.
/*
 * s2io_set_swapper() - program swapper_ctrl for the host's endianness.
 * Correctness is verified by reading pif_rd_swapper_fb (read path) and
 * by writing/reading back xmsi_address (write path), trying candidate
 * FE/SE bit combinations until the feedback pattern matches
 * 0x0123456789ABCDEF. Returns SUCCESS or FAILURE.
 * NOTE(review): listing is elided; the big-endian vs little-endian
 * #ifdef structure is only partially visible below.
 */
2917 int s2io_set_swapper(nic_t * sp)
2919 struct net_device *dev = sp->dev;
2920 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2921 u64 val64, valt, valr;
2924 * Set proper endian settings and verify the same by reading
2925 * the PIF Feed-back register.
2928 val64 = readq(&bar0->pif_rd_swapper_fb);
2929 if (val64 != 0x0123456789ABCDEFULL) {
/* Read-path candidates, tried in order until feedback matches. */
2931 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2932 0x8100008181000081ULL, /* FE=1, SE=0 */
2933 0x4200004242000042ULL, /* FE=0, SE=1 */
2934 0}; /* FE=0, SE=0 */
2937 writeq(value[i], &bar0->swapper_ctrl);
2938 val64 = readq(&bar0->pif_rd_swapper_fb);
2939 if (val64 == 0x0123456789ABCDEFULL)
2944 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2946 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2947 (unsigned long long) val64);
2952 valr = readq(&bar0->swapper_ctrl);
/* Verify the write path via xmsi_address round-trip. */
2955 valt = 0x0123456789ABCDEFULL;
2956 writeq(valt, &bar0->xmsi_address);
2957 val64 = readq(&bar0->xmsi_address);
2961 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2962 0x0081810000818100ULL, /* FE=1, SE=0 */
2963 0x0042420000424200ULL, /* FE=0, SE=1 */
2964 0}; /* FE=0, SE=0 */
2967 writeq((value[i] | valr), &bar0->swapper_ctrl);
2968 writeq(valt, &bar0->xmsi_address);
2969 val64 = readq(&bar0->xmsi_address);
2975 unsigned long long x = val64;
2976 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2977 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
/* Keep only the already-validated FE/SE bits before adding ours. */
2981 val64 = readq(&bar0->swapper_ctrl);
2982 val64 &= 0xFFFF000000000000ULL;
2986 * The device by default set to a big endian format, so a
2987 * big endian driver need not set anything.
2989 val64 |= (SWAPPER_CTRL_TXP_FE |
2990 SWAPPER_CTRL_TXP_SE |
2991 SWAPPER_CTRL_TXD_R_FE |
2992 SWAPPER_CTRL_TXD_W_FE |
2993 SWAPPER_CTRL_TXF_R_FE |
2994 SWAPPER_CTRL_RXD_R_FE |
2995 SWAPPER_CTRL_RXD_W_FE |
2996 SWAPPER_CTRL_RXF_W_FE |
2997 SWAPPER_CTRL_XMSI_FE |
2998 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2999 if (sp->intr_type == INTA)
3000 val64 |= SWAPPER_CTRL_XMSI_SE;
3001 writeq(val64, &bar0->swapper_ctrl);
3004 * Initially we enable all bits to make it accessible by the
3005 * driver, then we selectively enable only those bits that
3008 val64 |= (SWAPPER_CTRL_TXP_FE |
3009 SWAPPER_CTRL_TXP_SE |
3010 SWAPPER_CTRL_TXD_R_FE |
3011 SWAPPER_CTRL_TXD_R_SE |
3012 SWAPPER_CTRL_TXD_W_FE |
3013 SWAPPER_CTRL_TXD_W_SE |
3014 SWAPPER_CTRL_TXF_R_FE |
3015 SWAPPER_CTRL_RXD_R_FE |
3016 SWAPPER_CTRL_RXD_R_SE |
3017 SWAPPER_CTRL_RXD_W_FE |
3018 SWAPPER_CTRL_RXD_W_SE |
3019 SWAPPER_CTRL_RXF_W_FE |
3020 SWAPPER_CTRL_XMSI_FE |
3021 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3022 if (sp->intr_type == INTA)
3023 val64 |= SWAPPER_CTRL_XMSI_SE;
3024 writeq(val64, &bar0->swapper_ctrl);
3026 val64 = readq(&bar0->swapper_ctrl);
3029 * Verifying if endian settings are accurate by reading a
3030 * feedback register.
3032 val64 = readq(&bar0->pif_rd_swapper_fb);
3033 if (val64 != 0x0123456789ABCDEFULL) {
3034 /* Endian settings are incorrect, calls for another dekko. */
3035 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3037 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3038 (unsigned long long) val64);
/*
 * wait_for_msix_trans() - poll xmsi_access until the busy bit BIT(15)
 * clears for XMSI entry @i; logs a failure if it never does.
 * NOTE(review): the retry/sleep loop and return are elided from this
 * listing; returns 0 on success, non-zero on timeout (per callers).
 */
3045 int wait_for_msix_trans(nic_t *nic, int i)
3047 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3049 int ret = 0, cnt = 0;
3052 val64 = readq(&bar0->xmsi_access);
3053 if (!(val64 & BIT(15)))
3059 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
/*
 * restore_xmsi_data() - write the saved MSI-X address/data pairs back
 * into the device's XMSI table (e.g. after a reset). BIT(7) selects a
 * write operation, BIT(15) strobes it, vBIT(i,26,6) is the entry index.
 */
3066 void restore_xmsi_data(nic_t *nic)
3068 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3072 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3073 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3074 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3075 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3076 writeq(val64, &bar0->xmsi_access);
3077 if (wait_for_msix_trans(nic, i)) {
3078 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
/*
 * store_xmsi_data() - read each XMSI table entry from the device and
 * cache its address/data pair in nic->msix_info[] so that
 * restore_xmsi_data() can replay them after a reset. A strobe without
 * BIT(7) is a read operation.
 */
3084 void store_xmsi_data(nic_t *nic)
3086 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3087 u64 val64, addr, data;
3090 /* Store and display */
3091 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3092 val64 = (BIT(15) | vBIT(i, 26, 6));
3093 writeq(val64, &bar0->xmsi_access);
3094 if (wait_for_msix_trans(nic, i)) {
3095 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3098 addr = readq(&bar0->xmsi_address);
3099 data = readq(&bar0->xmsi_data);
3101 nic->msix_info[i].addr = addr;
3102 nic->msix_info[i].data = data;
/*
 * s2io_enable_msi() - enable plain MSI: call pci_enable_msi(), tweak
 * the message-data/control words at config offsets 0x4c/0x42 so the
 * device uses MSI-1 instead of MSI-0, and route every Tx FIFO and Rx
 * ring to vector 1 via the tx_mat/rx_mat steering registers.
 * NOTE(review): listing is elided between some of the lines below.
 */
3107 int s2io_enable_msi(nic_t *nic)
3109 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3110 u16 msi_ctrl, msg_val;
3111 struct config_param *config = &nic->config;
3112 struct net_device *dev = nic->dev;
3113 u64 val64, tx_mat, rx_mat;
3116 val64 = readq(&bar0->pic_control);
3118 writeq(val64, &bar0->pic_control);
3120 err = pci_enable_msi(nic->pdev);
3122 DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
3128 * Enable MSI and use MSI-1 in stead of the standard MSI-0
3129 * for interrupt handling.
3131 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3133 pci_write_config_word(nic->pdev, 0x4c, msg_val);
3134 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3136 pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
3138 pci_write_config_word(nic->pdev, 0x42, msi_ctrl);
3140 /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
3141 tx_mat = readq(&bar0->tx_mat0_n[0]);
3142 for (i=0; i<config->tx_fifo_num; i++) {
3143 tx_mat |= TX_MAT_SET(i, 1);
3145 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3147 rx_mat = readq(&bar0->rx_mat);
3148 for (i=0; i<config->rx_ring_num; i++) {
3149 rx_mat |= RX_MAT_SET(i, 1);
3151 writeq(rx_mat, &bar0->rx_mat);
3153 dev->irq = nic->pdev->irq;
/*
 * s2io_enable_msi_x() - allocate and populate the MSI-X entry tables,
 * steer each Tx FIFO and Rx ring to its own MSI-X vector via the
 * tx_mat/rx_mat registers, then enable MSI-X (plus MSI, as a herc-NIC
 * bug workaround). Returns 0 on success, non-zero on failure.
 * Vector 0 is reserved; data-path vectors start at msix_indx = 1.
 * NOTE(review): listing is elided between some of the lines below.
 */
3157 int s2io_enable_msi_x(nic_t *nic)
3159 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3161 u16 msi_control; /* Temp variable */
3162 int ret, i, j, msix_indx = 1;
3164 nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3166 if (nic->entries == NULL) {
3167 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3170 memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3173 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3175 if (nic->s2io_entries == NULL) {
3176 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3177 kfree(nic->entries);
3180 memset(nic->s2io_entries, 0,
3181 MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3183 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3184 nic->entries[i].entry = i;
3185 nic->s2io_entries[i].entry = i;
3186 nic->s2io_entries[i].arg = NULL;
3187 nic->s2io_entries[i].in_use = 0;
/* One vector per Tx FIFO. */
3190 tx_mat = readq(&bar0->tx_mat0_n[0]);
3191 for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3192 tx_mat |= TX_MAT_SET(i, msix_indx);
3193 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3194 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3195 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3197 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3199 if (!nic->config.bimodal) {
3200 rx_mat = readq(&bar0->rx_mat);
3201 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3202 rx_mat |= RX_MAT_SET(j, msix_indx);
3203 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3204 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3205 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3207 writeq(rx_mat, &bar0->rx_mat);
/* Bimodal: ring interrupts are steered through tx_mat0_n[7]. */
3209 tx_mat = readq(&bar0->tx_mat0_n[7]);
/*
 * BUGFIX: the original used TX_MAT_SET(i, msix_indx) here, but `i`
 * is stale (left at tx_fifo_num by the FIFO loop above), so every
 * iteration re-programmed the same — possibly out-of-range — entry.
 * Use the ring index `j`, matching the RX_MAT_SET(j, ...) path above.
 */
3210 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3211 tx_mat |= TX_MAT_SET(j, msix_indx);
3212 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3213 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3214 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3216 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3219 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
/* On failure release both tables so a later INTA fallback is clean. */
3221 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3222 kfree(nic->entries);
3223 kfree(nic->s2io_entries);
3224 nic->entries = NULL;
3225 nic->s2io_entries = NULL;
3230 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3231 * in the herc NIC. (Temp change, needs to be removed later)
3233 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3234 msi_control |= 0x1; /* Enable MSI */
3235 pci_write_config_word(nic->pdev, 0x42, msi_control);
3240 /* ********************************************************* *
3241 * Functions defined below concern the OS part of the driver *
3242 * ********************************************************* */
3245 * s2io_open - open entry point of the driver
3246 * @dev : pointer to the device structure.
3248 * This function is the open entry point of the driver. It mainly calls a
3249 * function to allocate Rx buffers and inserts them into the buffer
3250 * descriptors and then enables the Rx part of the NIC.
3252 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * s2io_open() - net_device open entry point: brings the hardware up
 * (s2io_card_up), saves the MSI-X table, registers the appropriate
 * ISR(s) for INTA / MSI / MSI-X, sets the MAC address and starts the
 * Tx queue. On any failure it unwinds via the labelled error paths.
 * Returns 0 on success or a negative errno.
 * NOTE(review): listing is elided between some of the lines below.
 */
3256 int s2io_open(struct net_device *dev)
3258 nic_t *sp = dev->priv;
3261 u16 msi_control; /* Temp variable */
3264 * Make sure you have link off by default every time
3265 * Nic is initialized
3267 netif_carrier_off(dev);
3268 sp->last_link_state = 0;
3270 /* Initialize H/W and enable interrupts */
3271 if (s2io_card_up(sp)) {
3272 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3275 goto hw_init_failed;
3278 /* Store the values of the MSIX table in the nic_t structure */
3279 store_xmsi_data(sp);
3281 /* After proper initialization of H/W, register ISR */
3282 if (sp->intr_type == MSI) {
3283 err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
3284 SA_SHIRQ, sp->name, dev);
3286 DBG_PRINT(ERR_DBG, "%s: MSI registration \
3287 failed\n", dev->name);
3288 goto isr_registration_failed;
/* MSI-X: one IRQ per in-use entry; entry 0 is skipped. */
3291 if (sp->intr_type == MSI_X) {
3292 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
3293 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
3294 sprintf(sp->desc1, "%s:MSI-X-%d-TX",
3296 err = request_irq(sp->entries[i].vector,
3297 s2io_msix_fifo_handle, 0, sp->desc1,
3298 sp->s2io_entries[i].arg);
3299 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc1,
3300 sp->msix_info[i].addr);
3302 sprintf(sp->desc2, "%s:MSI-X-%d-RX",
3304 err = request_irq(sp->entries[i].vector,
3305 s2io_msix_ring_handle, 0, sp->desc2,
3306 sp->s2io_entries[i].arg);
3307 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc2,
3308 sp->msix_info[i].addr);
3311 DBG_PRINT(ERR_DBG, "%s: MSI-X-%d registration \
3312 failed\n", dev->name, i);
3313 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
3314 goto isr_registration_failed;
3316 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
3319 if (sp->intr_type == INTA) {
3320 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
3323 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
3325 goto isr_registration_failed;
3329 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3330 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3332 goto setting_mac_address_failed;
3335 netif_start_queue(dev);
/* ---- error unwind paths ---- */
3338 setting_mac_address_failed:
3339 if (sp->intr_type != MSI_X)
3340 free_irq(sp->pdev->irq, dev);
3341 isr_registration_failed:
3342 del_timer_sync(&sp->alarm_timer);
3343 if (sp->intr_type == MSI_X) {
3344 if (sp->device_type == XFRAME_II_DEVICE) {
/* Free only the vectors that registered successfully. */
3345 for (i=1; (sp->s2io_entries[i].in_use ==
3346 MSIX_REGISTERED_SUCCESS); i++) {
3347 int vector = sp->entries[i].vector;
3348 void *arg = sp->s2io_entries[i].arg;
3350 free_irq(vector, arg);
3352 pci_disable_msix(sp->pdev);
3355 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3356 msi_control &= 0xFFFE; /* Disable MSI */
3357 pci_write_config_word(sp->pdev, 0x42, msi_control);
3360 else if (sp->intr_type == MSI)
3361 pci_disable_msi(sp->pdev);
3364 if (sp->intr_type == MSI_X) {
3367 if (sp->s2io_entries)
3368 kfree(sp->s2io_entries);
3374 * s2io_close -close entry point of the driver
3375 * @dev : device pointer.
3377 * This is the stop entry point of the driver. It needs to undo exactly
3378 * whatever was done by the open entry point,thus it's usually referred to
3379 * as the close function.Among other things this function mainly stops the
3380 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3382 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * s2io_close() - net_device stop entry point: undoes s2io_open().
 * Flushes scheduled work, stops the Tx queue, frees the registered
 * IRQ(s) and disables MSI/MSI-X where in use. Always returns 0.
 * NOTE(review): listing is elided between some of the lines below.
 */
3386 int s2io_close(struct net_device *dev)
3388 nic_t *sp = dev->priv;
3392 flush_scheduled_work();
3393 netif_stop_queue(dev);
3394 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3397 if (sp->intr_type == MSI_X) {
3398 if (sp->device_type == XFRAME_II_DEVICE) {
/* Free every successfully-registered MSI-X vector. */
3399 for (i=1; (sp->s2io_entries[i].in_use ==
3400 MSIX_REGISTERED_SUCCESS); i++) {
3401 int vector = sp->entries[i].vector;
3402 void *arg = sp->s2io_entries[i].arg;
3404 free_irq(vector, arg);
3406 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3407 msi_control &= 0xFFFE; /* Disable MSI */
3408 pci_write_config_word(sp->pdev, 0x42, msi_control);
3410 pci_disable_msix(sp->pdev);
3414 free_irq(sp->pdev->irq, dev);
3415 if (sp->intr_type == MSI)
3416 pci_disable_msi(sp->pdev);
3418 sp->device_close_flag = TRUE; /* Device is shut down. */
3423  * s2io_xmit - Tx entry point of the driver
3424 * @skb : the socket buffer containing the Tx data.
3425 * @dev : device pointer.
3427 * This function is the Tx entry point of the driver. S2IO NIC supports
3428 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
3429 * NOTE: when device cant queue the pkt,just the trans_start variable will
3432 * 0 on success & 1 on failure.
/*
 * s2io_xmit() - hard_start_xmit entry point. Picks a FIFO (by VLAN
 * priority when applicable), fills a TxD list for the skb's linear
 * part and page fragments, programs LSO/checksum/VLAN bits, and kicks
 * the FIFO by writing the list address to TxDL_Pointer/List_Control.
 * Stops the queue when no free TxDs remain. Runs under sp->tx_lock.
 * NOTE(review): listing is elided between some of the lines below.
 */
3435 int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3437 nic_t *sp = dev->priv;
3438 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3441 TxFIFO_element_t __iomem *tx_fifo;
3442 unsigned long flags;
3447 int vlan_priority = 0;
3448 mac_info_t *mac_control;
3449 struct config_param *config;
3451 mac_control = &sp->mac_control;
3452 config = &sp->config;
3454 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
3455 spin_lock_irqsave(&sp->tx_lock, flags);
3456 if (atomic_read(&sp->card_state) == CARD_DOWN) {
3457 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
3459 spin_unlock_irqrestore(&sp->tx_lock, flags);
3466 /* Get Fifo number to Transmit based on vlan priority */
3467 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3468 vlan_tag = vlan_tx_tag_get(skb);
/* 802.1p priority lives in the top 3 bits of the VLAN tag. */
3469 vlan_priority = vlan_tag >> 13;
3470 queue = config->fifo_mapping[vlan_priority];
3473 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3474 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3475 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
3478 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3479 /* Avoid "put" pointer going beyond "get" pointer */
3480 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
3481 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
3482 netif_stop_queue(dev);
3484 spin_unlock_irqrestore(&sp->tx_lock, flags);
3488 /* A buffer with no data will be dropped */
3490 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3492 spin_unlock_irqrestore(&sp->tx_lock, flags);
/* LSO: propagate the TSO MSS into the descriptor. */
3497 mss = skb_shinfo(skb)->tso_size;
3499 txdp->Control_1 |= TXD_TCP_LSO_EN;
3500 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
3504 frg_cnt = skb_shinfo(skb)->nr_frags;
3505 frg_len = skb->len - skb->data_len;
3507 txdp->Buffer_Pointer = pci_map_single
3508 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3509 txdp->Host_Control = (unsigned long) skb;
3510 if (skb->ip_summed == CHECKSUM_HW) {
3512 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3516 txdp->Control_2 |= config->tx_intr_type;
3518 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3519 txdp->Control_2 |= TXD_VLAN_ENABLE;
3520 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3523 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
3524 TXD_GATHER_CODE_FIRST);
3525 txdp->Control_1 |= TXD_LIST_OWN_XENA;
3527 /* For fragmented SKB. */
3528 for (i = 0; i < frg_cnt; i++) {
3529 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3530 /* A '0' length fragment will be ignored */
3534 txdp->Buffer_Pointer = (u64) pci_map_page
3535 (sp->pdev, frag->page, frag->page_offset,
3536 frag->size, PCI_DMA_TODEVICE);
3537 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
3539 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
/* Kick the NIC: write the TxD-list address then the control word. */
3541 tx_fifo = mac_control->tx_FIFO_start[queue];
3542 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
3543 writeq(val64, &tx_fifo->TxDL_Pointer);
3545 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3550 val64 |= TX_FIFO_SPECIAL_FUNC;
3552 writeq(val64, &tx_fifo->List_Control);
3557 put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3558 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
3560 /* Avoid "put" pointer going beyond "get" pointer */
3561 if (((put_off + 1) % queue_len) == get_off) {
3563 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
3565 netif_stop_queue(dev);
3568 dev->trans_start = jiffies;
3569 spin_unlock_irqrestore(&sp->tx_lock, flags);
/*
 * s2io_alarm_handle() - timer callback: run the alarm interrupt
 * handler and re-arm the timer to fire again in half a second.
 */
3575 s2io_alarm_handle(unsigned long data)
3577 nic_t *sp = (nic_t *)data;
3579 alarm_intr_handler(sp);
3580 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
/*
 * s2io_msi_handle() - ISR used when the device runs in plain MSI mode.
 * Services all Rx rings and Tx FIFOs, then replenishes Rx buffers
 * inline if a ring is at PANIC level, or defers to the tasklet when
 * merely LOW. isr_cnt brackets the handler for reset synchronization.
 * NOTE(review): listing is elided between some of the lines below.
 */
3584 s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
3586 struct net_device *dev = (struct net_device *) dev_id;
3587 nic_t *sp = dev->priv;
3590 mac_info_t *mac_control;
3591 struct config_param *config;
3593 atomic_inc(&sp->isr_cnt);
3594 mac_control = &sp->mac_control;
3595 config = &sp->config;
3596 DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
3598 /* If Intr is because of Rx Traffic */
3599 for (i = 0; i < config->rx_ring_num; i++)
3600 rx_intr_handler(&mac_control->rings[i]);
3602 /* If Intr is because of Tx Traffic */
3603 for (i = 0; i < config->tx_fifo_num; i++)
3604 tx_intr_handler(&mac_control->fifos[i]);
3607 * If the Rx buffer count is below the panic threshold then
3608 * reallocate the buffers from the interrupt handler itself,
3609 * else schedule a tasklet to reallocate the buffers.
3611 for (i = 0; i < config->rx_ring_num; i++) {
3612 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3613 int level = rx_buffer_level(sp, rxb_size, i);
/* TASKLET_IN_USE doubles as a lock on tasklet_status bit 0. */
3615 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3616 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
3617 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3618 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3619 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3621 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3622 clear_bit(0, (&sp->tasklet_status));
3623 atomic_dec(&sp->isr_cnt);
3626 clear_bit(0, (&sp->tasklet_status));
3627 } else if (level == LOW) {
3628 tasklet_schedule(&sp->task);
3632 atomic_dec(&sp->isr_cnt);
/*
 * s2io_msix_ring_handle() - per-ring MSI-X ISR: dev_id is the
 * ring_info_t for this vector. Processes the ring, then refills its
 * buffers inline at PANIC level or via the tasklet at LOW level.
 * NOTE(review): listing is elided between some of the lines below.
 */
3637 s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs)
3639 ring_info_t *ring = (ring_info_t *)dev_id;
3640 nic_t *sp = ring->nic;
3641 int rxb_size, level, rng_n;
3643 atomic_inc(&sp->isr_cnt);
3644 rx_intr_handler(ring);
3646 rng_n = ring->ring_no;
3647 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
3648 level = rx_buffer_level(sp, rxb_size, rng_n);
3650 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3652 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
3653 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3654 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
3655 DBG_PRINT(ERR_DBG, "Out of memory in %s",
3657 clear_bit(0, (&sp->tasklet_status));
3660 clear_bit(0, (&sp->tasklet_status));
3661 } else if (level == LOW) {
3662 tasklet_schedule(&sp->task);
3664 atomic_dec(&sp->isr_cnt);
/*
 * s2io_msix_fifo_handle - per-Tx-FIFO interrupt handler for MSI-X mode.
 * @irq: the irq number.
 * @dev_id: the fifo_info_t registered for this MSI-X vector.
 * @regs: registers pushed on the stack.
 * Completes transmitted descriptors on exactly one Tx FIFO.
 */
3670 s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs)
3672 	fifo_info_t *fifo = (fifo_info_t *)dev_id;
3673 	nic_t *sp = fifo->nic;
3675 	atomic_inc(&sp->isr_cnt);
3676 	tx_intr_handler(fifo);
3677 	atomic_dec(&sp->isr_cnt);
/*
 * s2io_txpic_intr_handle - handles TxPIC (GPIO/link-state) interrupts.
 * @sp: private member of the device structure (nic_t).
 * Reads the PIC interrupt status; on a GPIO interrupt it acknowledges the
 * link up/down cause bits, masks the corresponding GPIO interrupts, and
 * invokes s2io_set_link() when the hardware-reported state differs from the
 * driver's last recorded link state. Afterwards it re-programs the GPIO
 * interrupt mask so that only the transition opposite to the current link
 * state remains unmasked.
 */
3681 static void s2io_txpic_intr_handle(nic_t *sp)
3683 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
3686 	val64 = readq(&bar0->pic_int_status);
3687 	if (val64 & PIC_INT_GPIO) {
3688 		val64 = readq(&bar0->gpio_int_reg);
/* Both UP and DOWN latched at once: write-1-to-clear both cause bits. */
3689 		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
3690 		    (val64 & GPIO_INT_REG_LINK_UP)) {
3691 			val64 |= GPIO_INT_REG_LINK_DOWN;
3692 			val64 |= GPIO_INT_REG_LINK_UP;
3693 			writeq(val64, &bar0->gpio_int_reg);
/* Link state actually changed relative to our cached state: mask both
 * link interrupts while s2io_set_link() processes the transition. */
3697 		if (((sp->last_link_state == LINK_UP) &&
3698 		     (val64 & GPIO_INT_REG_LINK_DOWN)) ||
3699 		    ((sp->last_link_state == LINK_DOWN) &&
3700 		     (val64 & GPIO_INT_REG_LINK_UP))) {
3701 			val64 = readq(&bar0->gpio_int_mask);
3702 			val64 |= GPIO_INT_MASK_LINK_DOWN;
3703 			val64 |= GPIO_INT_MASK_LINK_UP;
3704 			writeq(val64, &bar0->gpio_int_mask);
3705 			s2io_set_link((unsigned long)sp);
3708 		if (sp->last_link_state == LINK_UP) {
3709 			/*enable down interrupt */
3710 			val64 = readq(&bar0->gpio_int_mask);
3711 			/* unmasks link down intr */
3712 			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
3713 			/* masks link up intr */
3714 			val64 |= GPIO_INT_MASK_LINK_UP;
3715 			writeq(val64, &bar0->gpio_int_mask);
3717 			/*enable UP Interrupt */
3718 			val64 = readq(&bar0->gpio_int_mask);
3719 			/* unmasks link up interrupt */
3720 			val64 &= ~GPIO_INT_MASK_LINK_UP;
3721 			/* masks link down interrupt */
3722 			val64 |= GPIO_INT_MASK_LINK_DOWN;
3723 			writeq(val64, &bar0->gpio_int_mask);
3729  *  s2io_isr - ISR handler of the device .
3730  *  @irq: the irq of the device.
3731  *  @dev_id: a void pointer to the dev structure of the NIC.
3732  *  @pt_regs: pointer to the registers pushed on the stack.
3733  *  Description:  This function is the ISR handler of the device. It
3734  *  identifies the reason for the interrupt and calls the relevant
3735  *  service routines. As a contingency measure, this ISR allocates the
3736  *  recv buffers, if their numbers are below the panic value which is
3737  *  presently set to 25% of the original number of rcv buffers allocated.
3739  *  IRQ_HANDLED: will be returned if IRQ was handled by this routine
3740  *  IRQ_NONE: will be returned if interrupt is not from our device
3742 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
3744 	struct net_device *dev = (struct net_device *) dev_id;
3745 	nic_t *sp = dev->priv;
3746 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
3748 	u64 reason = 0, val64;
3749 	mac_info_t *mac_control;
3750 	struct config_param *config;
3752 	atomic_inc(&sp->isr_cnt);
3753 	mac_control = &sp->mac_control;
3754 	config = &sp->config;
3757 	 * Identify the cause for interrupt and call the appropriate
3758 	 * interrupt handler. Causes for the interrupt could be;
3762 	 * 4. Error in any functional blocks of the NIC.
3764 	reason = readq(&bar0->general_int_status);
3767 		/* The interrupt was not raised by Xena. */
3768 		atomic_dec(&sp->isr_cnt);
/* NAPI build: disable further Rx interrupts and hand Rx work to the
 * poll routine via netif_rx_schedule. */
3772 #ifdef CONFIG_S2IO_NAPI
3773 	if (reason & GEN_INTR_RXTRAFFIC) {
3774 		if (netif_rx_schedule_prep(dev)) {
3775 			en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
3777 			__netif_rx_schedule(dev);
3781 	/* If Intr is because of Rx Traffic */
3782 	if (reason & GEN_INTR_RXTRAFFIC) {
3784 		 * rx_traffic_int reg is an R1 register, writing all 1's
3785 		 * will ensure that the actual interrupt causing bit get's
3786 		 * cleared and hence a read can be avoided.
3788 		val64 = 0xFFFFFFFFFFFFFFFFULL;
3789 		writeq(val64, &bar0->rx_traffic_int);
3790 		for (i = 0; i < config->rx_ring_num; i++) {
3791 			rx_intr_handler(&mac_control->rings[i]);
3796 	/* If Intr is because of Tx Traffic */
3797 	if (reason & GEN_INTR_TXTRAFFIC) {
3799 		 * tx_traffic_int reg is an R1 register, writing all 1's
3800 		 * will ensure that the actual interrupt causing bit get's
3801 		 * cleared and hence a read can be avoided.
3803 		val64 = 0xFFFFFFFFFFFFFFFFULL;
3804 		writeq(val64, &bar0->tx_traffic_int);
3806 		for (i = 0; i < config->tx_fifo_num; i++)
3807 			tx_intr_handler(&mac_control->fifos[i]);
3810 	if (reason & GEN_INTR_TXPIC)
3811 		s2io_txpic_intr_handle(sp);
3813 	 * If the Rx buffer count is below the panic threshold then
3814 	 * reallocate the buffers from the interrupt handler itself,
3815 	 * else schedule a tasklet to reallocate the buffers.
/* Non-NAPI build only: NAPI refills from the poll routine instead. */
3817 #ifndef CONFIG_S2IO_NAPI
3818 	for (i = 0; i < config->rx_ring_num; i++) {
3820 		int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3821 		int level = rx_buffer_level(sp, rxb_size, i);
3823 		if ((level == PANIC) && (!TASKLET_IN_USE)) {
3824 			DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
3825 			DBG_PRINT(INTR_DBG, "PANIC levels\n");
3826 			if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3827 				DBG_PRINT(ERR_DBG, "%s:Out of memory",
3829 				DBG_PRINT(ERR_DBG, " in ISR!!\n");
3830 				clear_bit(0, (&sp->tasklet_status));
3831 				atomic_dec(&sp->isr_cnt);
3834 			clear_bit(0, (&sp->tasklet_status));
3835 		} else if (level == LOW) {
3836 			tasklet_schedule(&sp->task);
3841 	atomic_dec(&sp->isr_cnt);
/*
 * s2io_updt_stats - trigger an immediate hardware statistics DMA update.
 * @sp: private member of the device structure (nic_t).
 * If the card is up, programs stat_cfg for a one-shot statistics update
 * and polls until the hardware clears the trigger bit, giving up after
 * the bounded retry loop (the update takes approx 30us on a 133 MHz bus).
 */
3848 static void s2io_updt_stats(nic_t *sp)
3850 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
3854 	if (atomic_read(&sp->card_state) == CARD_UP) {
3855 		/* Apprx 30us on a 133 MHz bus */
3856 		val64 = SET_UPDT_CLICKS(10) |
3857 			STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3858 		writeq(val64, &bar0->stat_cfg);
/* Poll until hardware clears BIT(0), i.e. the one-shot update completed. */
3861 			val64 = readq(&bar0->stat_cfg);
3862 			if (!(val64 & BIT(0)))
3866 				break; /* Updt failed */
3872  *  s2io_get_stats - Updates the device statistics structure.
3873  *  @dev : pointer to the device structure.
3875  *  This function updates the device statistics structure in the s2io_nic
3876  *  structure and returns a pointer to the same.
3878  *  pointer to the updated net_device_stats structure.
3881 struct net_device_stats *s2io_get_stats(struct net_device *dev)
3883 	nic_t *sp = dev->priv;
3884 	mac_info_t *mac_control;
3885 	struct config_param *config;
3888 	mac_control = &sp->mac_control;
3889 	config = &sp->config;
3891 	/* Configure Stats for immediate updt */
3892 	s2io_updt_stats(sp);
/* Copy the DMA'd hardware counters (little-endian) into the kernel's
 * net_device_stats fields. */
3894 	sp->stats.tx_packets =
3895 		le32_to_cpu(mac_control->stats_info->tmac_frms);
3896 	sp->stats.tx_errors =
3897 		le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3898 	sp->stats.rx_errors =
3899 		le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3900 	sp->stats.multicast =
3901 		le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
3902 	sp->stats.rx_length_errors =
3903 		le32_to_cpu(mac_control->stats_info->rmac_long_frms);
3905 	return (&sp->stats);
3909  *  s2io_set_multicast - entry point for multicast address enable/disable.
3910  *  @dev : pointer to the device structure
3912  *  This function is a driver entry point which gets called by the kernel
3913  *  whenever multicast addresses must be enabled/disabled. This also gets
3914  *  called to set/reset promiscuous mode. Depending on the device flag, we
3915  *  determine, if multicast address must be enabled or if promiscuous mode
3916  *  is to be disabled etc.
3921 static void s2io_set_multicast(struct net_device *dev)
3924 	struct dev_mc_list *mclist;
3925 	nic_t *sp = dev->priv;
3926 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
3927 	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3929 	u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
/* IFF_ALLMULTI newly requested: program the all-multicast filter entry. */
3932 	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3933 		/*  Enable all Multicast addresses */
3934 		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3935 		       &bar0->rmac_addr_data0_mem);
3936 		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3937 		       &bar0->rmac_addr_data1_mem);
3938 		val64 = RMAC_ADDR_CMD_MEM_WE |
3939 		    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3940 		    RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3941 		writeq(val64, &bar0->rmac_addr_cmd_mem);
3942 		/* Wait till command completes */
3943 		wait_for_cmd_complete(sp);
3946 		sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
/* IFF_ALLMULTI cleared: overwrite that filter entry with the disable address. */
3947 	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3948 		/*  Disable all Multicast addresses */
3949 		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3950 		       &bar0->rmac_addr_data0_mem);
3951 		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3952 		       &bar0->rmac_addr_data1_mem);
3953 		val64 = RMAC_ADDR_CMD_MEM_WE |
3954 		    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3955 		    RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3956 		writeq(val64, &bar0->rmac_addr_cmd_mem);
3957 		/* Wait till command completes */
3958 		wait_for_cmd_complete(sp);
3961 		sp->all_multi_pos = 0;
/* Promiscuous mode transitions: toggle RMAC_PROM_ENABLE in mac_cfg. The
 * register is key-protected, so each 32-bit half-write is preceded by
 * writing the RMAC_CFG_KEY unlock value. */
3964 	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3965 		/*  Put the NIC into promiscuous mode */
3966 		add = &bar0->mac_cfg;
3967 		val64 = readq(&bar0->mac_cfg);
3968 		val64 |= MAC_CFG_RMAC_PROM_ENABLE;
3970 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3971 		writel((u32) val64, add);
3972 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3973 		writel((u32) (val64 >> 32), (add + 4));
3975 		val64 = readq(&bar0->mac_cfg);
3976 		sp->promisc_flg = 1;
3977 		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
3979 	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3980 		/*  Remove the NIC from promiscuous mode */
3981 		add = &bar0->mac_cfg;
3982 		val64 = readq(&bar0->mac_cfg);
3983 		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3985 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3986 		writel((u32) val64, add);
3987 		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3988 		writel((u32) (val64 >> 32), (add + 4));
3990 		val64 = readq(&bar0->mac_cfg);
3991 		sp->promisc_flg = 0;
3992 		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
3996 	/*  Update individual M_CAST address list */
3997 	if ((!sp->m_cast_flg) && dev->mc_count) {
/* Too many multicast addresses for the hardware filter table. */
3999 		    (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4000 			DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4002 			DBG_PRINT(ERR_DBG, "can be added, please enable ");
4003 			DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4007 		prev_cnt = sp->mc_addr_count;
4008 		sp->mc_addr_count = dev->mc_count;
4010 		/* Clear out the previous list of Mc in the H/W. */
4011 		for (i = 0; i < prev_cnt; i++) {
4012 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4013 			       &bar0->rmac_addr_data0_mem);
4014 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4015 			       &bar0->rmac_addr_data1_mem);
4016 			val64 = RMAC_ADDR_CMD_MEM_WE |
4017 			    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4018 			    RMAC_ADDR_CMD_MEM_OFFSET
4019 			    (MAC_MC_ADDR_START_OFFSET + i);
4020 			writeq(val64, &bar0->rmac_addr_cmd_mem);
4022 			/* Wait for command completes */
4023 			if (wait_for_cmd_complete(sp)) {
4024 				DBG_PRINT(ERR_DBG, "%s: Adding ",
4026 				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4031 		/* Create the new Rx filter list and update the same in H/W. */
4032 		for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4033 		     i++, mclist = mclist->next) {
4034 			memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
/* Pack the 6 address bytes into a single u64 for the data0 register. */
4036 			for (j = 0; j < ETH_ALEN; j++) {
4037 				mac_addr |= mclist->dmi_addr[j];
4041 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4042 			       &bar0->rmac_addr_data0_mem);
4043 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4044 			       &bar0->rmac_addr_data1_mem);
4045 			val64 = RMAC_ADDR_CMD_MEM_WE |
4046 			    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4047 			    RMAC_ADDR_CMD_MEM_OFFSET
4048 			    (i + MAC_MC_ADDR_START_OFFSET);
4049 			writeq(val64, &bar0->rmac_addr_cmd_mem);
4051 			/* Wait for command completes */
4052 			if (wait_for_cmd_complete(sp)) {
4053 				DBG_PRINT(ERR_DBG, "%s: Adding ",
4055 				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4063  *  s2io_set_mac_addr - Programs the Xframe mac address
4064  *  @dev : pointer to the device structure.
4065  *  @addr: a uchar pointer to the new mac address which is to be set.
4066  *  Description : This procedure will program the Xframe to receive
4067  *  frames with new Mac Address
4068  *  Return value: SUCCESS on success and an appropriate (-)ve integer
4069  *  as defined in errno.h file on failure.
4072 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4074 	nic_t *sp = dev->priv;
4075 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
4076 	register u64 val64, mac_addr = 0;
4080 	 * Set the new MAC address as the new unicast filter and reflect this
4081 	 * change on the device address registered with the OS. It will be
/* Pack the 6 address bytes into one u64 for the filter data register. */
4084 	for (i = 0; i < ETH_ALEN; i++) {
4086 		mac_addr |= addr[i];
4089 	writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4090 	       &bar0->rmac_addr_data0_mem);
/* Offset 0 is the unicast (station address) entry of the filter table. */
4093 	    RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4094 	    RMAC_ADDR_CMD_MEM_OFFSET(0);
4095 	writeq(val64, &bar0->rmac_addr_cmd_mem);
4096 	/* Wait till command completes */
4097 	if (wait_for_cmd_complete(sp)) {
4098 		DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4106  * s2io_ethtool_sset - Sets different link parameters.
4107  * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
4108  * @info: pointer to the structure with parameters given by ethtool to set
4111  * The function sets different link parameters provided by the user onto
4117 static int s2io_ethtool_sset(struct net_device *dev,
4118 			     struct ethtool_cmd *info)
4120 	nic_t *sp = dev->priv;
/* The hardware only supports 10 Gb/s full duplex with autoneg off;
 * reject any other requested combination. */
4121 	if ((info->autoneg == AUTONEG_ENABLE) ||
4122 	    (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
/* Bounce the interface so the (unchanged) settings are re-applied. */
4125 		s2io_close(sp->dev);
4133  * s2io_ethtool_gset - Return link specific information.
4134  * @sp : private member of the device structure, pointer to the
4135  * s2io_nic structure.
4136  * @info : pointer to the structure with parameters given by ethtool
4137  * to return link information.
4139  * Returns link specific information like speed, duplex etc.. to ethtool.
4141  * return 0 on success.
4144 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4146 	nic_t *sp = dev->priv;
4147 	info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4148 	info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4149 	info->port = PORT_FIBRE;
4150 	/* info->transceiver?? TODO */
/* Report speed/duplex only while the carrier is up. */
4152 	if (netif_carrier_ok(sp->dev)) {
4153 		info->speed = 10000;
4154 		info->duplex = DUPLEX_FULL;
4160 	info->autoneg = AUTONEG_DISABLE;
4165  * s2io_ethtool_gdrvinfo - Returns driver specific information.
4166  * @sp : private member of the device structure, which is a pointer to the
4167  * s2io_nic structure.
4168  * @info : pointer to the structure with parameters given by ethtool to
4169  * return driver information.
4171  * Returns driver specific information like name, version etc.. to ethtool.
4176 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4177 				  struct ethtool_drvinfo *info)
4179 	nic_t *sp = dev->priv;
4181 	strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4182 	strncpy(info->version, s2io_driver_version, sizeof(info->version));
4183 	strncpy(info->fw_version, "", sizeof(info->fw_version));
4184 	strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
/* Sizes advertised here bound the buffers ethtool allocates for the
 * register dump, EEPROM dump, self-test and stats ioctls. */
4185 	info->regdump_len = XENA_REG_SPACE;
4186 	info->eedump_len = XENA_EEPROM_SPACE;
4187 	info->testinfo_len = S2IO_TEST_LEN;
4188 	info->n_stats = S2IO_STAT_LEN;
4192  *  s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
4193  *  @sp: private member of the device structure, which is a pointer to the
4194  *  s2io_nic structure.
4195  *  @regs : pointer to the structure with parameters given by ethtool for
4196  *  dumping the registers.
4197  *  @reg_space: The input argument into which all the registers are dumped.
4199  *  Dumps the entire register space of xFrame NIC into the user given
4205 static void s2io_ethtool_gregs(struct net_device *dev,
4206 			       struct ethtool_regs *regs, void *space)
4210 	u8 *reg_space = (u8 *) space;
4211 	nic_t *sp = dev->priv;
4213 	regs->len = XENA_REG_SPACE;
4214 	regs->version = sp->pdev->subsystem_device;
/* Copy the whole BAR0 register space 8 bytes (one readq) at a time. */
4216 	for (i = 0; i < regs->len; i += 8) {
4217 		reg = readq(sp->bar0 + i);
4218 		memcpy((reg_space + i), &reg, 8);
4223  *  s2io_phy_id  - timer function that alternates adapter LED.
4224  *  @data : address of the private member of the device structure, which
4225  *  is a pointer to the s2io_nic structure, provided as an u32.
4226  *  Description: This is actually the timer function that alternates the
4227  *  adapter LED bit of the adapter control bit to set/reset every time on
4228  *  invocation. The timer is set for 1/2 a second, hence the NIC blinks
4229  *  once every second.
4231 static void s2io_phy_id(unsigned long data)
4233 	nic_t *sp = (nic_t *) data;
4234 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
4238 	subid = sp->pdev->subsystem_device;
/* Newer cards (Xframe II, or subsystem id >= 0x07) blink via GPIO 0;
 * older ones toggle the LED bit in adapter_control instead. */
4239 	if ((sp->device_type == XFRAME_II_DEVICE) ||
4240 	    ((subid & 0xFF) >= 0x07)) {
4241 		val64 = readq(&bar0->gpio_control);
4242 		val64 ^= GPIO_CTRL_GPIO_0;
4243 		writeq(val64, &bar0->gpio_control);
4245 		val64 = readq(&bar0->adapter_control);
4246 		val64 ^= ADAPTER_LED_ON;
4247 		writeq(val64, &bar0->adapter_control);
/* Re-arm so the LED toggles every half second. */
4250 	mod_timer(&sp->id_timer, jiffies + HZ / 2);
4254  * s2io_ethtool_idnic - To physically identify the nic on the system.
4255  * @sp : private member of the device structure, which is a pointer to the
4256  * s2io_nic structure.
4257  * @id : pointer to the structure with identification parameters given by
4259  * Description: Used to physically identify the NIC on the system.
4260  * The Link LED will blink for a time specified by the user for
4262  * NOTE: The Link has to be Up to be able to blink the LED. Hence
4263  * identification is possible only if it's link is up.
4265  * int , returns 0 on success
4268 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
4270 	u64 val64 = 0, last_gpio_ctrl_val;
4271 	nic_t *sp = dev->priv;
4272 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
4275 	subid = sp->pdev->subsystem_device;
/* Remember the current GPIO state so it can be restored afterwards. */
4276 	last_gpio_ctrl_val = readq(&bar0->gpio_control);
/* Old Xframe I boards can only blink when the adapter is enabled. */
4277 	if ((sp->device_type == XFRAME_I_DEVICE) &&
4278 	    ((subid & 0xFF) < 0x07)) {
4279 		val64 = readq(&bar0->adapter_control);
4280 		if (!(val64 & ADAPTER_CNTL_EN)) {
4282 				  "Adapter Link down, cannot blink LED\n");
4286 	if (sp->id_timer.function == NULL) {
/* Lazily set up the blink timer on first use. */
4287 		init_timer(&sp->id_timer);
4288 		sp->id_timer.function = s2io_phy_id;
4289 		sp->id_timer.data = (unsigned long) sp;
4291 	mod_timer(&sp->id_timer, jiffies);
/* Blink for the user-requested duration, or the default maximum. */
4293 		msleep_interruptible(data * HZ);
4295 		msleep_interruptible(MAX_FLICKER_TIME);
4296 	del_timer_sync(&sp->id_timer);
4298 	if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
4299 		writeq(last_gpio_ctrl_val, &bar0->gpio_control);
4300 		last_gpio_ctrl_val = readq(&bar0->gpio_control);
4307  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
4308  * @sp : private member of the device structure, which is a pointer to the
4309  * s2io_nic structure.
4310  * @ep : pointer to the structure with pause parameters given by ethtool.
4312  * Returns the Pause frame generation and reception capability of the NIC.
4316 static void s2io_ethtool_getpause_data(struct net_device *dev,
4317 				       struct ethtool_pauseparam *ep)
4320 	nic_t *sp = dev->priv;
4321 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Translate the hardware pause-config register into ethtool flags. */
4323 	val64 = readq(&bar0->rmac_pause_cfg);
4324 	if (val64 & RMAC_PAUSE_GEN_ENABLE)
4325 		ep->tx_pause = TRUE;
4326 	if (val64 & RMAC_PAUSE_RX_ENABLE)
4327 		ep->rx_pause = TRUE;
/* Pause autonegotiation is not supported by this adapter. */
4328 	ep->autoneg = FALSE;
4332  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
4333  * @sp : private member of the device structure, which is a pointer to the
4334  * s2io_nic structure.
4335  * @ep : pointer to the structure with pause parameters given by ethtool.
4337  * It can be used to set or reset Pause frame generation or reception
4338  * support of the NIC.
4340  * int, returns 0 on Success
4343 static int s2io_ethtool_setpause_data(struct net_device *dev,
4344 				      struct ethtool_pauseparam *ep)
4347 	nic_t *sp = dev->priv;
4348 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Read-modify-write the pause-config register: set or clear the Tx
 * (generation) and Rx (honouring) pause enable bits per the request. */
4350 	val64 = readq(&bar0->rmac_pause_cfg);
4352 		val64 |= RMAC_PAUSE_GEN_ENABLE;
4354 		val64 &= ~RMAC_PAUSE_GEN_ENABLE;
4356 		val64 |= RMAC_PAUSE_RX_ENABLE;
4358 		val64 &= ~RMAC_PAUSE_RX_ENABLE;
4359 	writeq(val64, &bar0->rmac_pause_cfg);
4364  * read_eeprom - reads 4 bytes of data from user given offset.
4365  * @sp : private member of the device structure, which is a pointer to the
4366  * s2io_nic structure.
4367  * @off : offset at which the data must be written
4368  * @data : Its an output parameter where the data read at the given
4371  * Will read 4 bytes of data from the user given offset and return the
4373  * NOTE: Will allow to read only part of the EEPROM visible through the
4376  * -1 on failure and 0 on success.
4379 #define S2IO_DEV_ID		5
4380 static int read_eeprom(nic_t * sp, int off, u64 * data)
4385 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Xframe I accesses its EEPROM through the I2C controller. */
4387 	if (sp->device_type == XFRAME_I_DEVICE) {
4388 		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4389 		    I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
4390 		    I2C_CONTROL_CNTL_START;
4391 		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
/* Bounded poll for the I2C transaction to signal completion. */
4393 		while (exit_cnt < 5) {
4394 			val64 = readq(&bar0->i2c_control);
4395 			if (I2C_CONTROL_CNTL_END(val64)) {
4396 				*data = I2C_CONTROL_GET_DATA(val64);
/* Xframe II uses the SPI controller instead (CMD 0x3 = read). */
4405 	if (sp->device_type == XFRAME_II_DEVICE) {
4406 		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
4407 			SPI_CONTROL_BYTECNT(0x3) |
4408 			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
4409 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4410 		val64 |= SPI_CONTROL_REQ;
4411 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4412 		while (exit_cnt < 5) {
4413 			val64 = readq(&bar0->spi_control);
4414 			if (val64 & SPI_CONTROL_NACK) {
4417 			} else if (val64 & SPI_CONTROL_DONE) {
4418 				*data = readq(&bar0->spi_data);
4431  *  write_eeprom - actually writes the relevant part of the data value.
4432  *  @sp : private member of the device structure, which is a pointer to the
4433  *  s2io_nic structure.
4434  *  @off : offset at which the data must be written
4435  *  @data : The data that is to be written
4436  *  @cnt : Number of bytes of the data that are actually to be written into
4437  *  the Eeprom. (max of 3)
4439  *  Actually writes the relevant part of the data value into the Eeprom
4440  *  through the I2C bus.
4442  *  0 on success, -1 on failure.
4445 static int write_eeprom(nic_t * sp, int off, u64 data, int cnt)
4447 	int exit_cnt = 0, ret = -1;
4449 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Xframe I: program via the I2C controller. */
4451 	if (sp->device_type == XFRAME_I_DEVICE) {
4452 		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4453 		    I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
4454 		    I2C_CONTROL_CNTL_START;
4455 		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
/* Bounded poll: success only if the transaction ended without a NACK. */
4457 		while (exit_cnt < 5) {
4458 			val64 = readq(&bar0->i2c_control);
4459 			if (I2C_CONTROL_CNTL_END(val64)) {
4460 				if (!(val64 & I2C_CONTROL_NACK))
/* Xframe II: program via the SPI controller (CMD 0x2 = write);
 * a byte count of 8 is encoded as 0 in the SPI byte-count field. */
4469 	if (sp->device_type == XFRAME_II_DEVICE) {
4470 		int write_cnt = (cnt == 8) ? 0 : cnt;
4471 		writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
4473 		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
4474 			SPI_CONTROL_BYTECNT(write_cnt) |
4475 			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
4476 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4477 		val64 |= SPI_CONTROL_REQ;
4478 		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4479 		while (exit_cnt < 5) {
4480 			val64 = readq(&bar0->spi_control);
4481 			if (val64 & SPI_CONTROL_NACK) {
4484 			} else if (val64 & SPI_CONTROL_DONE) {
4496  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
4497  *  @sp : private member of the device structure, which is a pointer to the *	s2io_nic structure.
4498  *  @eeprom : pointer to the user level structure provided by ethtool,
4499  *  containing all relevant information.
4500  *  @data_buf : user defined value to be written into Eeprom.
4501  *  Description: Reads the values stored in the Eeprom at given offset
4502  *  for a given length. Stores these values in the input argument data
4503  *  buffer 'data_buf' and returns these to the caller (ethtool.)
4508 static int s2io_ethtool_geeprom(struct net_device *dev,
4509 			 struct ethtool_eeprom *eeprom, u8 * data_buf)
4513 	nic_t *sp = dev->priv;
/* Magic lets userspace verify which device the dump came from. */
4515 	eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
/* Clamp the request so it never reads past the visible EEPROM space. */
4517 	if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
4518 		eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
/* EEPROM is read 4 bytes at a time. */
4520 	for (i = 0; i < eeprom->len; i += 4) {
4521 		if (read_eeprom(sp, (eeprom->offset + i), &data)) {
4522 			DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
4526 		memcpy((data_buf + i), &valid, 4);
4532  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
4533  *  @sp : private member of the device structure, which is a pointer to the
4534  *  s2io_nic structure.
4535  *  @eeprom : pointer to the user level structure provided by ethtool,
4536  *  containing all relevant information.
4537  *  @data_buf : user defined value to be written into Eeprom.
4539  *  Tries to write the user provided value in the Eeprom, at the offset
4540  *  given by the user.
4542  *  0 on success, -EFAULT on failure.
4545 static int s2io_ethtool_seeprom(struct net_device *dev,
4546 				struct ethtool_eeprom *eeprom,
4549 	int len = eeprom->len, cnt = 0;
4550 	u64 valid = 0, data;
4551 	nic_t *sp = dev->priv;
/* Refuse writes whose magic does not match this vendor/device pair —
 * guards against a dump taken from a different card being written back. */
4553 	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
4555 			  "ETHTOOL_WRITE_EEPROM Err: Magic value ");
4556 		DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
/* Write one byte per iteration, positioned in the upper byte as the
 * hardware expects. */
4562 		data = (u32) data_buf[cnt] & 0x000000FF;
4564 			valid = (u32) (data << 24);
4568 		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
4570 				  "ETHTOOL_WRITE_EEPROM Err: Cannot ");
4572 				  "write into the specified offset\n");
4583  * s2io_register_test - reads and writes into all clock domains.
4584  * @sp : private member of the device structure, which is a pointer to the
4585  * s2io_nic structure.
4586  * @data : variable that returns the result of each of the test conducted b
4589  * Read and write into all clock domains. The NIC has 3 clock domains,
4590  * see that registers in all the three regions are accessible.
4595 static int s2io_register_test(nic_t * sp, uint64_t * data)
4597 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
4598 	u64 val64 = 0, exp_val;
/* Level 1: fixed byte-swapper signature must read back as expected. */
4601 	val64 = readq(&bar0->pif_rd_swapper_fb);
4602 	if (val64 != 0x123456789abcdefULL) {
4604 		DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
/* Level 2: pause-config register power-on default. */
4607 	val64 = readq(&bar0->rmac_pause_cfg);
4608 	if (val64 != 0xc000ffff00000000ULL) {
4610 		DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
/* Level 3: rx_queue_cfg default differs between Xframe I and II. */
4613 	val64 = readq(&bar0->rx_queue_cfg);
4614 	if (sp->device_type == XFRAME_II_DEVICE)
4615 		exp_val = 0x0404040404040404ULL;
4617 		exp_val = 0x0808080808080808ULL;
4618 	if (val64 != exp_val) {
4620 		DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
/* Level 4: XGXS elastic-FIFO config default. */
4623 	val64 = readq(&bar0->xgxs_efifo_cfg);
4624 	if (val64 != 0x000000001923141EULL) {
4626 		DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
/* Write tests: two complementary patterns through the scratch
 * xmsi_data register must read back unchanged. */
4629 	val64 = 0x5A5A5A5A5A5A5A5AULL;
4630 	writeq(val64, &bar0->xmsi_data);
4631 	val64 = readq(&bar0->xmsi_data);
4632 	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
4634 		DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
4637 	val64 = 0xA5A5A5A5A5A5A5A5ULL;
4638 	writeq(val64, &bar0->xmsi_data);
4639 	val64 = readq(&bar0->xmsi_data);
4640 	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
4642 		DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
4650  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
4651  * @sp : private member of the device structure, which is a pointer to the
4652  * s2io_nic structure.
4653  * @data:variable that returns the result of each of the test conducted by
4656  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
4662 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
4665 	u64 ret_data, org_4F0, org_7F0;
4666 	u8 saved_4F0 = 0, saved_7F0 = 0;
4667 	struct net_device *dev = sp->dev;
4669 	/* Test Write Error at offset 0 */
4670 	/* Note that SPI interface allows write access to all areas
4671 	 * of EEPROM. Hence doing all negative testing only for Xframe I.
/* Negative test: writing offset 0 must FAIL on Xframe I. */
4673 	if (sp->device_type == XFRAME_I_DEVICE)
4674 		if (!write_eeprom(sp, 0, 0, 3))
4677 	/* Save current values at offsets 0x4F0 and 0x7F0 */
4678 	if (!read_eeprom(sp, 0x4F0, &org_4F0))
4680 	if (!read_eeprom(sp, 0x7F0, &org_7F0))
4683 	/* Test Write at offset 4f0 */
4684 	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
4686 	if (read_eeprom(sp, 0x4F0, &ret_data))
/* Read-back must match the pattern just written. */
4689 	if (ret_data != 0x012345) {
4690 		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. Data written %llx Data read %llx\n", dev->name, (u64)0x12345, ret_data);
4694 	/* Reset the EEPROM data go FFFF */
4695 	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
4697 	/* Test Write Request Error at offset 0x7c */
4698 	if (sp->device_type == XFRAME_I_DEVICE)
4699 		if (!write_eeprom(sp, 0x07C, 0, 3))
4702 	/* Test Write Request at offset 0x7f0 */
4703 	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
4705 	if (read_eeprom(sp, 0x7F0, &ret_data))
4708 	if (ret_data != 0x012345) {
4709 		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. Data written %llx Data read %llx\n", dev->name, (u64)0x12345, ret_data);
4713 	/* Reset the EEPROM data go FFFF */
4714 	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
/* Remaining negative tests: these protected offsets must all reject
 * writes on Xframe I. */
4716 	if (sp->device_type == XFRAME_I_DEVICE) {
4717 		/* Test Write Error at offset 0x80 */
4718 		if (!write_eeprom(sp, 0x080, 0, 3))
4721 		/* Test Write Error at offset 0xfc */
4722 		if (!write_eeprom(sp, 0x0FC, 0, 3))
4725 		/* Test Write Error at offset 0x100 */
4726 		if (!write_eeprom(sp, 0x100, 0, 3))
4729 		/* Test Write Error at offset 4ec */
4730 		if (!write_eeprom(sp, 0x4EC, 0, 3))
4734 	/* Restore values at offsets 0x4F0 and 0x7F0 */
4736 		write_eeprom(sp, 0x4F0, org_4F0, 3);
4738 		write_eeprom(sp, 0x7F0, org_7F0, 3);
4745  * s2io_bist_test - invokes the MemBist test of the card .
4746  * @sp : private member of the device structure, which is a pointer to the
4747  * s2io_nic structure.
4748  * @data:variable that returns the result of each of the test conducted by
4751  * This invokes the MemBist test of the card. We give around
4752  * 2 secs time for the Test to complete. If it's still not complete
4753  * within this period, we consider that the test failed.
4755  * 0 on success and -1 on failure.
4758 static int s2io_bist_test(nic_t * sp, uint64_t * data)
4761 	int cnt = 0, ret = -1;
/* Kick off PCI built-in self test by setting BIST_START in config space. */
4763 	pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4764 	bist |= PCI_BIST_START;
4765 	pci_write_config_word(sp->pdev, PCI_BIST, bist);
/* Poll until hardware clears BIST_START; the residue is the result code. */
4768 		pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4769 		if (!(bist & PCI_BIST_START)) {
4770 			*data = (bist & PCI_BIST_CODE_MASK);
4782  * s2io_link_test - verifies the link state of the nic
4783  * @sp : private member of the device structure, which is a pointer to the
4784  * s2io_nic structure.
4785  * @data: variable that returns the result of each of the test conducted by
4788  * The function verifies the link state of the NIC and updates the input
4789  * argument 'data' appropriately.
4794 static int s2io_link_test(nic_t * sp, uint64_t * data)
4796 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* A local-fault indication in adapter_status means the link is down. */
4799 	val64 = readq(&bar0->adapter_status);
4800 	if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
4807  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
4808  * @sp - private member of the device structure, which is a pointer to the
4809  * s2io_nic structure.
4810  * @data - variable that returns the result of each of the test
4811  * conducted by the driver.
4813  * This is one of the offline test that tests the read and write
4814  * access to the RldRam chip on the NIC.
4819 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
4821 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
4823 	int cnt, iteration = 0, test_fail = 0;
/* ECC must be off while exercising the RLDRAM in test mode. */
4825 	val64 = readq(&bar0->adapter_control);
4826 	val64 &= ~ADAPTER_ECC_EN;
4827 	writeq(val64, &bar0->adapter_control);
4829 	val64 = readq(&bar0->mc_rldram_test_ctrl);
4830 	val64 |= MC_RLDRAM_TEST_MODE;
4831 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
4833 	val64 = readq(&bar0->mc_rldram_mrs);
4834 	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
4835 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
4837 	val64 |= MC_RLDRAM_MRS_ENABLE;
4838 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
/* Two passes: iteration 1 inverts the upper 48 bits of each pattern. */
4840 	while (iteration < 2) {
4841 		val64 = 0x55555555aaaa0000ULL;
4842 		if (iteration == 1) {
4843 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
4845 		writeq(val64, &bar0->mc_rldram_test_d0);
4847 		val64 = 0xaaaa5a5555550000ULL;
4848 		if (iteration == 1) {
4849 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
4851 		writeq(val64, &bar0->mc_rldram_test_d1);
4853 		val64 = 0x55aaaaaaaa5a0000ULL;
4854 		if (iteration == 1) {
4855 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
4857 		writeq(val64, &bar0->mc_rldram_test_d2);
4859 		val64 = (u64) (0x0000003ffffe0100ULL);
4860 		writeq(val64, &bar0->mc_rldram_test_add);
/* Trigger the write phase and poll (bounded) for completion. */
4862 		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
4864 		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
4866 		for (cnt = 0; cnt < 5; cnt++) {
4867 			val64 = readq(&bar0->mc_rldram_test_ctrl);
4868 			if (val64 & MC_RLDRAM_TEST_DONE)
/* Trigger the read/compare phase and poll for completion. */
4876 		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
4877 		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
4879 		for (cnt = 0; cnt < 5; cnt++) {
4880 			val64 = readq(&bar0->mc_rldram_test_ctrl);
4881 			if (val64 & MC_RLDRAM_TEST_DONE)
/* PASS bit clear after completion means the memory compare failed. */
4889 		val64 = readq(&bar0->mc_rldram_test_ctrl);
4890 		if (!(val64 & MC_RLDRAM_TEST_PASS))
4898 	/* Bring the adapter out of test mode */
4899 	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
4905 * s2io_ethtool_test - conducts 6 tests to determine the health of the card.
4906 * @sp : private member of the device structure, which is a pointer to the
4907 * s2io_nic structure.
4908 * @ethtest : pointer to a ethtool command specific structure that will be
4909 * returned to the user.
4910 * @data : variable that returns the result of each of the test
4911 * conducted by the driver.
4913 * This function conducts 6 tests ( 4 offline and 2 online) to determine
4914 * the health of the card.
/* ethtool self_test entry point. Offline tests (register, rldram, eeprom,
 * bist) require the interface to be brought down first and restored after;
 * the link test runs online. A failure in any sub-test sets
 * ETH_TEST_FL_FAILED in ethtest->flags; per-test codes land in data[].
 * NOTE(review): listing truncated -- the re-open path and the "not up"
 * error branch are partially elided below. */
4919 static void s2io_ethtool_test(struct net_device *dev,
4920 struct ethtool_test *ethtest,
4923 nic_t *sp = dev->priv;
4924 int orig_state = netif_running(sp->dev);
4926 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
4927 /* Offline Tests. */
4929 s2io_close(sp->dev);
4931 if (s2io_register_test(sp, &data[0]))
4932 ethtest->flags |= ETH_TEST_FL_FAILED;
4936 if (s2io_rldram_test(sp, &data[3]))
4937 ethtest->flags |= ETH_TEST_FL_FAILED;
4941 if (s2io_eeprom_test(sp, &data[1]))
4942 ethtest->flags |= ETH_TEST_FL_FAILED;
4944 if (s2io_bist_test(sp, &data[4]))
4945 ethtest->flags |= ETH_TEST_FL_FAILED;
4955 "%s: is not up, cannot run test\n",
/* Online test: link state only. */
4964 if (s2io_link_test(sp, &data[2]))
4965 ethtest->flags |= ETH_TEST_FL_FAILED;
/* ethtool get_ethtool_stats entry point: refreshes the hardware stats
 * block (s2io_updt_stats) and copies it into tmp_stats[] in the order of
 * ethtool_stats_keys. 32-bit counters with a separate overflow word are
 * stitched into one u64 as (oflow << 32 | low). */
4974 static void s2io_get_ethtool_stats(struct net_device *dev,
4975 struct ethtool_stats *estats,
4979 nic_t *sp = dev->priv;
4980 StatInfo_t *stat_info = sp->mac_control.stats_info;
4982 s2io_updt_stats(sp);
/* TMAC (transmit MAC) counters. */
4984 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
4985 le32_to_cpu(stat_info->tmac_frms);
4987 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
4988 le32_to_cpu(stat_info->tmac_data_octets);
4989 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
4991 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
4992 le32_to_cpu(stat_info->tmac_mcst_frms);
4994 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
4995 le32_to_cpu(stat_info->tmac_bcst_frms);
4996 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
4998 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
4999 le32_to_cpu(stat_info->tmac_any_err_frms);
5000 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
5002 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5003 le32_to_cpu(stat_info->tmac_vld_ip);
5005 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5006 le32_to_cpu(stat_info->tmac_drop_ip);
5008 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5009 le32_to_cpu(stat_info->tmac_icmp);
5011 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5012 le32_to_cpu(stat_info->tmac_rst_tcp);
5013 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
5014 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5015 le32_to_cpu(stat_info->tmac_udp);
/* RMAC (receive MAC) counters. */
5017 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5018 le32_to_cpu(stat_info->rmac_vld_frms);
5020 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5021 le32_to_cpu(stat_info->rmac_data_octets);
5022 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5023 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
5025 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5026 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5028 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
5029 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
5030 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
5031 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
5032 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
5034 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
5035 le32_to_cpu(stat_info->rmac_discarded_frms);
5037 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
5038 le32_to_cpu(stat_info->rmac_usized_frms);
5040 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
5041 le32_to_cpu(stat_info->rmac_osized_frms);
5043 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
5044 le32_to_cpu(stat_info->rmac_frag_frms);
5046 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
5047 le32_to_cpu(stat_info->rmac_jabber_frms);
5048 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
5049 le32_to_cpu(stat_info->rmac_ip);
5050 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
5051 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
5052 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
5053 le32_to_cpu(stat_info->rmac_drop_ip);
5054 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
5055 le32_to_cpu(stat_info->rmac_icmp);
5056 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
5057 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
5058 le32_to_cpu(stat_info->rmac_udp);
5060 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
5061 le32_to_cpu(stat_info->rmac_err_drp_udp);
5063 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
5064 le32_to_cpu(stat_info->rmac_pause_cnt);
5066 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
5067 le32_to_cpu(stat_info->rmac_accepted_ip);
5068 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
/* Driver-maintained software counters (already host-endian). */
5070 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
5071 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
5074 int s2io_ethtool_get_regs_len(struct net_device *dev)
5076 return (XENA_REG_SPACE);
5080 u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5082 nic_t *sp = dev->priv;
5084 return (sp->rx_csum);
/* ethtool hook: enable/disable Rx checksum offload.
 * NOTE(review): body truncated in this listing -- presumably stores the
 * 'data' flag into sp->rx_csum and returns 0; confirm against full source. */
5086 int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5088 nic_t *sp = dev->priv;
5097 int s2io_get_eeprom_len(struct net_device *dev)
5099 return (XENA_EEPROM_SPACE);
5102 int s2io_ethtool_self_test_count(struct net_device *dev)
5104 return (S2IO_TEST_LEN);
5106 void s2io_ethtool_get_strings(struct net_device *dev,
5107 u32 stringset, u8 * data)
5109 switch (stringset) {
5111 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5114 memcpy(data, ðtool_stats_keys,
5115 sizeof(ethtool_stats_keys));
5118 static int s2io_ethtool_get_stats_count(struct net_device *dev)
5120 return (S2IO_STAT_LEN);
5123 int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
5126 dev->features |= NETIF_F_IP_CSUM;
5128 dev->features &= ~NETIF_F_IP_CSUM;
/* ethtool operations table for this driver; a mix of s2io-specific
 * handlers and generic ethtool_op_* helpers. NOTE(review): the closing
 * "};" and some conditionally-compiled entries are elided in this listing. */
5134 static struct ethtool_ops netdev_ethtool_ops = {
5135 .get_settings = s2io_ethtool_gset,
5136 .set_settings = s2io_ethtool_sset,
5137 .get_drvinfo = s2io_ethtool_gdrvinfo,
5138 .get_regs_len = s2io_ethtool_get_regs_len,
5139 .get_regs = s2io_ethtool_gregs,
5140 .get_link = ethtool_op_get_link,
5141 .get_eeprom_len = s2io_get_eeprom_len,
5142 .get_eeprom = s2io_ethtool_geeprom,
5143 .set_eeprom = s2io_ethtool_seeprom,
5144 .get_pauseparam = s2io_ethtool_getpause_data,
5145 .set_pauseparam = s2io_ethtool_setpause_data,
5146 .get_rx_csum = s2io_ethtool_get_rx_csum,
5147 .set_rx_csum = s2io_ethtool_set_rx_csum,
5148 .get_tx_csum = ethtool_op_get_tx_csum,
5149 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
5150 .get_sg = ethtool_op_get_sg,
5151 .set_sg = ethtool_op_set_sg,
5153 .get_tso = ethtool_op_get_tso,
5154 .set_tso = ethtool_op_set_tso,
5156 .self_test_count = s2io_ethtool_self_test_count,
5157 .self_test = s2io_ethtool_test,
5158 .get_strings = s2io_ethtool_get_strings,
5159 .phys_id = s2io_ethtool_idnic,
5160 .get_stats_count = s2io_ethtool_get_stats_count,
5161 .get_ethtool_stats = s2io_get_ethtool_stats
5165 * s2io_ioctl - Entry point for the Ioctl
5166 * @dev : Device pointer.
5167 * @ifr : An IOCTL specific structure, that can contain a pointer to
5168 * a proprietary structure used to pass information to the driver.
5169 * @cmd : This is used to distinguish between the different commands that
5170 * can be passed to the IOCTL functions.
5172 * Currently there is no special functionality supported in IOCTL, hence
5173 * the function always returns -EOPNOTSUPP.
/*
 * s2io_ioctl - Entry point for device ioctls. No private ioctls are
 * implemented, so (per the documentation block above) this always
 * returns -EOPNOTSUPP. The function body was lost in the truncated
 * listing and is restored here.
 */
int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	return -EOPNOTSUPP;
}
5182 * s2io_change_mtu - entry point to change MTU size for the device.
5183 * @dev : device pointer.
5184 * @new_mtu : the new MTU size for the device.
5185 * Description: A driver entry point to change MTU size for the device.
5186 * Before changing the MTU the device must be stopped.
5188 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/* net_device change_mtu entry point. Rejects MTUs outside
 * [MIN_MTU, S2IO_JUMBO_SIZE]. If the interface is running, the card is
 * cycled down/up so the new MTU takes effect; otherwise the max payload
 * length register is programmed directly. NOTE(review): listing truncated
 * (dev->mtu assignment, card-down call and returns are elided). */
5192 int s2io_change_mtu(struct net_device *dev, int new_mtu)
5194 nic_t *sp = dev->priv;
5196 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
5197 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
5203 if (netif_running(dev)) {
5205 netif_stop_queue(dev);
5206 if (s2io_card_up(sp)) {
5207 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
5210 if (netif_queue_stopped(dev))
5211 netif_wake_queue(dev);
5212 } else { /* Device is down */
5213 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5214 u64 val64 = new_mtu;
5216 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
5223 * s2io_tasklet - Bottom half of the ISR.
5224 * @dev_adr : address of the device structure in dma_addr_t format.
5226 * This is the tasklet or the bottom half of the ISR. This is
5227 * an extension of the ISR which is scheduled by the scheduler to be run
5228 * when the load on the CPU is low. All low priority tasks of the ISR can
5229 * be pushed into the tasklet. For now the tasklet is used only to
5230 * replenish the Rx buffers in the Rx buffer descriptors.
/* Bottom half (tasklet) of the ISR: replenishes Rx descriptors on every
 * ring. TASKLET_IN_USE guards against re-entry; tasklet_status bit 0 is
 * cleared on exit. -ENOMEM aborts replenishing, -EFILL means the ring is
 * already full. */
5235 static void s2io_tasklet(unsigned long dev_addr)
5237 struct net_device *dev = (struct net_device *) dev_addr;
5238 nic_t *sp = dev->priv;
5240 mac_info_t *mac_control;
5241 struct config_param *config;
5243 mac_control = &sp->mac_control;
5244 config = &sp->config;
5246 if (!TASKLET_IN_USE) {
5247 for (i = 0; i < config->rx_ring_num; i++) {
5248 ret = fill_rx_buffers(sp, i);
5249 if (ret == -ENOMEM) {
5250 DBG_PRINT(ERR_DBG, "%s: Out of ",
5252 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
5254 } else if (ret == -EFILL) {
5256 "%s: Rx Ring %d is full\n",
5261 clear_bit(0, (&sp->tasklet_status));
5266 * s2io_set_link - Set the Link status
5267 * @data: long pointer to device private structure
5268 * Description: Sets the link status for the adapter
/* Deferred (work-queue) link-state handler. Takes bit 0 of link_state as
 * a mutual-exclusion flag against the reset path; if the adapter is
 * quiescent it enables/disables the adapter, drives the link LED/GPIO on
 * cards with faulty link indicators, and reports LINK_UP/LINK_DOWN via
 * s2io_link(). NOTE(review): listing truncated -- returns, delays and
 * several closing braces are elided below. */
5271 static void s2io_set_link(unsigned long data)
5273 nic_t *nic = (nic_t *) data;
5274 struct net_device *dev = nic->dev;
5275 XENA_dev_config_t __iomem *bar0 = nic->bar0;
5279 if (test_and_set_bit(0, &(nic->link_state))) {
5280 /* The card is being reset, no point doing anything */
5284 subid = nic->pdev->subsystem_device;
5285 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
5287 * Allow a small delay for the NICs self initiated
5288 * cleanup to complete.
5293 val64 = readq(&bar0->adapter_status);
5294 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
5295 if (LINK_IS_UP(val64)) {
/* Link up: enable the adapter and turn the link indicator on. */
5296 val64 = readq(&bar0->adapter_control);
5297 val64 |= ADAPTER_CNTL_EN;
5298 writeq(val64, &bar0->adapter_control);
5299 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
5301 val64 = readq(&bar0->gpio_control);
5302 val64 |= GPIO_CTRL_GPIO_0;
5303 writeq(val64, &bar0->gpio_control);
5304 val64 = readq(&bar0->gpio_control);
5306 val64 |= ADAPTER_LED_ON;
5307 writeq(val64, &bar0->adapter_control);
5309 if (s2io_link_fault_indication(nic) ==
5310 MAC_RMAC_ERR_TIMER) {
/* Re-check: the link may have dropped while we enabled the device. */
5311 val64 = readq(&bar0->adapter_status);
5312 if (!LINK_IS_UP(val64)) {
5313 DBG_PRINT(ERR_DBG, "%s:", dev->name);
5314 DBG_PRINT(ERR_DBG, " Link down");
5315 DBG_PRINT(ERR_DBG, "after ");
5316 DBG_PRINT(ERR_DBG, "enabling ");
5317 DBG_PRINT(ERR_DBG, "device \n");
5320 if (nic->device_enabled_once == FALSE) {
5321 nic->device_enabled_once = TRUE;
5323 s2io_link(nic, LINK_UP);
/* Link down: release the GPIO link indicator and report it. */
5325 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
5327 val64 = readq(&bar0->gpio_control);
5328 val64 &= ~GPIO_CTRL_GPIO_0;
5329 writeq(val64, &bar0->gpio_control);
5330 val64 = readq(&bar0->gpio_control);
5332 s2io_link(nic, LINK_DOWN);
5334 } else { /* NIC is not Quiescent. */
5335 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
5336 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
5337 netif_stop_queue(dev);
5339 clear_bit(0, &(nic->link_state));
/* Brings the adapter down: stops the alarm timer, serializes with
 * s2io_set_link via link_state bit 0, stops traffic, kills the tasklet,
 * waits for quiescence and for in-flight ISRs to drain, then frees all
 * Tx/Rx buffers under their respective locks. NOTE(review): listing
 * truncated (traffic-stop writes, reset call, delays elided).
 * The "adaper" typo in the log string at 5374 is runtime text and is
 * deliberately left untouched here. */
5342 static void s2io_card_down(nic_t * sp)
5345 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5346 unsigned long flags;
5347 register u64 val64 = 0;
5349 del_timer_sync(&sp->alarm_timer);
5350 /* If s2io_set_link task is executing, wait till it completes. */
5351 while (test_and_set_bit(0, &(sp->link_state))) {
5354 atomic_set(&sp->card_state, CARD_DOWN);
5356 /* disable Tx and Rx traffic on the NIC */
5360 tasklet_kill(&sp->task);
5362 /* Check if the device is Quiescent and then Reset the NIC */
5364 val64 = readq(&bar0->adapter_status);
5365 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
5373 "s2io_close:Device not Quiescent ");
5374 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
5375 (unsigned long long) val64);
5381 /* Waiting till all Interrupt handlers are complete */
5385 if (!atomic_read(&sp->isr_cnt))
5390 spin_lock_irqsave(&sp->tx_lock, flags);
5391 /* Free all Tx buffers */
5392 free_tx_buffers(sp);
5393 spin_unlock_irqrestore(&sp->tx_lock, flags);
5395 /* Free all Rx buffers */
5396 spin_lock_irqsave(&sp->rx_lock, flags);
5397 free_rx_buffers(sp);
5398 spin_unlock_irqrestore(&sp->rx_lock, flags);
5400 clear_bit(0, &(sp->link_state));
/* Brings the adapter up: programs the H/W registers (init_nic), sets up
 * MSI/MSI-X (falling back to INTA on failure), fills the Rx rings,
 * applies the multicast/receive mode, arms the replenish tasklet, starts
 * the NIC and the alarm timer, and marks the card CARD_UP. Returns 0 on
 * success, negative on failure. NOTE(review): listing truncated (error
 * returns and closing braces elided). */
5403 static int s2io_card_up(nic_t * sp)
5406 mac_info_t *mac_control;
5407 struct config_param *config;
5408 struct net_device *dev = (struct net_device *) sp->dev;
5410 /* Initialize the H/W I/O registers */
5411 if (init_nic(sp) != 0) {
5412 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
5417 if (sp->intr_type == MSI)
5418 ret = s2io_enable_msi(sp);
5419 else if (sp->intr_type == MSI_X)
5420 ret = s2io_enable_msi_x(sp);
5422 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
5423 sp->intr_type = INTA;
5427 * Initializing the Rx buffers. For now we are considering only 1
5428 * Rx ring and initializing buffers into 30 Rx blocks
5430 mac_control = &sp->mac_control;
5431 config = &sp->config;
5433 for (i = 0; i < config->rx_ring_num; i++) {
5434 if ((ret = fill_rx_buffers(sp, i))) {
5435 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
5438 free_rx_buffers(sp);
5441 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
5442 atomic_read(&sp->rx_bufs_left[i]));
5445 /* Setting its receive mode */
5446 s2io_set_multicast(dev);
5448 /* Enable tasklet for the device */
5449 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
5451 /* Enable Rx Traffic and interrupts on the NIC */
5452 if (start_nic(sp)) {
5453 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
5454 tasklet_kill(&sp->task);
5456 free_irq(dev->irq, dev);
5457 free_rx_buffers(sp);
5461 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
5463 atomic_set(&sp->card_state, CARD_UP);
5468 * s2io_restart_nic - Resets the NIC.
5469 * @data : long pointer to the device private structure
5471 * This function is scheduled to be run by the s2io_tx_watchdog
5472 * function after 0.5 secs to reset the NIC. The idea is to reduce
5473 * the run time of the watch dog routine which is run holding a
/* Work-queue handler scheduled by s2io_tx_watchdog: resets the NIC by
 * bringing the card back up and re-waking the Tx queue. NOTE(review):
 * listing truncated -- the preceding s2io_card_down() call and closing
 * braces are elided. */
5477 static void s2io_restart_nic(unsigned long data)
5479 struct net_device *dev = (struct net_device *) data;
5480 nic_t *sp = dev->priv;
5483 if (s2io_card_up(sp)) {
5484 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
5487 netif_wake_queue(dev);
5488 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
5494 * s2io_tx_watchdog - Watchdog for transmit side.
5495 * @dev : Pointer to net device structure
5497 * This function is triggered if the Tx Queue is stopped
5498 * for a pre-defined amount of time when the Interface is still up.
5499 * If the Interface is jammed in such a situation, the hardware is
5500 * reset (by s2io_close) and restarted again (by s2io_open) to
5501 * overcome any problem that might have been caused in the hardware.
5506 static void s2io_tx_watchdog(struct net_device *dev)
5508 nic_t *sp = dev->priv;
5510 if (netif_carrier_ok(dev)) {
5511 schedule_work(&sp->rst_timer_task);
5516 * rx_osm_handler - To perform some OS related operations on SKB.
5517 * @sp: private member of the device structure,pointer to s2io_nic structure.
5518 * @skb : the socket buffer pointer.
5519 * @len : length of the packet
5520 * @cksum : FCS checksum of the frame.
5521 * @ring_no : the ring from which this RxD was extracted.
5523 * This function is called by the Rx interrupt service routine to perform
5524 * some OS related operations on the SKB before passing it to the upper
5525 * layers. It mainly checks if the checksum is OK, if so adds it to the
5526 * SKBs cksum variable, increments the Rx packet count and passes the SKB
5527 * to the upper layer. If the checksum is wrong, it increments the Rx
5528 * packet error count, frees the SKB and returns error.
5530 * SUCCESS on success and -1 on failure.
/* Per-packet Rx completion handling: recovers the skb from the RxD's
 * Host_Control, accounts errors/stats, fixes up checksum status from the
 * RxD flags, and hands the frame to the stack (VLAN-accelerated or plain,
 * NAPI or netif_rx paths). NOTE(review): listing truncated -- skb_put for
 * 1-buffer mode, the #else/#endif pairs and returns are elided. */
5532 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
5534 nic_t *sp = ring_data->nic;
5535 struct net_device *dev = (struct net_device *) sp->dev;
5536 struct sk_buff *skb = (struct sk_buff *)
5537 ((unsigned long) rxdp->Host_Control);
5538 int ring_no = ring_data->ring_no;
5539 u16 l3_csum, l4_csum;
5540 #ifdef CONFIG_2BUFF_MODE
5541 int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
5542 int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
5543 int get_block = ring_data->rx_curr_get_info.block_index;
5544 int get_off = ring_data->rx_curr_get_info.offset;
5545 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
5546 unsigned char *buff;
/* NOTE(review): stray double semicolon below -- harmless, kept byte-identical. */
5548 u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);;
/* Non-zero T_CODE => hardware flagged a receive error for this RxD. */
5551 if (rxdp->Control_1 & RXD_T_CODE) {
5552 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
5553 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
5556 sp->stats.rx_crc_errors++;
5557 atomic_dec(&sp->rx_bufs_left[ring_no]);
5558 rxdp->Host_Control = 0;
5562 /* Updating statistics */
5563 rxdp->Host_Control = 0;
5565 sp->stats.rx_packets++;
5566 #ifndef CONFIG_2BUFF_MODE
5567 sp->stats.rx_bytes += len;
5569 sp->stats.rx_bytes += buf2_len + buf0_len;
5572 #ifndef CONFIG_2BUFF_MODE
/* 2-buffer mode: copy header buffer in front of the payload buffer. */
5575 buff = skb_push(skb, buf0_len);
5576 memcpy(buff, ba->ba_0, buf0_len);
5577 skb_put(skb, buf2_len);
/* Checksum fix-up from the RxD flags for TCP/UDP frames. */
5580 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
5582 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
5583 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
5584 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
5586 * NIC verifies if the Checksum of the received
5587 * frame is Ok or not and accordingly returns
5588 * a flag in the RxD.
5590 skb->ip_summed = CHECKSUM_UNNECESSARY;
5593 * Packet with erroneous checksum, let the
5594 * upper layers deal with it.
5596 skb->ip_summed = CHECKSUM_NONE;
5599 skb->ip_summed = CHECKSUM_NONE;
5602 skb->protocol = eth_type_trans(skb, dev);
5603 #ifdef CONFIG_S2IO_NAPI
5604 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5605 /* Queueing the vlan frame to the upper layer */
5606 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
5607 RXD_GET_VLAN_TAG(rxdp->Control_2));
5609 netif_receive_skb(skb);
5612 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5613 /* Queueing the vlan frame to the upper layer */
5614 vlan_hwaccel_rx(skb, sp->vlgrp,
5615 RXD_GET_VLAN_TAG(rxdp->Control_2));
5620 dev->last_rx = jiffies;
5621 atomic_dec(&sp->rx_bufs_left[ring_no]);
5626 * s2io_link - stops/starts the Tx queue.
5627 * @sp : private member of the device structure, which is a pointer to the
5628 * s2io_nic structure.
5629 * @link : indicates whether link is UP/DOWN.
5631 * This function stops/starts the Tx queue depending on whether the link
5632 * status of the NIC is down or up. This is called by the Alarm
5633 * interrupt handler whenever a link change interrupt comes up.
5638 void s2io_link(nic_t * sp, int link)
5640 struct net_device *dev = (struct net_device *) sp->dev;
5642 if (link != sp->last_link_state) {
5643 if (link == LINK_DOWN) {
5644 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
5645 netif_carrier_off(dev);
5647 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
5648 netif_carrier_on(dev);
5651 sp->last_link_state = link;
5655 * get_xena_rev_id - to identify revision ID of xena.
5656 * @pdev : PCI Dev structure
5658 * Function to identify the Revision ID of xena.
5660 * returns the revision ID of the device.
/* Reads the PCI revision id of the adapter from config space.
 * NOTE(review): listing truncated -- per the comment above, the function
 * returns the revision id ('id'), not the pci_read_config_byte status. */
5663 int get_xena_rev_id(struct pci_dev *pdev)
5667 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
5672 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
5673 * @sp : private member of the device structure, which is a pointer to the
5674 * s2io_nic structure.
5676 * This function initializes a few of the PCI and PCI-X configuration registers
5677 * with recommended values.
/* Programs recommended values into PCI / PCI-X config registers:
 * enables data-parity error recovery, sets PERR response, and disables
 * relaxed ordering. Each write is followed by a read-back. NOTE(review):
 * listing truncated -- the &pcix_cmd arguments and the value expressions
 * on the write lines are elided. */
5682 static void s2io_init_pci(nic_t * sp)
5684 u16 pci_cmd = 0, pcix_cmd = 0;
5686 /* Enable Data Parity Error Recovery in PCI-X command register. */
5687 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5689 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5691 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5694 /* Set the PErr Response bit in PCI command register. */
5695 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5696 pci_write_config_word(sp->pdev, PCI_COMMAND,
5697 (pci_cmd | PCI_COMMAND_PARITY));
5698 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5700 /* Forcibly disabling relaxed ordering capability of the card. */
5702 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5704 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5708 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
5709 MODULE_LICENSE("GPL");
5710 MODULE_VERSION(DRV_VERSION);
5712 module_param(tx_fifo_num, int, 0);
5713 module_param(rx_ring_num, int, 0);
5714 module_param_array(tx_fifo_len, uint, NULL, 0);
5715 module_param_array(rx_ring_sz, uint, NULL, 0);
5716 module_param_array(rts_frm_len, uint, NULL, 0);
5717 module_param(use_continuous_tx_intrs, int, 1);
5718 module_param(rmac_pause_time, int, 0);
5719 module_param(mc_pause_threshold_q0q3, int, 0);
5720 module_param(mc_pause_threshold_q4q7, int, 0);
5721 module_param(shared_splits, int, 0);
5722 module_param(tmac_util_period, int, 0);
5723 module_param(rmac_util_period, int, 0);
5724 module_param(bimodal, bool, 0);
5725 #ifndef CONFIG_S2IO_NAPI
5726 module_param(indicate_max_pkts, int, 0);
5728 module_param(rxsync_frequency, int, 0);
5729 module_param(intr_type, int, 0);
5732 * s2io_init_nic - Initialization of the adapter .
5733 * @pdev : structure containing the PCI related information of the device.
5734 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
5736 * The function initializes an adapter identified by the pci_dev structure.
5737 * All OS related initialization including memory and device structure and
5738 * initialization of the device private variable is done. Also the swapper
5739 * control register is initialized to enable read and write into the I/O
5740 * registers of the device.
5742 * returns 0 on success and negative on failure.
5745 static int __devinit
5746 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5749 struct net_device *dev;
5751 int dma_flag = FALSE;
5752 u32 mac_up, mac_down;
5753 u64 val64 = 0, tmp64 = 0;
5754 XENA_dev_config_t __iomem *bar0 = NULL;
5756 mac_info_t *mac_control;
5757 struct config_param *config;
5759 u8 dev_intr_type = intr_type;
5761 #ifdef CONFIG_S2IO_NAPI
5762 if (dev_intr_type != INTA) {
5763 DBG_PRINT(ERR_DBG, "NAPI cannot be enabled when MSI/MSI-X \
5764 is enabled. Defaulting to INTA\n");
5765 dev_intr_type = INTA;
5768 DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
5771 if ((ret = pci_enable_device(pdev))) {
5773 "s2io_init_nic: pci_enable_device failed\n");
5777 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
5778 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
5780 if (pci_set_consistent_dma_mask
5781 (pdev, DMA_64BIT_MASK)) {
5783 "Unable to obtain 64bit DMA for \
5784 consistent allocations\n");
5785 pci_disable_device(pdev);
5788 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
5789 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
5791 pci_disable_device(pdev);
5795 if ((dev_intr_type == MSI_X) &&
5796 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
5797 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
5798 DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. \
5799 Defaulting to INTA\n");
5800 dev_intr_type = INTA;
5802 if (dev_intr_type != MSI_X) {
5803 if (pci_request_regions(pdev, s2io_driver_name)) {
5804 DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
5805 pci_disable_device(pdev);
5810 if (!(request_mem_region(pci_resource_start(pdev, 0),
5811 pci_resource_len(pdev, 0), s2io_driver_name))) {
5812 DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
5813 pci_disable_device(pdev);
5816 if (!(request_mem_region(pci_resource_start(pdev, 2),
5817 pci_resource_len(pdev, 2), s2io_driver_name))) {
5818 DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
5819 release_mem_region(pci_resource_start(pdev, 0),
5820 pci_resource_len(pdev, 0));
5821 pci_disable_device(pdev);
5826 dev = alloc_etherdev(sizeof(nic_t));
5828 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
5829 pci_disable_device(pdev);
5830 pci_release_regions(pdev);
5834 pci_set_master(pdev);
5835 pci_set_drvdata(pdev, dev);
5836 SET_MODULE_OWNER(dev);
5837 SET_NETDEV_DEV(dev, &pdev->dev);
5839 /* Private member variable initialized to s2io NIC structure */
5841 memset(sp, 0, sizeof(nic_t));
5844 sp->high_dma_flag = dma_flag;
5845 sp->device_enabled_once = FALSE;
5846 sp->intr_type = dev_intr_type;
5848 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
5849 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
5850 sp->device_type = XFRAME_II_DEVICE;
5852 sp->device_type = XFRAME_I_DEVICE;
5855 /* Initialize some PCI/PCI-X fields of the NIC. */
5859 * Setting the device configuration parameters.
5860 * Most of these parameters can be specified by the user during
5861 * module insertion as they are module loadable parameters. If
5862 * these parameters are not not specified during load time, they
5863 * are initialized with default values.
5865 mac_control = &sp->mac_control;
5866 config = &sp->config;
5868 /* Tx side parameters. */
5869 if (tx_fifo_len[0] == 0)
5870 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
5871 config->tx_fifo_num = tx_fifo_num;
5872 for (i = 0; i < MAX_TX_FIFOS; i++) {
5873 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
5874 config->tx_cfg[i].fifo_priority = i;
5877 /* mapping the QoS priority to the configured fifos */
5878 for (i = 0; i < MAX_TX_FIFOS; i++)
5879 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
5881 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
5882 for (i = 0; i < config->tx_fifo_num; i++) {
5883 config->tx_cfg[i].f_no_snoop =
5884 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
5885 if (config->tx_cfg[i].fifo_len < 65) {
5886 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
5890 config->max_txds = MAX_SKB_FRAGS + 1;
5892 /* Rx side parameters. */
5893 if (rx_ring_sz[0] == 0)
5894 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
5895 config->rx_ring_num = rx_ring_num;
5896 for (i = 0; i < MAX_RX_RINGS; i++) {
5897 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
5898 (MAX_RXDS_PER_BLOCK + 1);
5899 config->rx_cfg[i].ring_priority = i;
5902 for (i = 0; i < rx_ring_num; i++) {
5903 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
5904 config->rx_cfg[i].f_no_snoop =
5905 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
5908 /* Setting Mac Control parameters */
5909 mac_control->rmac_pause_time = rmac_pause_time;
5910 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
5911 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
5914 /* Initialize Ring buffer parameters. */
5915 for (i = 0; i < config->rx_ring_num; i++)
5916 atomic_set(&sp->rx_bufs_left[i], 0);
5918 /* Initialize the number of ISRs currently running */
5919 atomic_set(&sp->isr_cnt, 0);
5921 /* initialize the shared memory used by the NIC and the host */
5922 if (init_shared_mem(sp)) {
5923 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
5926 goto mem_alloc_failed;
5929 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
5930 pci_resource_len(pdev, 0));
5932 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
5935 goto bar0_remap_failed;
5938 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
5939 pci_resource_len(pdev, 2));
5941 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
5944 goto bar1_remap_failed;
5947 dev->irq = pdev->irq;
5948 dev->base_addr = (unsigned long) sp->bar0;
5950 /* Initializing the BAR1 address as the start of the FIFO pointer. */
5951 for (j = 0; j < MAX_TX_FIFOS; j++) {
5952 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
5953 (sp->bar1 + (j * 0x00020000));
5956 /* Driver entry points */
5957 dev->open = &s2io_open;
5958 dev->stop = &s2io_close;
5959 dev->hard_start_xmit = &s2io_xmit;
5960 dev->get_stats = &s2io_get_stats;
5961 dev->set_multicast_list = &s2io_set_multicast;
5962 dev->do_ioctl = &s2io_ioctl;
5963 dev->change_mtu = &s2io_change_mtu;
5964 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
5965 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5966 dev->vlan_rx_register = s2io_vlan_rx_register;
5967 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
5970 * will use eth_mac_addr() for dev->set_mac_address
5971 * mac address will be set every time dev->open() is called
5973 #if defined(CONFIG_S2IO_NAPI)
5974 dev->poll = s2io_poll;
5978 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
5979 if (sp->high_dma_flag == TRUE)
5980 dev->features |= NETIF_F_HIGHDMA;
5982 dev->features |= NETIF_F_TSO;
5985 dev->tx_timeout = &s2io_tx_watchdog;
5986 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
5987 INIT_WORK(&sp->rst_timer_task,
5988 (void (*)(void *)) s2io_restart_nic, dev);
5989 INIT_WORK(&sp->set_link_task,
5990 (void (*)(void *)) s2io_set_link, sp);
5992 pci_save_state(sp->pdev);
5994 /* Setting swapper control on the NIC, for proper reset operation */
5995 if (s2io_set_swapper(sp)) {
5996 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
5999 goto set_swap_failed;
6002 /* Verify if the Herc works on the slot its placed into */
6003 if (sp->device_type & XFRAME_II_DEVICE) {
6004 mode = s2io_verify_pci_mode(sp);
6006 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
6007 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
6009 goto set_swap_failed;
6013 /* Not needed for Herc */
6014 if (sp->device_type & XFRAME_I_DEVICE) {
6016 * Fix for all "FFs" MAC address problems observed on
6019 fix_mac_address(sp);
6024 * MAC address initialization.
6025 * For now only one mac address will be read and used.
6028 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
6029 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
6030 writeq(val64, &bar0->rmac_addr_cmd_mem);
6031 wait_for_cmd_complete(sp);
6033 tmp64 = readq(&bar0->rmac_addr_data0_mem);
6034 mac_down = (u32) tmp64;
6035 mac_up = (u32) (tmp64 >> 32);
6037 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
6039 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
6040 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
6041 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
6042 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
6043 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
6044 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
6046 /* Set the factory defined MAC address initially */
6047 dev->addr_len = ETH_ALEN;
6048 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
6051 * Initialize the tasklet status and link state flags
6052 * and the card state parameter
6054 atomic_set(&(sp->card_state), 0);
6055 sp->tasklet_status = 0;
6058 /* Initialize spinlocks */
6059 spin_lock_init(&sp->tx_lock);
6060 #ifndef CONFIG_S2IO_NAPI
6061 spin_lock_init(&sp->put_lock);
6063 spin_lock_init(&sp->rx_lock);
6066 * SXE-002: Configure link and activity LED to init state
6069 subid = sp->pdev->subsystem_device;
6070 if ((subid & 0xFF) >= 0x07) {
6071 val64 = readq(&bar0->gpio_control);
6072 val64 |= 0x0000800000000000ULL;
6073 writeq(val64, &bar0->gpio_control);
6074 val64 = 0x0411040400000000ULL;
6075 writeq(val64, (void __iomem *) bar0 + 0x2700);
6076 val64 = readq(&bar0->gpio_control);
6079 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
6081 if (register_netdev(dev)) {
6082 DBG_PRINT(ERR_DBG, "Device registration failed\n");
6084 goto register_failed;
6087 if (sp->device_type & XFRAME_II_DEVICE) {
6088 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
6090 DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
6091 get_xena_rev_id(sp->pdev),
6092 s2io_driver_version);
6093 #ifdef CONFIG_2BUFF_MODE
6094 DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
6096 switch(sp->intr_type) {
6098 DBG_PRINT(ERR_DBG, ", Intr type INTA");
6101 DBG_PRINT(ERR_DBG, ", Intr type MSI");
6104 DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
6108 DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
6109 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
6110 sp->def_mac_addr[0].mac_addr[0],
6111 sp->def_mac_addr[0].mac_addr[1],
6112 sp->def_mac_addr[0].mac_addr[2],
6113 sp->def_mac_addr[0].mac_addr[3],
6114 sp->def_mac_addr[0].mac_addr[4],
6115 sp->def_mac_addr[0].mac_addr[5]);
6116 mode = s2io_print_pci_mode(sp);
6118 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode ");
6120 goto set_swap_failed;
6123 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
6125 DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
6126 get_xena_rev_id(sp->pdev),
6127 s2io_driver_version);
6128 #ifdef CONFIG_2BUFF_MODE
6129 DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
6131 switch(sp->intr_type) {
6133 DBG_PRINT(ERR_DBG, ", Intr type INTA");
6136 DBG_PRINT(ERR_DBG, ", Intr type MSI");
6139 DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
6142 DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
6143 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
6144 sp->def_mac_addr[0].mac_addr[0],
6145 sp->def_mac_addr[0].mac_addr[1],
6146 sp->def_mac_addr[0].mac_addr[2],
6147 sp->def_mac_addr[0].mac_addr[3],
6148 sp->def_mac_addr[0].mac_addr[4],
6149 sp->def_mac_addr[0].mac_addr[5]);
6152 /* Initialize device name */
6153 strcpy(sp->name, dev->name);
6154 if (sp->device_type & XFRAME_II_DEVICE)
6155 strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
6157 strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
6159 /* Initialize bimodal Interrupts */
6160 sp->config.bimodal = bimodal;
6161 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
6162 sp->config.bimodal = 0;
6163 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
6168 * Make Link state as off at this point, when the Link change
6169 * interrupt comes the state will be automatically changed to
6172 netif_carrier_off(dev);
6183 free_shared_mem(sp);
6184 pci_disable_device(pdev);
6185 if (dev_intr_type != MSI_X)
6186 pci_release_regions(pdev);
6188 release_mem_region(pci_resource_start(pdev, 0),
6189 pci_resource_len(pdev, 0));
6190 release_mem_region(pci_resource_start(pdev, 2),
6191 pci_resource_len(pdev, 2));
6193 pci_set_drvdata(pdev, NULL);
6200 * s2io_rem_nic - Free the PCI device
6201 * @pdev: structure containing the PCI related information of the device.
6202 * Description: This function is called by the PCI subsystem to release a
6203 * PCI device and free up all resources held by the device. This could
6204 * be in response to a Hot plug event or when the driver is to be removed
/*
 * s2io_rem_nic - PCI remove handler: tears down one adapter instance.
 * @pdev: the PCI device being removed (hot-unplug or driver unload).
 *
 * Undoes what the probe routine set up: unregisters the net_device,
 * frees the shared descriptor memory, disables the PCI device and
 * releases the BAR regions before clearing the driver-data pointer.
 *
 * NOTE(review): this chunk is a lossy sample -- the opening brace, the
 * nic_t *sp lookup and the `if (dev == NULL)` guard around the
 * DBG_PRINT below are not visible here; code lines are left
 * byte-identical, comments only.
 */
6208 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
/* Retrieve the net_device stashed via pci_set_drvdata() at probe time */
6210 struct net_device *dev =
6211 (struct net_device *) pci_get_drvdata(pdev);
/* Reached only when driver data was NULL -- nothing to tear down */
6215 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
/* Detach from the network stack first so no new I/O can be started */
6220 unregister_netdev(dev);
/* Free the Tx/Rx descriptor rings and buffers allocated at probe time */
6222 free_shared_mem(sp);
6225 pci_disable_device(pdev);
/*
 * Mirror of the probe-time acquisition (see the error path above in
 * this file): MSI-X probes claim BARs 0 and 2 with request_mem_region()
 * directly, the other interrupt types use pci_request_regions() --
 * release through the matching API.
 */
6226 if (sp->intr_type != MSI_X)
6227 pci_release_regions(pdev);
6229 release_mem_region(pci_resource_start(pdev, 0),
6230 pci_resource_len(pdev, 0));
6231 release_mem_region(pci_resource_start(pdev, 2),
6232 pci_resource_len(pdev, 2));
/* Clear the stale pointer so later callbacks see no device attached */
6234 pci_set_drvdata(pdev, NULL);
6239 * s2io_starter - Entry point for the driver
6240 * Description: This function is the entry point for the driver. It verifies
6241 * the module loadable parameters and initializes PCI configuration space.
/*
 * s2io_starter - module load entry point.
 * Registers s2io_driver with the PCI core; the core then invokes the
 * driver's probe routine for each matching Xframe adapter it finds.
 * Returns the pci_module_init() result (0 on success, negative errno
 * on failure).
 * NOTE(review): the surrounding braces were sampled out of this chunk;
 * code kept byte-identical.
 */
6244 int __init s2io_starter(void)
6246 return pci_module_init(&s2io_driver);
6250 * s2io_closer - Cleanup routine for the driver
6251 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
/*
 * s2io_closer - module unload exit point.
 * Unregisters the PCI driver, which causes the PCI core to call
 * s2io_rem_nic() for every bound device, then logs completion at
 * init-debug verbosity.
 * NOTE(review): the surrounding braces were sampled out of this chunk;
 * code kept byte-identical.
 */
6254 void s2io_closer(void)
6256 pci_unregister_driver(&s2io_driver);
6257 DBG_PRINT(INIT_DBG, "cleanup done\n");
/* Wire the load/unload entry points into the module machinery */
6260 module_init(s2io_starter);
6261 module_exit(s2io_closer);