1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2007 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regaring the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explaination of all the variables.
30 * rx_ring_num : This can be used to program the number of receive rings used
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
36 * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 2(MSI_X). Default value is '2(MSI_X)'
41 * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines maximum number of packets can be
44 * aggregated as a single large packet
45 * napi: This parameter used to enable/disable NAPI (polling Rx)
46 * Possible values '1' for enable and '0' for disable. Default is '1'
47 * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48 * Possible values '1' for enable and '0' for disable. Default is '0'
49 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50 * Possible values '1' for enable , '0' for disable.
51 * Default is '2' - which means disable in promisc mode
52 * and enable in non-promiscuous mode.
53 * multiq: This parameter used to enable/disable MULTIQUEUE support.
54 * Possible values '1' for enable and '0' for disable. Default is '0'
55 ************************************************************************/
57 #include <linux/module.h>
58 #include <linux/types.h>
59 #include <linux/errno.h>
60 #include <linux/ioport.h>
61 #include <linux/pci.h>
62 #include <linux/dma-mapping.h>
63 #include <linux/kernel.h>
64 #include <linux/netdevice.h>
65 #include <linux/etherdevice.h>
66 #include <linux/skbuff.h>
67 #include <linux/init.h>
68 #include <linux/delay.h>
69 #include <linux/stddef.h>
70 #include <linux/ioctl.h>
71 #include <linux/timex.h>
72 #include <linux/ethtool.h>
73 #include <linux/workqueue.h>
74 #include <linux/if_vlan.h>
76 #include <linux/tcp.h>
79 #include <asm/system.h>
80 #include <asm/uaccess.h>
82 #include <asm/div64.h>
87 #include "s2io-regs.h"
89 #define DRV_VERSION "2.0.26.25"
91 /* S2io Driver name & version. */
92 static char s2io_driver_name[] = "Neterion";
93 static char s2io_driver_version[] = DRV_VERSION;
95 static int rxd_size[2] = {32,48};
96 static int rxd_count[2] = {127,85};
98 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
102 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
103 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
109 * Cards with following subsystem_id have a link state indication
110 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
111 * macro below identifies these cards given the subsystem_id.
113 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
114 (dev_type == XFRAME_I_DEVICE) ? \
115 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
116 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
118 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
119 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
121 static inline int is_s2io_card_up(const struct s2io_nic * sp)
123 return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
126 /* Ethtool related variables and Macros. */
127 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
128 "Register test\t(offline)",
129 "Eeprom test\t(offline)",
130 "Link test\t(online)",
131 "RLDRAM test\t(offline)",
132 "BIST Test\t(offline)"
135 static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
137 {"tmac_data_octets"},
141 {"tmac_pause_ctrl_frms"},
145 {"tmac_any_err_frms"},
146 {"tmac_ttl_less_fb_octets"},
147 {"tmac_vld_ip_octets"},
155 {"rmac_data_octets"},
156 {"rmac_fcs_err_frms"},
158 {"rmac_vld_mcst_frms"},
159 {"rmac_vld_bcst_frms"},
160 {"rmac_in_rng_len_err_frms"},
161 {"rmac_out_rng_len_err_frms"},
163 {"rmac_pause_ctrl_frms"},
164 {"rmac_unsup_ctrl_frms"},
166 {"rmac_accepted_ucst_frms"},
167 {"rmac_accepted_nucst_frms"},
168 {"rmac_discarded_frms"},
169 {"rmac_drop_events"},
170 {"rmac_ttl_less_fb_octets"},
172 {"rmac_usized_frms"},
173 {"rmac_osized_frms"},
175 {"rmac_jabber_frms"},
176 {"rmac_ttl_64_frms"},
177 {"rmac_ttl_65_127_frms"},
178 {"rmac_ttl_128_255_frms"},
179 {"rmac_ttl_256_511_frms"},
180 {"rmac_ttl_512_1023_frms"},
181 {"rmac_ttl_1024_1518_frms"},
189 {"rmac_err_drp_udp"},
190 {"rmac_xgmii_err_sym"},
208 {"rmac_xgmii_data_err_cnt"},
209 {"rmac_xgmii_ctrl_err_cnt"},
210 {"rmac_accepted_ip"},
214 {"new_rd_req_rtry_cnt"},
216 {"wr_rtry_rd_ack_cnt"},
219 {"new_wr_req_rtry_cnt"},
222 {"rd_rtry_wr_ack_cnt"},
232 static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
233 {"rmac_ttl_1519_4095_frms"},
234 {"rmac_ttl_4096_8191_frms"},
235 {"rmac_ttl_8192_max_frms"},
236 {"rmac_ttl_gt_max_frms"},
237 {"rmac_osized_alt_frms"},
238 {"rmac_jabber_alt_frms"},
239 {"rmac_gt_max_alt_frms"},
241 {"rmac_len_discard"},
242 {"rmac_fcs_discard"},
245 {"rmac_red_discard"},
246 {"rmac_rts_discard"},
247 {"rmac_ingm_full_discard"},
251 static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
252 {"\n DRIVER STATISTICS"},
253 {"single_bit_ecc_errs"},
254 {"double_bit_ecc_errs"},
267 {"alarm_transceiver_temp_high"},
268 {"alarm_transceiver_temp_low"},
269 {"alarm_laser_bias_current_high"},
270 {"alarm_laser_bias_current_low"},
271 {"alarm_laser_output_power_high"},
272 {"alarm_laser_output_power_low"},
273 {"warn_transceiver_temp_high"},
274 {"warn_transceiver_temp_low"},
275 {"warn_laser_bias_current_high"},
276 {"warn_laser_bias_current_low"},
277 {"warn_laser_output_power_high"},
278 {"warn_laser_output_power_low"},
279 {"lro_aggregated_pkts"},
280 {"lro_flush_both_count"},
281 {"lro_out_of_sequence_pkts"},
282 {"lro_flush_due_to_max_pkts"},
283 {"lro_avg_aggr_pkts"},
284 {"mem_alloc_fail_cnt"},
285 {"pci_map_fail_cnt"},
286 {"watchdog_timer_cnt"},
293 {"tx_tcode_buf_abort_cnt"},
294 {"tx_tcode_desc_abort_cnt"},
295 {"tx_tcode_parity_err_cnt"},
296 {"tx_tcode_link_loss_cnt"},
297 {"tx_tcode_list_proc_err_cnt"},
298 {"rx_tcode_parity_err_cnt"},
299 {"rx_tcode_abort_cnt"},
300 {"rx_tcode_parity_abort_cnt"},
301 {"rx_tcode_rda_fail_cnt"},
302 {"rx_tcode_unkn_prot_cnt"},
303 {"rx_tcode_fcs_err_cnt"},
304 {"rx_tcode_buf_size_err_cnt"},
305 {"rx_tcode_rxd_corrupt_cnt"},
306 {"rx_tcode_unkn_err_cnt"},
314 {"mac_tmac_err_cnt"},
315 {"mac_rmac_err_cnt"},
316 {"xgxs_txgxs_err_cnt"},
317 {"xgxs_rxgxs_err_cnt"},
319 {"prc_pcix_err_cnt"},
326 #define S2IO_XENA_STAT_LEN ARRAY_SIZE(ethtool_xena_stats_keys)
327 #define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys)
328 #define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys)
330 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
331 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )
333 #define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
334 #define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )
336 #define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings)
337 #define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
339 #define S2IO_TIMER_CONF(timer, handle, arg, exp) \
340 init_timer(&timer); \
341 timer.function = handle; \
342 timer.data = (unsigned long) arg; \
343 mod_timer(&timer, (jiffies + exp)) \
345 /* copy mac addr to def_mac_addr array */
346 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
348 sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
349 sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
350 sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
351 sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
352 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
353 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
356 static void s2io_vlan_rx_register(struct net_device *dev,
357 struct vlan_group *grp)
360 struct s2io_nic *nic = dev->priv;
361 unsigned long flags[MAX_TX_FIFOS];
362 struct mac_info *mac_control = &nic->mac_control;
363 struct config_param *config = &nic->config;
365 for (i = 0; i < config->tx_fifo_num; i++)
366 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
369 for (i = config->tx_fifo_num - 1; i >= 0; i--)
370 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
374 /* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
375 static int vlan_strip_flag;
377 /* Unregister the vlan */
378 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
381 struct s2io_nic *nic = dev->priv;
382 unsigned long flags[MAX_TX_FIFOS];
383 struct mac_info *mac_control = &nic->mac_control;
384 struct config_param *config = &nic->config;
386 for (i = 0; i < config->tx_fifo_num; i++)
387 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
390 vlan_group_set_device(nic->vlgrp, vid, NULL);
392 for (i = config->tx_fifo_num - 1; i >= 0; i--)
393 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
398 * Constants to be programmed into the Xena's registers, to configure
403 static const u64 herc_act_dtx_cfg[] = {
405 0x8000051536750000ULL, 0x80000515367500E0ULL,
407 0x8000051536750004ULL, 0x80000515367500E4ULL,
409 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
411 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
413 0x801205150D440000ULL, 0x801205150D4400E0ULL,
415 0x801205150D440004ULL, 0x801205150D4400E4ULL,
417 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
419 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
424 static const u64 xena_dtx_cfg[] = {
426 0x8000051500000000ULL, 0x80000515000000E0ULL,
428 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
430 0x8001051500000000ULL, 0x80010515000000E0ULL,
432 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
434 0x8002051500000000ULL, 0x80020515000000E0ULL,
436 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
441 * Constants for Fixing the MacAddress problem seen mostly on
444 static const u64 fix_mac[] = {
445 0x0060000000000000ULL, 0x0060600000000000ULL,
446 0x0040600000000000ULL, 0x0000600000000000ULL,
447 0x0020600000000000ULL, 0x0060600000000000ULL,
448 0x0020600000000000ULL, 0x0060600000000000ULL,
449 0x0020600000000000ULL, 0x0060600000000000ULL,
450 0x0020600000000000ULL, 0x0060600000000000ULL,
451 0x0020600000000000ULL, 0x0060600000000000ULL,
452 0x0020600000000000ULL, 0x0060600000000000ULL,
453 0x0020600000000000ULL, 0x0060600000000000ULL,
454 0x0020600000000000ULL, 0x0060600000000000ULL,
455 0x0020600000000000ULL, 0x0060600000000000ULL,
456 0x0020600000000000ULL, 0x0060600000000000ULL,
457 0x0020600000000000ULL, 0x0000600000000000ULL,
458 0x0040600000000000ULL, 0x0060600000000000ULL,
462 MODULE_LICENSE("GPL");
463 MODULE_VERSION(DRV_VERSION);
466 /* Module Loadable parameters. */
467 S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
468 S2IO_PARM_INT(rx_ring_num, 1);
469 S2IO_PARM_INT(multiq, 0);
470 S2IO_PARM_INT(rx_ring_mode, 1);
471 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
472 S2IO_PARM_INT(rmac_pause_time, 0x100);
473 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
474 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
475 S2IO_PARM_INT(shared_splits, 0);
476 S2IO_PARM_INT(tmac_util_period, 5);
477 S2IO_PARM_INT(rmac_util_period, 5);
478 S2IO_PARM_INT(l3l4hdr_size, 128);
479 /* 0 is no steering, 1 is Priority steering, 2 is Default steering */
480 S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
481 /* Frequency of Rx desc syncs expressed as power of 2 */
482 S2IO_PARM_INT(rxsync_frequency, 3);
483 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
484 S2IO_PARM_INT(intr_type, 2);
485 /* Large receive offload feature */
486 static unsigned int lro_enable;
487 module_param_named(lro, lro_enable, uint, 0);
489 /* Max pkts to be aggregated by LRO at one time. If not specified,
490 * aggregation happens until we hit max IP pkt size(64K)
492 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
493 S2IO_PARM_INT(indicate_max_pkts, 0);
495 S2IO_PARM_INT(napi, 1);
496 S2IO_PARM_INT(ufo, 0);
497 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
499 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
500 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
501 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
502 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
503 static unsigned int rts_frm_len[MAX_RX_RINGS] =
504 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
506 module_param_array(tx_fifo_len, uint, NULL, 0);
507 module_param_array(rx_ring_sz, uint, NULL, 0);
508 module_param_array(rts_frm_len, uint, NULL, 0);
512 * This table lists all the devices that this driver supports.
514 static struct pci_device_id s2io_tbl[] __devinitdata = {
515 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
516 PCI_ANY_ID, PCI_ANY_ID},
517 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
518 PCI_ANY_ID, PCI_ANY_ID},
519 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
520 PCI_ANY_ID, PCI_ANY_ID},
521 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
522 PCI_ANY_ID, PCI_ANY_ID},
526 MODULE_DEVICE_TABLE(pci, s2io_tbl);
528 static struct pci_error_handlers s2io_err_handler = {
529 .error_detected = s2io_io_error_detected,
530 .slot_reset = s2io_io_slot_reset,
531 .resume = s2io_io_resume,
534 static struct pci_driver s2io_driver = {
536 .id_table = s2io_tbl,
537 .probe = s2io_init_nic,
538 .remove = __devexit_p(s2io_rem_nic),
539 .err_handler = &s2io_err_handler,
542 /* A simplifier macro used both by init and free shared_mem Fns(). */
543 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
545 /* netqueue manipulation helper functions */
546 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
549 if (sp->config.multiq) {
550 for (i = 0; i < sp->config.tx_fifo_num; i++)
551 netif_stop_subqueue(sp->dev, i);
553 for (i = 0; i < sp->config.tx_fifo_num; i++)
554 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
555 netif_stop_queue(sp->dev);
559 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
561 if (sp->config.multiq)
562 netif_stop_subqueue(sp->dev, fifo_no);
564 sp->mac_control.fifos[fifo_no].queue_state =
566 netif_stop_queue(sp->dev);
570 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
573 if (sp->config.multiq) {
574 for (i = 0; i < sp->config.tx_fifo_num; i++)
575 netif_start_subqueue(sp->dev, i);
577 for (i = 0; i < sp->config.tx_fifo_num; i++)
578 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
579 netif_start_queue(sp->dev);
583 static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
585 if (sp->config.multiq)
586 netif_start_subqueue(sp->dev, fifo_no);
588 sp->mac_control.fifos[fifo_no].queue_state =
590 netif_start_queue(sp->dev);
594 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
597 if (sp->config.multiq) {
598 for (i = 0; i < sp->config.tx_fifo_num; i++)
599 netif_wake_subqueue(sp->dev, i);
601 for (i = 0; i < sp->config.tx_fifo_num; i++)
602 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
603 netif_wake_queue(sp->dev);
607 static inline void s2io_wake_tx_queue(
608 struct fifo_info *fifo, int cnt, u8 multiq)
612 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
613 netif_wake_subqueue(fifo->dev, fifo->fifo_no);
614 } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
615 if (netif_queue_stopped(fifo->dev)) {
616 fifo->queue_state = FIFO_QUEUE_START;
617 netif_wake_queue(fifo->dev);
623 * init_shared_mem - Allocation and Initialization of Memory
624 * @nic: Device private variable.
625 * Description: The function allocates all the memory areas shared
626 * between the NIC and the driver. This includes Tx descriptors,
627 * Rx descriptors and the statistics block.
630 static int init_shared_mem(struct s2io_nic *nic)
633 void *tmp_v_addr, *tmp_v_addr_next;
634 dma_addr_t tmp_p_addr, tmp_p_addr_next;
635 struct RxD_block *pre_rxd_blk = NULL;
637 int lst_size, lst_per_page;
638 struct net_device *dev = nic->dev;
642 struct mac_info *mac_control;
643 struct config_param *config;
644 unsigned long long mem_allocated = 0;
646 mac_control = &nic->mac_control;
647 config = &nic->config;
650 /* Allocation and initialization of TXDLs in FIOFs */
652 for (i = 0; i < config->tx_fifo_num; i++) {
653 size += config->tx_cfg[i].fifo_len;
655 if (size > MAX_AVAILABLE_TXDS) {
656 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
657 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
662 for (i = 0; i < config->tx_fifo_num; i++) {
663 size = config->tx_cfg[i].fifo_len;
665 * Legal values are from 2 to 8192
668 DBG_PRINT(ERR_DBG, "s2io: Invalid fifo len (%d)", size);
669 DBG_PRINT(ERR_DBG, "for fifo %d\n", i);
670 DBG_PRINT(ERR_DBG, "s2io: Legal values for fifo len"
676 lst_size = (sizeof(struct TxD) * config->max_txds);
677 lst_per_page = PAGE_SIZE / lst_size;
679 for (i = 0; i < config->tx_fifo_num; i++) {
680 int fifo_len = config->tx_cfg[i].fifo_len;
681 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
682 mac_control->fifos[i].list_info = kzalloc(list_holder_size,
684 if (!mac_control->fifos[i].list_info) {
686 "Malloc failed for list_info\n");
689 mem_allocated += list_holder_size;
691 for (i = 0; i < config->tx_fifo_num; i++) {
692 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
694 mac_control->fifos[i].tx_curr_put_info.offset = 0;
695 mac_control->fifos[i].tx_curr_put_info.fifo_len =
696 config->tx_cfg[i].fifo_len - 1;
697 mac_control->fifos[i].tx_curr_get_info.offset = 0;
698 mac_control->fifos[i].tx_curr_get_info.fifo_len =
699 config->tx_cfg[i].fifo_len - 1;
700 mac_control->fifos[i].fifo_no = i;
701 mac_control->fifos[i].nic = nic;
702 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
703 mac_control->fifos[i].dev = dev;
705 for (j = 0; j < page_num; j++) {
709 tmp_v = pci_alloc_consistent(nic->pdev,
713 "pci_alloc_consistent ");
714 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
717 /* If we got a zero DMA address(can happen on
718 * certain platforms like PPC), reallocate.
719 * Store virtual address of page we don't want,
723 mac_control->zerodma_virt_addr = tmp_v;
725 "%s: Zero DMA address for TxDL. ", dev->name);
727 "Virtual address %p\n", tmp_v);
728 tmp_v = pci_alloc_consistent(nic->pdev,
732 "pci_alloc_consistent ");
733 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
736 mem_allocated += PAGE_SIZE;
738 while (k < lst_per_page) {
739 int l = (j * lst_per_page) + k;
740 if (l == config->tx_cfg[i].fifo_len)
742 mac_control->fifos[i].list_info[l].list_virt_addr =
743 tmp_v + (k * lst_size);
744 mac_control->fifos[i].list_info[l].list_phy_addr =
745 tmp_p + (k * lst_size);
751 for (i = 0; i < config->tx_fifo_num; i++) {
752 size = config->tx_cfg[i].fifo_len;
753 mac_control->fifos[i].ufo_in_band_v
754 = kcalloc(size, sizeof(u64), GFP_KERNEL);
755 if (!mac_control->fifos[i].ufo_in_band_v)
757 mem_allocated += (size * sizeof(u64));
760 /* Allocation and initialization of RXDs in Rings */
762 for (i = 0; i < config->rx_ring_num; i++) {
763 if (config->rx_cfg[i].num_rxd %
764 (rxd_count[nic->rxd_mode] + 1)) {
765 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
766 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
768 DBG_PRINT(ERR_DBG, "RxDs per Block");
771 size += config->rx_cfg[i].num_rxd;
772 mac_control->rings[i].block_count =
773 config->rx_cfg[i].num_rxd /
774 (rxd_count[nic->rxd_mode] + 1 );
775 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
776 mac_control->rings[i].block_count;
778 if (nic->rxd_mode == RXD_MODE_1)
779 size = (size * (sizeof(struct RxD1)));
781 size = (size * (sizeof(struct RxD3)));
783 for (i = 0; i < config->rx_ring_num; i++) {
784 mac_control->rings[i].rx_curr_get_info.block_index = 0;
785 mac_control->rings[i].rx_curr_get_info.offset = 0;
786 mac_control->rings[i].rx_curr_get_info.ring_len =
787 config->rx_cfg[i].num_rxd - 1;
788 mac_control->rings[i].rx_curr_put_info.block_index = 0;
789 mac_control->rings[i].rx_curr_put_info.offset = 0;
790 mac_control->rings[i].rx_curr_put_info.ring_len =
791 config->rx_cfg[i].num_rxd - 1;
792 mac_control->rings[i].nic = nic;
793 mac_control->rings[i].ring_no = i;
794 mac_control->rings[i].lro = lro_enable;
796 blk_cnt = config->rx_cfg[i].num_rxd /
797 (rxd_count[nic->rxd_mode] + 1);
798 /* Allocating all the Rx blocks */
799 for (j = 0; j < blk_cnt; j++) {
800 struct rx_block_info *rx_blocks;
803 rx_blocks = &mac_control->rings[i].rx_blocks[j];
804 size = SIZE_OF_BLOCK; //size is always page size
805 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
807 if (tmp_v_addr == NULL) {
809 * In case of failure, free_shared_mem()
810 * is called, which should free any
811 * memory that was alloced till the
814 rx_blocks->block_virt_addr = tmp_v_addr;
817 mem_allocated += size;
818 memset(tmp_v_addr, 0, size);
819 rx_blocks->block_virt_addr = tmp_v_addr;
820 rx_blocks->block_dma_addr = tmp_p_addr;
821 rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
822 rxd_count[nic->rxd_mode],
824 if (!rx_blocks->rxds)
827 (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
828 for (l=0; l<rxd_count[nic->rxd_mode];l++) {
829 rx_blocks->rxds[l].virt_addr =
830 rx_blocks->block_virt_addr +
831 (rxd_size[nic->rxd_mode] * l);
832 rx_blocks->rxds[l].dma_addr =
833 rx_blocks->block_dma_addr +
834 (rxd_size[nic->rxd_mode] * l);
837 /* Interlinking all Rx Blocks */
838 for (j = 0; j < blk_cnt; j++) {
840 mac_control->rings[i].rx_blocks[j].block_virt_addr;
842 mac_control->rings[i].rx_blocks[(j + 1) %
843 blk_cnt].block_virt_addr;
845 mac_control->rings[i].rx_blocks[j].block_dma_addr;
847 mac_control->rings[i].rx_blocks[(j + 1) %
848 blk_cnt].block_dma_addr;
850 pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
851 pre_rxd_blk->reserved_2_pNext_RxD_block =
852 (unsigned long) tmp_v_addr_next;
853 pre_rxd_blk->pNext_RxD_Blk_physical =
854 (u64) tmp_p_addr_next;
857 if (nic->rxd_mode == RXD_MODE_3B) {
859 * Allocation of Storages for buffer addresses in 2BUFF mode
860 * and the buffers as well.
862 for (i = 0; i < config->rx_ring_num; i++) {
863 blk_cnt = config->rx_cfg[i].num_rxd /
864 (rxd_count[nic->rxd_mode]+ 1);
865 mac_control->rings[i].ba =
866 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
868 if (!mac_control->rings[i].ba)
870 mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
871 for (j = 0; j < blk_cnt; j++) {
873 mac_control->rings[i].ba[j] =
874 kmalloc((sizeof(struct buffAdd) *
875 (rxd_count[nic->rxd_mode] + 1)),
877 if (!mac_control->rings[i].ba[j])
879 mem_allocated += (sizeof(struct buffAdd) * \
880 (rxd_count[nic->rxd_mode] + 1));
881 while (k != rxd_count[nic->rxd_mode]) {
882 ba = &mac_control->rings[i].ba[j][k];
884 ba->ba_0_org = (void *) kmalloc
885 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
889 (BUF0_LEN + ALIGN_SIZE);
890 tmp = (unsigned long)ba->ba_0_org;
892 tmp &= ~((unsigned long) ALIGN_SIZE);
893 ba->ba_0 = (void *) tmp;
895 ba->ba_1_org = (void *) kmalloc
896 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
900 += (BUF1_LEN + ALIGN_SIZE);
901 tmp = (unsigned long) ba->ba_1_org;
903 tmp &= ~((unsigned long) ALIGN_SIZE);
904 ba->ba_1 = (void *) tmp;
911 /* Allocation and initialization of Statistics block */
912 size = sizeof(struct stat_block);
913 mac_control->stats_mem = pci_alloc_consistent
914 (nic->pdev, size, &mac_control->stats_mem_phy);
916 if (!mac_control->stats_mem) {
918 * In case of failure, free_shared_mem() is called, which
919 * should free any memory that was alloced till the
924 mem_allocated += size;
925 mac_control->stats_mem_sz = size;
927 tmp_v_addr = mac_control->stats_mem;
928 mac_control->stats_info = (struct stat_block *) tmp_v_addr;
929 memset(tmp_v_addr, 0, size);
930 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
931 (unsigned long long) tmp_p_addr);
932 mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
937 * free_shared_mem - Free the allocated Memory
938 * @nic: Device private variable.
939 * Description: This function is to free all memory locations allocated by
940 * the init_shared_mem() function and return it to the kernel.
943 static void free_shared_mem(struct s2io_nic *nic)
945 int i, j, blk_cnt, size;
947 dma_addr_t tmp_p_addr;
948 struct mac_info *mac_control;
949 struct config_param *config;
950 int lst_size, lst_per_page;
951 struct net_device *dev;
959 mac_control = &nic->mac_control;
960 config = &nic->config;
962 lst_size = (sizeof(struct TxD) * config->max_txds);
963 lst_per_page = PAGE_SIZE / lst_size;
965 for (i = 0; i < config->tx_fifo_num; i++) {
966 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
968 for (j = 0; j < page_num; j++) {
969 int mem_blks = (j * lst_per_page);
970 if (!mac_control->fifos[i].list_info)
972 if (!mac_control->fifos[i].list_info[mem_blks].
975 pci_free_consistent(nic->pdev, PAGE_SIZE,
976 mac_control->fifos[i].
979 mac_control->fifos[i].
982 nic->mac_control.stats_info->sw_stat.mem_freed
985 /* If we got a zero DMA address during allocation,
988 if (mac_control->zerodma_virt_addr) {
989 pci_free_consistent(nic->pdev, PAGE_SIZE,
990 mac_control->zerodma_virt_addr,
993 "%s: Freeing TxDL with zero DMA addr. ",
995 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
996 mac_control->zerodma_virt_addr);
997 nic->mac_control.stats_info->sw_stat.mem_freed
1000 kfree(mac_control->fifos[i].list_info);
1001 nic->mac_control.stats_info->sw_stat.mem_freed +=
1002 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
1005 size = SIZE_OF_BLOCK;
1006 for (i = 0; i < config->rx_ring_num; i++) {
1007 blk_cnt = mac_control->rings[i].block_count;
1008 for (j = 0; j < blk_cnt; j++) {
1009 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
1011 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
1013 if (tmp_v_addr == NULL)
1015 pci_free_consistent(nic->pdev, size,
1016 tmp_v_addr, tmp_p_addr);
1017 nic->mac_control.stats_info->sw_stat.mem_freed += size;
1018 kfree(mac_control->rings[i].rx_blocks[j].rxds);
1019 nic->mac_control.stats_info->sw_stat.mem_freed +=
1020 ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
1024 if (nic->rxd_mode == RXD_MODE_3B) {
1025 /* Freeing buffer storage addresses in 2BUFF mode. */
1026 for (i = 0; i < config->rx_ring_num; i++) {
1027 blk_cnt = config->rx_cfg[i].num_rxd /
1028 (rxd_count[nic->rxd_mode] + 1);
1029 for (j = 0; j < blk_cnt; j++) {
1031 if (!mac_control->rings[i].ba[j])
1033 while (k != rxd_count[nic->rxd_mode]) {
1034 struct buffAdd *ba =
1035 &mac_control->rings[i].ba[j][k];
1036 kfree(ba->ba_0_org);
1037 nic->mac_control.stats_info->sw_stat.\
1038 mem_freed += (BUF0_LEN + ALIGN_SIZE);
1039 kfree(ba->ba_1_org);
1040 nic->mac_control.stats_info->sw_stat.\
1041 mem_freed += (BUF1_LEN + ALIGN_SIZE);
1044 kfree(mac_control->rings[i].ba[j]);
1045 nic->mac_control.stats_info->sw_stat.mem_freed +=
1046 (sizeof(struct buffAdd) *
1047 (rxd_count[nic->rxd_mode] + 1));
1049 kfree(mac_control->rings[i].ba);
1050 nic->mac_control.stats_info->sw_stat.mem_freed +=
1051 (sizeof(struct buffAdd *) * blk_cnt);
1055 for (i = 0; i < nic->config.tx_fifo_num; i++) {
1056 if (mac_control->fifos[i].ufo_in_band_v) {
1057 nic->mac_control.stats_info->sw_stat.mem_freed
1058 += (config->tx_cfg[i].fifo_len * sizeof(u64));
1059 kfree(mac_control->fifos[i].ufo_in_band_v);
1063 if (mac_control->stats_mem) {
1064 nic->mac_control.stats_info->sw_stat.mem_freed +=
1065 mac_control->stats_mem_sz;
1066 pci_free_consistent(nic->pdev,
1067 mac_control->stats_mem_sz,
1068 mac_control->stats_mem,
1069 mac_control->stats_mem_phy);
1074 * s2io_verify_pci_mode -
1077 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1079 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1080 register u64 val64 = 0;
1083 val64 = readq(&bar0->pci_mode);
1084 mode = (u8)GET_PCI_MODE(val64);
1086 if ( val64 & PCI_MODE_UNKNOWN_MODE)
1087 return -1; /* Unknown PCI mode */
1091 #define NEC_VENID 0x1033
1092 #define NEC_DEVID 0x0125
1093 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1095 struct pci_dev *tdev = NULL;
1096 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1097 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1098 if (tdev->bus == s2io_pdev->bus->parent) {
1107 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1109 * s2io_print_pci_mode -
1111 static int s2io_print_pci_mode(struct s2io_nic *nic)
1113 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1114 register u64 val64 = 0;
1116 struct config_param *config = &nic->config;
1118 val64 = readq(&bar0->pci_mode);
1119 mode = (u8)GET_PCI_MODE(val64);
1121 if ( val64 & PCI_MODE_UNKNOWN_MODE)
1122 return -1; /* Unknown PCI mode */
1124 config->bus_speed = bus_speed[mode];
1126 if (s2io_on_nec_bridge(nic->pdev)) {
1127 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1132 if (val64 & PCI_MODE_32_BITS) {
1133 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1135 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1139 case PCI_MODE_PCI_33:
1140 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
1142 case PCI_MODE_PCI_66:
1143 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
1145 case PCI_MODE_PCIX_M1_66:
1146 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
1148 case PCI_MODE_PCIX_M1_100:
1149 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
1151 case PCI_MODE_PCIX_M1_133:
1152 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1154 case PCI_MODE_PCIX_M2_66:
1155 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1157 case PCI_MODE_PCIX_M2_100:
1158 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1160 case PCI_MODE_PCIX_M2_133:
1161 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1164 return -1; /* Unsupported bus speed */
1171 * init_tti - Initialization transmit traffic interrupt scheme
1172 * @nic: device private variable
1173 * @link: link status (UP/DOWN) used to enable/disable continuous
1174 * transmit interrupts
1175 * Description: The function configures transmit traffic interrupts
1176 * Return Value: SUCCESS on success and
1180 static int init_tti(struct s2io_nic *nic, int link)
1182 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1183 register u64 val64 = 0;
1185 struct config_param *config;
1187 config = &nic->config;
1189 for (i = 0; i < config->tx_fifo_num; i++) {
1191 * TTI Initialization. Default Tx timer gets us about
1192 * 250 interrupts per sec. Continuous interrupts are enabled
1195 if (nic->device_type == XFRAME_II_DEVICE) {
1196 int count = (nic->config.bus_speed * 125)/2;
1197 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1199 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1201 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1202 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1203 TTI_DATA1_MEM_TX_URNG_C(0x30) |
1204 TTI_DATA1_MEM_TX_TIMER_AC_EN;
1206 if (use_continuous_tx_intrs && (link == LINK_UP))
1207 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1208 writeq(val64, &bar0->tti_data1_mem);
1210 if (nic->config.intr_type == MSI_X) {
1211 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1212 TTI_DATA2_MEM_TX_UFC_B(0x100) |
1213 TTI_DATA2_MEM_TX_UFC_C(0x200) |
1214 TTI_DATA2_MEM_TX_UFC_D(0x300);
1216 if ((nic->config.tx_steering_type ==
1217 TX_DEFAULT_STEERING) &&
1218 (config->tx_fifo_num > 1) &&
1219 (i >= nic->udp_fifo_idx) &&
1220 (i < (nic->udp_fifo_idx +
1221 nic->total_udp_fifos)))
1222 val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1223 TTI_DATA2_MEM_TX_UFC_B(0x80) |
1224 TTI_DATA2_MEM_TX_UFC_C(0x100) |
1225 TTI_DATA2_MEM_TX_UFC_D(0x120);
1227 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1228 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1229 TTI_DATA2_MEM_TX_UFC_C(0x40) |
1230 TTI_DATA2_MEM_TX_UFC_D(0x80);
1233 writeq(val64, &bar0->tti_data2_mem);
1235 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD |
1236 TTI_CMD_MEM_OFFSET(i);
1237 writeq(val64, &bar0->tti_command_mem);
1239 if (wait_for_cmd_complete(&bar0->tti_command_mem,
1240 TTI_CMD_MEM_STROBE_NEW_CMD, S2IO_BIT_RESET) != SUCCESS)
1248 * init_nic - Initialization of hardware
1249 * @nic: device private variable
1250 * Description: The function sequentially configures every block
1251 * of the H/W from their reset values.
1252 * Return Value: SUCCESS on success and
1253 * '-1' on failure (endian settings incorrect).
1256 static int init_nic(struct s2io_nic *nic)
1258 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1259 struct net_device *dev = nic->dev;
1260 register u64 val64 = 0;
1264 struct mac_info *mac_control;
1265 struct config_param *config;
1267 unsigned long long mem_share;
1270 mac_control = &nic->mac_control;
1271 config = &nic->config;
1273 /* to set the swapper controle on the card */
1274 if(s2io_set_swapper(nic)) {
1275 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1280 * Herc requires EOI to be removed from reset before XGXS, so..
1282 if (nic->device_type & XFRAME_II_DEVICE) {
1283 val64 = 0xA500000000ULL;
1284 writeq(val64, &bar0->sw_reset);
1286 val64 = readq(&bar0->sw_reset);
1289 /* Remove XGXS from reset state */
1291 writeq(val64, &bar0->sw_reset);
1293 val64 = readq(&bar0->sw_reset);
1295 /* Ensure that it's safe to access registers by checking
1296 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1298 if (nic->device_type == XFRAME_II_DEVICE) {
1299 for (i = 0; i < 50; i++) {
1300 val64 = readq(&bar0->adapter_status);
1301 if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1309 /* Enable Receiving broadcasts */
1310 add = &bar0->mac_cfg;
1311 val64 = readq(&bar0->mac_cfg);
1312 val64 |= MAC_RMAC_BCAST_ENABLE;
1313 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1314 writel((u32) val64, add);
1315 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1316 writel((u32) (val64 >> 32), (add + 4));
1318 /* Read registers in all blocks */
1319 val64 = readq(&bar0->mac_int_mask);
1320 val64 = readq(&bar0->mc_int_mask);
1321 val64 = readq(&bar0->xgxs_int_mask);
1325 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1327 if (nic->device_type & XFRAME_II_DEVICE) {
1328 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1329 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1330 &bar0->dtx_control, UF);
1332 msleep(1); /* Necessary!! */
1336 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1337 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1338 &bar0->dtx_control, UF);
1339 val64 = readq(&bar0->dtx_control);
1344 /* Tx DMA Initialization */
1346 writeq(val64, &bar0->tx_fifo_partition_0);
1347 writeq(val64, &bar0->tx_fifo_partition_1);
1348 writeq(val64, &bar0->tx_fifo_partition_2);
1349 writeq(val64, &bar0->tx_fifo_partition_3);
1352 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1354 vBIT(config->tx_cfg[i].fifo_len - 1, ((j * 32) + 19),
1355 13) | vBIT(config->tx_cfg[i].fifo_priority,
1358 if (i == (config->tx_fifo_num - 1)) {
1365 writeq(val64, &bar0->tx_fifo_partition_0);
1370 writeq(val64, &bar0->tx_fifo_partition_1);
1375 writeq(val64, &bar0->tx_fifo_partition_2);
1380 writeq(val64, &bar0->tx_fifo_partition_3);
1391 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1392 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1394 if ((nic->device_type == XFRAME_I_DEVICE) &&
1395 (nic->pdev->revision < 4))
1396 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1398 val64 = readq(&bar0->tx_fifo_partition_0);
1399 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1400 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1403 * Initialization of Tx_PA_CONFIG register to ignore packet
1404 * integrity checking.
1406 val64 = readq(&bar0->tx_pa_cfg);
1407 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1408 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1409 writeq(val64, &bar0->tx_pa_cfg);
1411 /* Rx DMA intialization. */
1413 for (i = 0; i < config->rx_ring_num; i++) {
1415 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1418 writeq(val64, &bar0->rx_queue_priority);
1421 * Allocating equal share of memory to all the
1425 if (nic->device_type & XFRAME_II_DEVICE)
1430 for (i = 0; i < config->rx_ring_num; i++) {
1433 mem_share = (mem_size / config->rx_ring_num +
1434 mem_size % config->rx_ring_num);
1435 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1438 mem_share = (mem_size / config->rx_ring_num);
1439 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1442 mem_share = (mem_size / config->rx_ring_num);
1443 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1446 mem_share = (mem_size / config->rx_ring_num);
1447 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1450 mem_share = (mem_size / config->rx_ring_num);
1451 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1454 mem_share = (mem_size / config->rx_ring_num);
1455 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1458 mem_share = (mem_size / config->rx_ring_num);
1459 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1462 mem_share = (mem_size / config->rx_ring_num);
1463 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1467 writeq(val64, &bar0->rx_queue_cfg);
1470 * Filling Tx round robin registers
1471 * as per the number of FIFOs for equal scheduling priority
1473 switch (config->tx_fifo_num) {
1476 writeq(val64, &bar0->tx_w_round_robin_0);
1477 writeq(val64, &bar0->tx_w_round_robin_1);
1478 writeq(val64, &bar0->tx_w_round_robin_2);
1479 writeq(val64, &bar0->tx_w_round_robin_3);
1480 writeq(val64, &bar0->tx_w_round_robin_4);
1483 val64 = 0x0001000100010001ULL;
1484 writeq(val64, &bar0->tx_w_round_robin_0);
1485 writeq(val64, &bar0->tx_w_round_robin_1);
1486 writeq(val64, &bar0->tx_w_round_robin_2);
1487 writeq(val64, &bar0->tx_w_round_robin_3);
1488 val64 = 0x0001000100000000ULL;
1489 writeq(val64, &bar0->tx_w_round_robin_4);
1492 val64 = 0x0001020001020001ULL;
1493 writeq(val64, &bar0->tx_w_round_robin_0);
1494 val64 = 0x0200010200010200ULL;
1495 writeq(val64, &bar0->tx_w_round_robin_1);
1496 val64 = 0x0102000102000102ULL;
1497 writeq(val64, &bar0->tx_w_round_robin_2);
1498 val64 = 0x0001020001020001ULL;
1499 writeq(val64, &bar0->tx_w_round_robin_3);
1500 val64 = 0x0200010200000000ULL;
1501 writeq(val64, &bar0->tx_w_round_robin_4);
1504 val64 = 0x0001020300010203ULL;
1505 writeq(val64, &bar0->tx_w_round_robin_0);
1506 writeq(val64, &bar0->tx_w_round_robin_1);
1507 writeq(val64, &bar0->tx_w_round_robin_2);
1508 writeq(val64, &bar0->tx_w_round_robin_3);
1509 val64 = 0x0001020300000000ULL;
1510 writeq(val64, &bar0->tx_w_round_robin_4);
1513 val64 = 0x0001020304000102ULL;
1514 writeq(val64, &bar0->tx_w_round_robin_0);
1515 val64 = 0x0304000102030400ULL;
1516 writeq(val64, &bar0->tx_w_round_robin_1);
1517 val64 = 0x0102030400010203ULL;
1518 writeq(val64, &bar0->tx_w_round_robin_2);
1519 val64 = 0x0400010203040001ULL;
1520 writeq(val64, &bar0->tx_w_round_robin_3);
1521 val64 = 0x0203040000000000ULL;
1522 writeq(val64, &bar0->tx_w_round_robin_4);
1525 val64 = 0x0001020304050001ULL;
1526 writeq(val64, &bar0->tx_w_round_robin_0);
1527 val64 = 0x0203040500010203ULL;
1528 writeq(val64, &bar0->tx_w_round_robin_1);
1529 val64 = 0x0405000102030405ULL;
1530 writeq(val64, &bar0->tx_w_round_robin_2);
1531 val64 = 0x0001020304050001ULL;
1532 writeq(val64, &bar0->tx_w_round_robin_3);
1533 val64 = 0x0203040500000000ULL;
1534 writeq(val64, &bar0->tx_w_round_robin_4);
1537 val64 = 0x0001020304050600ULL;
1538 writeq(val64, &bar0->tx_w_round_robin_0);
1539 val64 = 0x0102030405060001ULL;
1540 writeq(val64, &bar0->tx_w_round_robin_1);
1541 val64 = 0x0203040506000102ULL;
1542 writeq(val64, &bar0->tx_w_round_robin_2);
1543 val64 = 0x0304050600010203ULL;
1544 writeq(val64, &bar0->tx_w_round_robin_3);
1545 val64 = 0x0405060000000000ULL;
1546 writeq(val64, &bar0->tx_w_round_robin_4);
1549 val64 = 0x0001020304050607ULL;
1550 writeq(val64, &bar0->tx_w_round_robin_0);
1551 writeq(val64, &bar0->tx_w_round_robin_1);
1552 writeq(val64, &bar0->tx_w_round_robin_2);
1553 writeq(val64, &bar0->tx_w_round_robin_3);
1554 val64 = 0x0001020300000000ULL;
1555 writeq(val64, &bar0->tx_w_round_robin_4);
1559 /* Enable all configured Tx FIFO partitions */
1560 val64 = readq(&bar0->tx_fifo_partition_0);
1561 val64 |= (TX_FIFO_PARTITION_EN);
1562 writeq(val64, &bar0->tx_fifo_partition_0);
1564 /* Filling the Rx round robin registers as per the
1565 * number of Rings and steering based on QoS with
1568 switch (config->rx_ring_num) {
1571 writeq(val64, &bar0->rx_w_round_robin_0);
1572 writeq(val64, &bar0->rx_w_round_robin_1);
1573 writeq(val64, &bar0->rx_w_round_robin_2);
1574 writeq(val64, &bar0->rx_w_round_robin_3);
1575 writeq(val64, &bar0->rx_w_round_robin_4);
1577 val64 = 0x8080808080808080ULL;
1578 writeq(val64, &bar0->rts_qos_steering);
1581 val64 = 0x0001000100010001ULL;
1582 writeq(val64, &bar0->rx_w_round_robin_0);
1583 writeq(val64, &bar0->rx_w_round_robin_1);
1584 writeq(val64, &bar0->rx_w_round_robin_2);
1585 writeq(val64, &bar0->rx_w_round_robin_3);
1586 val64 = 0x0001000100000000ULL;
1587 writeq(val64, &bar0->rx_w_round_robin_4);
1589 val64 = 0x8080808040404040ULL;
1590 writeq(val64, &bar0->rts_qos_steering);
1593 val64 = 0x0001020001020001ULL;
1594 writeq(val64, &bar0->rx_w_round_robin_0);
1595 val64 = 0x0200010200010200ULL;
1596 writeq(val64, &bar0->rx_w_round_robin_1);
1597 val64 = 0x0102000102000102ULL;
1598 writeq(val64, &bar0->rx_w_round_robin_2);
1599 val64 = 0x0001020001020001ULL;
1600 writeq(val64, &bar0->rx_w_round_robin_3);
1601 val64 = 0x0200010200000000ULL;
1602 writeq(val64, &bar0->rx_w_round_robin_4);
1604 val64 = 0x8080804040402020ULL;
1605 writeq(val64, &bar0->rts_qos_steering);
1608 val64 = 0x0001020300010203ULL;
1609 writeq(val64, &bar0->rx_w_round_robin_0);
1610 writeq(val64, &bar0->rx_w_round_robin_1);
1611 writeq(val64, &bar0->rx_w_round_robin_2);
1612 writeq(val64, &bar0->rx_w_round_robin_3);
1613 val64 = 0x0001020300000000ULL;
1614 writeq(val64, &bar0->rx_w_round_robin_4);
1616 val64 = 0x8080404020201010ULL;
1617 writeq(val64, &bar0->rts_qos_steering);
1620 val64 = 0x0001020304000102ULL;
1621 writeq(val64, &bar0->rx_w_round_robin_0);
1622 val64 = 0x0304000102030400ULL;
1623 writeq(val64, &bar0->rx_w_round_robin_1);
1624 val64 = 0x0102030400010203ULL;
1625 writeq(val64, &bar0->rx_w_round_robin_2);
1626 val64 = 0x0400010203040001ULL;
1627 writeq(val64, &bar0->rx_w_round_robin_3);
1628 val64 = 0x0203040000000000ULL;
1629 writeq(val64, &bar0->rx_w_round_robin_4);
1631 val64 = 0x8080404020201008ULL;
1632 writeq(val64, &bar0->rts_qos_steering);
1635 val64 = 0x0001020304050001ULL;
1636 writeq(val64, &bar0->rx_w_round_robin_0);
1637 val64 = 0x0203040500010203ULL;
1638 writeq(val64, &bar0->rx_w_round_robin_1);
1639 val64 = 0x0405000102030405ULL;
1640 writeq(val64, &bar0->rx_w_round_robin_2);
1641 val64 = 0x0001020304050001ULL;
1642 writeq(val64, &bar0->rx_w_round_robin_3);
1643 val64 = 0x0203040500000000ULL;
1644 writeq(val64, &bar0->rx_w_round_robin_4);
1646 val64 = 0x8080404020100804ULL;
1647 writeq(val64, &bar0->rts_qos_steering);
1650 val64 = 0x0001020304050600ULL;
1651 writeq(val64, &bar0->rx_w_round_robin_0);
1652 val64 = 0x0102030405060001ULL;
1653 writeq(val64, &bar0->rx_w_round_robin_1);
1654 val64 = 0x0203040506000102ULL;
1655 writeq(val64, &bar0->rx_w_round_robin_2);
1656 val64 = 0x0304050600010203ULL;
1657 writeq(val64, &bar0->rx_w_round_robin_3);
1658 val64 = 0x0405060000000000ULL;
1659 writeq(val64, &bar0->rx_w_round_robin_4);
1661 val64 = 0x8080402010080402ULL;
1662 writeq(val64, &bar0->rts_qos_steering);
1665 val64 = 0x0001020304050607ULL;
1666 writeq(val64, &bar0->rx_w_round_robin_0);
1667 writeq(val64, &bar0->rx_w_round_robin_1);
1668 writeq(val64, &bar0->rx_w_round_robin_2);
1669 writeq(val64, &bar0->rx_w_round_robin_3);
1670 val64 = 0x0001020300000000ULL;
1671 writeq(val64, &bar0->rx_w_round_robin_4);
1673 val64 = 0x8040201008040201ULL;
1674 writeq(val64, &bar0->rts_qos_steering);
1680 for (i = 0; i < 8; i++)
1681 writeq(val64, &bar0->rts_frm_len_n[i]);
1683 /* Set the default rts frame length for the rings configured */
1684 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1685 for (i = 0 ; i < config->rx_ring_num ; i++)
1686 writeq(val64, &bar0->rts_frm_len_n[i]);
1688 /* Set the frame length for the configured rings
1689 * desired by the user
1691 for (i = 0; i < config->rx_ring_num; i++) {
1692 /* If rts_frm_len[i] == 0 then it is assumed that user not
1693 * specified frame length steering.
1694 * If the user provides the frame length then program
1695 * the rts_frm_len register for those values or else
1696 * leave it as it is.
1698 if (rts_frm_len[i] != 0) {
1699 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1700 &bar0->rts_frm_len_n[i]);
1704 /* Disable differentiated services steering logic */
1705 for (i = 0; i < 64; i++) {
1706 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1707 DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1709 DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1714 /* Program statistics memory */
1715 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1717 if (nic->device_type == XFRAME_II_DEVICE) {
1718 val64 = STAT_BC(0x320);
1719 writeq(val64, &bar0->stat_byte_cnt);
1723 * Initializing the sampling rate for the device to calculate the
1724 * bandwidth utilization.
1726 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1727 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1728 writeq(val64, &bar0->mac_link_util);
1731 * Initializing the Transmit and Receive Traffic Interrupt
1735 /* Initialize TTI */
1736 if (SUCCESS != init_tti(nic, nic->last_link_state))
1739 /* RTI Initialization */
1740 if (nic->device_type == XFRAME_II_DEVICE) {
1742 * Programmed to generate Apprx 500 Intrs per
1745 int count = (nic->config.bus_speed * 125)/4;
1746 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1748 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1749 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1750 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1751 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1753 writeq(val64, &bar0->rti_data1_mem);
1755 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1756 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1757 if (nic->config.intr_type == MSI_X)
1758 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1759 RTI_DATA2_MEM_RX_UFC_D(0x40));
1761 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1762 RTI_DATA2_MEM_RX_UFC_D(0x80));
1763 writeq(val64, &bar0->rti_data2_mem);
1765 for (i = 0; i < config->rx_ring_num; i++) {
1766 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1767 | RTI_CMD_MEM_OFFSET(i);
1768 writeq(val64, &bar0->rti_command_mem);
1771 * Once the operation completes, the Strobe bit of the
1772 * command register will be reset. We poll for this
1773 * particular condition. We wait for a maximum of 500ms
1774 * for the operation to complete, if it's not complete
1775 * by then we return error.
1779 val64 = readq(&bar0->rti_command_mem);
1780 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1784 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1794 * Initializing proper values as Pause threshold into all
1795 * the 8 Queues on Rx side.
1797 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1798 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1800 /* Disable RMAC PAD STRIPPING */
1801 add = &bar0->mac_cfg;
1802 val64 = readq(&bar0->mac_cfg);
1803 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1804 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1805 writel((u32) (val64), add);
1806 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1807 writel((u32) (val64 >> 32), (add + 4));
1808 val64 = readq(&bar0->mac_cfg);
1810 /* Enable FCS stripping by adapter */
1811 add = &bar0->mac_cfg;
1812 val64 = readq(&bar0->mac_cfg);
1813 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1814 if (nic->device_type == XFRAME_II_DEVICE)
1815 writeq(val64, &bar0->mac_cfg);
1817 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1818 writel((u32) (val64), add);
1819 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1820 writel((u32) (val64 >> 32), (add + 4));
1824 * Set the time value to be inserted in the pause frame
1825 * generated by xena.
1827 val64 = readq(&bar0->rmac_pause_cfg);
1828 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1829 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1830 writeq(val64, &bar0->rmac_pause_cfg);
1833 * Set the Threshold Limit for Generating the pause frame
1834 * If the amount of data in any Queue exceeds ratio of
1835 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1836 * pause frame is generated
1839 for (i = 0; i < 4; i++) {
1841 (((u64) 0xFF00 | nic->mac_control.
1842 mc_pause_threshold_q0q3)
1845 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1848 for (i = 0; i < 4; i++) {
1850 (((u64) 0xFF00 | nic->mac_control.
1851 mc_pause_threshold_q4q7)
1854 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1857 * TxDMA will stop Read request if the number of read split has
1858 * exceeded the limit pointed by shared_splits
1860 val64 = readq(&bar0->pic_control);
1861 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1862 writeq(val64, &bar0->pic_control);
1864 if (nic->config.bus_speed == 266) {
1865 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1866 writeq(0x0, &bar0->read_retry_delay);
1867 writeq(0x0, &bar0->write_retry_delay);
1871 * Programming the Herc to split every write transaction
1872 * that does not start on an ADB to reduce disconnects.
1874 if (nic->device_type == XFRAME_II_DEVICE) {
1875 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1876 MISC_LINK_STABILITY_PRD(3);
1877 writeq(val64, &bar0->misc_control);
1878 val64 = readq(&bar0->pic_control2);
1879 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1880 writeq(val64, &bar0->pic_control2);
1882 if (strstr(nic->product_name, "CX4")) {
1883 val64 = TMAC_AVG_IPG(0x17);
1884 writeq(val64, &bar0->tmac_avg_ipg);
1889 #define LINK_UP_DOWN_INTERRUPT 1
1890 #define MAC_RMAC_ERR_TIMER 2
1892 static int s2io_link_fault_indication(struct s2io_nic *nic)
1894 if (nic->device_type == XFRAME_II_DEVICE)
1895 return LINK_UP_DOWN_INTERRUPT;
1897 return MAC_RMAC_ERR_TIMER;
1901 * do_s2io_write_bits - update alarm bits in alarm register
1902 * @value: alarm bits
1903 * @flag: interrupt status
1904 * @addr: address value
1905 * Description: update alarm bits in alarm register
1909 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1913 temp64 = readq(addr);
1915 if(flag == ENABLE_INTRS)
1916 temp64 &= ~((u64) value);
1918 temp64 |= ((u64) value);
1919 writeq(temp64, addr);
1922 static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1924 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1925 register u64 gen_int_mask = 0;
1928 writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
1929 if (mask & TX_DMA_INTR) {
1931 gen_int_mask |= TXDMA_INT_M;
1933 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1934 TXDMA_PCC_INT | TXDMA_TTI_INT |
1935 TXDMA_LSO_INT | TXDMA_TPA_INT |
1936 TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1938 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1939 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1940 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1941 &bar0->pfc_err_mask);
1943 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1944 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1945 TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1947 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1948 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1949 PCC_N_SERR | PCC_6_COF_OV_ERR |
1950 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1951 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1952 PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);
1954 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1955 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1957 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1958 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1959 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1960 flag, &bar0->lso_err_mask);
1962 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1963 flag, &bar0->tpa_err_mask);
1965 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1969 if (mask & TX_MAC_INTR) {
1970 gen_int_mask |= TXMAC_INT_M;
1971 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1972 &bar0->mac_int_mask);
1973 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1974 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1975 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1976 flag, &bar0->mac_tmac_err_mask);
1979 if (mask & TX_XGXS_INTR) {
1980 gen_int_mask |= TXXGXS_INT_M;
1981 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1982 &bar0->xgxs_int_mask);
1983 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1984 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1985 flag, &bar0->xgxs_txgxs_err_mask);
1988 if (mask & RX_DMA_INTR) {
1989 gen_int_mask |= RXDMA_INT_M;
1990 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1991 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1992 flag, &bar0->rxdma_int_mask);
1993 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1994 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1995 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1996 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1997 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1998 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1999 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
2000 &bar0->prc_pcix_err_mask);
2001 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
2002 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
2003 &bar0->rpa_err_mask);
2004 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
2005 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
2006 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
2007 RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
2008 flag, &bar0->rda_err_mask);
2009 do_s2io_write_bits(RTI_SM_ERR_ALARM |
2010 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
2011 flag, &bar0->rti_err_mask);
2014 if (mask & RX_MAC_INTR) {
2015 gen_int_mask |= RXMAC_INT_M;
2016 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
2017 &bar0->mac_int_mask);
2018 interruptible = RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
2019 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
2020 RMAC_DOUBLE_ECC_ERR;
2021 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
2022 interruptible |= RMAC_LINK_STATE_CHANGE_INT;
2023 do_s2io_write_bits(interruptible,
2024 flag, &bar0->mac_rmac_err_mask);
2027 if (mask & RX_XGXS_INTR)
2029 gen_int_mask |= RXXGXS_INT_M;
2030 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
2031 &bar0->xgxs_int_mask);
2032 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
2033 &bar0->xgxs_rxgxs_err_mask);
2036 if (mask & MC_INTR) {
2037 gen_int_mask |= MC_INT_M;
2038 do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
2039 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
2040 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
2041 &bar0->mc_err_mask);
2043 nic->general_int_mask = gen_int_mask;
2045 /* Remove this line when alarm interrupts are enabled */
2046 nic->general_int_mask = 0;
2049 * en_dis_able_nic_intrs - Enable or Disable the interrupts
2050 * @nic: device private variable,
2051 * @mask: A mask indicating which Intr block must be modified and,
2052 * @flag: A flag indicating whether to enable or disable the Intrs.
2053 * Description: This function will either disable or enable the interrupts
2054 * depending on the flag argument. The mask argument can be used to
2055 * enable/disable any Intr block.
2056 * Return Value: NONE.
2059 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
2061 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2062 register u64 temp64 = 0, intr_mask = 0;
2064 intr_mask = nic->general_int_mask;
2066 /* Top level interrupt classification */
2067 /* PIC Interrupts */
2068 if (mask & TX_PIC_INTR) {
2069 /* Enable PIC Intrs in the general intr mask register */
2070 intr_mask |= TXPIC_INT_M;
2071 if (flag == ENABLE_INTRS) {
2073 * If Hercules adapter enable GPIO otherwise
2074 * disable all PCIX, Flash, MDIO, IIC and GPIO
2075 * interrupts for now.
2078 if (s2io_link_fault_indication(nic) ==
2079 LINK_UP_DOWN_INTERRUPT ) {
2080 do_s2io_write_bits(PIC_INT_GPIO, flag,
2081 &bar0->pic_int_mask);
2082 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2083 &bar0->gpio_int_mask);
2085 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2086 } else if (flag == DISABLE_INTRS) {
2088 * Disable PIC Intrs in the general
2089 * intr mask register
2091 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2095 /* Tx traffic interrupts */
2096 if (mask & TX_TRAFFIC_INTR) {
2097 intr_mask |= TXTRAFFIC_INT_M;
2098 if (flag == ENABLE_INTRS) {
2100 * Enable all the Tx side interrupts
2101 * writing 0 Enables all 64 TX interrupt levels
2103 writeq(0x0, &bar0->tx_traffic_mask);
2104 } else if (flag == DISABLE_INTRS) {
2106 * Disable Tx Traffic Intrs in the general intr mask
2109 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2113 /* Rx traffic interrupts */
2114 if (mask & RX_TRAFFIC_INTR) {
2115 intr_mask |= RXTRAFFIC_INT_M;
2116 if (flag == ENABLE_INTRS) {
2117 /* writing 0 Enables all 8 RX interrupt levels */
2118 writeq(0x0, &bar0->rx_traffic_mask);
2119 } else if (flag == DISABLE_INTRS) {
2121 * Disable Rx Traffic Intrs in the general intr mask
2124 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2128 temp64 = readq(&bar0->general_int_mask);
2129 if (flag == ENABLE_INTRS)
2130 temp64 &= ~((u64) intr_mask);
2132 temp64 = DISABLE_ALL_INTRS;
2133 writeq(temp64, &bar0->general_int_mask);
2135 nic->general_int_mask = readq(&bar0->general_int_mask);
2139 * verify_pcc_quiescent- Checks for PCC quiescent state
2140 * Return: 1 If PCC is quiescence
2141 * 0 If PCC is not quiescence
2143 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2146 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2147 u64 val64 = readq(&bar0->adapter_status);
2149 herc = (sp->device_type == XFRAME_II_DEVICE);
2151 if (flag == FALSE) {
2152 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2153 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2156 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2160 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2161 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2162 ADAPTER_STATUS_RMAC_PCC_IDLE))
2165 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2166 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2174 * verify_xena_quiescence - Checks whether the H/W is ready
2175 * Description: Returns whether the H/W is ready to go or not. Depending
2176 * on whether adapter enable bit was written or not the comparison
2177 * differs and the calling function passes the input argument flag to
2179 * Return: 1 If xena is quiescence
2180 * 0 If Xena is not quiescence
2183 static int verify_xena_quiescence(struct s2io_nic *sp)
2186 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2187 u64 val64 = readq(&bar0->adapter_status);
2188 mode = s2io_verify_pci_mode(sp);
2190 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2191 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2194 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2195 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2198 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2199 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2202 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2203 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2206 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2207 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2210 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2211 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2214 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2215 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2218 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2219 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2224 * In PCI 33 mode, the P_PLL is not used, and therefore,
2225 * the the P_PLL_LOCK bit in the adapter_status register will
2228 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2229 sp->device_type == XFRAME_II_DEVICE && mode !=
2231 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2234 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2235 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2236 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2243 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
2244 * @sp: Pointer to device specifc structure
2246 * New procedure to clear mac address reading problems on Alpha platforms
2250 static void fix_mac_address(struct s2io_nic * sp)
2252 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2256 while (fix_mac[i] != END_SIGN) {
2257 writeq(fix_mac[i++], &bar0->gpio_control);
2259 val64 = readq(&bar0->gpio_control);
2264 * start_nic - Turns the device on
2265 * @nic : device private variable.
2267 * This function actually turns the device on. Before this function is
2268 * called,all Registers are configured from their reset states
2269 * and shared memory is allocated but the NIC is still quiescent. On
2270 * calling this function, the device interrupts are cleared and the NIC is
2271 * literally switched on by writing into the adapter control register.
2273 * SUCCESS on success and -1 on failure.
2276 static int start_nic(struct s2io_nic *nic)
2278 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2279 struct net_device *dev = nic->dev;
2280 register u64 val64 = 0;
2282 struct mac_info *mac_control;
2283 struct config_param *config;
2285 mac_control = &nic->mac_control;
2286 config = &nic->config;
2288 /* PRC Initialization and configuration */
2289 for (i = 0; i < config->rx_ring_num; i++) {
2290 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
2291 &bar0->prc_rxd0_n[i]);
2293 val64 = readq(&bar0->prc_ctrl_n[i]);
2294 if (nic->rxd_mode == RXD_MODE_1)
2295 val64 |= PRC_CTRL_RC_ENABLED;
2297 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2298 if (nic->device_type == XFRAME_II_DEVICE)
2299 val64 |= PRC_CTRL_GROUP_READS;
2300 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2301 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2302 writeq(val64, &bar0->prc_ctrl_n[i]);
2305 if (nic->rxd_mode == RXD_MODE_3B) {
2306 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2307 val64 = readq(&bar0->rx_pa_cfg);
2308 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2309 writeq(val64, &bar0->rx_pa_cfg);
2312 if (vlan_tag_strip == 0) {
2313 val64 = readq(&bar0->rx_pa_cfg);
2314 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2315 writeq(val64, &bar0->rx_pa_cfg);
2316 vlan_strip_flag = 0;
2320 * Enabling MC-RLDRAM. After enabling the device, we timeout
2321 * for around 100ms, which is approximately the time required
2322 * for the device to be ready for operation.
2324 val64 = readq(&bar0->mc_rldram_mrs);
2325 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2326 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2327 val64 = readq(&bar0->mc_rldram_mrs);
2329 msleep(100); /* Delay by around 100 ms. */
2331 /* Enabling ECC Protection. */
2332 val64 = readq(&bar0->adapter_control);
2333 val64 &= ~ADAPTER_ECC_EN;
2334 writeq(val64, &bar0->adapter_control);
2337 * Verify if the device is ready to be enabled, if so enable
2340 val64 = readq(&bar0->adapter_status);
2341 if (!verify_xena_quiescence(nic)) {
2342 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2343 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2344 (unsigned long long) val64);
2349 * With some switches, link might be already up at this point.
2350 * Because of this weird behavior, when we enable laser,
2351 * we may not get link. We need to handle this. We cannot
2352 * figure out which switch is misbehaving. So we are forced to
2353 * make a global change.
2356 /* Enabling Laser. */
2357 val64 = readq(&bar0->adapter_control);
2358 val64 |= ADAPTER_EOI_TX_ON;
2359 writeq(val64, &bar0->adapter_control);
2361 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2363 * Dont see link state interrupts initally on some switches,
2364 * so directly scheduling the link state task here.
2366 schedule_work(&nic->set_link_task);
2368 /* SXE-002: Initialize link and activity LED */
2369 subid = nic->pdev->subsystem_device;
2370 if (((subid & 0xFF) >= 0x07) &&
2371 (nic->device_type == XFRAME_I_DEVICE)) {
2372 val64 = readq(&bar0->gpio_control);
2373 val64 |= 0x0000800000000000ULL;
2374 writeq(val64, &bar0->gpio_control);
2375 val64 = 0x0411040400000000ULL;
2376 writeq(val64, (void __iomem *)bar0 + 0x2700);
2382 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2384 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2385 TxD *txdlp, int get_off)
2387 struct s2io_nic *nic = fifo_data->nic;
2388 struct sk_buff *skb;
2393 if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2394 pci_unmap_single(nic->pdev, (dma_addr_t)
2395 txds->Buffer_Pointer, sizeof(u64),
2400 skb = (struct sk_buff *) ((unsigned long)
2401 txds->Host_Control);
2403 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2406 pci_unmap_single(nic->pdev, (dma_addr_t)
2407 txds->Buffer_Pointer,
2408 skb->len - skb->data_len,
2410 frg_cnt = skb_shinfo(skb)->nr_frags;
2413 for (j = 0; j < frg_cnt; j++, txds++) {
2414 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2415 if (!txds->Buffer_Pointer)
2417 pci_unmap_page(nic->pdev, (dma_addr_t)
2418 txds->Buffer_Pointer,
2419 frag->size, PCI_DMA_TODEVICE);
2422 memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2427 * free_tx_buffers - Free all queued Tx buffers
2428 * @nic : device private variable.
2430 * Free all queued Tx buffers.
2431 * Return Value: void
2434 static void free_tx_buffers(struct s2io_nic *nic)
2436 struct net_device *dev = nic->dev;
2437 struct sk_buff *skb;
2440 struct mac_info *mac_control;
2441 struct config_param *config;
2444 mac_control = &nic->mac_control;
2445 config = &nic->config;
2447 for (i = 0; i < config->tx_fifo_num; i++) {
2448 unsigned long flags;
2449 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags);
2450 for (j = 0; j < config->tx_cfg[i].fifo_len; j++) {
2451 txdp = (struct TxD *) \
2452 mac_control->fifos[i].list_info[j].list_virt_addr;
2453 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2455 nic->mac_control.stats_info->sw_stat.mem_freed
2462 "%s:forcibly freeing %d skbs on FIFO%d\n",
2464 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2465 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2466 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock, flags);
2471 * stop_nic - To stop the nic
2472 * @nic ; device private variable.
2474 * This function does exactly the opposite of what the start_nic()
2475 * function does. This function is called to stop the device.
2480 static void stop_nic(struct s2io_nic *nic)
2482 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2483 register u64 val64 = 0;
2485 struct mac_info *mac_control;
2486 struct config_param *config;
2488 mac_control = &nic->mac_control;
2489 config = &nic->config;
2491 /* Disable all interrupts */
2492 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2493 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2494 interruptible |= TX_PIC_INTR;
2495 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2497 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2498 val64 = readq(&bar0->adapter_control);
2499 val64 &= ~(ADAPTER_CNTL_EN);
2500 writeq(val64, &bar0->adapter_control);
2504 * fill_rx_buffers - Allocates the Rx side skbs
2505 * @ring_info: per ring structure
2506 * @from_card_up: If this is true, we will map the buffer to get
2507 * the dma address for buf0 and buf1 to give it to the card.
2508 * Else we will sync the already mapped buffer to give it to the card.
2510 * The function allocates Rx side skbs and puts the physical
2511 * address of these buffers into the RxD buffer pointers, so that the NIC
2512 * can DMA the received frame into these locations.
2513 * The NIC supports 3 receive modes, viz
2515 * 2. three buffer and
2516 * 3. Five buffer modes.
2517 * Each mode defines how many fragments the received frame will be split
2518 * up into by the NIC. The frame is split into L3 header, L4 Header,
2519 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2520 * is split into 3 fragments. As of now only single buffer mode is
2523 * SUCCESS on success or an appropriate -ve value on failure.
2526 static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
2528 struct sk_buff *skb;
2530 int off, size, block_no, block_no1;
2535 struct RxD_t *first_rxdp = NULL;
2536 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2540 struct swStat *stats = &ring->nic->mac_control.stats_info->sw_stat;
2542 alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
2544 block_no1 = ring->rx_curr_get_info.block_index;
2545 while (alloc_tab < alloc_cnt) {
2546 block_no = ring->rx_curr_put_info.block_index;
2548 off = ring->rx_curr_put_info.offset;
2550 rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2552 rxd_index = off + 1;
2554 rxd_index += (block_no * ring->rxd_count);
2556 if ((block_no == block_no1) &&
2557 (off == ring->rx_curr_get_info.offset) &&
2558 (rxdp->Host_Control)) {
2559 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2561 DBG_PRINT(INTR_DBG, " info equated\n");
2564 if (off && (off == ring->rxd_count)) {
2565 ring->rx_curr_put_info.block_index++;
2566 if (ring->rx_curr_put_info.block_index ==
2568 ring->rx_curr_put_info.block_index = 0;
2569 block_no = ring->rx_curr_put_info.block_index;
2571 ring->rx_curr_put_info.offset = off;
2572 rxdp = ring->rx_blocks[block_no].block_virt_addr;
2573 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2574 ring->dev->name, rxdp);
2578 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2579 ((ring->rxd_mode == RXD_MODE_3B) &&
2580 (rxdp->Control_2 & s2BIT(0)))) {
2581 ring->rx_curr_put_info.offset = off;
2584 /* calculate size of skb based on ring mode */
2585 size = ring->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2586 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2587 if (ring->rxd_mode == RXD_MODE_1)
2588 size += NET_IP_ALIGN;
2590 size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2593 skb = dev_alloc_skb(size);
2595 DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name);
2596 DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
2599 first_rxdp->Control_1 |= RXD_OWN_XENA;
2601 stats->mem_alloc_fail_cnt++;
2605 stats->mem_allocated += skb->truesize;
2607 if (ring->rxd_mode == RXD_MODE_1) {
2608 /* 1 buffer mode - normal operation mode */
2609 rxdp1 = (struct RxD1*)rxdp;
2610 memset(rxdp, 0, sizeof(struct RxD1));
2611 skb_reserve(skb, NET_IP_ALIGN);
2612 rxdp1->Buffer0_ptr = pci_map_single
2613 (ring->pdev, skb->data, size - NET_IP_ALIGN,
2614 PCI_DMA_FROMDEVICE);
2615 if(pci_dma_mapping_error(rxdp1->Buffer0_ptr))
2616 goto pci_map_failed;
2619 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2620 rxdp->Host_Control = (unsigned long) (skb);
2621 } else if (ring->rxd_mode == RXD_MODE_3B) {
2624 * 2 buffer mode provides 128
2625 * byte aligned receive buffers.
2628 rxdp3 = (struct RxD3*)rxdp;
2629 /* save buffer pointers to avoid frequent dma mapping */
2630 Buffer0_ptr = rxdp3->Buffer0_ptr;
2631 Buffer1_ptr = rxdp3->Buffer1_ptr;
2632 memset(rxdp, 0, sizeof(struct RxD3));
2633 /* restore the buffer pointers for dma sync*/
2634 rxdp3->Buffer0_ptr = Buffer0_ptr;
2635 rxdp3->Buffer1_ptr = Buffer1_ptr;
2637 ba = &ring->ba[block_no][off];
2638 skb_reserve(skb, BUF0_LEN);
2639 tmp = (u64)(unsigned long) skb->data;
2642 skb->data = (void *) (unsigned long)tmp;
2643 skb_reset_tail_pointer(skb);
2646 rxdp3->Buffer0_ptr =
2647 pci_map_single(ring->pdev, ba->ba_0,
2648 BUF0_LEN, PCI_DMA_FROMDEVICE);
2649 if (pci_dma_mapping_error(rxdp3->Buffer0_ptr))
2650 goto pci_map_failed;
2652 pci_dma_sync_single_for_device(ring->pdev,
2653 (dma_addr_t) rxdp3->Buffer0_ptr,
2654 BUF0_LEN, PCI_DMA_FROMDEVICE);
2656 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2657 if (ring->rxd_mode == RXD_MODE_3B) {
2658 /* Two buffer mode */
2661 * Buffer2 will have L3/L4 header plus
2664 rxdp3->Buffer2_ptr = pci_map_single
2665 (ring->pdev, skb->data, ring->mtu + 4,
2666 PCI_DMA_FROMDEVICE);
2668 if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
2669 goto pci_map_failed;
2672 rxdp3->Buffer1_ptr =
2673 pci_map_single(ring->pdev,
2675 PCI_DMA_FROMDEVICE);
2677 if (pci_dma_mapping_error
2678 (rxdp3->Buffer1_ptr)) {
2681 (dma_addr_t)(unsigned long)
2684 PCI_DMA_FROMDEVICE);
2685 goto pci_map_failed;
2688 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2689 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2692 rxdp->Control_2 |= s2BIT(0);
2693 rxdp->Host_Control = (unsigned long) (skb);
2695 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2696 rxdp->Control_1 |= RXD_OWN_XENA;
2698 if (off == (ring->rxd_count + 1))
2700 ring->rx_curr_put_info.offset = off;
2702 rxdp->Control_2 |= SET_RXD_MARKER;
2703 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2706 first_rxdp->Control_1 |= RXD_OWN_XENA;
2710 ring->rx_bufs_left += 1;
2715 /* Transfer ownership of first descriptor to adapter just before
2716 * exiting. Before that, use memory barrier so that ownership
2717 * and other fields are seen by adapter correctly.
2721 first_rxdp->Control_1 |= RXD_OWN_XENA;
2726 stats->pci_map_fail_cnt++;
2727 stats->mem_freed += skb->truesize;
2728 dev_kfree_skb_irq(skb);
2732 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2734 struct net_device *dev = sp->dev;
2736 struct sk_buff *skb;
2738 struct mac_info *mac_control;
2743 mac_control = &sp->mac_control;
2744 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2745 rxdp = mac_control->rings[ring_no].
2746 rx_blocks[blk].rxds[j].virt_addr;
2747 skb = (struct sk_buff *)
2748 ((unsigned long) rxdp->Host_Control);
2752 if (sp->rxd_mode == RXD_MODE_1) {
2753 rxdp1 = (struct RxD1*)rxdp;
2754 pci_unmap_single(sp->pdev, (dma_addr_t)
2757 HEADER_ETHERNET_II_802_3_SIZE
2758 + HEADER_802_2_SIZE +
2760 PCI_DMA_FROMDEVICE);
2761 memset(rxdp, 0, sizeof(struct RxD1));
2762 } else if(sp->rxd_mode == RXD_MODE_3B) {
2763 rxdp3 = (struct RxD3*)rxdp;
2764 ba = &mac_control->rings[ring_no].
2766 pci_unmap_single(sp->pdev, (dma_addr_t)
2769 PCI_DMA_FROMDEVICE);
2770 pci_unmap_single(sp->pdev, (dma_addr_t)
2773 PCI_DMA_FROMDEVICE);
2774 pci_unmap_single(sp->pdev, (dma_addr_t)
2777 PCI_DMA_FROMDEVICE);
2778 memset(rxdp, 0, sizeof(struct RxD3));
2780 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2782 mac_control->rings[ring_no].rx_bufs_left -= 1;
2787 * free_rx_buffers - Frees all Rx buffers
2788 * @sp: device private variable.
2790 * This function will free all Rx buffers allocated by host.
2795 static void free_rx_buffers(struct s2io_nic *sp)
2797 struct net_device *dev = sp->dev;
2798 int i, blk = 0, buf_cnt = 0;
2799 struct mac_info *mac_control;
2800 struct config_param *config;
2802 mac_control = &sp->mac_control;
2803 config = &sp->config;
2805 for (i = 0; i < config->rx_ring_num; i++) {
2806 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2807 free_rxd_blk(sp,i,blk);
2809 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2810 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2811 mac_control->rings[i].rx_curr_put_info.offset = 0;
2812 mac_control->rings[i].rx_curr_get_info.offset = 0;
2813 mac_control->rings[i].rx_bufs_left = 0;
2814 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2815 dev->name, buf_cnt, i);
2819 static int s2io_chk_rx_buffers(struct ring_info *ring)
2821 if (fill_rx_buffers(ring, 0) == -ENOMEM) {
2822 DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
2823 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
2829 * s2io_poll - Rx interrupt handler for NAPI support
2830 * @napi : pointer to the napi structure.
2831 * @budget : The number of packets that were budgeted to be processed
2832 * during one pass through the 'Poll" function.
2834 * Comes into picture only if NAPI support has been incorporated. It does
2835 * the same thing that rx_intr_handler does, but not in a interrupt context
2836 * also It will process only a given number of packets.
2838 * 0 on success and 1 if there are No Rx packets to be processed.
2841 static int s2io_poll_msix(struct napi_struct *napi, int budget)
2843 struct ring_info *ring = container_of(napi, struct ring_info, napi);
2844 struct net_device *dev = ring->dev;
2845 struct config_param *config;
2846 struct mac_info *mac_control;
2847 int pkts_processed = 0;
2848 u8 __iomem *addr = NULL;
2850 struct s2io_nic *nic = dev->priv;
2851 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2852 int budget_org = budget;
2854 config = &nic->config;
2855 mac_control = &nic->mac_control;
2857 if (unlikely(!is_s2io_card_up(nic)))
2860 pkts_processed = rx_intr_handler(ring, budget);
2861 s2io_chk_rx_buffers(ring);
2863 if (pkts_processed < budget_org) {
2864 netif_rx_complete(dev, napi);
2865 /*Re Enable MSI-Rx Vector*/
2866 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2867 addr += 7 - ring->ring_no;
2868 val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2872 return pkts_processed;
2874 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2876 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2877 struct ring_info *ring;
2878 struct net_device *dev = nic->dev;
2879 struct config_param *config;
2880 struct mac_info *mac_control;
2881 int pkts_processed = 0;
2882 int ring_pkts_processed, i;
2883 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2884 int budget_org = budget;
2886 config = &nic->config;
2887 mac_control = &nic->mac_control;
2889 if (unlikely(!is_s2io_card_up(nic)))
2892 for (i = 0; i < config->rx_ring_num; i++) {
2893 ring = &mac_control->rings[i];
2894 ring_pkts_processed = rx_intr_handler(ring, budget);
2895 s2io_chk_rx_buffers(ring);
2896 pkts_processed += ring_pkts_processed;
2897 budget -= ring_pkts_processed;
2901 if (pkts_processed < budget_org) {
2902 netif_rx_complete(dev, napi);
2903 /* Re enable the Rx interrupts for the ring */
2904 writeq(0, &bar0->rx_traffic_mask);
2905 readl(&bar0->rx_traffic_mask);
2907 return pkts_processed;
2910 #ifdef CONFIG_NET_POLL_CONTROLLER
2912 * s2io_netpoll - netpoll event handler entry point
2913 * @dev : pointer to the device structure.
2915 * This function will be called by upper layer to check for events on the
2916 * interface in situations where interrupts are disabled. It is used for
2917 * specific in-kernel networking tasks, such as remote consoles and kernel
2918 * debugging over the network (example netdump in RedHat).
2920 static void s2io_netpoll(struct net_device *dev)
2922 struct s2io_nic *nic = dev->priv;
2923 struct mac_info *mac_control;
2924 struct config_param *config;
2925 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2926 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2929 if (pci_channel_offline(nic->pdev))
2932 disable_irq(dev->irq);
2934 mac_control = &nic->mac_control;
2935 config = &nic->config;
2937 writeq(val64, &bar0->rx_traffic_int);
2938 writeq(val64, &bar0->tx_traffic_int);
2940 /* we need to free up the transmitted skbufs or else netpoll will
2941 * run out of skbs and will fail and eventually netpoll application such
2942 * as netdump will fail.
2944 for (i = 0; i < config->tx_fifo_num; i++)
2945 tx_intr_handler(&mac_control->fifos[i]);
2947 /* check for received packet and indicate up to network */
2948 for (i = 0; i < config->rx_ring_num; i++)
2949 rx_intr_handler(&mac_control->rings[i], 0);
2951 for (i = 0; i < config->rx_ring_num; i++) {
2952 if (fill_rx_buffers(&mac_control->rings[i], 0) == -ENOMEM) {
2953 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2954 DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2958 enable_irq(dev->irq);
2964 * rx_intr_handler - Rx interrupt handler
2965 * @ring_info: per ring structure.
2966 * @budget: budget for napi processing.
2968 * If the interrupt is because of a received frame or if the
2969 * receive ring contains fresh as yet un-processed frames,this function is
2970 * called. It picks out the RxD at which place the last Rx processing had
2971 * stopped and sends the skb to the OSM's Rx handler and then increments
2974 * No. of napi packets processed.
2976 static int rx_intr_handler(struct ring_info *ring_data, int budget)
2978 int get_block, put_block;
2979 struct rx_curr_get_info get_info, put_info;
2981 struct sk_buff *skb;
2982 int pkt_cnt = 0, napi_pkts = 0;
2987 get_info = ring_data->rx_curr_get_info;
2988 get_block = get_info.block_index;
2989 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2990 put_block = put_info.block_index;
2991 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2993 while (RXD_IS_UP2DT(rxdp)) {
2995 * If your are next to put index then it's
2996 * FIFO full condition
2998 if ((get_block == put_block) &&
2999 (get_info.offset + 1) == put_info.offset) {
3000 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
3001 ring_data->dev->name);
3004 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
3006 DBG_PRINT(ERR_DBG, "%s: The skb is ",
3007 ring_data->dev->name);
3008 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
3011 if (ring_data->rxd_mode == RXD_MODE_1) {
3012 rxdp1 = (struct RxD1*)rxdp;
3013 pci_unmap_single(ring_data->pdev, (dma_addr_t)
3016 HEADER_ETHERNET_II_802_3_SIZE +
3019 PCI_DMA_FROMDEVICE);
3020 } else if (ring_data->rxd_mode == RXD_MODE_3B) {
3021 rxdp3 = (struct RxD3*)rxdp;
3022 pci_dma_sync_single_for_cpu(ring_data->pdev, (dma_addr_t)
3024 BUF0_LEN, PCI_DMA_FROMDEVICE);
3025 pci_unmap_single(ring_data->pdev, (dma_addr_t)
3028 PCI_DMA_FROMDEVICE);
3030 prefetch(skb->data);
3031 rx_osm_handler(ring_data, rxdp);
3033 ring_data->rx_curr_get_info.offset = get_info.offset;
3034 rxdp = ring_data->rx_blocks[get_block].
3035 rxds[get_info.offset].virt_addr;
3036 if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
3037 get_info.offset = 0;
3038 ring_data->rx_curr_get_info.offset = get_info.offset;
3040 if (get_block == ring_data->block_count)
3042 ring_data->rx_curr_get_info.block_index = get_block;
3043 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
3046 if (ring_data->nic->config.napi) {
3053 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
3056 if (ring_data->lro) {
3057 /* Clear all LRO sessions before exiting */
3058 for (i=0; i<MAX_LRO_SESSIONS; i++) {
3059 struct lro *lro = &ring_data->lro0_n[i];
3061 update_L3L4_header(ring_data->nic, lro);
3062 queue_rx_frame(lro->parent, lro->vlan_tag);
3063 clear_lro_session(lro);
3071 * tx_intr_handler - Transmit interrupt handler
3072 * @nic : device private variable
3074 * If an interrupt was raised to indicate DMA complete of the
3075 * Tx packet, this function is called. It identifies the last TxD
3076 * whose buffer was freed and frees all skbs whose data have already
3077 * DMA'ed into the NICs internal memory.
3082 static void tx_intr_handler(struct fifo_info *fifo_data)
3084 struct s2io_nic *nic = fifo_data->nic;
3085 struct tx_curr_get_info get_info, put_info;
3086 struct sk_buff *skb = NULL;
3089 unsigned long flags = 0;
3092 if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3095 get_info = fifo_data->tx_curr_get_info;
3096 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3097 txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
3099 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3100 (get_info.offset != put_info.offset) &&
3101 (txdlp->Host_Control)) {
3102 /* Check for TxD errors */
3103 if (txdlp->Control_1 & TXD_T_CODE) {
3104 unsigned long long err;
3105 err = txdlp->Control_1 & TXD_T_CODE;
3107 nic->mac_control.stats_info->sw_stat.
3111 /* update t_code statistics */
3112 err_mask = err >> 48;
3115 nic->mac_control.stats_info->sw_stat.
3120 nic->mac_control.stats_info->sw_stat.
3121 tx_desc_abort_cnt++;
3125 nic->mac_control.stats_info->sw_stat.
3126 tx_parity_err_cnt++;
3130 nic->mac_control.stats_info->sw_stat.
3135 nic->mac_control.stats_info->sw_stat.
3136 tx_list_proc_err_cnt++;
3141 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3143 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3144 DBG_PRINT(ERR_DBG, "%s: Null skb ",
3146 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
3151 /* Updating the statistics block */
3152 nic->stats.tx_bytes += skb->len;
3153 nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
3154 dev_kfree_skb_irq(skb);
3157 if (get_info.offset == get_info.fifo_len + 1)
3158 get_info.offset = 0;
3159 txdlp = (struct TxD *) fifo_data->list_info
3160 [get_info.offset].list_virt_addr;
3161 fifo_data->tx_curr_get_info.offset =
3165 s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3167 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3171 * s2io_mdio_write - Function to write in to MDIO registers
3172 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3173 * @addr : address value
3174 * @value : data value
3175 * @dev : pointer to net_device structure
3177 * This function is used to write values to the MDIO registers
3180 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3183 struct s2io_nic *sp = dev->priv;
3184 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3186 //address transaction
3187 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3188 | MDIO_MMD_DEV_ADDR(mmd_type)
3189 | MDIO_MMS_PRT_ADDR(0x0);
3190 writeq(val64, &bar0->mdio_control);
3191 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3192 writeq(val64, &bar0->mdio_control);
3197 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3198 | MDIO_MMD_DEV_ADDR(mmd_type)
3199 | MDIO_MMS_PRT_ADDR(0x0)
3200 | MDIO_MDIO_DATA(value)
3201 | MDIO_OP(MDIO_OP_WRITE_TRANS);
3202 writeq(val64, &bar0->mdio_control);
3203 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3204 writeq(val64, &bar0->mdio_control);
3208 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3209 | MDIO_MMD_DEV_ADDR(mmd_type)
3210 | MDIO_MMS_PRT_ADDR(0x0)
3211 | MDIO_OP(MDIO_OP_READ_TRANS);
3212 writeq(val64, &bar0->mdio_control);
3213 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3214 writeq(val64, &bar0->mdio_control);
3220 * s2io_mdio_read - Function to write in to MDIO registers
3221 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3222 * @addr : address value
3223 * @dev : pointer to net_device structure
3225 * This function is used to read values to the MDIO registers
3228 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3232 struct s2io_nic *sp = dev->priv;
3233 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3235 /* address transaction */
3236 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3237 | MDIO_MMD_DEV_ADDR(mmd_type)
3238 | MDIO_MMS_PRT_ADDR(0x0);
3239 writeq(val64, &bar0->mdio_control);
3240 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3241 writeq(val64, &bar0->mdio_control);
3244 /* Data transaction */
3246 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3247 | MDIO_MMD_DEV_ADDR(mmd_type)
3248 | MDIO_MMS_PRT_ADDR(0x0)
3249 | MDIO_OP(MDIO_OP_READ_TRANS);
3250 writeq(val64, &bar0->mdio_control);
3251 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3252 writeq(val64, &bar0->mdio_control);
3255 /* Read the value from regs */
3256 rval64 = readq(&bar0->mdio_control);
3257 rval64 = rval64 & 0xFFFF0000;
3258 rval64 = rval64 >> 16;
3262 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
3263 * @counter : couter value to be updated
3264 * @flag : flag to indicate the status
3265 * @type : counter type
3267 * This function is to check the status of the xpak counters value
3271 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3276 for(i = 0; i <index; i++)
3281 *counter = *counter + 1;
3282 val64 = *regs_stat & mask;
3283 val64 = val64 >> (index * 0x2);
3290 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3291 "service. Excessive temperatures may "
3292 "result in premature transceiver "
3296 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3297 "service Excessive bias currents may "
3298 "indicate imminent laser diode "
3302 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3303 "service Excessive laser output "
3304 "power may saturate far-end "
3308 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3313 val64 = val64 << (index * 0x2);
3314 *regs_stat = (*regs_stat & (~mask)) | (val64);
3317 *regs_stat = *regs_stat & (~mask);
3322 * s2io_updt_xpak_counter - Function to update the xpak counters
3323 * @dev : pointer to net_device struct
3325 * This function is to upate the status of the xpak counters value
3328 static void s2io_updt_xpak_counter(struct net_device *dev)
3336 struct s2io_nic *sp = dev->priv;
3337 struct stat_block *stat_info = sp->mac_control.stats_info;
3339 /* Check the communication with the MDIO slave */
3342 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3343 if((val64 == 0xFFFF) || (val64 == 0x0000))
3345 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3346 "Returned %llx\n", (unsigned long long)val64);
3350 /* Check for the expecte value of 2040 at PMA address 0x0000 */
3353 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3354 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3355 (unsigned long long)val64);
3359 /* Loading the DOM register to MDIO register */
3361 s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3362 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3364 /* Reading the Alarm flags */
3367 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3369 flag = CHECKBIT(val64, 0x7);
3371 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3372 &stat_info->xpak_stat.xpak_regs_stat,
3375 if(CHECKBIT(val64, 0x6))
3376 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3378 flag = CHECKBIT(val64, 0x3);
3380 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3381 &stat_info->xpak_stat.xpak_regs_stat,
3384 if(CHECKBIT(val64, 0x2))
3385 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3387 flag = CHECKBIT(val64, 0x1);
3389 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3390 &stat_info->xpak_stat.xpak_regs_stat,
3393 if(CHECKBIT(val64, 0x0))
3394 stat_info->xpak_stat.alarm_laser_output_power_low++;
3396 /* Reading the Warning flags */
3399 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3401 if(CHECKBIT(val64, 0x7))
3402 stat_info->xpak_stat.warn_transceiver_temp_high++;
3404 if(CHECKBIT(val64, 0x6))
3405 stat_info->xpak_stat.warn_transceiver_temp_low++;
3407 if(CHECKBIT(val64, 0x3))
3408 stat_info->xpak_stat.warn_laser_bias_current_high++;
3410 if(CHECKBIT(val64, 0x2))
3411 stat_info->xpak_stat.warn_laser_bias_current_low++;
3413 if(CHECKBIT(val64, 0x1))
3414 stat_info->xpak_stat.warn_laser_output_power_high++;
3416 if(CHECKBIT(val64, 0x0))
3417 stat_info->xpak_stat.warn_laser_output_power_low++;
3421 * wait_for_cmd_complete - waits for a command to complete.
3422 * @sp : private member of the device structure, which is a pointer to the
3423 * s2io_nic structure.
3424 * Description: Function that waits for a command to Write into RMAC
3425 * ADDR DATA registers to be completed and returns either success or
3426 * error depending on whether the command was complete or not.
3428 * SUCCESS on success and FAILURE on failure.
3431 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3434 int ret = FAILURE, cnt = 0, delay = 1;
3437 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3441 val64 = readq(addr);
3442 if (bit_state == S2IO_BIT_RESET) {
3443 if (!(val64 & busy_bit)) {
3448 if (!(val64 & busy_bit)) {
3465 * check_pci_device_id - Checks if the device id is supported
3467 * Description: Function to check if the pci device id is supported by driver.
3468 * Return value: Actual device id if supported else PCI_ANY_ID
3470 static u16 check_pci_device_id(u16 id)
3473 case PCI_DEVICE_ID_HERC_WIN:
3474 case PCI_DEVICE_ID_HERC_UNI:
3475 return XFRAME_II_DEVICE;
3476 case PCI_DEVICE_ID_S2IO_UNI:
3477 case PCI_DEVICE_ID_S2IO_WIN:
3478 return XFRAME_I_DEVICE;
3485 * s2io_reset - Resets the card.
3486 * @sp : private member of the device structure.
3487 * Description: Function to Reset the card. This function then also
3488 * restores the previously saved PCI configuration space registers as
3489 * the card reset also resets the configuration space.
3494 static void s2io_reset(struct s2io_nic * sp)
3496 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3501 unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3502 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3504 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3505 __FUNCTION__, sp->dev->name);
3507 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3508 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3510 val64 = SW_RESET_ALL;
3511 writeq(val64, &bar0->sw_reset);
3512 if (strstr(sp->product_name, "CX4")) {
3516 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3518 /* Restore the PCI state saved during initialization. */
3519 pci_restore_state(sp->pdev);
3520 pci_read_config_word(sp->pdev, 0x2, &val16);
3521 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3526 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3527 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3530 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3534 /* Set swapper to enable I/O register access */
3535 s2io_set_swapper(sp);
3537 /* restore mac_addr entries */
3538 do_s2io_restore_unicast_mc(sp);
3540 /* Restore the MSIX table entries from local variables */
3541 restore_xmsi_data(sp);
3543 /* Clear certain PCI/PCI-X fields after reset */
3544 if (sp->device_type == XFRAME_II_DEVICE) {
3545 /* Clear "detected parity error" bit */
3546 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3548 /* Clearing PCIX Ecc status register */
3549 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3551 /* Clearing PCI_STATUS error reflected here */
3552 writeq(s2BIT(62), &bar0->txpic_int_reg);
3555 /* Reset device statistics maintained by OS */
3556 memset(&sp->stats, 0, sizeof (struct net_device_stats));
3558 up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
3559 down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
3560 up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
3561 down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
3562 reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
3563 mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
3564 mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
3565 watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
3566 /* save link up/down time/cnt, reset/memory/watchdog cnt */
3567 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
3568 /* restore link up/down time/cnt, reset/memory/watchdog cnt */
3569 sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
3570 sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
3571 sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
3572 sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
3573 sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
3574 sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
3575 sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
3576 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;
3578 /* SXE-002: Configure link and activity LED to turn it off */
3579 subid = sp->pdev->subsystem_device;
3580 if (((subid & 0xFF) >= 0x07) &&
3581 (sp->device_type == XFRAME_I_DEVICE)) {
3582 val64 = readq(&bar0->gpio_control);
3583 val64 |= 0x0000800000000000ULL;
3584 writeq(val64, &bar0->gpio_control);
3585 val64 = 0x0411040400000000ULL;
3586 writeq(val64, (void __iomem *)bar0 + 0x2700);
3590 * Clear spurious ECC interrupts that would have occured on
3591 * XFRAME II cards after reset.
3593 if (sp->device_type == XFRAME_II_DEVICE) {
3594 val64 = readq(&bar0->pcc_err_reg);
3595 writeq(val64, &bar0->pcc_err_reg);
3598 sp->device_enabled_once = FALSE;
3602 * s2io_set_swapper - to set the swapper controle on the card
3603 * @sp : private member of the device structure,
3604 * pointer to the s2io_nic structure.
3605 * Description: Function to set the swapper control on the card
3606 * correctly depending on the 'endianness' of the system.
3608 * SUCCESS on success and FAILURE on failure.
3611 static int s2io_set_swapper(struct s2io_nic * sp)
3613 struct net_device *dev = sp->dev;
3614 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3615 u64 val64, valt, valr;
3618 * Set proper endian settings and verify the same by reading
3619 * the PIF Feed-back register.
3622 val64 = readq(&bar0->pif_rd_swapper_fb);
3623 if (val64 != 0x0123456789ABCDEFULL) {
3625 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3626 0x8100008181000081ULL, /* FE=1, SE=0 */
3627 0x4200004242000042ULL, /* FE=0, SE=1 */
3628 0}; /* FE=0, SE=0 */
3631 writeq(value[i], &bar0->swapper_ctrl);
3632 val64 = readq(&bar0->pif_rd_swapper_fb);
3633 if (val64 == 0x0123456789ABCDEFULL)
3638 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3640 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3641 (unsigned long long) val64);
3646 valr = readq(&bar0->swapper_ctrl);
3649 valt = 0x0123456789ABCDEFULL;
3650 writeq(valt, &bar0->xmsi_address);
3651 val64 = readq(&bar0->xmsi_address);
3655 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3656 0x0081810000818100ULL, /* FE=1, SE=0 */
3657 0x0042420000424200ULL, /* FE=0, SE=1 */
3658 0}; /* FE=0, SE=0 */
3661 writeq((value[i] | valr), &bar0->swapper_ctrl);
3662 writeq(valt, &bar0->xmsi_address);
3663 val64 = readq(&bar0->xmsi_address);
3669 unsigned long long x = val64;
3670 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3671 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3675 val64 = readq(&bar0->swapper_ctrl);
3676 val64 &= 0xFFFF000000000000ULL;
3680 * The device by default set to a big endian format, so a
3681 * big endian driver need not set anything.
3683 val64 |= (SWAPPER_CTRL_TXP_FE |
3684 SWAPPER_CTRL_TXP_SE |
3685 SWAPPER_CTRL_TXD_R_FE |
3686 SWAPPER_CTRL_TXD_W_FE |
3687 SWAPPER_CTRL_TXF_R_FE |
3688 SWAPPER_CTRL_RXD_R_FE |
3689 SWAPPER_CTRL_RXD_W_FE |
3690 SWAPPER_CTRL_RXF_W_FE |
3691 SWAPPER_CTRL_XMSI_FE |
3692 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3693 if (sp->config.intr_type == INTA)
3694 val64 |= SWAPPER_CTRL_XMSI_SE;
3695 writeq(val64, &bar0->swapper_ctrl);
3698 * Initially we enable all bits to make it accessible by the
3699 * driver, then we selectively enable only those bits that
3702 val64 |= (SWAPPER_CTRL_TXP_FE |
3703 SWAPPER_CTRL_TXP_SE |
3704 SWAPPER_CTRL_TXD_R_FE |
3705 SWAPPER_CTRL_TXD_R_SE |
3706 SWAPPER_CTRL_TXD_W_FE |
3707 SWAPPER_CTRL_TXD_W_SE |
3708 SWAPPER_CTRL_TXF_R_FE |
3709 SWAPPER_CTRL_RXD_R_FE |
3710 SWAPPER_CTRL_RXD_R_SE |
3711 SWAPPER_CTRL_RXD_W_FE |
3712 SWAPPER_CTRL_RXD_W_SE |
3713 SWAPPER_CTRL_RXF_W_FE |
3714 SWAPPER_CTRL_XMSI_FE |
3715 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3716 if (sp->config.intr_type == INTA)
3717 val64 |= SWAPPER_CTRL_XMSI_SE;
3718 writeq(val64, &bar0->swapper_ctrl);
3720 val64 = readq(&bar0->swapper_ctrl);
3723 * Verifying if endian settings are accurate by reading a
3724 * feedback register.
3726 val64 = readq(&bar0->pif_rd_swapper_fb);
3727 if (val64 != 0x0123456789ABCDEFULL) {
3728 /* Endian settings are incorrect, calls for another dekko. */
3729 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3731 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3732 (unsigned long long) val64);
3739 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3741 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3743 int ret = 0, cnt = 0;
3746 val64 = readq(&bar0->xmsi_access);
3747 if (!(val64 & s2BIT(15)))
3753 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3760 static void restore_xmsi_data(struct s2io_nic *nic)
3762 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3767 if (nic->device_type == XFRAME_I_DEVICE)
3770 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3771 msix_index = (i) ? ((i-1) * 8 + 1): 0;
3772 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3773 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3774 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3775 writeq(val64, &bar0->xmsi_access);
3776 if (wait_for_msix_trans(nic, msix_index)) {
3777 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3783 static void store_xmsi_data(struct s2io_nic *nic)
3785 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3786 u64 val64, addr, data;
3789 if (nic->device_type == XFRAME_I_DEVICE)
3792 /* Store and display */
3793 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3794 msix_index = (i) ? ((i-1) * 8 + 1): 0;
3795 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3796 writeq(val64, &bar0->xmsi_access);
3797 if (wait_for_msix_trans(nic, msix_index)) {
3798 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3801 addr = readq(&bar0->xmsi_address);
3802 data = readq(&bar0->xmsi_data);
3804 nic->msix_info[i].addr = addr;
3805 nic->msix_info[i].data = data;
3810 static int s2io_enable_msi_x(struct s2io_nic *nic)
3812 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3814 u16 msi_control; /* Temp variable */
3815 int ret, i, j, msix_indx = 1;
3817 nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry),
3819 if (!nic->entries) {
3820 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3822 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3825 nic->mac_control.stats_info->sw_stat.mem_allocated
3826 += (nic->num_entries * sizeof(struct msix_entry));
3828 memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry));
3831 kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry),
3833 if (!nic->s2io_entries) {
3834 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3836 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3837 kfree(nic->entries);
3838 nic->mac_control.stats_info->sw_stat.mem_freed
3839 += (nic->num_entries * sizeof(struct msix_entry));
3842 nic->mac_control.stats_info->sw_stat.mem_allocated
3843 += (nic->num_entries * sizeof(struct s2io_msix_entry));
3844 memset(nic->s2io_entries, 0,
3845 nic->num_entries * sizeof(struct s2io_msix_entry));
3847 nic->entries[0].entry = 0;
3848 nic->s2io_entries[0].entry = 0;
3849 nic->s2io_entries[0].in_use = MSIX_FLG;
3850 nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3851 nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3853 for (i = 1; i < nic->num_entries; i++) {
3854 nic->entries[i].entry = ((i - 1) * 8) + 1;
3855 nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3856 nic->s2io_entries[i].arg = NULL;
3857 nic->s2io_entries[i].in_use = 0;
3860 rx_mat = readq(&bar0->rx_mat);
3861 for (j = 0; j < nic->config.rx_ring_num; j++) {
3862 rx_mat |= RX_MAT_SET(j, msix_indx);
3863 nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3864 nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3865 nic->s2io_entries[j+1].in_use = MSIX_FLG;
3868 writeq(rx_mat, &bar0->rx_mat);
3869 readq(&bar0->rx_mat);
3871 ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
3872 /* We fail init if error or we get less vectors than min required */
3874 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3875 kfree(nic->entries);
3876 nic->mac_control.stats_info->sw_stat.mem_freed
3877 += (nic->num_entries * sizeof(struct msix_entry));
3878 kfree(nic->s2io_entries);
3879 nic->mac_control.stats_info->sw_stat.mem_freed
3880 += (nic->num_entries * sizeof(struct s2io_msix_entry));
3881 nic->entries = NULL;
3882 nic->s2io_entries = NULL;
3887 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3888 * in the herc NIC. (Temp change, needs to be removed later)
3890 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3891 msi_control |= 0x1; /* Enable MSI */
3892 pci_write_config_word(nic->pdev, 0x42, msi_control);
3897 /* Handle software interrupt used during MSI(X) test */
3898 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3900 struct s2io_nic *sp = dev_id;
3902 sp->msi_detected = 1;
3903 wake_up(&sp->msi_wait);
3908 /* Test interrupt path by forcing a a software IRQ */
3909 static int s2io_test_msi(struct s2io_nic *sp)
3911 struct pci_dev *pdev = sp->pdev;
3912 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3916 err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3919 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3920 sp->dev->name, pci_name(pdev), pdev->irq);
3924 init_waitqueue_head (&sp->msi_wait);
3925 sp->msi_detected = 0;
3927 saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3928 val64 |= SCHED_INT_CTRL_ONE_SHOT;
3929 val64 |= SCHED_INT_CTRL_TIMER_EN;
3930 val64 |= SCHED_INT_CTRL_INT2MSI(1);
3931 writeq(val64, &bar0->scheduled_int_ctrl);
3933 wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3935 if (!sp->msi_detected) {
3936 /* MSI(X) test failed, go back to INTx mode */
3937 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3938 "using MSI(X) during test\n", sp->dev->name,
3944 free_irq(sp->entries[1].vector, sp);
3946 writeq(saved64, &bar0->scheduled_int_ctrl);
3951 static void remove_msix_isr(struct s2io_nic *sp)
3956 for (i = 0; i < sp->num_entries; i++) {
3957 if (sp->s2io_entries[i].in_use ==
3958 MSIX_REGISTERED_SUCCESS) {
3959 int vector = sp->entries[i].vector;
3960 void *arg = sp->s2io_entries[i].arg;
3961 free_irq(vector, arg);
3966 kfree(sp->s2io_entries);
3968 sp->s2io_entries = NULL;
3970 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3971 msi_control &= 0xFFFE; /* Disable MSI */
3972 pci_write_config_word(sp->pdev, 0x42, msi_control);
3974 pci_disable_msix(sp->pdev);
3977 static void remove_inta_isr(struct s2io_nic *sp)
3979 struct net_device *dev = sp->dev;
3981 free_irq(sp->pdev->irq, dev);
3984 /* ********************************************************* *
3985 * Functions defined below concern the OS part of the driver *
3986 * ********************************************************* */
3989 * s2io_open - open entry point of the driver
3990 * @dev : pointer to the device structure.
3992 * This function is the open entry point of the driver. It mainly calls a
3993 * function to allocate Rx buffers and inserts them into the buffer
3994 * descriptors and then enables the Rx part of the NIC.
3996 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4000 static int s2io_open(struct net_device *dev)
4002 struct s2io_nic *sp = dev->priv;
4006 * Make sure you have link off by default every time
4007 * Nic is initialized
4009 netif_carrier_off(dev);
4010 sp->last_link_state = 0;
4012 /* Initialize H/W and enable interrupts */
4013 err = s2io_card_up(sp);
4015 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4017 goto hw_init_failed;
4020 if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
4021 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
4024 goto hw_init_failed;
4026 s2io_start_all_tx_queue(sp);
4030 if (sp->config.intr_type == MSI_X) {
4033 sp->mac_control.stats_info->sw_stat.mem_freed
4034 += (sp->num_entries * sizeof(struct msix_entry));
4036 if (sp->s2io_entries) {
4037 kfree(sp->s2io_entries);
4038 sp->mac_control.stats_info->sw_stat.mem_freed
4039 += (sp->num_entries * sizeof(struct s2io_msix_entry));
4046 * s2io_close -close entry point of the driver
4047 * @dev : device pointer.
4049 * This is the stop entry point of the driver. It needs to undo exactly
4050 * whatever was done by the open entry point,thus it's usually referred to
4051 * as the close function.Among other things this function mainly stops the
4052 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
4054 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4058 static int s2io_close(struct net_device *dev)
4060 struct s2io_nic *sp = dev->priv;
4061 struct config_param *config = &sp->config;
4065 /* Return if the device is already closed *
4066 * Can happen when s2io_card_up failed in change_mtu *
4068 if (!is_s2io_card_up(sp))
4071 s2io_stop_all_tx_queue(sp);
4072 /* delete all populated mac entries */
4073 for (offset = 1; offset < config->max_mc_addr; offset++) {
4074 tmp64 = do_s2io_read_unicast_mc(sp, offset);
4075 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
4076 do_s2io_delete_unicast_mc(sp, tmp64);
4085 * s2io_xmit - Tx entry point of te driver
4086 * @skb : the socket buffer containing the Tx data.
4087 * @dev : device pointer.
4089 * This function is the Tx entry point of the driver. S2IO NIC supports
4090 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
4091 * NOTE: when device cant queue the pkt,just the trans_start variable will
4094 * 0 on success & 1 on failure.
4097 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4099 struct s2io_nic *sp = dev->priv;
4100 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4103 struct TxFIFO_element __iomem *tx_fifo;
4104 unsigned long flags = 0;
4106 struct fifo_info *fifo = NULL;
4107 struct mac_info *mac_control;
4108 struct config_param *config;
4109 int do_spin_lock = 1;
4111 int enable_per_list_interrupt = 0;
4112 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
4114 mac_control = &sp->mac_control;
4115 config = &sp->config;
4117 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4119 if (unlikely(skb->len <= 0)) {
4120 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
4121 dev_kfree_skb_any(skb);
4125 if (!is_s2io_card_up(sp)) {
4126 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4133 if (sp->vlgrp && vlan_tx_tag_present(skb))
4134 vlan_tag = vlan_tx_tag_get(skb);
4135 if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4136 if (skb->protocol == htons(ETH_P_IP)) {
4141 if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
4142 th = (struct tcphdr *)(((unsigned char *)ip) +
4145 if (ip->protocol == IPPROTO_TCP) {
4146 queue_len = sp->total_tcp_fifos;
4147 queue = (ntohs(th->source) +
4149 sp->fifo_selector[queue_len - 1];
4150 if (queue >= queue_len)
4151 queue = queue_len - 1;
4152 } else if (ip->protocol == IPPROTO_UDP) {
4153 queue_len = sp->total_udp_fifos;
4154 queue = (ntohs(th->source) +
4156 sp->fifo_selector[queue_len - 1];
4157 if (queue >= queue_len)
4158 queue = queue_len - 1;
4159 queue += sp->udp_fifo_idx;
4160 if (skb->len > 1024)
4161 enable_per_list_interrupt = 1;
4166 } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4167 /* get fifo number based on skb->priority value */
4168 queue = config->fifo_mapping
4169 [skb->priority & (MAX_TX_FIFOS - 1)];
4170 fifo = &mac_control->fifos[queue];
4173 spin_lock_irqsave(&fifo->tx_lock, flags);
4175 if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
4176 return NETDEV_TX_LOCKED;
4179 if (sp->config.multiq) {
4180 if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4181 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4182 return NETDEV_TX_BUSY;
4184 } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4185 if (netif_queue_stopped(dev)) {
4186 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4187 return NETDEV_TX_BUSY;
4191 put_off = (u16) fifo->tx_curr_put_info.offset;
4192 get_off = (u16) fifo->tx_curr_get_info.offset;
4193 txdp = (struct TxD *) fifo->list_info[put_off].list_virt_addr;
4195 queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4196 /* Avoid "put" pointer going beyond "get" pointer */
4197 if (txdp->Host_Control ||
4198 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4199 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4200 s2io_stop_tx_queue(sp, fifo->fifo_no);
4202 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4206 offload_type = s2io_offload_type(skb);
4207 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4208 txdp->Control_1 |= TXD_TCP_LSO_EN;
4209 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4211 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4213 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
4216 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4217 txdp->Control_1 |= TXD_LIST_OWN_XENA;
4218 txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4219 if (enable_per_list_interrupt)
4220 if (put_off & (queue_len >> 5))
4221 txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4223 txdp->Control_2 |= TXD_VLAN_ENABLE;
4224 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4227 frg_len = skb->len - skb->data_len;
4228 if (offload_type == SKB_GSO_UDP) {
4231 ufo_size = s2io_udp_mss(skb);
4233 txdp->Control_1 |= TXD_UFO_EN;
4234 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4235 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4237 /* both variants do cpu_to_be64(be32_to_cpu(...)) */
4238 fifo->ufo_in_band_v[put_off] =
4239 (__force u64)skb_shinfo(skb)->ip6_frag_id;
4241 fifo->ufo_in_band_v[put_off] =
4242 (__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
4244 txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
4245 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4246 fifo->ufo_in_band_v,
4247 sizeof(u64), PCI_DMA_TODEVICE);
4248 if (pci_dma_mapping_error(txdp->Buffer_Pointer))
4249 goto pci_map_failed;
4253 txdp->Buffer_Pointer = pci_map_single
4254 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
4255 if (pci_dma_mapping_error(txdp->Buffer_Pointer))
4256 goto pci_map_failed;
4258 txdp->Host_Control = (unsigned long) skb;
4259 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4260 if (offload_type == SKB_GSO_UDP)
4261 txdp->Control_1 |= TXD_UFO_EN;
4263 frg_cnt = skb_shinfo(skb)->nr_frags;
4264 /* For fragmented SKB. */
4265 for (i = 0; i < frg_cnt; i++) {
4266 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4267 /* A '0' length fragment will be ignored */
4271 txdp->Buffer_Pointer = (u64) pci_map_page
4272 (sp->pdev, frag->page, frag->page_offset,
4273 frag->size, PCI_DMA_TODEVICE);
4274 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
4275 if (offload_type == SKB_GSO_UDP)
4276 txdp->Control_1 |= TXD_UFO_EN;
4278 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4280 if (offload_type == SKB_GSO_UDP)
4281 frg_cnt++; /* as Txd0 was used for inband header */
4283 tx_fifo = mac_control->tx_FIFO_start[queue];
4284 val64 = fifo->list_info[put_off].list_phy_addr;
4285 writeq(val64, &tx_fifo->TxDL_Pointer);
4287 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4290 val64 |= TX_FIFO_SPECIAL_FUNC;
4292 writeq(val64, &tx_fifo->List_Control);
4297 if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4299 fifo->tx_curr_put_info.offset = put_off;
4301 /* Avoid "put" pointer going beyond "get" pointer */
4302 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4303 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
4305 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4307 s2io_stop_tx_queue(sp, fifo->fifo_no);
4309 mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
4310 dev->trans_start = jiffies;
4311 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4313 if (sp->config.intr_type == MSI_X)
4314 tx_intr_handler(fifo);
4318 stats->pci_map_fail_cnt++;
4319 s2io_stop_tx_queue(sp, fifo->fifo_no);
4320 stats->mem_freed += skb->truesize;
4322 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4327 s2io_alarm_handle(unsigned long data)
4329 struct s2io_nic *sp = (struct s2io_nic *)data;
4330 struct net_device *dev = sp->dev;
4332 s2io_handle_errors(dev);
4333 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4336 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4338 struct ring_info *ring = (struct ring_info *)dev_id;
4339 struct s2io_nic *sp = ring->nic;
4340 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4341 struct net_device *dev = sp->dev;
4343 if (unlikely(!is_s2io_card_up(sp)))
4346 if (sp->config.napi) {
4347 u8 __iomem *addr = NULL;
4350 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4351 addr += (7 - ring->ring_no);
4352 val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4355 netif_rx_schedule(dev, &ring->napi);
4357 rx_intr_handler(ring, 0);
4358 s2io_chk_rx_buffers(ring);
4364 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4367 struct fifo_info *fifos = (struct fifo_info *)dev_id;
4368 struct s2io_nic *sp = fifos->nic;
4369 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4370 struct config_param *config = &sp->config;
4373 if (unlikely(!is_s2io_card_up(sp)))
4376 reason = readq(&bar0->general_int_status);
4377 if (unlikely(reason == S2IO_MINUS_ONE))
4378 /* Nothing much can be done. Get out */
4381 if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4382 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4384 if (reason & GEN_INTR_TXPIC)
4385 s2io_txpic_intr_handle(sp);
4387 if (reason & GEN_INTR_TXTRAFFIC)
4388 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4390 for (i = 0; i < config->tx_fifo_num; i++)
4391 tx_intr_handler(&fifos[i]);
4393 writeq(sp->general_int_mask, &bar0->general_int_mask);
4394 readl(&bar0->general_int_status);
4397 /* The interrupt was not raised by us */
4401 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4403 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4406 val64 = readq(&bar0->pic_int_status);
4407 if (val64 & PIC_INT_GPIO) {
4408 val64 = readq(&bar0->gpio_int_reg);
4409 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4410 (val64 & GPIO_INT_REG_LINK_UP)) {
4412 * This is unstable state so clear both up/down
4413 * interrupt and adapter to re-evaluate the link state.
4415 val64 |= GPIO_INT_REG_LINK_DOWN;
4416 val64 |= GPIO_INT_REG_LINK_UP;
4417 writeq(val64, &bar0->gpio_int_reg);
4418 val64 = readq(&bar0->gpio_int_mask);
4419 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4420 GPIO_INT_MASK_LINK_DOWN);
4421 writeq(val64, &bar0->gpio_int_mask);
4423 else if (val64 & GPIO_INT_REG_LINK_UP) {
4424 val64 = readq(&bar0->adapter_status);
4425 /* Enable Adapter */
4426 val64 = readq(&bar0->adapter_control);
4427 val64 |= ADAPTER_CNTL_EN;
4428 writeq(val64, &bar0->adapter_control);
4429 val64 |= ADAPTER_LED_ON;
4430 writeq(val64, &bar0->adapter_control);
4431 if (!sp->device_enabled_once)
4432 sp->device_enabled_once = 1;
4434 s2io_link(sp, LINK_UP);
4436 * unmask link down interrupt and mask link-up
4439 val64 = readq(&bar0->gpio_int_mask);
4440 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4441 val64 |= GPIO_INT_MASK_LINK_UP;
4442 writeq(val64, &bar0->gpio_int_mask);
4444 }else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4445 val64 = readq(&bar0->adapter_status);
4446 s2io_link(sp, LINK_DOWN);
4447 /* Link is down so unmaks link up interrupt */
4448 val64 = readq(&bar0->gpio_int_mask);
4449 val64 &= ~GPIO_INT_MASK_LINK_UP;
4450 val64 |= GPIO_INT_MASK_LINK_DOWN;
4451 writeq(val64, &bar0->gpio_int_mask);
4454 val64 = readq(&bar0->adapter_control);
4455 val64 = val64 &(~ADAPTER_LED_ON);
4456 writeq(val64, &bar0->adapter_control);
4459 val64 = readq(&bar0->gpio_int_mask);
4463 * do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4464 * @value: alarm bits
4465 * @addr: address value
4466 * @cnt: counter variable
4467 * Description: Check for alarm and increment the counter
4469 * 1 - if alarm bit set
4470 * 0 - if alarm bit is not set
4472 static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4473 unsigned long long *cnt)
4476 val64 = readq(addr);
4477 if ( val64 & value ) {
4478 writeq(val64, addr);
4487 * s2io_handle_errors - Xframe error indication handler
4488 * @nic: device private variable
4489 * Description: Handle alarms such as loss of link, single or
4490 * double ECC errors, critical and serious errors.
4494 static void s2io_handle_errors(void * dev_id)
4496 struct net_device *dev = (struct net_device *) dev_id;
4497 struct s2io_nic *sp = dev->priv;
4498 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4499 u64 temp64 = 0,val64=0;
4502 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4503 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4505 if (!is_s2io_card_up(sp))
4508 if (pci_channel_offline(sp->pdev))
4511 memset(&sw_stat->ring_full_cnt, 0,
4512 sizeof(sw_stat->ring_full_cnt));
4514 /* Handling the XPAK counters update */
4515 if(stats->xpak_timer_count < 72000) {
4516 /* waiting for an hour */
4517 stats->xpak_timer_count++;
4519 s2io_updt_xpak_counter(dev);
4520 /* reset the count to zero */
4521 stats->xpak_timer_count = 0;
4524 /* Handling link status change error Intr */
4525 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4526 val64 = readq(&bar0->mac_rmac_err_reg);
4527 writeq(val64, &bar0->mac_rmac_err_reg);
4528 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4529 schedule_work(&sp->set_link_task);
4532 /* In case of a serious error, the device will be Reset. */
4533 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4534 &sw_stat->serious_err_cnt))
4537 /* Check for data parity error */
4538 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4539 &sw_stat->parity_err_cnt))
4542 /* Check for ring full counter */
4543 if (sp->device_type == XFRAME_II_DEVICE) {
4544 val64 = readq(&bar0->ring_bump_counter1);
4545 for (i=0; i<4; i++) {
4546 temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4547 temp64 >>= 64 - ((i+1)*16);
4548 sw_stat->ring_full_cnt[i] += temp64;
4551 val64 = readq(&bar0->ring_bump_counter2);
4552 for (i=0; i<4; i++) {
4553 temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4554 temp64 >>= 64 - ((i+1)*16);
4555 sw_stat->ring_full_cnt[i+4] += temp64;
4559 val64 = readq(&bar0->txdma_int_status);
4560 /*check for pfc_err*/
4561 if (val64 & TXDMA_PFC_INT) {
4562 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
4563 PFC_MISC_0_ERR | PFC_MISC_1_ERR|
4564 PFC_PCIX_ERR, &bar0->pfc_err_reg,
4565 &sw_stat->pfc_err_cnt))
4567 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
4568 &sw_stat->pfc_err_cnt);
4571 /*check for tda_err*/
4572 if (val64 & TXDMA_TDA_INT) {
4573 if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
4574 TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
4575 &sw_stat->tda_err_cnt))
4577 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4578 &bar0->tda_err_reg, &sw_stat->tda_err_cnt);
4580 /*check for pcc_err*/
4581 if (val64 & TXDMA_PCC_INT) {
4582 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
4583 | PCC_N_SERR | PCC_6_COF_OV_ERR
4584 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
4585 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
4586 | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
4587 &sw_stat->pcc_err_cnt))
4589 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4590 &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
4593 /*check for tti_err*/
4594 if (val64 & TXDMA_TTI_INT) {
4595 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
4596 &sw_stat->tti_err_cnt))
4598 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4599 &bar0->tti_err_reg, &sw_stat->tti_err_cnt);
4602 /*check for lso_err*/
4603 if (val64 & TXDMA_LSO_INT) {
4604 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
4605 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4606 &bar0->lso_err_reg, &sw_stat->lso_err_cnt))
4608 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4609 &bar0->lso_err_reg, &sw_stat->lso_err_cnt);
4612 /*check for tpa_err*/
4613 if (val64 & TXDMA_TPA_INT) {
4614 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
4615 &sw_stat->tpa_err_cnt))
4617 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
4618 &sw_stat->tpa_err_cnt);
4621 /*check for sm_err*/
4622 if (val64 & TXDMA_SM_INT) {
4623 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
4624 &sw_stat->sm_err_cnt))
4628 val64 = readq(&bar0->mac_int_status);
4629 if (val64 & MAC_INT_STATUS_TMAC_INT) {
4630 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4631 &bar0->mac_tmac_err_reg,
4632 &sw_stat->mac_tmac_err_cnt))
4634 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
4635 | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
4636 &bar0->mac_tmac_err_reg,
4637 &sw_stat->mac_tmac_err_cnt);
4640 val64 = readq(&bar0->xgxs_int_status);
4641 if (val64 & XGXS_INT_STATUS_TXGXS) {
4642 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4643 &bar0->xgxs_txgxs_err_reg,
4644 &sw_stat->xgxs_txgxs_err_cnt))
4646 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4647 &bar0->xgxs_txgxs_err_reg,
4648 &sw_stat->xgxs_txgxs_err_cnt);
4651 val64 = readq(&bar0->rxdma_int_status);
4652 if (val64 & RXDMA_INT_RC_INT_M) {
4653 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
4654 | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
4655 &bar0->rc_err_reg, &sw_stat->rc_err_cnt))
4657 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
4658 | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4659 &sw_stat->rc_err_cnt);
4660 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
4661 | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
4662 &sw_stat->prc_pcix_err_cnt))
4664 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
4665 | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
4666 &sw_stat->prc_pcix_err_cnt);
4669 if (val64 & RXDMA_INT_RPA_INT_M) {
4670 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4671 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
4673 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4674 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
4677 if (val64 & RXDMA_INT_RDA_INT_M) {
4678 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
4679 | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
4680 | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
4681 &bar0->rda_err_reg, &sw_stat->rda_err_cnt))
4683 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
4684 | RDA_MISC_ERR | RDA_PCIX_ERR,
4685 &bar0->rda_err_reg, &sw_stat->rda_err_cnt);
4688 if (val64 & RXDMA_INT_RTI_INT_M) {
4689 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
4690 &sw_stat->rti_err_cnt))
4692 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4693 &bar0->rti_err_reg, &sw_stat->rti_err_cnt);
4696 val64 = readq(&bar0->mac_int_status);
4697 if (val64 & MAC_INT_STATUS_RMAC_INT) {
4698 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4699 &bar0->mac_rmac_err_reg,
4700 &sw_stat->mac_rmac_err_cnt))
4702 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
4703 RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
4704 &sw_stat->mac_rmac_err_cnt);
4707 val64 = readq(&bar0->xgxs_int_status);
4708 if (val64 & XGXS_INT_STATUS_RXGXS) {
4709 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4710 &bar0->xgxs_rxgxs_err_reg,
4711 &sw_stat->xgxs_rxgxs_err_cnt))
4715 val64 = readq(&bar0->mc_int_status);
4716 if(val64 & MC_INT_STATUS_MC_INT) {
4717 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
4718 &sw_stat->mc_err_cnt))
4721 /* Handling Ecc errors */
4722 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4723 writeq(val64, &bar0->mc_err_reg);
4724 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4725 sw_stat->double_ecc_errs++;
4726 if (sp->device_type != XFRAME_II_DEVICE) {
4728 * Reset XframeI only if critical error
4731 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4732 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4736 sw_stat->single_ecc_errs++;
4742 s2io_stop_all_tx_queue(sp);
4743 schedule_work(&sp->rst_timer_task);
4744 sw_stat->soft_reset_cnt++;
4749 * s2io_isr - ISR handler of the device .
4750 * @irq: the irq of the device.
4751 * @dev_id: a void pointer to the dev structure of the NIC.
4752 * Description: This function is the ISR handler of the device. It
4753 * identifies the reason for the interrupt and calls the relevant
4754 * service routines. As a contongency measure, this ISR allocates the
4755 * recv buffers, if their numbers are below the panic value which is
4756 * presently set to 25% of the original number of rcv buffers allocated.
4758 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4759 * IRQ_NONE: will be returned if interrupt is not from our device
4761 static irqreturn_t s2io_isr(int irq, void *dev_id)
4763 struct net_device *dev = (struct net_device *) dev_id;
4764 struct s2io_nic *sp = dev->priv;
4765 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4768 struct mac_info *mac_control;
4769 struct config_param *config;
4771 /* Pretend we handled any irq's from a disconnected card */
4772 if (pci_channel_offline(sp->pdev))
4775 if (!is_s2io_card_up(sp))
4778 mac_control = &sp->mac_control;
4779 config = &sp->config;
4782 * Identify the cause for interrupt and call the appropriate
4783 * interrupt handler. Causes for the interrupt could be;
4788 reason = readq(&bar0->general_int_status);
4790 if (unlikely(reason == S2IO_MINUS_ONE) ) {
4791 /* Nothing much can be done. Get out */
4795 if (reason & (GEN_INTR_RXTRAFFIC |
4796 GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
4798 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4801 if (reason & GEN_INTR_RXTRAFFIC) {
4802 netif_rx_schedule(dev, &sp->napi);
4803 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4804 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4805 readl(&bar0->rx_traffic_int);
4809 * rx_traffic_int reg is an R1 register, writing all 1's
4810 * will ensure that the actual interrupt causing bit
4811 * get's cleared and hence a read can be avoided.
4813 if (reason & GEN_INTR_RXTRAFFIC)
4814 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4816 for (i = 0; i < config->rx_ring_num; i++)
4817 rx_intr_handler(&mac_control->rings[i], 0);
4821 * tx_traffic_int reg is an R1 register, writing all 1's
4822 * will ensure that the actual interrupt causing bit get's
4823 * cleared and hence a read can be avoided.
4825 if (reason & GEN_INTR_TXTRAFFIC)
4826 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4828 for (i = 0; i < config->tx_fifo_num; i++)
4829 tx_intr_handler(&mac_control->fifos[i]);
4831 if (reason & GEN_INTR_TXPIC)
4832 s2io_txpic_intr_handle(sp);
4835 * Reallocate the buffers from the interrupt handler itself.
4837 if (!config->napi) {
4838 for (i = 0; i < config->rx_ring_num; i++)
4839 s2io_chk_rx_buffers(&mac_control->rings[i]);
4841 writeq(sp->general_int_mask, &bar0->general_int_mask);
4842 readl(&bar0->general_int_status);
4848 /* The interrupt was not raised by us */
4858 static void s2io_updt_stats(struct s2io_nic *sp)
4860 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4864 if (is_s2io_card_up(sp)) {
4865 /* Apprx 30us on a 133 MHz bus */
4866 val64 = SET_UPDT_CLICKS(10) |
4867 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4868 writeq(val64, &bar0->stat_cfg);
4871 val64 = readq(&bar0->stat_cfg);
4872 if (!(val64 & s2BIT(0)))
4876 break; /* Updt failed */
4882 * s2io_get_stats - Updates the device statistics structure.
4883 * @dev : pointer to the device structure.
4885 * This function updates the device statistics structure in the s2io_nic
4886 * structure and returns a pointer to the same.
4888 * pointer to the updated net_device_stats structure.
4891 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4893 struct s2io_nic *sp = dev->priv;
4894 struct mac_info *mac_control;
4895 struct config_param *config;
4899 mac_control = &sp->mac_control;
4900 config = &sp->config;
4902 /* Configure Stats for immediate updt */
4903 s2io_updt_stats(sp);
4905 sp->stats.tx_packets =
4906 le32_to_cpu(mac_control->stats_info->tmac_frms);
4907 sp->stats.tx_errors =
4908 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4909 sp->stats.rx_errors =
4910 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4911 sp->stats.multicast =
4912 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4913 sp->stats.rx_length_errors =
4914 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4916 /* collect per-ring rx_packets and rx_bytes */
4917 sp->stats.rx_packets = sp->stats.rx_bytes = 0;
4918 for (i = 0; i < config->rx_ring_num; i++) {
4919 sp->stats.rx_packets += mac_control->rings[i].rx_packets;
4920 sp->stats.rx_bytes += mac_control->rings[i].rx_bytes;
4923 return (&sp->stats);
4927 * s2io_set_multicast - entry point for multicast address enable/disable.
4928 * @dev : pointer to the device structure
4930 * This function is a driver entry point which gets called by the kernel
4931 * whenever multicast addresses must be enabled/disabled. This also gets
4932 * called to set/reset promiscuous mode. Depending on the deivce flag, we
4933 * determine, if multicast address must be enabled or if promiscuous mode
4934 * is to be disabled etc.
4939 static void s2io_set_multicast(struct net_device *dev)
4942 struct dev_mc_list *mclist;
4943 struct s2io_nic *sp = dev->priv;
4944 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4945 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4947 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4949 struct config_param *config = &sp->config;
4951 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4952 /* Enable all Multicast addresses */
4953 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4954 &bar0->rmac_addr_data0_mem);
4955 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4956 &bar0->rmac_addr_data1_mem);
4957 val64 = RMAC_ADDR_CMD_MEM_WE |
4958 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4959 RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4960 writeq(val64, &bar0->rmac_addr_cmd_mem);
4961 /* Wait till command completes */
4962 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4963 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4967 sp->all_multi_pos = config->max_mc_addr - 1;
4968 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4969 /* Disable all Multicast addresses */
4970 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4971 &bar0->rmac_addr_data0_mem);
4972 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4973 &bar0->rmac_addr_data1_mem);
4974 val64 = RMAC_ADDR_CMD_MEM_WE |
4975 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4976 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4977 writeq(val64, &bar0->rmac_addr_cmd_mem);
4978 /* Wait till command completes */
4979 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4980 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4984 sp->all_multi_pos = 0;
4987 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4988 /* Put the NIC into promiscuous mode */
4989 add = &bar0->mac_cfg;
4990 val64 = readq(&bar0->mac_cfg);
4991 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4993 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4994 writel((u32) val64, add);
4995 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4996 writel((u32) (val64 >> 32), (add + 4));
4998 if (vlan_tag_strip != 1) {
4999 val64 = readq(&bar0->rx_pa_cfg);
5000 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
5001 writeq(val64, &bar0->rx_pa_cfg);
5002 vlan_strip_flag = 0;
5005 val64 = readq(&bar0->mac_cfg);
5006 sp->promisc_flg = 1;
5007 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
5009 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
5010 /* Remove the NIC from promiscuous mode */
5011 add = &bar0->mac_cfg;
5012 val64 = readq(&bar0->mac_cfg);
5013 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
5015 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5016 writel((u32) val64, add);
5017 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5018 writel((u32) (val64 >> 32), (add + 4));
5020 if (vlan_tag_strip != 0) {
5021 val64 = readq(&bar0->rx_pa_cfg);
5022 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5023 writeq(val64, &bar0->rx_pa_cfg);
5024 vlan_strip_flag = 1;
5027 val64 = readq(&bar0->mac_cfg);
5028 sp->promisc_flg = 0;
5029 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
5033 /* Update individual M_CAST address list */
5034 if ((!sp->m_cast_flg) && dev->mc_count) {
5036 (config->max_mc_addr - config->max_mac_addr)) {
5037 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
5039 DBG_PRINT(ERR_DBG, "can be added, please enable ");
5040 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
5044 prev_cnt = sp->mc_addr_count;
5045 sp->mc_addr_count = dev->mc_count;
5047 /* Clear out the previous list of Mc in the H/W. */
5048 for (i = 0; i < prev_cnt; i++) {
5049 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5050 &bar0->rmac_addr_data0_mem);
5051 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5052 &bar0->rmac_addr_data1_mem);
5053 val64 = RMAC_ADDR_CMD_MEM_WE |
5054 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5055 RMAC_ADDR_CMD_MEM_OFFSET
5056 (config->mc_start_offset + i);
5057 writeq(val64, &bar0->rmac_addr_cmd_mem);
5059 /* Wait for command completes */
5060 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5061 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5063 DBG_PRINT(ERR_DBG, "%s: Adding ",
5065 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5070 /* Create the new Rx filter list and update the same in H/W. */
5071 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
5072 i++, mclist = mclist->next) {
5073 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
5076 for (j = 0; j < ETH_ALEN; j++) {
5077 mac_addr |= mclist->dmi_addr[j];
5081 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5082 &bar0->rmac_addr_data0_mem);
5083 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5084 &bar0->rmac_addr_data1_mem);
5085 val64 = RMAC_ADDR_CMD_MEM_WE |
5086 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5087 RMAC_ADDR_CMD_MEM_OFFSET
5088 (i + config->mc_start_offset);
5089 writeq(val64, &bar0->rmac_addr_cmd_mem);
5091 /* Wait for command completes */
5092 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5093 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5095 DBG_PRINT(ERR_DBG, "%s: Adding ",
5097 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5104 /* read from CAM unicast & multicast addresses and store it in
5105 * def_mac_addr structure
5107 void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5111 struct config_param *config = &sp->config;
5113 /* store unicast & multicast mac addresses */
5114 for (offset = 0; offset < config->max_mc_addr; offset++) {
5115 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5116 /* if read fails disable the entry */
5117 if (mac_addr == FAILURE)
5118 mac_addr = S2IO_DISABLE_MAC_ENTRY;
5119 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5123 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5124 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5127 struct config_param *config = &sp->config;
5128 /* restore unicast mac address */
5129 for (offset = 0; offset < config->max_mac_addr; offset++)
5130 do_s2io_prog_unicast(sp->dev,
5131 sp->def_mac_addr[offset].mac_addr);
5133 /* restore multicast mac address */
5134 for (offset = config->mc_start_offset;
5135 offset < config->max_mc_addr; offset++)
5136 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5139 /* add a multicast MAC address to CAM */
5140 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5144 struct config_param *config = &sp->config;
5146 for (i = 0; i < ETH_ALEN; i++) {
5148 mac_addr |= addr[i];
5150 if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5153 /* check if the multicast mac already preset in CAM */
5154 for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5156 tmp64 = do_s2io_read_unicast_mc(sp, i);
5157 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5160 if (tmp64 == mac_addr)
5163 if (i == config->max_mc_addr) {
5165 "CAM full no space left for multicast MAC\n");
5168 /* Update the internal structure with this new mac address */
5169 do_s2io_copy_mac_addr(sp, i, mac_addr);
5171 return (do_s2io_add_mac(sp, mac_addr, i));
5174 /* add MAC address to CAM */
5175 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5178 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5180 writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5181 &bar0->rmac_addr_data0_mem);
5184 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5185 RMAC_ADDR_CMD_MEM_OFFSET(off);
5186 writeq(val64, &bar0->rmac_addr_cmd_mem);
5188 /* Wait till command completes */
5189 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5190 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5192 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5197 /* deletes a specified unicast/multicast mac entry from CAM */
5198 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5201 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5202 struct config_param *config = &sp->config;
5205 offset < config->max_mc_addr; offset++) {
5206 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5207 if (tmp64 == addr) {
5208 /* disable the entry by writing 0xffffffffffffULL */
5209 if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
5211 /* store the new mac list from CAM */
5212 do_s2io_store_unicast_mc(sp);
5216 DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5217 (unsigned long long)addr);
5221 /* read mac entries from CAM */
5222 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5224 u64 tmp64 = 0xffffffffffff0000ULL, val64;
5225 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5229 RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5230 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5231 writeq(val64, &bar0->rmac_addr_cmd_mem);
5233 /* Wait till command completes */
5234 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5235 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5237 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5240 tmp64 = readq(&bar0->rmac_addr_data0_mem);
5241 return (tmp64 >> 16);
5245 * s2io_set_mac_addr driver entry point
5248 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5250 struct sockaddr *addr = p;
5252 if (!is_valid_ether_addr(addr->sa_data))
5255 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5257 /* store the MAC address in CAM */
5258 return (do_s2io_prog_unicast(dev, dev->dev_addr));
5261 * do_s2io_prog_unicast - Programs the Xframe mac address
5262 * @dev : pointer to the device structure.
5263 * @addr: a uchar pointer to the new mac address which is to be set.
5264 * Description : This procedure will program the Xframe to receive
5265 * frames with new Mac Address
5266 * Return value: SUCCESS on success and an appropriate (-)ve integer
5267 * as defined in errno.h file on failure.
5270 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5272 struct s2io_nic *sp = dev->priv;
5273 register u64 mac_addr = 0, perm_addr = 0;
5276 struct config_param *config = &sp->config;
5279 * Set the new MAC address as the new unicast filter and reflect this
5280 * change on the device address registered with the OS. It will be
5283 for (i = 0; i < ETH_ALEN; i++) {
5285 mac_addr |= addr[i];
5287 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5290 /* check if the dev_addr is different than perm_addr */
5291 if (mac_addr == perm_addr)
5294 /* check if the mac already preset in CAM */
5295 for (i = 1; i < config->max_mac_addr; i++) {
5296 tmp64 = do_s2io_read_unicast_mc(sp, i);
5297 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5300 if (tmp64 == mac_addr) {
5302 "MAC addr:0x%llx already present in CAM\n",
5303 (unsigned long long)mac_addr);
5307 if (i == config->max_mac_addr) {
5308 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5311 /* Update the internal structure with this new mac address */
5312 do_s2io_copy_mac_addr(sp, i, mac_addr);
5313 return (do_s2io_add_mac(sp, mac_addr, i));
5317 * s2io_ethtool_sset - Sets different link parameters.
5318 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
5319 * @info: pointer to the structure with parameters given by ethtool to set
5322 * The function sets different link parameters provided by the user onto
5328 static int s2io_ethtool_sset(struct net_device *dev,
5329 struct ethtool_cmd *info)
5331 struct s2io_nic *sp = dev->priv;
5332 if ((info->autoneg == AUTONEG_ENABLE) ||
5333 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
5336 s2io_close(sp->dev);
5344 * s2io_ethtol_gset - Return link specific information.
5345 * @sp : private member of the device structure, pointer to the
5346 * s2io_nic structure.
5347 * @info : pointer to the structure with parameters given by ethtool
5348 * to return link information.
5350 * Returns link specific information like speed, duplex etc.. to ethtool.
5352 * return 0 on success.
5355 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5357 struct s2io_nic *sp = dev->priv;
5358 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5359 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5360 info->port = PORT_FIBRE;
5362 /* info->transceiver */
5363 info->transceiver = XCVR_EXTERNAL;
5365 if (netif_carrier_ok(sp->dev)) {
5366 info->speed = 10000;
5367 info->duplex = DUPLEX_FULL;
5373 info->autoneg = AUTONEG_DISABLE;
5378 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5379 * @sp : private member of the device structure, which is a pointer to the
5380 * s2io_nic structure.
5381 * @info : pointer to the structure with parameters given by ethtool to
5382 * return driver information.
5384 * Returns driver specefic information like name, version etc.. to ethtool.
5389 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5390 struct ethtool_drvinfo *info)
5392 struct s2io_nic *sp = dev->priv;
5394 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5395 strncpy(info->version, s2io_driver_version, sizeof(info->version));
5396 strncpy(info->fw_version, "", sizeof(info->fw_version));
5397 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5398 info->regdump_len = XENA_REG_SPACE;
5399 info->eedump_len = XENA_EEPROM_SPACE;
5403 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
5404 * @sp: private member of the device structure, which is a pointer to the
5405 * s2io_nic structure.
5406 * @regs : pointer to the structure with parameters given by ethtool for
5407 * dumping the registers.
5408 * @reg_space: The input argumnet into which all the registers are dumped.
5410 * Dumps the entire register space of xFrame NIC into the user given
5416 static void s2io_ethtool_gregs(struct net_device *dev,
5417 struct ethtool_regs *regs, void *space)
5421 u8 *reg_space = (u8 *) space;
5422 struct s2io_nic *sp = dev->priv;
5424 regs->len = XENA_REG_SPACE;
5425 regs->version = sp->pdev->subsystem_device;
5427 for (i = 0; i < regs->len; i += 8) {
5428 reg = readq(sp->bar0 + i);
5429 memcpy((reg_space + i), ®, 8);
5434 * s2io_phy_id - timer function that alternates adapter LED.
5435 * @data : address of the private member of the device structure, which
5436 * is a pointer to the s2io_nic structure, provided as an u32.
5437 * Description: This is actually the timer function that alternates the
5438 * adapter LED bit of the adapter control bit to set/reset every time on
5439 * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
5440 * once every second.
5442 static void s2io_phy_id(unsigned long data)
5444 struct s2io_nic *sp = (struct s2io_nic *) data;
5445 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5449 subid = sp->pdev->subsystem_device;
5450 if ((sp->device_type == XFRAME_II_DEVICE) ||
5451 ((subid & 0xFF) >= 0x07)) {
5452 val64 = readq(&bar0->gpio_control);
5453 val64 ^= GPIO_CTRL_GPIO_0;
5454 writeq(val64, &bar0->gpio_control);
5456 val64 = readq(&bar0->adapter_control);
5457 val64 ^= ADAPTER_LED_ON;
5458 writeq(val64, &bar0->adapter_control);
5461 mod_timer(&sp->id_timer, jiffies + HZ / 2);
5465 * s2io_ethtool_idnic - To physically identify the nic on the system.
5466 * @sp : private member of the device structure, which is a pointer to the
5467 * s2io_nic structure.
5468 * @id : pointer to the structure with identification parameters given by
5470 * Description: Used to physically identify the NIC on the system.
5471 * The Link LED will blink for a time specified by the user for
5473 * NOTE: The Link has to be Up to be able to blink the LED. Hence
5474 * identification is possible only if it's link is up.
5476 * int , returns 0 on success
5479 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
5481 u64 val64 = 0, last_gpio_ctrl_val;
5482 struct s2io_nic *sp = dev->priv;
5483 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5486 subid = sp->pdev->subsystem_device;
5487 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5488 if ((sp->device_type == XFRAME_I_DEVICE) &&
5489 ((subid & 0xFF) < 0x07)) {
5490 val64 = readq(&bar0->adapter_control);
5491 if (!(val64 & ADAPTER_CNTL_EN)) {
5493 "Adapter Link down, cannot blink LED\n");
5497 if (sp->id_timer.function == NULL) {
5498 init_timer(&sp->id_timer);
5499 sp->id_timer.function = s2io_phy_id;
5500 sp->id_timer.data = (unsigned long) sp;
5502 mod_timer(&sp->id_timer, jiffies);
5504 msleep_interruptible(data * HZ);
5506 msleep_interruptible(MAX_FLICKER_TIME);
5507 del_timer_sync(&sp->id_timer);
5509 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
5510 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
5511 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5517 static void s2io_ethtool_gringparam(struct net_device *dev,
5518 struct ethtool_ringparam *ering)
5520 struct s2io_nic *sp = dev->priv;
5521 int i,tx_desc_count=0,rx_desc_count=0;
5523 if (sp->rxd_mode == RXD_MODE_1)
5524 ering->rx_max_pending = MAX_RX_DESC_1;
5525 else if (sp->rxd_mode == RXD_MODE_3B)
5526 ering->rx_max_pending = MAX_RX_DESC_2;
5528 ering->tx_max_pending = MAX_TX_DESC;
5529 for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5530 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5532 DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
5533 ering->tx_pending = tx_desc_count;
5535 for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5536 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5538 ering->rx_pending = rx_desc_count;
5540 ering->rx_mini_max_pending = 0;
5541 ering->rx_mini_pending = 0;
5542 if(sp->rxd_mode == RXD_MODE_1)
5543 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5544 else if (sp->rxd_mode == RXD_MODE_3B)
5545 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5546 ering->rx_jumbo_pending = rx_desc_count;
5550 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5551 * @sp : private member of the device structure, which is a pointer to the
5552 * s2io_nic structure.
5553 * @ep : pointer to the structure with pause parameters given by ethtool.
5555 * Returns the Pause frame generation and reception capability of the NIC.
5559 static void s2io_ethtool_getpause_data(struct net_device *dev,
5560 struct ethtool_pauseparam *ep)
5563 struct s2io_nic *sp = dev->priv;
5564 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5566 val64 = readq(&bar0->rmac_pause_cfg);
5567 if (val64 & RMAC_PAUSE_GEN_ENABLE)
5568 ep->tx_pause = TRUE;
5569 if (val64 & RMAC_PAUSE_RX_ENABLE)
5570 ep->rx_pause = TRUE;
5571 ep->autoneg = FALSE;
5575 * s2io_ethtool_setpause_data - set/reset pause frame generation.
5576 * @sp : private member of the device structure, which is a pointer to the
5577 * s2io_nic structure.
5578 * @ep : pointer to the structure with pause parameters given by ethtool.
5580 * It can be used to set or reset Pause frame generation or reception
5581 * support of the NIC.
5583 * int, returns 0 on Success
5586 static int s2io_ethtool_setpause_data(struct net_device *dev,
5587 struct ethtool_pauseparam *ep)
5590 struct s2io_nic *sp = dev->priv;
5591 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5593 val64 = readq(&bar0->rmac_pause_cfg);
5595 val64 |= RMAC_PAUSE_GEN_ENABLE;
5597 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5599 val64 |= RMAC_PAUSE_RX_ENABLE;
5601 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5602 writeq(val64, &bar0->rmac_pause_cfg);
5607 * read_eeprom - reads 4 bytes of data from user given offset.
5608 * @sp : private member of the device structure, which is a pointer to the
5609 * s2io_nic structure.
5610 * @off : offset at which the data must be written
5611 * @data : Its an output parameter where the data read at the given
5614 * Will read 4 bytes of data from the user given offset and return the
5616 * NOTE: Will allow to read only part of the EEPROM visible through the
5619 * -1 on failure and 0 on success.
5622 #define S2IO_DEV_ID 5
5623 static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
5628 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5630 if (sp->device_type == XFRAME_I_DEVICE) {
5631 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5632 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
5633 I2C_CONTROL_CNTL_START;
5634 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5636 while (exit_cnt < 5) {
5637 val64 = readq(&bar0->i2c_control);
5638 if (I2C_CONTROL_CNTL_END(val64)) {
5639 *data = I2C_CONTROL_GET_DATA(val64);
5648 if (sp->device_type == XFRAME_II_DEVICE) {
5649 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5650 SPI_CONTROL_BYTECNT(0x3) |
5651 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5652 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5653 val64 |= SPI_CONTROL_REQ;
5654 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5655 while (exit_cnt < 5) {
5656 val64 = readq(&bar0->spi_control);
5657 if (val64 & SPI_CONTROL_NACK) {
5660 } else if (val64 & SPI_CONTROL_DONE) {
5661 *data = readq(&bar0->spi_data);
5674 * write_eeprom - actually writes the relevant part of the data value.
5675 * @sp : private member of the device structure, which is a pointer to the
5676 * s2io_nic structure.
5677 * @off : offset at which the data must be written
5678 * @data : The data that is to be written
5679 * @cnt : Number of bytes of the data that are actually to be written into
5680 * the Eeprom. (max of 3)
5682 * Actually writes the relevant part of the data value into the Eeprom
5683 * through the I2C bus.
5685 * 0 on success, -1 on failure.
5688 static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
5690 int exit_cnt = 0, ret = -1;
5692 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5694 if (sp->device_type == XFRAME_I_DEVICE) {
5695 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5696 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
5697 I2C_CONTROL_CNTL_START;
5698 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5700 while (exit_cnt < 5) {
5701 val64 = readq(&bar0->i2c_control);
5702 if (I2C_CONTROL_CNTL_END(val64)) {
5703 if (!(val64 & I2C_CONTROL_NACK))
5712 if (sp->device_type == XFRAME_II_DEVICE) {
5713 int write_cnt = (cnt == 8) ? 0 : cnt;
5714 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
5716 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5717 SPI_CONTROL_BYTECNT(write_cnt) |
5718 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5719 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5720 val64 |= SPI_CONTROL_REQ;
5721 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5722 while (exit_cnt < 5) {
5723 val64 = readq(&bar0->spi_control);
5724 if (val64 & SPI_CONTROL_NACK) {
5727 } else if (val64 & SPI_CONTROL_DONE) {
5737 static void s2io_vpd_read(struct s2io_nic *nic)
5741 int i=0, cnt, fail = 0;
5742 int vpd_addr = 0x80;
5744 if (nic->device_type == XFRAME_II_DEVICE) {
5745 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5749 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5752 strcpy(nic->serial_num, "NOT AVAILABLE");
5754 vpd_data = kmalloc(256, GFP_KERNEL);
5756 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5759 nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5761 for (i = 0; i < 256; i +=4 ) {
5762 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5763 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5764 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5765 for (cnt = 0; cnt <5; cnt++) {
5767 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5772 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5776 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5777 (u32 *)&vpd_data[i]);
5781 /* read serial number of adapter */
5782 for (cnt = 0; cnt < 256; cnt++) {
5783 if ((vpd_data[cnt] == 'S') &&
5784 (vpd_data[cnt+1] == 'N') &&
5785 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5786 memset(nic->serial_num, 0, VPD_STRING_LEN);
5787 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5794 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5795 memset(nic->product_name, 0, vpd_data[1]);
5796 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5799 nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5803 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5804 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
5805 * @eeprom : pointer to the user level structure provided by ethtool,
5806 * containing all relevant information.
5807 * @data_buf : user defined value to be written into Eeprom.
5808 * Description: Reads the values stored in the Eeprom at given offset
5809 * for a given length. Stores these values int the input argument data
5810 * buffer 'data_buf' and returns these to the caller (ethtool.)
5815 static int s2io_ethtool_geeprom(struct net_device *dev,
5816 struct ethtool_eeprom *eeprom, u8 * data_buf)
5820 struct s2io_nic *sp = dev->priv;
5822 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5824 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5825 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5827 for (i = 0; i < eeprom->len; i += 4) {
5828 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5829 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5833 memcpy((data_buf + i), &valid, 4);
5839 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5840 * @sp : private member of the device structure, which is a pointer to the
5841 * s2io_nic structure.
5842 * @eeprom : pointer to the user level structure provided by ethtool,
5843 * containing all relevant information.
5844 * @data_buf ; user defined value to be written into Eeprom.
5846 * Tries to write the user provided value in the Eeprom, at the offset
5847 * given by the user.
5849 * 0 on success, -EFAULT on failure.
5852 static int s2io_ethtool_seeprom(struct net_device *dev,
5853 struct ethtool_eeprom *eeprom,
5856 int len = eeprom->len, cnt = 0;
5857 u64 valid = 0, data;
5858 struct s2io_nic *sp = dev->priv;
5860 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5862 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5863 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5869 data = (u32) data_buf[cnt] & 0x000000FF;
5871 valid = (u32) (data << 24);
5875 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5877 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5879 "write into the specified offset\n");
5890 * s2io_register_test - reads and writes into all clock domains.
5891 * @sp : private member of the device structure, which is a pointer to the
5892 * s2io_nic structure.
5893 * @data : variable that returns the result of each of the test conducted b
5896 * Read and write into all clock domains. The NIC has 3 clock domains,
5897 * see that registers in all the three regions are accessible.
5902 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5904 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5905 u64 val64 = 0, exp_val;
5908 val64 = readq(&bar0->pif_rd_swapper_fb);
5909 if (val64 != 0x123456789abcdefULL) {
5911 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5914 val64 = readq(&bar0->rmac_pause_cfg);
5915 if (val64 != 0xc000ffff00000000ULL) {
5917 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5920 val64 = readq(&bar0->rx_queue_cfg);
5921 if (sp->device_type == XFRAME_II_DEVICE)
5922 exp_val = 0x0404040404040404ULL;
5924 exp_val = 0x0808080808080808ULL;
5925 if (val64 != exp_val) {
5927 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5930 val64 = readq(&bar0->xgxs_efifo_cfg);
5931 if (val64 != 0x000000001923141EULL) {
5933 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5936 val64 = 0x5A5A5A5A5A5A5A5AULL;
5937 writeq(val64, &bar0->xmsi_data);
5938 val64 = readq(&bar0->xmsi_data);
5939 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5941 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5944 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5945 writeq(val64, &bar0->xmsi_data);
5946 val64 = readq(&bar0->xmsi_data);
5947 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5949 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5957 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5958 * @sp : private member of the device structure, which is a pointer to the
5959 * s2io_nic structure.
5960 * @data:variable that returns the result of each of the test conducted by
5963 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5969 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5972 u64 ret_data, org_4F0, org_7F0;
5973 u8 saved_4F0 = 0, saved_7F0 = 0;
5974 struct net_device *dev = sp->dev;
5976 /* Test Write Error at offset 0 */
5977 /* Note that SPI interface allows write access to all areas
5978 * of EEPROM. Hence doing all negative testing only for Xframe I.
5980 if (sp->device_type == XFRAME_I_DEVICE)
5981 if (!write_eeprom(sp, 0, 0, 3))
5984 /* Save current values at offsets 0x4F0 and 0x7F0 */
5985 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5987 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5990 /* Test Write at offset 4f0 */
5991 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5993 if (read_eeprom(sp, 0x4F0, &ret_data))
5996 if (ret_data != 0x012345) {
5997 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5998 "Data written %llx Data read %llx\n",
5999 dev->name, (unsigned long long)0x12345,
6000 (unsigned long long)ret_data);
6004 /* Reset the EEPROM data go FFFF */
6005 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
6007 /* Test Write Request Error at offset 0x7c */
6008 if (sp->device_type == XFRAME_I_DEVICE)
6009 if (!write_eeprom(sp, 0x07C, 0, 3))
6012 /* Test Write Request at offset 0x7f0 */
6013 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
6015 if (read_eeprom(sp, 0x7F0, &ret_data))
6018 if (ret_data != 0x012345) {
6019 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
6020 "Data written %llx Data read %llx\n",
6021 dev->name, (unsigned long long)0x12345,
6022 (unsigned long long)ret_data);
6026 /* Reset the EEPROM data go FFFF */
6027 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
6029 if (sp->device_type == XFRAME_I_DEVICE) {
6030 /* Test Write Error at offset 0x80 */
6031 if (!write_eeprom(sp, 0x080, 0, 3))
6034 /* Test Write Error at offset 0xfc */
6035 if (!write_eeprom(sp, 0x0FC, 0, 3))
6038 /* Test Write Error at offset 0x100 */
6039 if (!write_eeprom(sp, 0x100, 0, 3))
6042 /* Test Write Error at offset 4ec */
6043 if (!write_eeprom(sp, 0x4EC, 0, 3))
6047 /* Restore values at offsets 0x4F0 and 0x7F0 */
6049 write_eeprom(sp, 0x4F0, org_4F0, 3);
6051 write_eeprom(sp, 0x7F0, org_7F0, 3);
6058 * s2io_bist_test - invokes the MemBist test of the card .
6059 * @sp : private member of the device structure, which is a pointer to the
6060 * s2io_nic structure.
6061 * @data:variable that returns the result of each of the test conducted by
6064 * This invokes the MemBist test of the card. We give around
6065 * 2 secs time for the Test to complete. If it's still not complete
6066 * within this peiod, we consider that the test failed.
6068 * 0 on success and -1 on failure.
6071 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
6074 int cnt = 0, ret = -1;
6076 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6077 bist |= PCI_BIST_START;
6078 pci_write_config_word(sp->pdev, PCI_BIST, bist);
6081 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6082 if (!(bist & PCI_BIST_START)) {
6083 *data = (bist & PCI_BIST_CODE_MASK);
6095 * s2io-link_test - verifies the link state of the nic
6096 * @sp ; private member of the device structure, which is a pointer to the
6097 * s2io_nic structure.
6098 * @data: variable that returns the result of each of the test conducted by
6101 * The function verifies the link state of the NIC and updates the input
6102 * argument 'data' appropriately.
6107 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
6109 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6112 val64 = readq(&bar0->adapter_status);
6113 if(!(LINK_IS_UP(val64)))
6122 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6123 * @sp - private member of the device structure, which is a pointer to the
6124 * s2io_nic structure.
6125 * @data - variable that returns the result of each of the test
6126 * conducted by the driver.
6128 * This is one of the offline test that tests the read and write
6129 * access to the RldRam chip on the NIC.
6134 static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
6136 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6138 int cnt, iteration = 0, test_fail = 0;
6140 val64 = readq(&bar0->adapter_control);
6141 val64 &= ~ADAPTER_ECC_EN;
6142 writeq(val64, &bar0->adapter_control);
6144 val64 = readq(&bar0->mc_rldram_test_ctrl);
6145 val64 |= MC_RLDRAM_TEST_MODE;
6146 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6148 val64 = readq(&bar0->mc_rldram_mrs);
6149 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6150 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6152 val64 |= MC_RLDRAM_MRS_ENABLE;
6153 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6155 while (iteration < 2) {
6156 val64 = 0x55555555aaaa0000ULL;
6157 if (iteration == 1) {
6158 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6160 writeq(val64, &bar0->mc_rldram_test_d0);
6162 val64 = 0xaaaa5a5555550000ULL;
6163 if (iteration == 1) {
6164 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6166 writeq(val64, &bar0->mc_rldram_test_d1);
6168 val64 = 0x55aaaaaaaa5a0000ULL;
6169 if (iteration == 1) {
6170 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6172 writeq(val64, &bar0->mc_rldram_test_d2);
6174 val64 = (u64) (0x0000003ffffe0100ULL);
6175 writeq(val64, &bar0->mc_rldram_test_add);
6177 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
6179 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6181 for (cnt = 0; cnt < 5; cnt++) {
6182 val64 = readq(&bar0->mc_rldram_test_ctrl);
6183 if (val64 & MC_RLDRAM_TEST_DONE)
6191 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6192 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6194 for (cnt = 0; cnt < 5; cnt++) {
6195 val64 = readq(&bar0->mc_rldram_test_ctrl);
6196 if (val64 & MC_RLDRAM_TEST_DONE)
6204 val64 = readq(&bar0->mc_rldram_test_ctrl);
6205 if (!(val64 & MC_RLDRAM_TEST_PASS))
6213 /* Bring the adapter out of test mode */
6214 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6220 * s2io_ethtool_test - conducts 6 tsets to determine the health of card.
6221 * @sp : private member of the device structure, which is a pointer to the
6222 * s2io_nic structure.
6223 * @ethtest : pointer to a ethtool command specific structure that will be
6224 * returned to the user.
6225 * @data : variable that returns the result of each of the test
6226 * conducted by the driver.
6228 * This function conducts 6 tests ( 4 offline and 2 online) to determine
6229 * the health of the card.
6234 static void s2io_ethtool_test(struct net_device *dev,
6235 struct ethtool_test *ethtest,
6238 struct s2io_nic *sp = dev->priv;
6239 int orig_state = netif_running(sp->dev);
6241 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6242 /* Offline Tests. */
6244 s2io_close(sp->dev);
6246 if (s2io_register_test(sp, &data[0]))
6247 ethtest->flags |= ETH_TEST_FL_FAILED;
6251 if (s2io_rldram_test(sp, &data[3]))
6252 ethtest->flags |= ETH_TEST_FL_FAILED;
6256 if (s2io_eeprom_test(sp, &data[1]))
6257 ethtest->flags |= ETH_TEST_FL_FAILED;
6259 if (s2io_bist_test(sp, &data[4]))
6260 ethtest->flags |= ETH_TEST_FL_FAILED;
6270 "%s: is not up, cannot run test\n",
6279 if (s2io_link_test(sp, &data[2]))
6280 ethtest->flags |= ETH_TEST_FL_FAILED;
6289 static void s2io_get_ethtool_stats(struct net_device *dev,
6290 struct ethtool_stats *estats,
6294 struct s2io_nic *sp = dev->priv;
6295 struct stat_block *stat_info = sp->mac_control.stats_info;
6297 s2io_updt_stats(sp);
6299 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
6300 le32_to_cpu(stat_info->tmac_frms);
6302 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
6303 le32_to_cpu(stat_info->tmac_data_octets);
6304 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
6306 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
6307 le32_to_cpu(stat_info->tmac_mcst_frms);
6309 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
6310 le32_to_cpu(stat_info->tmac_bcst_frms);
6311 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
6313 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
6314 le32_to_cpu(stat_info->tmac_ttl_octets);
6316 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
6317 le32_to_cpu(stat_info->tmac_ucst_frms);
6319 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
6320 le32_to_cpu(stat_info->tmac_nucst_frms);
6322 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
6323 le32_to_cpu(stat_info->tmac_any_err_frms);
6324 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
6325 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
6327 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
6328 le32_to_cpu(stat_info->tmac_vld_ip);
6330 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
6331 le32_to_cpu(stat_info->tmac_drop_ip);
6333 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
6334 le32_to_cpu(stat_info->tmac_icmp);
6336 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
6337 le32_to_cpu(stat_info->tmac_rst_tcp);
6338 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
6339 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
6340 le32_to_cpu(stat_info->tmac_udp);
6342 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
6343 le32_to_cpu(stat_info->rmac_vld_frms);
6345 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
6346 le32_to_cpu(stat_info->rmac_data_octets);
6347 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
6348 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
6350 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
6351 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
6353 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
6354 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
6355 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
6356 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
6357 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
6358 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
6359 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
6361 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
6362 le32_to_cpu(stat_info->rmac_ttl_octets);
6364 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
6365 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
6367 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
6368 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
6370 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
6371 le32_to_cpu(stat_info->rmac_discarded_frms);
6373 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
6374 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
6375 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
6376 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
6378 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
6379 le32_to_cpu(stat_info->rmac_usized_frms);
6381 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
6382 le32_to_cpu(stat_info->rmac_osized_frms);
6384 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
6385 le32_to_cpu(stat_info->rmac_frag_frms);
6387 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
6388 le32_to_cpu(stat_info->rmac_jabber_frms);
6389 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
6390 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
6391 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
6392 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
6393 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
6394 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
6396 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
6397 le32_to_cpu(stat_info->rmac_ip);
6398 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
6399 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
6401 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
6402 le32_to_cpu(stat_info->rmac_drop_ip);
6404 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
6405 le32_to_cpu(stat_info->rmac_icmp);
6406 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
6408 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
6409 le32_to_cpu(stat_info->rmac_udp);
6411 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
6412 le32_to_cpu(stat_info->rmac_err_drp_udp);
6413 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
6414 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
6415 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
6416 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
6417 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
6418 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
6419 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
6420 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
6421 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
6422 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
6423 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
6424 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
6425 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
6426 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
6427 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
6428 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
6429 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
6431 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
6432 le32_to_cpu(stat_info->rmac_pause_cnt);
6433 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
6434 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
6436 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
6437 le32_to_cpu(stat_info->rmac_accepted_ip);
6438 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
6439 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
6440 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
6441 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
6442 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
6443 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
6444 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
6445 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
6446 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
6447 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
6448 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
6449 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
6450 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
6451 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
6452 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
6453 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
6454 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
6455 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
6456 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
6458 /* Enhanced statistics exist only for Hercules */
6459 if(sp->device_type == XFRAME_II_DEVICE) {
6461 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
6463 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
6465 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
6466 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
6467 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
6468 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
6469 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
6470 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
6471 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
6472 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
6473 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
6474 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
6475 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
6476 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
6477 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
6478 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
6482 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
6483 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
6484 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
6485 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
6486 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
6487 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
6488 for (k = 0; k < MAX_RX_RINGS; k++)
6489 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
6490 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
6491 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
6492 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
6493 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
6494 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
6495 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
6496 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
6497 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
6498 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
6499 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
6500 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
6501 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
6502 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
6503 tmp_stats[i++] = stat_info->sw_stat.sending_both;
6504 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
6505 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
6506 if (stat_info->sw_stat.num_aggregations) {
6507 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
6510 * Since 64-bit divide does not work on all platforms,
6511 * do repeated subtraction.
6513 while (tmp >= stat_info->sw_stat.num_aggregations) {
6514 tmp -= stat_info->sw_stat.num_aggregations;
6517 tmp_stats[i++] = count;
6521 tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
6522 tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
6523 tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
6524 tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
6525 tmp_stats[i++] = stat_info->sw_stat.mem_freed;
6526 tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
6527 tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
6528 tmp_stats[i++] = stat_info->sw_stat.link_up_time;
6529 tmp_stats[i++] = stat_info->sw_stat.link_down_time;
6531 tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
6532 tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
6533 tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
6534 tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
6535 tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
6537 tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
6538 tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
6539 tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
6540 tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
6541 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
6542 tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
6543 tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
6544 tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
6545 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
6546 tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
6547 tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
6548 tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
6549 tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
6550 tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
6551 tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
6552 tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
6553 tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
6554 tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
6555 tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
6556 tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
6557 tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
6558 tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
6559 tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
6560 tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
6561 tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
6562 tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
6565 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6567 return (XENA_REG_SPACE);
6571 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6573 struct s2io_nic *sp = dev->priv;
6575 return (sp->rx_csum);
6578 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6580 struct s2io_nic *sp = dev->priv;
6590 static int s2io_get_eeprom_len(struct net_device *dev)
6592 return (XENA_EEPROM_SPACE);
6595 static int s2io_get_sset_count(struct net_device *dev, int sset)
6597 struct s2io_nic *sp = dev->priv;
6601 return S2IO_TEST_LEN;
6603 switch(sp->device_type) {
6604 case XFRAME_I_DEVICE:
6605 return XFRAME_I_STAT_LEN;
6606 case XFRAME_II_DEVICE:
6607 return XFRAME_II_STAT_LEN;
6616 static void s2io_ethtool_get_strings(struct net_device *dev,
6617 u32 stringset, u8 * data)
6620 struct s2io_nic *sp = dev->priv;
6622 switch (stringset) {
6624 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6627 stat_size = sizeof(ethtool_xena_stats_keys);
6628 memcpy(data, ðtool_xena_stats_keys,stat_size);
6629 if(sp->device_type == XFRAME_II_DEVICE) {
6630 memcpy(data + stat_size,
6631 ðtool_enhanced_stats_keys,
6632 sizeof(ethtool_enhanced_stats_keys));
6633 stat_size += sizeof(ethtool_enhanced_stats_keys);
6636 memcpy(data + stat_size, ðtool_driver_stats_keys,
6637 sizeof(ethtool_driver_stats_keys));
6641 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6644 dev->features |= NETIF_F_IP_CSUM;
6646 dev->features &= ~NETIF_F_IP_CSUM;
6651 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6653 return (dev->features & NETIF_F_TSO) != 0;
6655 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6658 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6660 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6665 static const struct ethtool_ops netdev_ethtool_ops = {
6666 .get_settings = s2io_ethtool_gset,
6667 .set_settings = s2io_ethtool_sset,
6668 .get_drvinfo = s2io_ethtool_gdrvinfo,
6669 .get_regs_len = s2io_ethtool_get_regs_len,
6670 .get_regs = s2io_ethtool_gregs,
6671 .get_link = ethtool_op_get_link,
6672 .get_eeprom_len = s2io_get_eeprom_len,
6673 .get_eeprom = s2io_ethtool_geeprom,
6674 .set_eeprom = s2io_ethtool_seeprom,
6675 .get_ringparam = s2io_ethtool_gringparam,
6676 .get_pauseparam = s2io_ethtool_getpause_data,
6677 .set_pauseparam = s2io_ethtool_setpause_data,
6678 .get_rx_csum = s2io_ethtool_get_rx_csum,
6679 .set_rx_csum = s2io_ethtool_set_rx_csum,
6680 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
6681 .set_sg = ethtool_op_set_sg,
6682 .get_tso = s2io_ethtool_op_get_tso,
6683 .set_tso = s2io_ethtool_op_set_tso,
6684 .set_ufo = ethtool_op_set_ufo,
6685 .self_test = s2io_ethtool_test,
6686 .get_strings = s2io_ethtool_get_strings,
6687 .phys_id = s2io_ethtool_idnic,
6688 .get_ethtool_stats = s2io_get_ethtool_stats,
6689 .get_sset_count = s2io_get_sset_count,
6693 * s2io_ioctl - Entry point for the Ioctl
6694 * @dev : Device pointer.
6695 * @ifr : An IOCTL specefic structure, that can contain a pointer to
6696 * a proprietary structure used to pass information to the driver.
6697 * @cmd : This is used to distinguish between the different commands that
6698 * can be passed to the IOCTL functions.
6700 * Currently there are no special functionality supported in IOCTL, hence
6701 * function always return EOPNOTSUPPORTED
6704 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6710 * s2io_change_mtu - entry point to change MTU size for the device.
6711 * @dev : device pointer.
6712 * @new_mtu : the new MTU size for the device.
6713 * Description: A driver entry point to change MTU size for the device.
6714 * Before changing the MTU the device must be stopped.
6716 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6720 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6722 struct s2io_nic *sp = dev->priv;
6725 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6726 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6732 if (netif_running(dev)) {
6733 s2io_stop_all_tx_queue(sp);
6735 ret = s2io_card_up(sp);
6737 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6741 s2io_wake_all_tx_queue(sp);
6742 } else { /* Device is down */
6743 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6744 u64 val64 = new_mtu;
6746 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6753 * s2io_set_link - Set the LInk status
6754 * @data: long pointer to device private structue
6755 * Description: Sets the link status for the adapter
6758 static void s2io_set_link(struct work_struct *work)
6760 struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
6761 struct net_device *dev = nic->dev;
6762 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6768 if (!netif_running(dev))
6771 if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6772 /* The card is being reset, no point doing anything */
6776 subid = nic->pdev->subsystem_device;
6777 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6779 * Allow a small delay for the NICs self initiated
6780 * cleanup to complete.
6785 val64 = readq(&bar0->adapter_status);
6786 if (LINK_IS_UP(val64)) {
6787 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6788 if (verify_xena_quiescence(nic)) {
6789 val64 = readq(&bar0->adapter_control);
6790 val64 |= ADAPTER_CNTL_EN;
6791 writeq(val64, &bar0->adapter_control);
6792 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6793 nic->device_type, subid)) {
6794 val64 = readq(&bar0->gpio_control);
6795 val64 |= GPIO_CTRL_GPIO_0;
6796 writeq(val64, &bar0->gpio_control);
6797 val64 = readq(&bar0->gpio_control);
6799 val64 |= ADAPTER_LED_ON;
6800 writeq(val64, &bar0->adapter_control);
6802 nic->device_enabled_once = TRUE;
6804 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6805 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6806 s2io_stop_all_tx_queue(nic);
6809 val64 = readq(&bar0->adapter_control);
6810 val64 |= ADAPTER_LED_ON;
6811 writeq(val64, &bar0->adapter_control);
6812 s2io_link(nic, LINK_UP);
6814 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6816 val64 = readq(&bar0->gpio_control);
6817 val64 &= ~GPIO_CTRL_GPIO_0;
6818 writeq(val64, &bar0->gpio_control);
6819 val64 = readq(&bar0->gpio_control);
6822 val64 = readq(&bar0->adapter_control);
6823 val64 = val64 &(~ADAPTER_LED_ON);
6824 writeq(val64, &bar0->adapter_control);
6825 s2io_link(nic, LINK_DOWN);
6827 clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6833 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6835 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6836 u64 *temp2, int size)
6838 struct net_device *dev = sp->dev;
6839 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6841 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6842 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6845 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6847 * As Rx frame are not going to be processed,
6848 * using same mapped address for the Rxd
6851 rxdp1->Buffer0_ptr = *temp0;
6853 *skb = dev_alloc_skb(size);
6855 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6856 DBG_PRINT(INFO_DBG, "memory to allocate ");
6857 DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
6858 sp->mac_control.stats_info->sw_stat. \
6859 mem_alloc_fail_cnt++;
6862 sp->mac_control.stats_info->sw_stat.mem_allocated
6863 += (*skb)->truesize;
6864 /* storing the mapped addr in a temp variable
6865 * such it will be used for next rxd whose
6866 * Host Control is NULL
6868 rxdp1->Buffer0_ptr = *temp0 =
6869 pci_map_single( sp->pdev, (*skb)->data,
6870 size - NET_IP_ALIGN,
6871 PCI_DMA_FROMDEVICE);
6872 if (pci_dma_mapping_error(rxdp1->Buffer0_ptr))
6873 goto memalloc_failed;
6874 rxdp->Host_Control = (unsigned long) (*skb);
6876 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6877 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6878 /* Two buffer Mode */
6880 rxdp3->Buffer2_ptr = *temp2;
6881 rxdp3->Buffer0_ptr = *temp0;
6882 rxdp3->Buffer1_ptr = *temp1;
6884 *skb = dev_alloc_skb(size);
6886 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6887 DBG_PRINT(INFO_DBG, "memory to allocate ");
6888 DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
6889 sp->mac_control.stats_info->sw_stat. \
6890 mem_alloc_fail_cnt++;
6893 sp->mac_control.stats_info->sw_stat.mem_allocated
6894 += (*skb)->truesize;
6895 rxdp3->Buffer2_ptr = *temp2 =
6896 pci_map_single(sp->pdev, (*skb)->data,
6898 PCI_DMA_FROMDEVICE);
6899 if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
6900 goto memalloc_failed;
6901 rxdp3->Buffer0_ptr = *temp0 =
6902 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6903 PCI_DMA_FROMDEVICE);
6904 if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) {
6905 pci_unmap_single (sp->pdev,
6906 (dma_addr_t)rxdp3->Buffer2_ptr,
6907 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6908 goto memalloc_failed;
6910 rxdp->Host_Control = (unsigned long) (*skb);
6912 /* Buffer-1 will be dummy buffer not used */
6913 rxdp3->Buffer1_ptr = *temp1 =
6914 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6915 PCI_DMA_FROMDEVICE);
6916 if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) {
6917 pci_unmap_single (sp->pdev,
6918 (dma_addr_t)rxdp3->Buffer0_ptr,
6919 BUF0_LEN, PCI_DMA_FROMDEVICE);
6920 pci_unmap_single (sp->pdev,
6921 (dma_addr_t)rxdp3->Buffer2_ptr,
6922 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6923 goto memalloc_failed;
6929 stats->pci_map_fail_cnt++;
6930 stats->mem_freed += (*skb)->truesize;
6931 dev_kfree_skb(*skb);
6935 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6938 struct net_device *dev = sp->dev;
6939 if (sp->rxd_mode == RXD_MODE_1) {
6940 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6941 } else if (sp->rxd_mode == RXD_MODE_3B) {
6942 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6943 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6944 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6948 static int rxd_owner_bit_reset(struct s2io_nic *sp)
6950 int i, j, k, blk_cnt = 0, size;
6951 struct mac_info * mac_control = &sp->mac_control;
6952 struct config_param *config = &sp->config;
6953 struct net_device *dev = sp->dev;
6954 struct RxD_t *rxdp = NULL;
6955 struct sk_buff *skb = NULL;
6956 struct buffAdd *ba = NULL;
6957 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6959 /* Calculate the size based on ring mode */
6960 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6961 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6962 if (sp->rxd_mode == RXD_MODE_1)
6963 size += NET_IP_ALIGN;
6964 else if (sp->rxd_mode == RXD_MODE_3B)
6965 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6967 for (i = 0; i < config->rx_ring_num; i++) {
6968 blk_cnt = config->rx_cfg[i].num_rxd /
6969 (rxd_count[sp->rxd_mode] +1);
6971 for (j = 0; j < blk_cnt; j++) {
6972 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6973 rxdp = mac_control->rings[i].
6974 rx_blocks[j].rxds[k].virt_addr;
6975 if(sp->rxd_mode == RXD_MODE_3B)
6976 ba = &mac_control->rings[i].ba[j][k];
6977 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6978 &skb,(u64 *)&temp0_64,
6985 set_rxd_buffer_size(sp, rxdp, size);
6987 /* flip the Ownership bit to Hardware */
6988 rxdp->Control_1 |= RXD_OWN_XENA;
6996 static int s2io_add_isr(struct s2io_nic * sp)
6999 struct net_device *dev = sp->dev;
7002 if (sp->config.intr_type == MSI_X)
7003 ret = s2io_enable_msi_x(sp);
7005 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
7006 sp->config.intr_type = INTA;
7009 /* Store the values of the MSIX table in the struct s2io_nic structure */
7010 store_xmsi_data(sp);
7012 /* After proper initialization of H/W, register ISR */
7013 if (sp->config.intr_type == MSI_X) {
7014 int i, msix_rx_cnt = 0;
7016 for (i = 0; i < sp->num_entries; i++) {
7017 if (sp->s2io_entries[i].in_use == MSIX_FLG) {
7018 if (sp->s2io_entries[i].type ==
7020 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
7022 err = request_irq(sp->entries[i].vector,
7023 s2io_msix_ring_handle, 0,
7025 sp->s2io_entries[i].arg);
7026 } else if (sp->s2io_entries[i].type ==
7028 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
7030 err = request_irq(sp->entries[i].vector,
7031 s2io_msix_fifo_handle, 0,
7033 sp->s2io_entries[i].arg);
7036 /* if either data or addr is zero print it. */
7037 if (!(sp->msix_info[i].addr &&
7038 sp->msix_info[i].data)) {
7040 "%s @Addr:0x%llx Data:0x%llx\n",
7042 (unsigned long long)
7043 sp->msix_info[i].addr,
7044 (unsigned long long)
7045 ntohl(sp->msix_info[i].data));
7049 remove_msix_isr(sp);
7052 "%s:MSI-X-%d registration "
7053 "failed\n", dev->name, i);
7056 "%s: Defaulting to INTA\n",
7058 sp->config.intr_type = INTA;
7061 sp->s2io_entries[i].in_use =
7062 MSIX_REGISTERED_SUCCESS;
7066 printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
7068 DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled"
7069 " through alarm vector\n");
7072 if (sp->config.intr_type == INTA) {
7073 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
7076 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
7083 static void s2io_rem_isr(struct s2io_nic * sp)
7085 if (sp->config.intr_type == MSI_X)
7086 remove_msix_isr(sp);
7088 remove_inta_isr(sp);
7091 static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
7094 struct XENA_dev_config __iomem *bar0 = sp->bar0;
7095 register u64 val64 = 0;
7096 struct config_param *config;
7097 config = &sp->config;
7099 if (!is_s2io_card_up(sp))
7102 del_timer_sync(&sp->alarm_timer);
7103 /* If s2io_set_link task is executing, wait till it completes. */
7104 while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
7107 clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7110 if (sp->config.napi) {
7112 if (config->intr_type == MSI_X) {
7113 for (; off < sp->config.rx_ring_num; off++)
7114 napi_disable(&sp->mac_control.rings[off].napi);
7117 napi_disable(&sp->napi);
7120 /* disable Tx and Rx traffic on the NIC */
7126 /* stop the tx queue, indicate link down */
7127 s2io_link(sp, LINK_DOWN);
7129 /* Check if the device is Quiescent and then Reset the NIC */
7131 /* As per the HW requirement we need to replenish the
7132 * receive buffer to avoid the ring bump. Since there is
7133 * no intention of processing the Rx frame at this pointwe are
7134 * just settting the ownership bit of rxd in Each Rx
7135 * ring to HW and set the appropriate buffer size
7136 * based on the ring mode
7138 rxd_owner_bit_reset(sp);
7140 val64 = readq(&bar0->adapter_status);
7141 if (verify_xena_quiescence(sp)) {
7142 if(verify_pcc_quiescent(sp, sp->device_enabled_once))
7150 "s2io_close:Device not Quiescent ");
7151 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
7152 (unsigned long long) val64);
7159 /* Free all Tx buffers */
7160 free_tx_buffers(sp);
7162 /* Free all Rx buffers */
7163 free_rx_buffers(sp);
7165 clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
7168 static void s2io_card_down(struct s2io_nic * sp)
7170 do_s2io_card_down(sp, 1);
7173 static int s2io_card_up(struct s2io_nic * sp)
7176 struct mac_info *mac_control;
7177 struct config_param *config;
7178 struct net_device *dev = (struct net_device *) sp->dev;
7181 /* Initialize the H/W I/O registers */
7184 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7192 * Initializing the Rx buffers. For now we are considering only 1
7193 * Rx ring and initializing buffers into 30 Rx blocks
7195 mac_control = &sp->mac_control;
7196 config = &sp->config;
7198 for (i = 0; i < config->rx_ring_num; i++) {
7199 mac_control->rings[i].mtu = dev->mtu;
7200 ret = fill_rx_buffers(&mac_control->rings[i], 1);
7202 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7205 free_rx_buffers(sp);
7208 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7209 mac_control->rings[i].rx_bufs_left);
7212 /* Initialise napi */
7215 if (config->intr_type == MSI_X) {
7216 for (i = 0; i < sp->config.rx_ring_num; i++)
7217 napi_enable(&sp->mac_control.rings[i].napi);
7219 napi_enable(&sp->napi);
7223 /* Maintain the state prior to the open */
7224 if (sp->promisc_flg)
7225 sp->promisc_flg = 0;
7226 if (sp->m_cast_flg) {
7228 sp->all_multi_pos= 0;
7231 /* Setting its receive mode */
7232 s2io_set_multicast(dev);
7235 /* Initialize max aggregatable pkts per session based on MTU */
7236 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
7237 /* Check if we can use(if specified) user provided value */
7238 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7239 sp->lro_max_aggr_per_sess = lro_max_pkts;
7242 /* Enable Rx Traffic and interrupts on the NIC */
7243 if (start_nic(sp)) {
7244 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
7246 free_rx_buffers(sp);
7250 /* Add interrupt service routine */
7251 if (s2io_add_isr(sp) != 0) {
7252 if (sp->config.intr_type == MSI_X)
7255 free_rx_buffers(sp);
7259 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
7261 set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7263 /* Enable select interrupts */
7264 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7265 if (sp->config.intr_type != INTA) {
7266 interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7267 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7269 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7270 interruptible |= TX_PIC_INTR;
7271 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7278 * s2io_restart_nic - Resets the NIC.
7279 * @data : long pointer to the device private structure
7281 * This function is scheduled to be run by the s2io_tx_watchdog
7282 * function after 0.5 secs to reset the NIC. The idea is to reduce
7283 * the run time of the watch dog routine which is run holding a
7287 static void s2io_restart_nic(struct work_struct *work)
7289 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7290 struct net_device *dev = sp->dev;
7294 if (!netif_running(dev))
7298 if (s2io_card_up(sp)) {
7299 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
7302 s2io_wake_all_tx_queue(sp);
7303 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
7310 * s2io_tx_watchdog - Watchdog for transmit side.
7311 * @dev : Pointer to net device structure
7313 * This function is triggered if the Tx Queue is stopped
7314 * for a pre-defined amount of time when the Interface is still up.
7315 * If the Interface is jammed in such a situation, the hardware is
7316 * reset (by s2io_close) and restarted again (by s2io_open) to
7317 * overcome any problem that might have been caused in the hardware.
7322 static void s2io_tx_watchdog(struct net_device *dev)
7324 struct s2io_nic *sp = dev->priv;
7326 if (netif_carrier_ok(dev)) {
7327 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
7328 schedule_work(&sp->rst_timer_task);
7329 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
7334 * rx_osm_handler - To perform some OS related operations on SKB.
7335 * @sp: private member of the device structure,pointer to s2io_nic structure.
7336 * @skb : the socket buffer pointer.
7337 * @len : length of the packet
7338 * @cksum : FCS checksum of the frame.
7339 * @ring_no : the ring from which this RxD was extracted.
7341 * This function is called by the Rx interrupt serivce routine to perform
7342 * some OS related operations on the SKB before passing it to the upper
7343 * layers. It mainly checks if the checksum is OK, if so adds it to the
7344 * SKBs cksum variable, increments the Rx packet count and passes the SKB
7345 * to the upper layer. If the checksum is wrong, it increments the Rx
7346 * packet error count, frees the SKB and returns error.
7348 * SUCCESS on success and -1 on failure.
7350 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7352 struct s2io_nic *sp = ring_data->nic;
7353 struct net_device *dev = (struct net_device *) ring_data->dev;
7354 struct sk_buff *skb = (struct sk_buff *)
7355 ((unsigned long) rxdp->Host_Control);
7356 int ring_no = ring_data->ring_no;
7357 u16 l3_csum, l4_csum;
7358 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7365 /* Check for parity error */
7367 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
7369 err_mask = err >> 48;
7372 sp->mac_control.stats_info->sw_stat.
7373 rx_parity_err_cnt++;
7377 sp->mac_control.stats_info->sw_stat.
7382 sp->mac_control.stats_info->sw_stat.
7383 rx_parity_abort_cnt++;
7387 sp->mac_control.stats_info->sw_stat.
7392 sp->mac_control.stats_info->sw_stat.
7397 sp->mac_control.stats_info->sw_stat.
7402 sp->mac_control.stats_info->sw_stat.
7403 rx_buf_size_err_cnt++;
7407 sp->mac_control.stats_info->sw_stat.
7408 rx_rxd_corrupt_cnt++;
7412 sp->mac_control.stats_info->sw_stat.
7417 * Drop the packet if bad transfer code. Exception being
7418 * 0x5, which could be due to unsupported IPv6 extension header.
7419 * In this case, we let stack handle the packet.
7420 * Note that in this case, since checksum will be incorrect,
7421 * stack will validate the same.
7423 if (err_mask != 0x5) {
7424 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7425 dev->name, err_mask);
7426 sp->stats.rx_crc_errors++;
7427 sp->mac_control.stats_info->sw_stat.mem_freed
7430 ring_data->rx_bufs_left -= 1;
7431 rxdp->Host_Control = 0;
7436 /* Updating statistics */
7437 ring_data->rx_packets++;
7438 rxdp->Host_Control = 0;
7439 if (sp->rxd_mode == RXD_MODE_1) {
7440 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7442 ring_data->rx_bytes += len;
7445 } else if (sp->rxd_mode == RXD_MODE_3B) {
7446 int get_block = ring_data->rx_curr_get_info.block_index;
7447 int get_off = ring_data->rx_curr_get_info.offset;
7448 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7449 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7450 unsigned char *buff = skb_push(skb, buf0_len);
7452 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7453 ring_data->rx_bytes += buf0_len + buf2_len;
7454 memcpy(buff, ba->ba_0, buf0_len);
7455 skb_put(skb, buf2_len);
7458 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!ring_data->lro) ||
7459 (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
7461 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7462 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7463 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7465 * NIC verifies if the Checksum of the received
7466 * frame is Ok or not and accordingly returns
7467 * a flag in the RxD.
7469 skb->ip_summed = CHECKSUM_UNNECESSARY;
7470 if (ring_data->lro) {
7475 ret = s2io_club_tcp_session(ring_data,
7476 skb->data, &tcp, &tcp_len, &lro,
7479 case 3: /* Begin anew */
7482 case 1: /* Aggregate */
7484 lro_append_pkt(sp, lro,
7488 case 4: /* Flush session */
7490 lro_append_pkt(sp, lro,
7492 queue_rx_frame(lro->parent,
7494 clear_lro_session(lro);
7495 sp->mac_control.stats_info->
7496 sw_stat.flush_max_pkts++;
7499 case 2: /* Flush both */
7500 lro->parent->data_len =
7502 sp->mac_control.stats_info->
7503 sw_stat.sending_both++;
7504 queue_rx_frame(lro->parent,
7506 clear_lro_session(lro);
7508 case 0: /* sessions exceeded */
7509 case -1: /* non-TCP or not
7513 * First pkt in session not
7514 * L3/L4 aggregatable
7519 "%s: Samadhana!!\n",
7526 * Packet with erroneous checksum, let the
7527 * upper layers deal with it.
7529 skb->ip_summed = CHECKSUM_NONE;
7532 skb->ip_summed = CHECKSUM_NONE;
7534 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
7536 queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7537 dev->last_rx = jiffies;
7539 sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7544 * s2io_link - stops/starts the Tx queue.
7545 * @sp : private member of the device structure, which is a pointer to the
7546 * s2io_nic structure.
7547 * @link : inidicates whether link is UP/DOWN.
7549 * This function stops/starts the Tx queue depending on whether the link
7550 * status of the NIC is is down or up. This is called by the Alarm
7551 * interrupt handler whenever a link change interrupt comes up.
7556 static void s2io_link(struct s2io_nic * sp, int link)
7558 struct net_device *dev = (struct net_device *) sp->dev;
7560 if (link != sp->last_link_state) {
7562 if (link == LINK_DOWN) {
7563 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7564 s2io_stop_all_tx_queue(sp);
7565 netif_carrier_off(dev);
7566 if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7567 sp->mac_control.stats_info->sw_stat.link_up_time =
7568 jiffies - sp->start_time;
7569 sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7571 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7572 if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7573 sp->mac_control.stats_info->sw_stat.link_down_time =
7574 jiffies - sp->start_time;
7575 sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7576 netif_carrier_on(dev);
7577 s2io_wake_all_tx_queue(sp);
7580 sp->last_link_state = link;
7581 sp->start_time = jiffies;
7585 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7586 * @sp : private member of the device structure, which is a pointer to the
7587 * s2io_nic structure.
7589 * This function initializes a few of the PCI and PCI-X configuration registers
7590 * with recommended values.
7595 static void s2io_init_pci(struct s2io_nic * sp)
7597 u16 pci_cmd = 0, pcix_cmd = 0;
7599 /* Enable Data Parity Error Recovery in PCI-X command register. */
7600 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7602 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7604 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7607 /* Set the PErr Response bit in PCI command register. */
7608 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7609 pci_write_config_word(sp->pdev, PCI_COMMAND,
7610 (pci_cmd | PCI_COMMAND_PARITY));
7611 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7614 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7617 if ((tx_fifo_num > MAX_TX_FIFOS) ||
7618 (tx_fifo_num < 1)) {
7619 DBG_PRINT(ERR_DBG, "s2io: Requested number of tx fifos "
7620 "(%d) not supported\n", tx_fifo_num);
7622 if (tx_fifo_num < 1)
7625 tx_fifo_num = MAX_TX_FIFOS;
7627 DBG_PRINT(ERR_DBG, "s2io: Default to %d ", tx_fifo_num);
7628 DBG_PRINT(ERR_DBG, "tx fifos\n");
7632 *dev_multiq = multiq;
7634 if (tx_steering_type && (1 == tx_fifo_num)) {
7635 if (tx_steering_type != TX_DEFAULT_STEERING)
7637 "s2io: Tx steering is not supported with "
7638 "one fifo. Disabling Tx steering.\n");
7639 tx_steering_type = NO_STEERING;
7642 if ((tx_steering_type < NO_STEERING) ||
7643 (tx_steering_type > TX_DEFAULT_STEERING)) {
7644 DBG_PRINT(ERR_DBG, "s2io: Requested transmit steering not "
7646 DBG_PRINT(ERR_DBG, "s2io: Disabling transmit steering\n");
7647 tx_steering_type = NO_STEERING;
7650 if (rx_ring_num > MAX_RX_RINGS) {
7651 DBG_PRINT(ERR_DBG, "s2io: Requested number of rx rings not "
7653 DBG_PRINT(ERR_DBG, "s2io: Default to %d rx rings\n",
7655 rx_ring_num = MAX_RX_RINGS;
7658 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7659 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7660 "Defaulting to INTA\n");
7661 *dev_intr_type = INTA;
7664 if ((*dev_intr_type == MSI_X) &&
7665 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7666 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7667 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7668 "Defaulting to INTA\n");
7669 *dev_intr_type = INTA;
7672 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7673 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7674 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7681 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7682 * or Traffic class respectively.
7683 * @nic: device private variable
7684 * Description: The function configures the receive steering to
7685 * desired receive ring.
7686 * Return Value: SUCCESS on success and
7687 * '-1' on failure (endian settings incorrect).
7689 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7691 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7692 register u64 val64 = 0;
7694 if (ds_codepoint > 63)
7697 val64 = RTS_DS_MEM_DATA(ring);
7698 writeq(val64, &bar0->rts_ds_mem_data);
7700 val64 = RTS_DS_MEM_CTRL_WE |
7701 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7702 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7704 writeq(val64, &bar0->rts_ds_mem_ctrl);
7706 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7707 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7712 * s2io_init_nic - Initialization of the adapter .
7713 * @pdev : structure containing the PCI related information of the device.
7714 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7716 * The function initializes an adapter identified by the pci_dec structure.
7717 * All OS related initialization including memory and device structure and
7718 * initlaization of the device private variable is done. Also the swapper
7719 * control register is initialized to enable read and write into the I/O
7720 * registers of the device.
7722 * returns 0 on success and negative on failure.
7725 static int __devinit
7726 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7728 struct s2io_nic *sp;
7729 struct net_device *dev;
7731 int dma_flag = FALSE;
7732 u32 mac_up, mac_down;
7733 u64 val64 = 0, tmp64 = 0;
7734 struct XENA_dev_config __iomem *bar0 = NULL;
7736 struct mac_info *mac_control;
7737 struct config_param *config;
7739 u8 dev_intr_type = intr_type;
7741 DECLARE_MAC_BUF(mac);
7743 ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7747 if ((ret = pci_enable_device(pdev))) {
7749 "s2io_init_nic: pci_enable_device failed\n");
7753 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
7754 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7756 if (pci_set_consistent_dma_mask
7757 (pdev, DMA_64BIT_MASK)) {
7759 "Unable to obtain 64bit DMA for \
7760 consistent allocations\n");
7761 pci_disable_device(pdev);
7764 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
7765 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7767 pci_disable_device(pdev);
7770 if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
7771 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
7772 pci_disable_device(pdev);
7776 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7778 dev = alloc_etherdev(sizeof(struct s2io_nic));
7780 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7781 pci_disable_device(pdev);
7782 pci_release_regions(pdev);
7786 pci_set_master(pdev);
7787 pci_set_drvdata(pdev, dev);
7788 SET_NETDEV_DEV(dev, &pdev->dev);
7790 /* Private member variable initialized to s2io NIC structure */
7792 memset(sp, 0, sizeof(struct s2io_nic));
7795 sp->high_dma_flag = dma_flag;
7796 sp->device_enabled_once = FALSE;
7797 if (rx_ring_mode == 1)
7798 sp->rxd_mode = RXD_MODE_1;
7799 if (rx_ring_mode == 2)
7800 sp->rxd_mode = RXD_MODE_3B;
7802 sp->config.intr_type = dev_intr_type;
7804 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7805 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7806 sp->device_type = XFRAME_II_DEVICE;
7808 sp->device_type = XFRAME_I_DEVICE;
7810 sp->lro = lro_enable;
7812 /* Initialize some PCI/PCI-X fields of the NIC. */
7816 * Setting the device configuration parameters.
7817 * Most of these parameters can be specified by the user during
7818 * module insertion as they are module loadable parameters. If
7819 * these parameters are not not specified during load time, they
7820 * are initialized with default values.
7822 mac_control = &sp->mac_control;
7823 config = &sp->config;
7825 config->napi = napi;
7826 config->tx_steering_type = tx_steering_type;
7828 /* Tx side parameters. */
7829 if (config->tx_steering_type == TX_PRIORITY_STEERING)
7830 config->tx_fifo_num = MAX_TX_FIFOS;
7832 config->tx_fifo_num = tx_fifo_num;
7834 /* Initialize the fifos used for tx steering */
7835 if (config->tx_fifo_num < 5) {
7836 if (config->tx_fifo_num == 1)
7837 sp->total_tcp_fifos = 1;
7839 sp->total_tcp_fifos = config->tx_fifo_num - 1;
7840 sp->udp_fifo_idx = config->tx_fifo_num - 1;
7841 sp->total_udp_fifos = 1;
7842 sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7844 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7845 FIFO_OTHER_MAX_NUM);
7846 sp->udp_fifo_idx = sp->total_tcp_fifos;
7847 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7848 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7851 config->multiq = dev_multiq;
7852 for (i = 0; i < config->tx_fifo_num; i++) {
7853 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7854 config->tx_cfg[i].fifo_priority = i;
7857 /* mapping the QoS priority to the configured fifos */
7858 for (i = 0; i < MAX_TX_FIFOS; i++)
7859 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7861 /* map the hashing selector table to the configured fifos */
7862 for (i = 0; i < config->tx_fifo_num; i++)
7863 sp->fifo_selector[i] = fifo_selector[i];
7866 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7867 for (i = 0; i < config->tx_fifo_num; i++) {
7868 config->tx_cfg[i].f_no_snoop =
7869 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7870 if (config->tx_cfg[i].fifo_len < 65) {
7871 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7875 /* + 2 because one Txd for skb->data and one Txd for UFO */
7876 config->max_txds = MAX_SKB_FRAGS + 2;
7878 /* Rx side parameters. */
7879 config->rx_ring_num = rx_ring_num;
7880 for (i = 0; i < config->rx_ring_num; i++) {
7881 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7882 (rxd_count[sp->rxd_mode] + 1);
7883 config->rx_cfg[i].ring_priority = i;
7884 mac_control->rings[i].rx_bufs_left = 0;
7885 mac_control->rings[i].rxd_mode = sp->rxd_mode;
7886 mac_control->rings[i].rxd_count = rxd_count[sp->rxd_mode];
7887 mac_control->rings[i].pdev = sp->pdev;
7888 mac_control->rings[i].dev = sp->dev;
7891 for (i = 0; i < rx_ring_num; i++) {
7892 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7893 config->rx_cfg[i].f_no_snoop =
7894 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7897 /* Setting Mac Control parameters */
7898 mac_control->rmac_pause_time = rmac_pause_time;
7899 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7900 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7903 /* initialize the shared memory used by the NIC and the host */
7904 if (init_shared_mem(sp)) {
7905 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7908 goto mem_alloc_failed;
7911 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7912 pci_resource_len(pdev, 0));
7914 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7917 goto bar0_remap_failed;
7920 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7921 pci_resource_len(pdev, 2));
7923 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7926 goto bar1_remap_failed;
7929 dev->irq = pdev->irq;
7930 dev->base_addr = (unsigned long) sp->bar0;
7932 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7933 for (j = 0; j < MAX_TX_FIFOS; j++) {
7934 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7935 (sp->bar1 + (j * 0x00020000));
7938 /* Driver entry points */
7939 dev->open = &s2io_open;
7940 dev->stop = &s2io_close;
7941 dev->hard_start_xmit = &s2io_xmit;
7942 dev->get_stats = &s2io_get_stats;
7943 dev->set_multicast_list = &s2io_set_multicast;
7944 dev->do_ioctl = &s2io_ioctl;
7945 dev->set_mac_address = &s2io_set_mac_addr;
7946 dev->change_mtu = &s2io_change_mtu;
7947 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7948 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7949 dev->vlan_rx_register = s2io_vlan_rx_register;
7950 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
7953 * will use eth_mac_addr() for dev->set_mac_address
7954 * mac address will be set every time dev->open() is called
7956 #ifdef CONFIG_NET_POLL_CONTROLLER
7957 dev->poll_controller = s2io_netpoll;
7960 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7961 if (sp->high_dma_flag == TRUE)
7962 dev->features |= NETIF_F_HIGHDMA;
7963 dev->features |= NETIF_F_TSO;
7964 dev->features |= NETIF_F_TSO6;
7965 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
7966 dev->features |= NETIF_F_UFO;
7967 dev->features |= NETIF_F_HW_CSUM;
7970 dev->features |= NETIF_F_MULTI_QUEUE;
7971 dev->tx_timeout = &s2io_tx_watchdog;
7972 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7973 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7974 INIT_WORK(&sp->set_link_task, s2io_set_link);
7976 pci_save_state(sp->pdev);
7978 /* Setting swapper control on the NIC, for proper reset operation */
7979 if (s2io_set_swapper(sp)) {
7980 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7983 goto set_swap_failed;
7986 /* Verify if the Herc works on the slot its placed into */
7987 if (sp->device_type & XFRAME_II_DEVICE) {
7988 mode = s2io_verify_pci_mode(sp);
7990 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7991 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7993 goto set_swap_failed;
7997 if (sp->config.intr_type == MSI_X) {
7998 sp->num_entries = config->rx_ring_num + 1;
7999 ret = s2io_enable_msi_x(sp);
8002 ret = s2io_test_msi(sp);
8003 /* rollback MSI-X, will re-enable during add_isr() */
8004 remove_msix_isr(sp);
8009 "%s: MSI-X requested but failed to enable\n",
8011 sp->config.intr_type = INTA;
8015 if (config->intr_type == MSI_X) {
8016 for (i = 0; i < config->rx_ring_num ; i++)
8017 netif_napi_add(dev, &mac_control->rings[i].napi,
8018 s2io_poll_msix, 64);
8020 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
8023 /* Not needed for Herc */
8024 if (sp->device_type & XFRAME_I_DEVICE) {
8026 * Fix for all "FFs" MAC address problems observed on
8029 fix_mac_address(sp);
8034 * MAC address initialization.
8035 * For now only one mac address will be read and used.
8038 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
8039 RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
8040 writeq(val64, &bar0->rmac_addr_cmd_mem);
8041 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
8042 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
8043 tmp64 = readq(&bar0->rmac_addr_data0_mem);
8044 mac_down = (u32) tmp64;
8045 mac_up = (u32) (tmp64 >> 32);
8047 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
8048 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
8049 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
8050 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
8051 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
8052 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
8054 /* Set the factory defined MAC address initially */
8055 dev->addr_len = ETH_ALEN;
8056 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
8057 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
8059 /* initialize number of multicast & unicast MAC entries variables */
8060 if (sp->device_type == XFRAME_I_DEVICE) {
8061 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
8062 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
8063 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
8064 } else if (sp->device_type == XFRAME_II_DEVICE) {
8065 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
8066 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
8067 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
8070 /* store mac addresses from CAM to s2io_nic structure */
8071 do_s2io_store_unicast_mc(sp);
8073 /* Configure MSIX vector for number of rings configured plus one */
8074 if ((sp->device_type == XFRAME_II_DEVICE) &&
8075 (config->intr_type == MSI_X))
8076 sp->num_entries = config->rx_ring_num + 1;
8078 /* Store the values of the MSIX table in the s2io_nic structure */
8079 store_xmsi_data(sp);
8080 /* reset Nic and bring it to known state */
8084 * Initialize link state flags
8085 * and the card state parameter
8089 /* Initialize spinlocks */
8090 for (i = 0; i < sp->config.tx_fifo_num; i++)
8091 spin_lock_init(&mac_control->fifos[i].tx_lock);
8094 * SXE-002: Configure link and activity LED to init state
8097 subid = sp->pdev->subsystem_device;
8098 if ((subid & 0xFF) >= 0x07) {
8099 val64 = readq(&bar0->gpio_control);
8100 val64 |= 0x0000800000000000ULL;
8101 writeq(val64, &bar0->gpio_control);
8102 val64 = 0x0411040400000000ULL;
8103 writeq(val64, (void __iomem *) bar0 + 0x2700);
8104 val64 = readq(&bar0->gpio_control);
8107 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
8109 if (register_netdev(dev)) {
8110 DBG_PRINT(ERR_DBG, "Device registration failed\n");
8112 goto register_failed;
8115 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
8116 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
8117 sp->product_name, pdev->revision);
8118 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8119 s2io_driver_version);
8120 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n",
8121 dev->name, print_mac(mac, dev->dev_addr));
8122 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
8123 if (sp->device_type & XFRAME_II_DEVICE) {
8124 mode = s2io_print_pci_mode(sp);
8126 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
8128 unregister_netdev(dev);
8129 goto set_swap_failed;
8132 switch(sp->rxd_mode) {
8134 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8138 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8143 switch (sp->config.napi) {
8145 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8148 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8152 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8153 sp->config.tx_fifo_num);
8155 DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8156 sp->config.rx_ring_num);
8158 switch(sp->config.intr_type) {
8160 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8163 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8166 if (sp->config.multiq) {
8167 for (i = 0; i < sp->config.tx_fifo_num; i++)
8168 mac_control->fifos[i].multiq = config->multiq;
8169 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8172 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8175 switch (sp->config.tx_steering_type) {
8177 DBG_PRINT(ERR_DBG, "%s: No steering enabled for"
8178 " transmit\n", dev->name);
8180 case TX_PRIORITY_STEERING:
8181 DBG_PRINT(ERR_DBG, "%s: Priority steering enabled for"
8182 " transmit\n", dev->name);
8184 case TX_DEFAULT_STEERING:
8185 DBG_PRINT(ERR_DBG, "%s: Default steering enabled for"
8186 " transmit\n", dev->name);
8190 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8193 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
8194 " enabled\n", dev->name);
8195 /* Initialize device name */
8196 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
8199 * Make Link state as off at this point, when the Link change
8200 * interrupt comes the state will be automatically changed to
8203 netif_carrier_off(dev);
8214 free_shared_mem(sp);
8215 pci_disable_device(pdev);
8216 pci_release_regions(pdev);
8217 pci_set_drvdata(pdev, NULL);
8224 * s2io_rem_nic - Free the PCI device
8225 * @pdev: structure containing the PCI related information of the device.
8226 * Description: This function is called by the Pci subsystem to release a
8227 * PCI device and free up all resource held up by the device. This could
8228 * be in response to a Hot plug event or when the driver is to be removed
8232 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
8234 struct net_device *dev =
8235 (struct net_device *) pci_get_drvdata(pdev);
8236 struct s2io_nic *sp;
8239 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8243 flush_scheduled_work();
8246 unregister_netdev(dev);
8248 free_shared_mem(sp);
8251 pci_release_regions(pdev);
8252 pci_set_drvdata(pdev, NULL);
8254 pci_disable_device(pdev);
8258 * s2io_starter - Entry point for the driver
8259 * Description: This function is the entry point for the driver. It verifies
8260 * the module loadable parameters and initializes PCI configuration space.
8263 static int __init s2io_starter(void)
8265 return pci_register_driver(&s2io_driver);
8269 * s2io_closer - Cleanup routine for the driver
8270 * Description: This function is the cleanup routine for the driver. It unregist * ers the driver.
8273 static __exit void s2io_closer(void)
8275 pci_unregister_driver(&s2io_driver);
8276 DBG_PRINT(INIT_DBG, "cleanup done\n");
8279 module_init(s2io_starter);
8280 module_exit(s2io_closer);
8282 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8283 struct tcphdr **tcp, struct RxD_t *rxdp,
8284 struct s2io_nic *sp)
8287 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8289 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8290 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
8295 /* Checking for DIX type or DIX type with VLAN */
8297 || (l2_type == 4)) {
8298 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8300 * If vlan stripping is disabled and the frame is VLAN tagged,
8301 * shift the offset by the VLAN header size bytes.
8303 if ((!vlan_strip_flag) &&
8304 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8305 ip_off += HEADER_VLAN_SIZE;
8307 /* LLC, SNAP etc are considered non-mergeable */
8311 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
8312 ip_len = (u8)((*ip)->ihl);
8314 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8319 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8322 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8323 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
8324 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
8329 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8331 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
8334 static void initiate_new_session(struct lro *lro, u8 *l2h,
8335 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag)
8337 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8341 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8342 lro->tcp_ack = tcp->ack_seq;
8344 lro->total_len = ntohs(ip->tot_len);
8346 lro->vlan_tag = vlan_tag;
8348 * check if we saw TCP timestamp. Other consistency checks have
8349 * already been done.
8351 if (tcp->doff == 8) {
8353 ptr = (__be32 *)(tcp+1);
8355 lro->cur_tsval = ntohl(*(ptr+1));
8356 lro->cur_tsecr = *(ptr+2);
8361 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8363 struct iphdr *ip = lro->iph;
8364 struct tcphdr *tcp = lro->tcph;
8366 struct stat_block *statinfo = sp->mac_control.stats_info;
8367 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8369 /* Update L3 header */
8370 ip->tot_len = htons(lro->total_len);
8372 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
8375 /* Update L4 header */
8376 tcp->ack_seq = lro->tcp_ack;
8377 tcp->window = lro->window;
8379 /* Update tsecr field if this session has timestamps enabled */
8381 __be32 *ptr = (__be32 *)(tcp + 1);
8382 *(ptr+2) = lro->cur_tsecr;
8385 /* Update counters required for calculation of
8386 * average no. of packets aggregated.
8388 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
8389 statinfo->sw_stat.num_aggregations++;
8392 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8393 struct tcphdr *tcp, u32 l4_pyld)
8395 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8396 lro->total_len += l4_pyld;
8397 lro->frags_len += l4_pyld;
8398 lro->tcp_next_seq += l4_pyld;
8401 /* Update ack seq no. and window ad(from this pkt) in LRO object */
8402 lro->tcp_ack = tcp->ack_seq;
8403 lro->window = tcp->window;
8407 /* Update tsecr and tsval from this packet */
8408 ptr = (__be32 *)(tcp+1);
8409 lro->cur_tsval = ntohl(*(ptr+1));
8410 lro->cur_tsecr = *(ptr + 2);
8414 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8415 struct tcphdr *tcp, u32 tcp_pyld_len)
8419 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8421 if (!tcp_pyld_len) {
8422 /* Runt frame or a pure ack */
8426 if (ip->ihl != 5) /* IP has options */
8429 /* If we see CE codepoint in IP header, packet is not mergeable */
8430 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8433 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
8434 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
8435 tcp->ece || tcp->cwr || !tcp->ack) {
8437 * Currently recognize only the ack control word and
8438 * any other control field being set would result in
8439 * flushing the LRO session
8445 * Allow only one TCP timestamp option. Don't aggregate if
8446 * any other options are detected.
8448 if (tcp->doff != 5 && tcp->doff != 8)
8451 if (tcp->doff == 8) {
8452 ptr = (u8 *)(tcp + 1);
8453 while (*ptr == TCPOPT_NOP)
8455 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8458 /* Ensure timestamp value increases monotonically */
8460 if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
8463 /* timestamp echo reply should be non-zero */
8464 if (*((__be32 *)(ptr+6)) == 0)
8472 s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
8473 u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp,
8474 struct s2io_nic *sp)
8477 struct tcphdr *tcph;
8481 if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8483 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
8484 ip->saddr, ip->daddr);
8488 vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
8489 tcph = (struct tcphdr *)*tcp;
8490 *tcp_len = get_l4_pyld_length(ip, tcph);
8491 for (i=0; i<MAX_LRO_SESSIONS; i++) {
8492 struct lro *l_lro = &ring_data->lro0_n[i];
8493 if (l_lro->in_use) {
8494 if (check_for_socket_match(l_lro, ip, tcph))
8496 /* Sock pair matched */
8499 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8500 DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
8501 "0x%x, actual 0x%x\n", __FUNCTION__,
8502 (*lro)->tcp_next_seq,
8505 sp->mac_control.stats_info->
8506 sw_stat.outof_sequence_pkts++;
8511 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
8512 ret = 1; /* Aggregate */
8514 ret = 2; /* Flush both */
8520 /* Before searching for available LRO objects,
8521 * check if the pkt is L3/L4 aggregatable. If not
8522 * don't create new LRO session. Just send this
8525 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
8529 for (i=0; i<MAX_LRO_SESSIONS; i++) {
8530 struct lro *l_lro = &ring_data->lro0_n[i];
8531 if (!(l_lro->in_use)) {
8533 ret = 3; /* Begin anew */
8539 if (ret == 0) { /* sessions exceeded */
8540 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
8548 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
8552 update_L3L4_header(sp, *lro);
8555 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
8556 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8557 update_L3L4_header(sp, *lro);
8558 ret = 4; /* Flush the LRO */
8562 DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
8570 static void clear_lro_session(struct lro *lro)
8572 static u16 lro_struct_size = sizeof(struct lro);
8574 memset(lro, 0, lro_struct_size);
8577 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8579 struct net_device *dev = skb->dev;
8580 struct s2io_nic *sp = dev->priv;
8582 skb->protocol = eth_type_trans(skb, dev);
8583 if (sp->vlgrp && vlan_tag
8584 && (vlan_strip_flag)) {
8585 /* Queueing the vlan frame to the upper layer */
8586 if (sp->config.napi)
8587 vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
8589 vlan_hwaccel_rx(skb, sp->vlgrp, vlan_tag);
8591 if (sp->config.napi)
8592 netif_receive_skb(skb);
8598 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8599 struct sk_buff *skb,
8602 struct sk_buff *first = lro->parent;
8604 first->len += tcp_len;
8605 first->data_len = lro->frags_len;
8606 skb_pull(skb, (skb->len - tcp_len));
8607 if (skb_shinfo(first)->frag_list)
8608 lro->last_frag->next = skb;
8610 skb_shinfo(first)->frag_list = skb;
8611 first->truesize += skb->truesize;
8612 lro->last_frag = skb;
8613 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8618 * s2io_io_error_detected - called when PCI error is detected
8619 * @pdev: Pointer to PCI device
8620 * @state: The current pci connection state
8622 * This function is called after a PCI bus error affecting
8623 * this device has been detected.
8625 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8626 pci_channel_state_t state)
8628 struct net_device *netdev = pci_get_drvdata(pdev);
8629 struct s2io_nic *sp = netdev->priv;
8631 netif_device_detach(netdev);
8633 if (netif_running(netdev)) {
8634 /* Bring down the card, while avoiding PCI I/O */
8635 do_s2io_card_down(sp, 0);
8637 pci_disable_device(pdev);
8639 return PCI_ERS_RESULT_NEED_RESET;
8643 * s2io_io_slot_reset - called after the pci bus has been reset.
8644 * @pdev: Pointer to PCI device
8646 * Restart the card from scratch, as if from a cold-boot.
8647 * At this point, the card has exprienced a hard reset,
8648 * followed by fixups by BIOS, and has its config space
8649 * set up identically to what it was at cold boot.
8651 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8653 struct net_device *netdev = pci_get_drvdata(pdev);
8654 struct s2io_nic *sp = netdev->priv;
8656 if (pci_enable_device(pdev)) {
8657 printk(KERN_ERR "s2io: "
8658 "Cannot re-enable PCI device after reset.\n");
8659 return PCI_ERS_RESULT_DISCONNECT;
8662 pci_set_master(pdev);
8665 return PCI_ERS_RESULT_RECOVERED;
8669 * s2io_io_resume - called when traffic can start flowing again.
8670 * @pdev: Pointer to PCI device
8672 * This callback is called when the error recovery driver tells
8673 * us that its OK to resume normal operation.
8675 static void s2io_io_resume(struct pci_dev *pdev)
8677 struct net_device *netdev = pci_get_drvdata(pdev);
8678 struct s2io_nic *sp = netdev->priv;
8680 if (netif_running(netdev)) {
8681 if (s2io_card_up(sp)) {
8682 printk(KERN_ERR "s2io: "
8683 "Can't bring device back up after reset.\n");
8687 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8689 printk(KERN_ERR "s2io: "
8690 "Can't resetore mac addr after reset.\n");
8695 netif_device_attach(netdev);
8696 netif_wake_queue(netdev);