bnx2: Add TX multiqueue support.
[linux-2.6] / drivers / net / s2io.c
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2007 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
 18  *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
 28  * explanation of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *              values are 1, 2.
 36  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
41  * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
42  *     Possible values '1' for enable '0' for disable. Default is '0'
43  * lro_max_pkts: This parameter defines maximum number of packets can be
44  *     aggregated as a single large packet
45  * napi: This parameter used to enable/disable NAPI (polling Rx)
46  *     Possible values '1' for enable and '0' for disable. Default is '1'
47  * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48  *      Possible values '1' for enable and '0' for disable. Default is '0'
49  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50  *                 Possible values '1' for enable , '0' for disable.
51  *                 Default is '2' - which means disable in promisc mode
52  *                 and enable in non-promiscuous mode.
53  * multiq: This parameter used to enable/disable MULTIQUEUE support.
54  *      Possible values '1' for enable and '0' for disable. Default is '0'
55  ************************************************************************/
56
57 #include <linux/module.h>
58 #include <linux/types.h>
59 #include <linux/errno.h>
60 #include <linux/ioport.h>
61 #include <linux/pci.h>
62 #include <linux/dma-mapping.h>
63 #include <linux/kernel.h>
64 #include <linux/netdevice.h>
65 #include <linux/etherdevice.h>
66 #include <linux/skbuff.h>
67 #include <linux/init.h>
68 #include <linux/delay.h>
69 #include <linux/stddef.h>
70 #include <linux/ioctl.h>
71 #include <linux/timex.h>
72 #include <linux/ethtool.h>
73 #include <linux/workqueue.h>
74 #include <linux/if_vlan.h>
75 #include <linux/ip.h>
76 #include <linux/tcp.h>
77 #include <net/tcp.h>
78
79 #include <asm/system.h>
80 #include <asm/uaccess.h>
81 #include <asm/io.h>
82 #include <asm/div64.h>
83 #include <asm/irq.h>
84
85 /* local include */
86 #include "s2io.h"
87 #include "s2io-regs.h"
88
#define DRV_VERSION "2.0.26.25"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

/* Per-rxd-mode descriptor geometry, indexed by nic->rxd_mode:
 * rxd_size is the descriptor size in bytes, rxd_count the number of
 * descriptors per receive block.
 * NOTE(review): index 0 appears to correspond to RXD_MODE_1 (struct RxD1)
 * and index 1 to the 3-buffer mode (struct RxD3), based on the sizing
 * logic in init_shared_mem — confirm against s2io.h.
 */
static int rxd_size[2] = {32,48};
static int rxd_count[2] = {127,85};
97
98 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
99 {
100         int ret;
101
102         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
103                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
104
105         return ret;
106 }
107
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 */
/* Evaluates to 1 only for Xframe-I devices whose subsystem id lies in
 * 0x600B-0x600D or 0x640B-0x640D; 0 for every other device/subid. */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
        (dev_type == XFRAME_I_DEVICE) ?                 \
                ((((subid >= 0x600B) && (subid <= 0x600D)) || \
                 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0

/* The link is considered up when neither the remote nor the local RMAC
 * fault bit is set in the adapter status register value. */
#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
                                      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
120
/* Returns non-zero when the __S2IO_STATE_CARD_UP bit is set in sp->state,
 * i.e. the adapter is currently marked operational. */
static inline int is_s2io_card_up(const struct s2io_nic * sp)
{
        return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
}
125
/* Ethtool related variables and Macros. */
/* Self-test names reported through ethtool.
 * NOTE(review): the order presumably must stay in sync with the result
 * array filled in by the driver's self-test handler — verify there. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
        "Register test\t(offline)",
        "Eeprom test\t(offline)",
        "Link test\t(online)",
        "RLDRAM test\t(offline)",
        "BIST Test\t(offline)"
};
134
/* Names of the hardware MAC/PCI statistics common to all Xframe chips,
 * exported via `ethtool -S`.
 * NOTE(review): ordering presumably mirrors the hardware stats block
 * copied out in the ethtool stats handler — verify before reordering. */
static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
        {"tmac_frms"},
        {"tmac_data_octets"},
        {"tmac_drop_frms"},
        {"tmac_mcst_frms"},
        {"tmac_bcst_frms"},
        {"tmac_pause_ctrl_frms"},
        {"tmac_ttl_octets"},
        {"tmac_ucst_frms"},
        {"tmac_nucst_frms"},
        {"tmac_any_err_frms"},
        {"tmac_ttl_less_fb_octets"},
        {"tmac_vld_ip_octets"},
        {"tmac_vld_ip"},
        {"tmac_drop_ip"},
        {"tmac_icmp"},
        {"tmac_rst_tcp"},
        {"tmac_tcp"},
        {"tmac_udp"},
        {"rmac_vld_frms"},
        {"rmac_data_octets"},
        {"rmac_fcs_err_frms"},
        {"rmac_drop_frms"},
        {"rmac_vld_mcst_frms"},
        {"rmac_vld_bcst_frms"},
        {"rmac_in_rng_len_err_frms"},
        {"rmac_out_rng_len_err_frms"},
        {"rmac_long_frms"},
        {"rmac_pause_ctrl_frms"},
        {"rmac_unsup_ctrl_frms"},
        {"rmac_ttl_octets"},
        {"rmac_accepted_ucst_frms"},
        {"rmac_accepted_nucst_frms"},
        {"rmac_discarded_frms"},
        {"rmac_drop_events"},
        {"rmac_ttl_less_fb_octets"},
        {"rmac_ttl_frms"},
        {"rmac_usized_frms"},
        {"rmac_osized_frms"},
        {"rmac_frag_frms"},
        {"rmac_jabber_frms"},
        {"rmac_ttl_64_frms"},
        {"rmac_ttl_65_127_frms"},
        {"rmac_ttl_128_255_frms"},
        {"rmac_ttl_256_511_frms"},
        {"rmac_ttl_512_1023_frms"},
        {"rmac_ttl_1024_1518_frms"},
        {"rmac_ip"},
        {"rmac_ip_octets"},
        {"rmac_hdr_err_ip"},
        {"rmac_drop_ip"},
        {"rmac_icmp"},
        {"rmac_tcp"},
        {"rmac_udp"},
        {"rmac_err_drp_udp"},
        {"rmac_xgmii_err_sym"},
        {"rmac_frms_q0"},
        {"rmac_frms_q1"},
        {"rmac_frms_q2"},
        {"rmac_frms_q3"},
        {"rmac_frms_q4"},
        {"rmac_frms_q5"},
        {"rmac_frms_q6"},
        {"rmac_frms_q7"},
        {"rmac_full_q0"},
        {"rmac_full_q1"},
        {"rmac_full_q2"},
        {"rmac_full_q3"},
        {"rmac_full_q4"},
        {"rmac_full_q5"},
        {"rmac_full_q6"},
        {"rmac_full_q7"},
        {"rmac_pause_cnt"},
        {"rmac_xgmii_data_err_cnt"},
        {"rmac_xgmii_ctrl_err_cnt"},
        {"rmac_accepted_ip"},
        {"rmac_err_tcp"},
        {"rd_req_cnt"},
        {"new_rd_req_cnt"},
        {"new_rd_req_rtry_cnt"},
        {"rd_rtry_cnt"},
        {"wr_rtry_rd_ack_cnt"},
        {"wr_req_cnt"},
        {"new_wr_req_cnt"},
        {"new_wr_req_rtry_cnt"},
        {"wr_rtry_cnt"},
        {"wr_disc_cnt"},
        {"rd_rtry_wr_ack_cnt"},
        {"txp_wr_cnt"},
        {"txd_rd_cnt"},
        {"txd_wr_cnt"},
        {"rxd_rd_cnt"},
        {"rxd_wr_cnt"},
        {"txf_rd_cnt"},
        {"rxf_wr_cnt"}
};
231
/* Additional hardware statistics available only on the newer chip
 * (see XFRAME_II_STAT_LEN below, which appends these to the base set). */
static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
        {"rmac_ttl_1519_4095_frms"},
        {"rmac_ttl_4096_8191_frms"},
        {"rmac_ttl_8192_max_frms"},
        {"rmac_ttl_gt_max_frms"},
        {"rmac_osized_alt_frms"},
        {"rmac_jabber_alt_frms"},
        {"rmac_gt_max_alt_frms"},
        {"rmac_vlan_frms"},
        {"rmac_len_discard"},
        {"rmac_fcs_discard"},
        {"rmac_pf_discard"},
        {"rmac_da_discard"},
        {"rmac_red_discard"},
        {"rmac_rts_discard"},
        {"rmac_ingm_full_discard"},
        {"link_fault_cnt"}
};
250
/* Software (driver-maintained) statistics names; the first entry is a
 * section header string shown in the `ethtool -S` output. */
static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
        {"\n DRIVER STATISTICS"},
        {"single_bit_ecc_errs"},
        {"double_bit_ecc_errs"},
        {"parity_err_cnt"},
        {"serious_err_cnt"},
        {"soft_reset_cnt"},
        {"fifo_full_cnt"},
        {"ring_0_full_cnt"},
        {"ring_1_full_cnt"},
        {"ring_2_full_cnt"},
        {"ring_3_full_cnt"},
        {"ring_4_full_cnt"},
        {"ring_5_full_cnt"},
        {"ring_6_full_cnt"},
        {"ring_7_full_cnt"},
        {"alarm_transceiver_temp_high"},
        {"alarm_transceiver_temp_low"},
        {"alarm_laser_bias_current_high"},
        {"alarm_laser_bias_current_low"},
        {"alarm_laser_output_power_high"},
        {"alarm_laser_output_power_low"},
        {"warn_transceiver_temp_high"},
        {"warn_transceiver_temp_low"},
        {"warn_laser_bias_current_high"},
        {"warn_laser_bias_current_low"},
        {"warn_laser_output_power_high"},
        {"warn_laser_output_power_low"},
        {"lro_aggregated_pkts"},
        {"lro_flush_both_count"},
        {"lro_out_of_sequence_pkts"},
        {"lro_flush_due_to_max_pkts"},
        {"lro_avg_aggr_pkts"},
        {"mem_alloc_fail_cnt"},
        {"pci_map_fail_cnt"},
        {"watchdog_timer_cnt"},
        {"mem_allocated"},
        {"mem_freed"},
        {"link_up_cnt"},
        {"link_down_cnt"},
        {"link_up_time"},
        {"link_down_time"},
        {"tx_tcode_buf_abort_cnt"},
        {"tx_tcode_desc_abort_cnt"},
        {"tx_tcode_parity_err_cnt"},
        {"tx_tcode_link_loss_cnt"},
        {"tx_tcode_list_proc_err_cnt"},
        {"rx_tcode_parity_err_cnt"},
        {"rx_tcode_abort_cnt"},
        {"rx_tcode_parity_abort_cnt"},
        {"rx_tcode_rda_fail_cnt"},
        {"rx_tcode_unkn_prot_cnt"},
        {"rx_tcode_fcs_err_cnt"},
        {"rx_tcode_buf_size_err_cnt"},
        {"rx_tcode_rxd_corrupt_cnt"},
        {"rx_tcode_unkn_err_cnt"},
        {"tda_err_cnt"},
        {"pfc_err_cnt"},
        {"pcc_err_cnt"},
        {"tti_err_cnt"},
        {"tpa_err_cnt"},
        {"sm_err_cnt"},
        {"lso_err_cnt"},
        {"mac_tmac_err_cnt"},
        {"mac_rmac_err_cnt"},
        {"xgxs_txgxs_err_cnt"},
        {"xgxs_rxgxs_err_cnt"},
        {"rc_err_cnt"},
        {"prc_pcix_err_cnt"},
        {"rpa_err_cnt"},
        {"rda_err_cnt"},
        {"rti_err_cnt"},
        {"mc_err_cnt"}
};
325
/* Element counts of the ethtool string tables above. */
#define S2IO_XENA_STAT_LEN      ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN  ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN    ARRAY_SIZE(ethtool_driver_stats_keys)

/* Xframe-I exports the base hardware stats plus the driver stats;
 * Xframe-II additionally exports the enhanced hardware stats. */
#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )

/* Total bytes needed for the ETH_GSTRING_LEN-sized name slots per chip. */
#define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
#define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )

/* Self-test count and the byte length of its name strings. */
#define S2IO_TEST_LEN   ARRAY_SIZE(s2io_gstrings)
#define S2IO_STRINGS_LEN        S2IO_TEST_LEN * ETH_GSTRING_LEN
338
/*
 * Initialize @timer with handler @handle and argument @arg, then arm it
 * to expire @exp jiffies from now.
 *
 * Wrapped in do { } while (0) so the macro expands as a single statement
 * and is safe inside an unbraced if/else; @arg and @exp are parenthesized
 * to avoid operator-precedence surprises in the expansion.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)                \
        do {                                                    \
                init_timer(&timer);                             \
                timer.function = handle;                        \
                timer.data = (unsigned long)(arg);              \
                mod_timer(&timer, (jiffies + (exp)));           \
        } while (0)
345 /* copy mac addr to def_mac_addr array */
346 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
347 {
348         sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
349         sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
350         sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
351         sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
352         sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
353         sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
354 }
355 /* Add the vlan */
356 static void s2io_vlan_rx_register(struct net_device *dev,
357                                         struct vlan_group *grp)
358 {
359         int i;
360         struct s2io_nic *nic = dev->priv;
361         unsigned long flags[MAX_TX_FIFOS];
362         struct mac_info *mac_control = &nic->mac_control;
363         struct config_param *config = &nic->config;
364
365         for (i = 0; i < config->tx_fifo_num; i++)
366                 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
367
368         nic->vlgrp = grp;
369         for (i = config->tx_fifo_num - 1; i >= 0; i--)
370                 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
371                                 flags[i]);
372 }
373
/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not.
 * The configured policy comes from the vlan_tag_strip module parameter. */
static int vlan_strip_flag;
376
377 /* Unregister the vlan */
378 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
379 {
380         int i;
381         struct s2io_nic *nic = dev->priv;
382         unsigned long flags[MAX_TX_FIFOS];
383         struct mac_info *mac_control = &nic->mac_control;
384         struct config_param *config = &nic->config;
385
386         for (i = 0; i < config->tx_fifo_num; i++)
387                 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
388
389         if (nic->vlgrp)
390                 vlan_group_set_device(nic->vlgrp, vid, NULL);
391
392         for (i = config->tx_fifo_num - 1; i >= 0; i--)
393                 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
394                         flags[i]);
395 }
396
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

/* Tables below are END_SIGN-terminated sequences of 64-bit values. */
#define END_SIGN        0x0
/* XAUI configuration sequence for the Herc (Xframe-II) chip. */
static const u64 herc_act_dtx_cfg[] = {
        /* Set address */
        0x8000051536750000ULL, 0x80000515367500E0ULL,
        /* Write data */
        0x8000051536750004ULL, 0x80000515367500E4ULL,
        /* Set address */
        0x80010515003F0000ULL, 0x80010515003F00E0ULL,
        /* Write data */
        0x80010515003F0004ULL, 0x80010515003F00E4ULL,
        /* Set address */
        0x801205150D440000ULL, 0x801205150D4400E0ULL,
        /* Write data */
        0x801205150D440004ULL, 0x801205150D4400E4ULL,
        /* Set address */
        0x80020515F2100000ULL, 0x80020515F21000E0ULL,
        /* Write data */
        0x80020515F2100004ULL, 0x80020515F21000E4ULL,
        /* Done */
        END_SIGN
};
423
/* XAUI configuration sequence for the Xena (Xframe-I) chip;
 * terminated by END_SIGN like herc_act_dtx_cfg above. */
static const u64 xena_dtx_cfg[] = {
        /* Set address */
        0x8000051500000000ULL, 0x80000515000000E0ULL,
        /* Write data */
        0x80000515D9350004ULL, 0x80000515D93500E4ULL,
        /* Set address */
        0x8001051500000000ULL, 0x80010515000000E0ULL,
        /* Write data */
        0x80010515001E0004ULL, 0x80010515001E00E4ULL,
        /* Set address */
        0x8002051500000000ULL, 0x80020515000000E0ULL,
        /* Write data */
        0x80020515F2100004ULL, 0x80020515F21000E4ULL,
        END_SIGN
};
439
/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.  END_SIGN-terminated register write sequence.
 */
static const u64 fix_mac[] = {
        0x0060000000000000ULL, 0x0060600000000000ULL,
        0x0040600000000000ULL, 0x0000600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0000600000000000ULL,
        0x0040600000000000ULL, 0x0060600000000000ULL,
        END_SIGN
};
461
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(multiq, 0);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */
static unsigned int lro_enable;
module_param_named(lro, lro_enable, uint, 0);

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-fifo Tx descriptor counts; fifo 0 gets a larger default. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
/* Receive blocks per ring. */
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
/* Per-ring RTS frame length.  NOTE(review): 0 appears to mean
 * "unconfigured" — confirm against where rts_frm_len is consumed. */
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
509
/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
         PCI_ANY_ID, PCI_ANY_ID},
        {0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);

/* PCI error recovery (AER) callbacks: detect, reset slot, resume. */
static struct pci_error_handlers s2io_err_handler = {
        .error_detected = s2io_io_error_detected,
        .slot_reset = s2io_io_slot_reset,
        .resume = s2io_io_resume,
};
533
/* PCI driver registration: probe/remove entry points and AER handlers. */
static struct pci_driver s2io_driver = {
      .name = "S2IO",
      .id_table = s2io_tbl,
      .probe = s2io_init_nic,
      .remove = __devexit_p(s2io_rem_nic),
      .err_handler = &s2io_err_handler,
};
541
/* A simplifier macro used both by init and free shared_mem Fns().
 * Number of pages needed to hold @len items at @per_each items per page
 * (i.e. a ceiling division).  Arguments are fully parenthesized so that
 * expression arguments such as `a + b` expand correctly. */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
544
545 /* netqueue manipulation helper functions */
546 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
547 {
548         if (!sp->config.multiq) {
549                 int i;
550
551                 for (i = 0; i < sp->config.tx_fifo_num; i++)
552                         sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
553         }
554         netif_tx_stop_all_queues(sp->dev);
555 }
556
557 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
558 {
559         if (!sp->config.multiq)
560                 sp->mac_control.fifos[fifo_no].queue_state =
561                         FIFO_QUEUE_STOP;
562
563         netif_tx_stop_all_queues(sp->dev);
564 }
565
566 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
567 {
568         if (!sp->config.multiq) {
569                 int i;
570
571                 for (i = 0; i < sp->config.tx_fifo_num; i++)
572                         sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
573         }
574         netif_tx_start_all_queues(sp->dev);
575 }
576
577 static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
578 {
579         if (!sp->config.multiq)
580                 sp->mac_control.fifos[fifo_no].queue_state =
581                         FIFO_QUEUE_START;
582
583         netif_tx_start_all_queues(sp->dev);
584 }
585
586 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
587 {
588         if (!sp->config.multiq) {
589                 int i;
590
591                 for (i = 0; i < sp->config.tx_fifo_num; i++)
592                         sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
593         }
594         netif_tx_wake_all_queues(sp->dev);
595 }
596
597 static inline void s2io_wake_tx_queue(
598         struct fifo_info *fifo, int cnt, u8 multiq)
599 {
600
601         if (multiq) {
602                 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
603                         netif_wake_subqueue(fifo->dev, fifo->fifo_no);
604         } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
605                 if (netif_queue_stopped(fifo->dev)) {
606                         fifo->queue_state = FIFO_QUEUE_START;
607                         netif_wake_queue(fifo->dev);
608                 }
609         }
610 }
611
612 /**
613  * init_shared_mem - Allocation and Initialization of Memory
614  * @nic: Device private variable.
615  * Description: The function allocates all the memory areas shared
616  * between the NIC and the driver. This includes Tx descriptors,
617  * Rx descriptors and the statistics block.
618  */
619
620 static int init_shared_mem(struct s2io_nic *nic)
621 {
622         u32 size;
623         void *tmp_v_addr, *tmp_v_addr_next;
624         dma_addr_t tmp_p_addr, tmp_p_addr_next;
625         struct RxD_block *pre_rxd_blk = NULL;
626         int i, j, blk_cnt;
627         int lst_size, lst_per_page;
628         struct net_device *dev = nic->dev;
629         unsigned long tmp;
630         struct buffAdd *ba;
631
632         struct mac_info *mac_control;
633         struct config_param *config;
634         unsigned long long mem_allocated = 0;
635
636         mac_control = &nic->mac_control;
637         config = &nic->config;
638
639
640         /* Allocation and initialization of TXDLs in FIOFs */
641         size = 0;
642         for (i = 0; i < config->tx_fifo_num; i++) {
643                 size += config->tx_cfg[i].fifo_len;
644         }
645         if (size > MAX_AVAILABLE_TXDS) {
646                 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
647                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
648                 return -EINVAL;
649         }
650
651         size = 0;
652         for (i = 0; i < config->tx_fifo_num; i++) {
653                 size = config->tx_cfg[i].fifo_len;
654                 /*
655                  * Legal values are from 2 to 8192
656                  */
657                 if (size < 2) {
658                         DBG_PRINT(ERR_DBG, "s2io: Invalid fifo len (%d)", size);
659                         DBG_PRINT(ERR_DBG, "for fifo %d\n", i);
660                         DBG_PRINT(ERR_DBG, "s2io: Legal values for fifo len"
661                                 "are 2 to 8192\n");
662                         return -EINVAL;
663                 }
664         }
665
666         lst_size = (sizeof(struct TxD) * config->max_txds);
667         lst_per_page = PAGE_SIZE / lst_size;
668
669         for (i = 0; i < config->tx_fifo_num; i++) {
670                 int fifo_len = config->tx_cfg[i].fifo_len;
671                 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
672                 mac_control->fifos[i].list_info = kzalloc(list_holder_size,
673                                                           GFP_KERNEL);
674                 if (!mac_control->fifos[i].list_info) {
675                         DBG_PRINT(INFO_DBG,
676                                   "Malloc failed for list_info\n");
677                         return -ENOMEM;
678                 }
679                 mem_allocated += list_holder_size;
680         }
681         for (i = 0; i < config->tx_fifo_num; i++) {
682                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
683                                                 lst_per_page);
684                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
685                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
686                     config->tx_cfg[i].fifo_len - 1;
687                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
688                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
689                     config->tx_cfg[i].fifo_len - 1;
690                 mac_control->fifos[i].fifo_no = i;
691                 mac_control->fifos[i].nic = nic;
692                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
693                 mac_control->fifos[i].dev = dev;
694
695                 for (j = 0; j < page_num; j++) {
696                         int k = 0;
697                         dma_addr_t tmp_p;
698                         void *tmp_v;
699                         tmp_v = pci_alloc_consistent(nic->pdev,
700                                                      PAGE_SIZE, &tmp_p);
701                         if (!tmp_v) {
702                                 DBG_PRINT(INFO_DBG,
703                                           "pci_alloc_consistent ");
704                                 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
705                                 return -ENOMEM;
706                         }
707                         /* If we got a zero DMA address(can happen on
708                          * certain platforms like PPC), reallocate.
709                          * Store virtual address of page we don't want,
710                          * to be freed later.
711                          */
712                         if (!tmp_p) {
713                                 mac_control->zerodma_virt_addr = tmp_v;
714                                 DBG_PRINT(INIT_DBG,
715                                 "%s: Zero DMA address for TxDL. ", dev->name);
716                                 DBG_PRINT(INIT_DBG,
717                                 "Virtual address %p\n", tmp_v);
718                                 tmp_v = pci_alloc_consistent(nic->pdev,
719                                                      PAGE_SIZE, &tmp_p);
720                                 if (!tmp_v) {
721                                         DBG_PRINT(INFO_DBG,
722                                           "pci_alloc_consistent ");
723                                         DBG_PRINT(INFO_DBG, "failed for TxDL\n");
724                                         return -ENOMEM;
725                                 }
726                                 mem_allocated += PAGE_SIZE;
727                         }
728                         while (k < lst_per_page) {
729                                 int l = (j * lst_per_page) + k;
730                                 if (l == config->tx_cfg[i].fifo_len)
731                                         break;
732                                 mac_control->fifos[i].list_info[l].list_virt_addr =
733                                     tmp_v + (k * lst_size);
734                                 mac_control->fifos[i].list_info[l].list_phy_addr =
735                                     tmp_p + (k * lst_size);
736                                 k++;
737                         }
738                 }
739         }
740
741         for (i = 0; i < config->tx_fifo_num; i++) {
742                 size = config->tx_cfg[i].fifo_len;
743                 mac_control->fifos[i].ufo_in_band_v
744                         = kcalloc(size, sizeof(u64), GFP_KERNEL);
745                 if (!mac_control->fifos[i].ufo_in_band_v)
746                         return -ENOMEM;
747                 mem_allocated += (size * sizeof(u64));
748         }
749
750         /* Allocation and initialization of RXDs in Rings */
751         size = 0;
752         for (i = 0; i < config->rx_ring_num; i++) {
753                 if (config->rx_cfg[i].num_rxd %
754                     (rxd_count[nic->rxd_mode] + 1)) {
755                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
756                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
757                                   i);
758                         DBG_PRINT(ERR_DBG, "RxDs per Block");
759                         return FAILURE;
760                 }
761                 size += config->rx_cfg[i].num_rxd;
762                 mac_control->rings[i].block_count =
763                         config->rx_cfg[i].num_rxd /
764                         (rxd_count[nic->rxd_mode] + 1 );
765                 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
766                         mac_control->rings[i].block_count;
767         }
768         if (nic->rxd_mode == RXD_MODE_1)
769                 size = (size * (sizeof(struct RxD1)));
770         else
771                 size = (size * (sizeof(struct RxD3)));
772
773         for (i = 0; i < config->rx_ring_num; i++) {
774                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
775                 mac_control->rings[i].rx_curr_get_info.offset = 0;
776                 mac_control->rings[i].rx_curr_get_info.ring_len =
777                     config->rx_cfg[i].num_rxd - 1;
778                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
779                 mac_control->rings[i].rx_curr_put_info.offset = 0;
780                 mac_control->rings[i].rx_curr_put_info.ring_len =
781                     config->rx_cfg[i].num_rxd - 1;
782                 mac_control->rings[i].nic = nic;
783                 mac_control->rings[i].ring_no = i;
784                 mac_control->rings[i].lro = lro_enable;
785
786                 blk_cnt = config->rx_cfg[i].num_rxd /
787                                 (rxd_count[nic->rxd_mode] + 1);
788                 /*  Allocating all the Rx blocks */
789                 for (j = 0; j < blk_cnt; j++) {
790                         struct rx_block_info *rx_blocks;
791                         int l;
792
793                         rx_blocks = &mac_control->rings[i].rx_blocks[j];
794                         size = SIZE_OF_BLOCK; //size is always page size
795                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
796                                                           &tmp_p_addr);
797                         if (tmp_v_addr == NULL) {
798                                 /*
799                                  * In case of failure, free_shared_mem()
800                                  * is called, which should free any
801                                  * memory that was alloced till the
802                                  * failure happened.
803                                  */
804                                 rx_blocks->block_virt_addr = tmp_v_addr;
805                                 return -ENOMEM;
806                         }
807                         mem_allocated += size;
808                         memset(tmp_v_addr, 0, size);
809                         rx_blocks->block_virt_addr = tmp_v_addr;
810                         rx_blocks->block_dma_addr = tmp_p_addr;
811                         rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
812                                                   rxd_count[nic->rxd_mode],
813                                                   GFP_KERNEL);
814                         if (!rx_blocks->rxds)
815                                 return -ENOMEM;
816                         mem_allocated +=
817                         (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
818                         for (l=0; l<rxd_count[nic->rxd_mode];l++) {
819                                 rx_blocks->rxds[l].virt_addr =
820                                         rx_blocks->block_virt_addr +
821                                         (rxd_size[nic->rxd_mode] * l);
822                                 rx_blocks->rxds[l].dma_addr =
823                                         rx_blocks->block_dma_addr +
824                                         (rxd_size[nic->rxd_mode] * l);
825                         }
826                 }
827                 /* Interlinking all Rx Blocks */
828                 for (j = 0; j < blk_cnt; j++) {
829                         tmp_v_addr =
830                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
831                         tmp_v_addr_next =
832                                 mac_control->rings[i].rx_blocks[(j + 1) %
833                                               blk_cnt].block_virt_addr;
834                         tmp_p_addr =
835                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
836                         tmp_p_addr_next =
837                                 mac_control->rings[i].rx_blocks[(j + 1) %
838                                               blk_cnt].block_dma_addr;
839
840                         pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
841                         pre_rxd_blk->reserved_2_pNext_RxD_block =
842                             (unsigned long) tmp_v_addr_next;
843                         pre_rxd_blk->pNext_RxD_Blk_physical =
844                             (u64) tmp_p_addr_next;
845                 }
846         }
847         if (nic->rxd_mode == RXD_MODE_3B) {
848                 /*
849                  * Allocation of Storages for buffer addresses in 2BUFF mode
850                  * and the buffers as well.
851                  */
852                 for (i = 0; i < config->rx_ring_num; i++) {
853                         blk_cnt = config->rx_cfg[i].num_rxd /
854                            (rxd_count[nic->rxd_mode]+ 1);
855                         mac_control->rings[i].ba =
856                                 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
857                                      GFP_KERNEL);
858                         if (!mac_control->rings[i].ba)
859                                 return -ENOMEM;
860                         mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
861                         for (j = 0; j < blk_cnt; j++) {
862                                 int k = 0;
863                                 mac_control->rings[i].ba[j] =
864                                         kmalloc((sizeof(struct buffAdd) *
865                                                 (rxd_count[nic->rxd_mode] + 1)),
866                                                 GFP_KERNEL);
867                                 if (!mac_control->rings[i].ba[j])
868                                         return -ENOMEM;
869                                 mem_allocated += (sizeof(struct buffAdd) *  \
870                                         (rxd_count[nic->rxd_mode] + 1));
871                                 while (k != rxd_count[nic->rxd_mode]) {
872                                         ba = &mac_control->rings[i].ba[j][k];
873
874                                         ba->ba_0_org = (void *) kmalloc
875                                             (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
876                                         if (!ba->ba_0_org)
877                                                 return -ENOMEM;
878                                         mem_allocated +=
879                                                 (BUF0_LEN + ALIGN_SIZE);
880                                         tmp = (unsigned long)ba->ba_0_org;
881                                         tmp += ALIGN_SIZE;
882                                         tmp &= ~((unsigned long) ALIGN_SIZE);
883                                         ba->ba_0 = (void *) tmp;
884
885                                         ba->ba_1_org = (void *) kmalloc
886                                             (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
887                                         if (!ba->ba_1_org)
888                                                 return -ENOMEM;
889                                         mem_allocated
890                                                 += (BUF1_LEN + ALIGN_SIZE);
891                                         tmp = (unsigned long) ba->ba_1_org;
892                                         tmp += ALIGN_SIZE;
893                                         tmp &= ~((unsigned long) ALIGN_SIZE);
894                                         ba->ba_1 = (void *) tmp;
895                                         k++;
896                                 }
897                         }
898                 }
899         }
900
901         /* Allocation and initialization of Statistics block */
902         size = sizeof(struct stat_block);
903         mac_control->stats_mem = pci_alloc_consistent
904             (nic->pdev, size, &mac_control->stats_mem_phy);
905
906         if (!mac_control->stats_mem) {
907                 /*
908                  * In case of failure, free_shared_mem() is called, which
909                  * should free any memory that was alloced till the
910                  * failure happened.
911                  */
912                 return -ENOMEM;
913         }
914         mem_allocated += size;
915         mac_control->stats_mem_sz = size;
916
917         tmp_v_addr = mac_control->stats_mem;
918         mac_control->stats_info = (struct stat_block *) tmp_v_addr;
919         memset(tmp_v_addr, 0, size);
920         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
921                   (unsigned long long) tmp_p_addr);
922         mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
923         return SUCCESS;
924 }
925
926 /**
927  * free_shared_mem - Free the allocated Memory
928  * @nic:  Device private variable.
929  * Description: This function is to free all memory locations allocated by
930  * the init_shared_mem() function and return it to the kernel.
931  */
932
933 static void free_shared_mem(struct s2io_nic *nic)
934 {
935         int i, j, blk_cnt, size;
936         void *tmp_v_addr;
937         dma_addr_t tmp_p_addr;
938         struct mac_info *mac_control;
939         struct config_param *config;
940         int lst_size, lst_per_page;
941         struct net_device *dev;
942         int page_num = 0;
943
944         if (!nic)
945                 return;
946
947         dev = nic->dev;
948
949         mac_control = &nic->mac_control;
950         config = &nic->config;
951
952         lst_size = (sizeof(struct TxD) * config->max_txds);
953         lst_per_page = PAGE_SIZE / lst_size;
954
955         for (i = 0; i < config->tx_fifo_num; i++) {
956                 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
957                                                         lst_per_page);
958                 for (j = 0; j < page_num; j++) {
959                         int mem_blks = (j * lst_per_page);
960                         if (!mac_control->fifos[i].list_info)
961                                 return;
962                         if (!mac_control->fifos[i].list_info[mem_blks].
963                                  list_virt_addr)
964                                 break;
965                         pci_free_consistent(nic->pdev, PAGE_SIZE,
966                                             mac_control->fifos[i].
967                                             list_info[mem_blks].
968                                             list_virt_addr,
969                                             mac_control->fifos[i].
970                                             list_info[mem_blks].
971                                             list_phy_addr);
972                         nic->mac_control.stats_info->sw_stat.mem_freed
973                                                 += PAGE_SIZE;
974                 }
975                 /* If we got a zero DMA address during allocation,
976                  * free the page now
977                  */
978                 if (mac_control->zerodma_virt_addr) {
979                         pci_free_consistent(nic->pdev, PAGE_SIZE,
980                                             mac_control->zerodma_virt_addr,
981                                             (dma_addr_t)0);
982                         DBG_PRINT(INIT_DBG,
983                                 "%s: Freeing TxDL with zero DMA addr. ",
984                                 dev->name);
985                         DBG_PRINT(INIT_DBG, "Virtual address %p\n",
986                                 mac_control->zerodma_virt_addr);
987                         nic->mac_control.stats_info->sw_stat.mem_freed
988                                                 += PAGE_SIZE;
989                 }
990                 kfree(mac_control->fifos[i].list_info);
991                 nic->mac_control.stats_info->sw_stat.mem_freed +=
992                 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
993         }
994
995         size = SIZE_OF_BLOCK;
996         for (i = 0; i < config->rx_ring_num; i++) {
997                 blk_cnt = mac_control->rings[i].block_count;
998                 for (j = 0; j < blk_cnt; j++) {
999                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
1000                                 block_virt_addr;
1001                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
1002                                 block_dma_addr;
1003                         if (tmp_v_addr == NULL)
1004                                 break;
1005                         pci_free_consistent(nic->pdev, size,
1006                                             tmp_v_addr, tmp_p_addr);
1007                         nic->mac_control.stats_info->sw_stat.mem_freed += size;
1008                         kfree(mac_control->rings[i].rx_blocks[j].rxds);
1009                         nic->mac_control.stats_info->sw_stat.mem_freed +=
1010                         ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
1011                 }
1012         }
1013
1014         if (nic->rxd_mode == RXD_MODE_3B) {
1015                 /* Freeing buffer storage addresses in 2BUFF mode. */
1016                 for (i = 0; i < config->rx_ring_num; i++) {
1017                         blk_cnt = config->rx_cfg[i].num_rxd /
1018                             (rxd_count[nic->rxd_mode] + 1);
1019                         for (j = 0; j < blk_cnt; j++) {
1020                                 int k = 0;
1021                                 if (!mac_control->rings[i].ba[j])
1022                                         continue;
1023                                 while (k != rxd_count[nic->rxd_mode]) {
1024                                         struct buffAdd *ba =
1025                                                 &mac_control->rings[i].ba[j][k];
1026                                         kfree(ba->ba_0_org);
1027                                         nic->mac_control.stats_info->sw_stat.\
1028                                         mem_freed += (BUF0_LEN + ALIGN_SIZE);
1029                                         kfree(ba->ba_1_org);
1030                                         nic->mac_control.stats_info->sw_stat.\
1031                                         mem_freed += (BUF1_LEN + ALIGN_SIZE);
1032                                         k++;
1033                                 }
1034                                 kfree(mac_control->rings[i].ba[j]);
1035                                 nic->mac_control.stats_info->sw_stat.mem_freed +=
1036                                         (sizeof(struct buffAdd) *
1037                                         (rxd_count[nic->rxd_mode] + 1));
1038                         }
1039                         kfree(mac_control->rings[i].ba);
1040                         nic->mac_control.stats_info->sw_stat.mem_freed +=
1041                         (sizeof(struct buffAdd *) * blk_cnt);
1042                 }
1043         }
1044
1045         for (i = 0; i < nic->config.tx_fifo_num; i++) {
1046                 if (mac_control->fifos[i].ufo_in_band_v) {
1047                         nic->mac_control.stats_info->sw_stat.mem_freed
1048                                 += (config->tx_cfg[i].fifo_len * sizeof(u64));
1049                         kfree(mac_control->fifos[i].ufo_in_band_v);
1050                 }
1051         }
1052
1053         if (mac_control->stats_mem) {
1054                 nic->mac_control.stats_info->sw_stat.mem_freed +=
1055                         mac_control->stats_mem_sz;
1056                 pci_free_consistent(nic->pdev,
1057                                     mac_control->stats_mem_sz,
1058                                     mac_control->stats_mem,
1059                                     mac_control->stats_mem_phy);
1060         }
1061 }
1062
1063 /**
1064  * s2io_verify_pci_mode -
1065  */
1066
1067 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1068 {
1069         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1070         register u64 val64 = 0;
1071         int     mode;
1072
1073         val64 = readq(&bar0->pci_mode);
1074         mode = (u8)GET_PCI_MODE(val64);
1075
1076         if ( val64 & PCI_MODE_UNKNOWN_MODE)
1077                 return -1;      /* Unknown PCI mode */
1078         return mode;
1079 }
1080
1081 #define NEC_VENID   0x1033
1082 #define NEC_DEVID   0x0125
1083 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1084 {
1085         struct pci_dev *tdev = NULL;
1086         while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1087                 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1088                         if (tdev->bus == s2io_pdev->bus->parent) {
1089                                 pci_dev_put(tdev);
1090                                 return 1;
1091                         }
1092                 }
1093         }
1094         return 0;
1095 }
1096
/* Bus clock in MHz, indexed by the mode value GET_PCI_MODE() yields;
 * consumed by s2io_print_pci_mode() to fill config->bus_speed.
 */
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1098 /**
1099  * s2io_print_pci_mode -
1100  */
1101 static int s2io_print_pci_mode(struct s2io_nic *nic)
1102 {
1103         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1104         register u64 val64 = 0;
1105         int     mode;
1106         struct config_param *config = &nic->config;
1107
1108         val64 = readq(&bar0->pci_mode);
1109         mode = (u8)GET_PCI_MODE(val64);
1110
1111         if ( val64 & PCI_MODE_UNKNOWN_MODE)
1112                 return -1;      /* Unknown PCI mode */
1113
1114         config->bus_speed = bus_speed[mode];
1115
1116         if (s2io_on_nec_bridge(nic->pdev)) {
1117                 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1118                                                         nic->dev->name);
1119                 return mode;
1120         }
1121
1122         if (val64 & PCI_MODE_32_BITS) {
1123                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1124         } else {
1125                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1126         }
1127
1128         switch(mode) {
1129                 case PCI_MODE_PCI_33:
1130                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
1131                         break;
1132                 case PCI_MODE_PCI_66:
1133                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
1134                         break;
1135                 case PCI_MODE_PCIX_M1_66:
1136                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
1137                         break;
1138                 case PCI_MODE_PCIX_M1_100:
1139                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
1140                         break;
1141                 case PCI_MODE_PCIX_M1_133:
1142                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1143                         break;
1144                 case PCI_MODE_PCIX_M2_66:
1145                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1146                         break;
1147                 case PCI_MODE_PCIX_M2_100:
1148                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1149                         break;
1150                 case PCI_MODE_PCIX_M2_133:
1151                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1152                         break;
1153                 default:
1154                         return -1;      /* Unsupported bus speed */
1155         }
1156
1157         return mode;
1158 }
1159
/**
 *  init_tti - Initialization transmit traffic interrupt scheme
 *  @nic: device private variable
 *  @link: link status (UP/DOWN) used to enable/disable continuous
 *  transmit interrupts
 *  Description: The function configures transmit traffic interrupts
 *  by programming one TTI memory entry per configured Tx fifo
 *  (data1/data2 staged, then committed with a strobed command write).
 *  Return Value:  SUCCESS on success and
 *  '-1' on failure
 */

static int init_tti(struct s2io_nic *nic, int link)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int i;
	struct config_param *config;

	config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++) {
		/*
		 * TTI Initialization. Default Tx timer gets us about
		 * 250 interrupts per sec. Continuous interrupts are enabled
		 * by default.
		 */
		if (nic->device_type == XFRAME_II_DEVICE) {
			/* Xframe-II: derive the timer count from the bus
			 * clock (bus_speed * 125 / 2) so the interrupt rate
			 * stays comparable across bus modes.
			 */
			int count = (nic->config.bus_speed * 125)/2;
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
		} else
			/* Xframe-I: fixed timer value */
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);

		/* Urgent-range thresholds A/B/C and the TIMER_AC enable bit */
		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
				TTI_DATA1_MEM_TX_URNG_B(0x10) |
				TTI_DATA1_MEM_TX_URNG_C(0x30) |
				TTI_DATA1_MEM_TX_TIMER_AC_EN;
		/* Continuous interrupts are only ever enabled on fifo 0,
		 * and only while the link is up (module param permitting).
		 */
		if (i == 0)
			if (use_continuous_tx_intrs && (link == LINK_UP))
				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
		writeq(val64, &bar0->tti_data1_mem);

		if (nic->config.intr_type == MSI_X) {
			/* MSI-X: higher utilization frame counts */
			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
				TTI_DATA2_MEM_TX_UFC_B(0x100) |
				TTI_DATA2_MEM_TX_UFC_C(0x200) |
				TTI_DATA2_MEM_TX_UFC_D(0x300);
		} else {
			/* INTA with default Tx steering: the fifos in the
			 * UDP range [udp_fifo_idx, udp_fifo_idx +
			 * total_udp_fifos) get their own, larger UFC set.
			 */
			if ((nic->config.tx_steering_type ==
				TX_DEFAULT_STEERING) &&
				(config->tx_fifo_num > 1) &&
				(i >= nic->udp_fifo_idx) &&
				(i < (nic->udp_fifo_idx +
				nic->total_udp_fifos)))
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
					TTI_DATA2_MEM_TX_UFC_B(0x80) |
					TTI_DATA2_MEM_TX_UFC_C(0x100) |
					TTI_DATA2_MEM_TX_UFC_D(0x120);
			else
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
					TTI_DATA2_MEM_TX_UFC_B(0x20) |
					TTI_DATA2_MEM_TX_UFC_C(0x40) |
					TTI_DATA2_MEM_TX_UFC_D(0x80);
		}

		writeq(val64, &bar0->tti_data2_mem);

		/* Commit the staged data1/data2 values into TTI entry i */
		val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD |
				TTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->tti_command_mem);

		/* Wait for the strobe bit to reset; give up if the
		 * hardware never acknowledges the command.
		 */
		if (wait_for_cmd_complete(&bar0->tti_command_mem,
			TTI_CMD_MEM_STROBE_NEW_CMD, S2IO_BIT_RESET) != SUCCESS)
			return FAILURE;
	}

	return SUCCESS;
}
1236
1237 /**
1238  *  init_nic - Initialization of hardware
1239  *  @nic: device private variable
1240  *  Description: The function sequentially configures every block
1241  *  of the H/W from their reset values.
1242  *  Return Value:  SUCCESS on success and
1243  *  '-1' on failure (endian settings incorrect).
1244  */
1245
1246 static int init_nic(struct s2io_nic *nic)
1247 {
1248         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1249         struct net_device *dev = nic->dev;
1250         register u64 val64 = 0;
1251         void __iomem *add;
1252         u32 time;
1253         int i, j;
1254         struct mac_info *mac_control;
1255         struct config_param *config;
1256         int dtx_cnt = 0;
1257         unsigned long long mem_share;
1258         int mem_size;
1259
1260         mac_control = &nic->mac_control;
1261         config = &nic->config;
1262
1263         /* to set the swapper controle on the card */
1264         if(s2io_set_swapper(nic)) {
1265                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1266                 return -EIO;
1267         }
1268
1269         /*
1270          * Herc requires EOI to be removed from reset before XGXS, so..
1271          */
1272         if (nic->device_type & XFRAME_II_DEVICE) {
1273                 val64 = 0xA500000000ULL;
1274                 writeq(val64, &bar0->sw_reset);
1275                 msleep(500);
1276                 val64 = readq(&bar0->sw_reset);
1277         }
1278
1279         /* Remove XGXS from reset state */
1280         val64 = 0;
1281         writeq(val64, &bar0->sw_reset);
1282         msleep(500);
1283         val64 = readq(&bar0->sw_reset);
1284
1285         /* Ensure that it's safe to access registers by checking
1286          * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1287          */
1288         if (nic->device_type == XFRAME_II_DEVICE) {
1289                 for (i = 0; i < 50; i++) {
1290                         val64 = readq(&bar0->adapter_status);
1291                         if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1292                                 break;
1293                         msleep(10);
1294                 }
1295                 if (i == 50)
1296                         return -ENODEV;
1297         }
1298
1299         /*  Enable Receiving broadcasts */
1300         add = &bar0->mac_cfg;
1301         val64 = readq(&bar0->mac_cfg);
1302         val64 |= MAC_RMAC_BCAST_ENABLE;
1303         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1304         writel((u32) val64, add);
1305         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1306         writel((u32) (val64 >> 32), (add + 4));
1307
1308         /* Read registers in all blocks */
1309         val64 = readq(&bar0->mac_int_mask);
1310         val64 = readq(&bar0->mc_int_mask);
1311         val64 = readq(&bar0->xgxs_int_mask);
1312
1313         /*  Set MTU */
1314         val64 = dev->mtu;
1315         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1316
1317         if (nic->device_type & XFRAME_II_DEVICE) {
1318                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1319                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1320                                           &bar0->dtx_control, UF);
1321                         if (dtx_cnt & 0x1)
1322                                 msleep(1); /* Necessary!! */
1323                         dtx_cnt++;
1324                 }
1325         } else {
1326                 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1327                         SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1328                                           &bar0->dtx_control, UF);
1329                         val64 = readq(&bar0->dtx_control);
1330                         dtx_cnt++;
1331                 }
1332         }
1333
1334         /*  Tx DMA Initialization */
1335         val64 = 0;
1336         writeq(val64, &bar0->tx_fifo_partition_0);
1337         writeq(val64, &bar0->tx_fifo_partition_1);
1338         writeq(val64, &bar0->tx_fifo_partition_2);
1339         writeq(val64, &bar0->tx_fifo_partition_3);
1340
1341
1342         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1343                 val64 |=
1344                     vBIT(config->tx_cfg[i].fifo_len - 1, ((j * 32) + 19),
1345                          13) | vBIT(config->tx_cfg[i].fifo_priority,
1346                                     ((j * 32) + 5), 3);
1347
1348                 if (i == (config->tx_fifo_num - 1)) {
1349                         if (i % 2 == 0)
1350                                 i++;
1351                 }
1352
1353                 switch (i) {
1354                 case 1:
1355                         writeq(val64, &bar0->tx_fifo_partition_0);
1356                         val64 = 0;
1357                         j = 0;
1358                         break;
1359                 case 3:
1360                         writeq(val64, &bar0->tx_fifo_partition_1);
1361                         val64 = 0;
1362                         j = 0;
1363                         break;
1364                 case 5:
1365                         writeq(val64, &bar0->tx_fifo_partition_2);
1366                         val64 = 0;
1367                         j = 0;
1368                         break;
1369                 case 7:
1370                         writeq(val64, &bar0->tx_fifo_partition_3);
1371                         val64 = 0;
1372                         j = 0;
1373                         break;
1374                 default:
1375                         j++;
1376                         break;
1377                 }
1378         }
1379
1380         /*
1381          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1382          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1383          */
1384         if ((nic->device_type == XFRAME_I_DEVICE) &&
1385                 (nic->pdev->revision < 4))
1386                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1387
1388         val64 = readq(&bar0->tx_fifo_partition_0);
1389         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1390                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1391
1392         /*
1393          * Initialization of Tx_PA_CONFIG register to ignore packet
1394          * integrity checking.
1395          */
1396         val64 = readq(&bar0->tx_pa_cfg);
1397         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1398             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1399         writeq(val64, &bar0->tx_pa_cfg);
1400
1401         /* Rx DMA intialization. */
1402         val64 = 0;
1403         for (i = 0; i < config->rx_ring_num; i++) {
1404                 val64 |=
1405                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1406                          3);
1407         }
1408         writeq(val64, &bar0->rx_queue_priority);
1409
1410         /*
1411          * Allocating equal share of memory to all the
1412          * configured Rings.
1413          */
1414         val64 = 0;
1415         if (nic->device_type & XFRAME_II_DEVICE)
1416                 mem_size = 32;
1417         else
1418                 mem_size = 64;
1419
1420         for (i = 0; i < config->rx_ring_num; i++) {
1421                 switch (i) {
1422                 case 0:
1423                         mem_share = (mem_size / config->rx_ring_num +
1424                                      mem_size % config->rx_ring_num);
1425                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1426                         continue;
1427                 case 1:
1428                         mem_share = (mem_size / config->rx_ring_num);
1429                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1430                         continue;
1431                 case 2:
1432                         mem_share = (mem_size / config->rx_ring_num);
1433                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1434                         continue;
1435                 case 3:
1436                         mem_share = (mem_size / config->rx_ring_num);
1437                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1438                         continue;
1439                 case 4:
1440                         mem_share = (mem_size / config->rx_ring_num);
1441                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1442                         continue;
1443                 case 5:
1444                         mem_share = (mem_size / config->rx_ring_num);
1445                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1446                         continue;
1447                 case 6:
1448                         mem_share = (mem_size / config->rx_ring_num);
1449                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1450                         continue;
1451                 case 7:
1452                         mem_share = (mem_size / config->rx_ring_num);
1453                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1454                         continue;
1455                 }
1456         }
1457         writeq(val64, &bar0->rx_queue_cfg);
1458
1459         /*
1460          * Filling Tx round robin registers
1461          * as per the number of FIFOs for equal scheduling priority
1462          */
1463         switch (config->tx_fifo_num) {
1464         case 1:
1465                 val64 = 0x0;
1466                 writeq(val64, &bar0->tx_w_round_robin_0);
1467                 writeq(val64, &bar0->tx_w_round_robin_1);
1468                 writeq(val64, &bar0->tx_w_round_robin_2);
1469                 writeq(val64, &bar0->tx_w_round_robin_3);
1470                 writeq(val64, &bar0->tx_w_round_robin_4);
1471                 break;
1472         case 2:
1473                 val64 = 0x0001000100010001ULL;
1474                 writeq(val64, &bar0->tx_w_round_robin_0);
1475                 writeq(val64, &bar0->tx_w_round_robin_1);
1476                 writeq(val64, &bar0->tx_w_round_robin_2);
1477                 writeq(val64, &bar0->tx_w_round_robin_3);
1478                 val64 = 0x0001000100000000ULL;
1479                 writeq(val64, &bar0->tx_w_round_robin_4);
1480                 break;
1481         case 3:
1482                 val64 = 0x0001020001020001ULL;
1483                 writeq(val64, &bar0->tx_w_round_robin_0);
1484                 val64 = 0x0200010200010200ULL;
1485                 writeq(val64, &bar0->tx_w_round_robin_1);
1486                 val64 = 0x0102000102000102ULL;
1487                 writeq(val64, &bar0->tx_w_round_robin_2);
1488                 val64 = 0x0001020001020001ULL;
1489                 writeq(val64, &bar0->tx_w_round_robin_3);
1490                 val64 = 0x0200010200000000ULL;
1491                 writeq(val64, &bar0->tx_w_round_robin_4);
1492                 break;
1493         case 4:
1494                 val64 = 0x0001020300010203ULL;
1495                 writeq(val64, &bar0->tx_w_round_robin_0);
1496                 writeq(val64, &bar0->tx_w_round_robin_1);
1497                 writeq(val64, &bar0->tx_w_round_robin_2);
1498                 writeq(val64, &bar0->tx_w_round_robin_3);
1499                 val64 = 0x0001020300000000ULL;
1500                 writeq(val64, &bar0->tx_w_round_robin_4);
1501                 break;
1502         case 5:
1503                 val64 = 0x0001020304000102ULL;
1504                 writeq(val64, &bar0->tx_w_round_robin_0);
1505                 val64 = 0x0304000102030400ULL;
1506                 writeq(val64, &bar0->tx_w_round_robin_1);
1507                 val64 = 0x0102030400010203ULL;
1508                 writeq(val64, &bar0->tx_w_round_robin_2);
1509                 val64 = 0x0400010203040001ULL;
1510                 writeq(val64, &bar0->tx_w_round_robin_3);
1511                 val64 = 0x0203040000000000ULL;
1512                 writeq(val64, &bar0->tx_w_round_robin_4);
1513                 break;
1514         case 6:
1515                 val64 = 0x0001020304050001ULL;
1516                 writeq(val64, &bar0->tx_w_round_robin_0);
1517                 val64 = 0x0203040500010203ULL;
1518                 writeq(val64, &bar0->tx_w_round_robin_1);
1519                 val64 = 0x0405000102030405ULL;
1520                 writeq(val64, &bar0->tx_w_round_robin_2);
1521                 val64 = 0x0001020304050001ULL;
1522                 writeq(val64, &bar0->tx_w_round_robin_3);
1523                 val64 = 0x0203040500000000ULL;
1524                 writeq(val64, &bar0->tx_w_round_robin_4);
1525                 break;
1526         case 7:
1527                 val64 = 0x0001020304050600ULL;
1528                 writeq(val64, &bar0->tx_w_round_robin_0);
1529                 val64 = 0x0102030405060001ULL;
1530                 writeq(val64, &bar0->tx_w_round_robin_1);
1531                 val64 = 0x0203040506000102ULL;
1532                 writeq(val64, &bar0->tx_w_round_robin_2);
1533                 val64 = 0x0304050600010203ULL;
1534                 writeq(val64, &bar0->tx_w_round_robin_3);
1535                 val64 = 0x0405060000000000ULL;
1536                 writeq(val64, &bar0->tx_w_round_robin_4);
1537                 break;
1538         case 8:
1539                 val64 = 0x0001020304050607ULL;
1540                 writeq(val64, &bar0->tx_w_round_robin_0);
1541                 writeq(val64, &bar0->tx_w_round_robin_1);
1542                 writeq(val64, &bar0->tx_w_round_robin_2);
1543                 writeq(val64, &bar0->tx_w_round_robin_3);
1544                 val64 = 0x0001020300000000ULL;
1545                 writeq(val64, &bar0->tx_w_round_robin_4);
1546                 break;
1547         }
1548
1549         /* Enable all configured Tx FIFO partitions */
1550         val64 = readq(&bar0->tx_fifo_partition_0);
1551         val64 |= (TX_FIFO_PARTITION_EN);
1552         writeq(val64, &bar0->tx_fifo_partition_0);
1553
1554         /* Filling the Rx round robin registers as per the
1555          * number of Rings and steering based on QoS with
1556          * equal priority.
1557          */
1558         switch (config->rx_ring_num) {
1559         case 1:
1560                 val64 = 0x0;
1561                 writeq(val64, &bar0->rx_w_round_robin_0);
1562                 writeq(val64, &bar0->rx_w_round_robin_1);
1563                 writeq(val64, &bar0->rx_w_round_robin_2);
1564                 writeq(val64, &bar0->rx_w_round_robin_3);
1565                 writeq(val64, &bar0->rx_w_round_robin_4);
1566
1567                 val64 = 0x8080808080808080ULL;
1568                 writeq(val64, &bar0->rts_qos_steering);
1569                 break;
1570         case 2:
1571                 val64 = 0x0001000100010001ULL;
1572                 writeq(val64, &bar0->rx_w_round_robin_0);
1573                 writeq(val64, &bar0->rx_w_round_robin_1);
1574                 writeq(val64, &bar0->rx_w_round_robin_2);
1575                 writeq(val64, &bar0->rx_w_round_robin_3);
1576                 val64 = 0x0001000100000000ULL;
1577                 writeq(val64, &bar0->rx_w_round_robin_4);
1578
1579                 val64 = 0x8080808040404040ULL;
1580                 writeq(val64, &bar0->rts_qos_steering);
1581                 break;
1582         case 3:
1583                 val64 = 0x0001020001020001ULL;
1584                 writeq(val64, &bar0->rx_w_round_robin_0);
1585                 val64 = 0x0200010200010200ULL;
1586                 writeq(val64, &bar0->rx_w_round_robin_1);
1587                 val64 = 0x0102000102000102ULL;
1588                 writeq(val64, &bar0->rx_w_round_robin_2);
1589                 val64 = 0x0001020001020001ULL;
1590                 writeq(val64, &bar0->rx_w_round_robin_3);
1591                 val64 = 0x0200010200000000ULL;
1592                 writeq(val64, &bar0->rx_w_round_robin_4);
1593
1594                 val64 = 0x8080804040402020ULL;
1595                 writeq(val64, &bar0->rts_qos_steering);
1596                 break;
1597         case 4:
1598                 val64 = 0x0001020300010203ULL;
1599                 writeq(val64, &bar0->rx_w_round_robin_0);
1600                 writeq(val64, &bar0->rx_w_round_robin_1);
1601                 writeq(val64, &bar0->rx_w_round_robin_2);
1602                 writeq(val64, &bar0->rx_w_round_robin_3);
1603                 val64 = 0x0001020300000000ULL;
1604                 writeq(val64, &bar0->rx_w_round_robin_4);
1605
1606                 val64 = 0x8080404020201010ULL;
1607                 writeq(val64, &bar0->rts_qos_steering);
1608                 break;
1609         case 5:
1610                 val64 = 0x0001020304000102ULL;
1611                 writeq(val64, &bar0->rx_w_round_robin_0);
1612                 val64 = 0x0304000102030400ULL;
1613                 writeq(val64, &bar0->rx_w_round_robin_1);
1614                 val64 = 0x0102030400010203ULL;
1615                 writeq(val64, &bar0->rx_w_round_robin_2);
1616                 val64 = 0x0400010203040001ULL;
1617                 writeq(val64, &bar0->rx_w_round_robin_3);
1618                 val64 = 0x0203040000000000ULL;
1619                 writeq(val64, &bar0->rx_w_round_robin_4);
1620
1621                 val64 = 0x8080404020201008ULL;
1622                 writeq(val64, &bar0->rts_qos_steering);
1623                 break;
1624         case 6:
1625                 val64 = 0x0001020304050001ULL;
1626                 writeq(val64, &bar0->rx_w_round_robin_0);
1627                 val64 = 0x0203040500010203ULL;
1628                 writeq(val64, &bar0->rx_w_round_robin_1);
1629                 val64 = 0x0405000102030405ULL;
1630                 writeq(val64, &bar0->rx_w_round_robin_2);
1631                 val64 = 0x0001020304050001ULL;
1632                 writeq(val64, &bar0->rx_w_round_robin_3);
1633                 val64 = 0x0203040500000000ULL;
1634                 writeq(val64, &bar0->rx_w_round_robin_4);
1635
1636                 val64 = 0x8080404020100804ULL;
1637                 writeq(val64, &bar0->rts_qos_steering);
1638                 break;
1639         case 7:
1640                 val64 = 0x0001020304050600ULL;
1641                 writeq(val64, &bar0->rx_w_round_robin_0);
1642                 val64 = 0x0102030405060001ULL;
1643                 writeq(val64, &bar0->rx_w_round_robin_1);
1644                 val64 = 0x0203040506000102ULL;
1645                 writeq(val64, &bar0->rx_w_round_robin_2);
1646                 val64 = 0x0304050600010203ULL;
1647                 writeq(val64, &bar0->rx_w_round_robin_3);
1648                 val64 = 0x0405060000000000ULL;
1649                 writeq(val64, &bar0->rx_w_round_robin_4);
1650
1651                 val64 = 0x8080402010080402ULL;
1652                 writeq(val64, &bar0->rts_qos_steering);
1653                 break;
1654         case 8:
1655                 val64 = 0x0001020304050607ULL;
1656                 writeq(val64, &bar0->rx_w_round_robin_0);
1657                 writeq(val64, &bar0->rx_w_round_robin_1);
1658                 writeq(val64, &bar0->rx_w_round_robin_2);
1659                 writeq(val64, &bar0->rx_w_round_robin_3);
1660                 val64 = 0x0001020300000000ULL;
1661                 writeq(val64, &bar0->rx_w_round_robin_4);
1662
1663                 val64 = 0x8040201008040201ULL;
1664                 writeq(val64, &bar0->rts_qos_steering);
1665                 break;
1666         }
1667
1668         /* UDP Fix */
1669         val64 = 0;
1670         for (i = 0; i < 8; i++)
1671                 writeq(val64, &bar0->rts_frm_len_n[i]);
1672
1673         /* Set the default rts frame length for the rings configured */
1674         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1675         for (i = 0 ; i < config->rx_ring_num ; i++)
1676                 writeq(val64, &bar0->rts_frm_len_n[i]);
1677
1678         /* Set the frame length for the configured rings
1679          * desired by the user
1680          */
1681         for (i = 0; i < config->rx_ring_num; i++) {
1682                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1683                  * specified frame length steering.
1684                  * If the user provides the frame length then program
1685                  * the rts_frm_len register for those values or else
1686                  * leave it as it is.
1687                  */
1688                 if (rts_frm_len[i] != 0) {
1689                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1690                                 &bar0->rts_frm_len_n[i]);
1691                 }
1692         }
1693
1694         /* Disable differentiated services steering logic */
1695         for (i = 0; i < 64; i++) {
1696                 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1697                         DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1698                                 dev->name);
1699                         DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1700                         return -ENODEV;
1701                 }
1702         }
1703
1704         /* Program statistics memory */
1705         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1706
1707         if (nic->device_type == XFRAME_II_DEVICE) {
1708                 val64 = STAT_BC(0x320);
1709                 writeq(val64, &bar0->stat_byte_cnt);
1710         }
1711
1712         /*
1713          * Initializing the sampling rate for the device to calculate the
1714          * bandwidth utilization.
1715          */
1716         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1717             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1718         writeq(val64, &bar0->mac_link_util);
1719
1720         /*
1721          * Initializing the Transmit and Receive Traffic Interrupt
1722          * Scheme.
1723          */
1724
1725         /* Initialize TTI */
1726         if (SUCCESS != init_tti(nic, nic->last_link_state))
1727                 return -ENODEV;
1728
1729         /* RTI Initialization */
1730         if (nic->device_type == XFRAME_II_DEVICE) {
1731                 /*
1732                  * Programmed to generate Apprx 500 Intrs per
1733                  * second
1734                  */
1735                 int count = (nic->config.bus_speed * 125)/4;
1736                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1737         } else
1738                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1739         val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1740                  RTI_DATA1_MEM_RX_URNG_B(0x10) |
1741                  RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1742
1743         writeq(val64, &bar0->rti_data1_mem);
1744
1745         val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1746                 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1747         if (nic->config.intr_type == MSI_X)
1748             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1749                         RTI_DATA2_MEM_RX_UFC_D(0x40));
1750         else
1751             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1752                         RTI_DATA2_MEM_RX_UFC_D(0x80));
1753         writeq(val64, &bar0->rti_data2_mem);
1754
1755         for (i = 0; i < config->rx_ring_num; i++) {
1756                 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1757                                 | RTI_CMD_MEM_OFFSET(i);
1758                 writeq(val64, &bar0->rti_command_mem);
1759
1760                 /*
1761                  * Once the operation completes, the Strobe bit of the
1762                  * command register will be reset. We poll for this
1763                  * particular condition. We wait for a maximum of 500ms
1764                  * for the operation to complete, if it's not complete
1765                  * by then we return error.
1766                  */
1767                 time = 0;
1768                 while (TRUE) {
1769                         val64 = readq(&bar0->rti_command_mem);
1770                         if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1771                                 break;
1772
1773                         if (time > 10) {
1774                                 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1775                                           dev->name);
1776                                 return -ENODEV;
1777                         }
1778                         time++;
1779                         msleep(50);
1780                 }
1781         }
1782
1783         /*
1784          * Initializing proper values as Pause threshold into all
1785          * the 8 Queues on Rx side.
1786          */
1787         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1788         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1789
1790         /* Disable RMAC PAD STRIPPING */
1791         add = &bar0->mac_cfg;
1792         val64 = readq(&bar0->mac_cfg);
1793         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1794         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1795         writel((u32) (val64), add);
1796         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1797         writel((u32) (val64 >> 32), (add + 4));
1798         val64 = readq(&bar0->mac_cfg);
1799
1800         /* Enable FCS stripping by adapter */
1801         add = &bar0->mac_cfg;
1802         val64 = readq(&bar0->mac_cfg);
1803         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1804         if (nic->device_type == XFRAME_II_DEVICE)
1805                 writeq(val64, &bar0->mac_cfg);
1806         else {
1807                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1808                 writel((u32) (val64), add);
1809                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1810                 writel((u32) (val64 >> 32), (add + 4));
1811         }
1812
1813         /*
1814          * Set the time value to be inserted in the pause frame
1815          * generated by xena.
1816          */
1817         val64 = readq(&bar0->rmac_pause_cfg);
1818         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1819         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1820         writeq(val64, &bar0->rmac_pause_cfg);
1821
1822         /*
1823          * Set the Threshold Limit for Generating the pause frame
1824          * If the amount of data in any Queue exceeds ratio of
1825          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1826          * pause frame is generated
1827          */
1828         val64 = 0;
1829         for (i = 0; i < 4; i++) {
1830                 val64 |=
1831                     (((u64) 0xFF00 | nic->mac_control.
1832                       mc_pause_threshold_q0q3)
1833                      << (i * 2 * 8));
1834         }
1835         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1836
1837         val64 = 0;
1838         for (i = 0; i < 4; i++) {
1839                 val64 |=
1840                     (((u64) 0xFF00 | nic->mac_control.
1841                       mc_pause_threshold_q4q7)
1842                      << (i * 2 * 8));
1843         }
1844         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1845
1846         /*
1847          * TxDMA will stop Read request if the number of read split has
1848          * exceeded the limit pointed by shared_splits
1849          */
1850         val64 = readq(&bar0->pic_control);
1851         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1852         writeq(val64, &bar0->pic_control);
1853
1854         if (nic->config.bus_speed == 266) {
1855                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1856                 writeq(0x0, &bar0->read_retry_delay);
1857                 writeq(0x0, &bar0->write_retry_delay);
1858         }
1859
1860         /*
1861          * Programming the Herc to split every write transaction
1862          * that does not start on an ADB to reduce disconnects.
1863          */
1864         if (nic->device_type == XFRAME_II_DEVICE) {
1865                 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1866                         MISC_LINK_STABILITY_PRD(3);
1867                 writeq(val64, &bar0->misc_control);
1868                 val64 = readq(&bar0->pic_control2);
1869                 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1870                 writeq(val64, &bar0->pic_control2);
1871         }
1872         if (strstr(nic->product_name, "CX4")) {
1873                 val64 = TMAC_AVG_IPG(0x17);
1874                 writeq(val64, &bar0->tmac_avg_ipg);
1875         }
1876
1877         return SUCCESS;
1878 }
1879 #define LINK_UP_DOWN_INTERRUPT          1
1880 #define MAC_RMAC_ERR_TIMER              2
1881
1882 static int s2io_link_fault_indication(struct s2io_nic *nic)
1883 {
1884         if (nic->device_type == XFRAME_II_DEVICE)
1885                 return LINK_UP_DOWN_INTERRUPT;
1886         else
1887                 return MAC_RMAC_ERR_TIMER;
1888 }
1889
1890 /**
1891  *  do_s2io_write_bits -  update alarm bits in alarm register
1892  *  @value: alarm bits
1893  *  @flag: interrupt status
1894  *  @addr: address value
1895  *  Description: update alarm bits in alarm register
1896  *  Return Value:
1897  *  NONE.
1898  */
1899 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1900 {
1901         u64 temp64;
1902
1903         temp64 = readq(addr);
1904
1905         if(flag == ENABLE_INTRS)
1906                 temp64 &= ~((u64) value);
1907         else
1908                 temp64 |= ((u64) value);
1909         writeq(temp64, addr);
1910 }
1911
/*
 * en_dis_err_alarms - enable or disable error/alarm interrupt sources
 * @nic: device private structure
 * @mask: bitmask of interrupt blocks to act on (TX_DMA_INTR, TX_MAC_INTR,
 *        TX_XGXS_INTR, RX_DMA_INTR, RX_MAC_INTR, RX_XGXS_INTR, MC_INTR)
 * @flag: ENABLE_INTRS or DISABLE_INTRS, forwarded to do_s2io_write_bits()
 *        which clears (unmasks) or sets (masks) the listed alarm bits
 *
 * All general interrupts are masked first, then each selected block has
 * its per-block alarm mask registers programmed.  The accumulated
 * top-level mask is stored in nic->general_int_mask for later use.
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;
	u64 interruptible;

	/* Mask everything at the top level while the per-block masks change */
	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
	if (mask & TX_DMA_INTR) {

		gen_int_mask |= TXDMA_INT_M;

		/* TxDMA sub-block summary interrupts */
		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				TXDMA_PCC_INT | TXDMA_TTI_INT |
				TXDMA_LSO_INT | TXDMA_TPA_INT |
				TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		/* PFC (packet frame control) alarms */
		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				&bar0->pfc_err_mask);

		/* TDA (transmit descriptor arbiter) alarms */
		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		/* PCC (per-channel controller) alarms */
		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				PCC_N_SERR | PCC_6_COF_OV_ERR |
				PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);

		/* TTI (transmit traffic interrupt) alarms */
		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		/* LSO engine alarms (engines 6 and 7) */
		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				flag, &bar0->lso_err_mask);

		/* TPA (transmit protocol assist) alarms */
		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				flag, &bar0->tpa_err_mask);

		/* SM (state machine) alarm */
		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);

	}

	if (mask & TX_MAC_INTR) {
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				&bar0->mac_int_mask);
		/* TMAC buffer overrun, state machine and ECC alarms */
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				flag, &bar0->mac_tmac_err_mask);
	}

	if (mask & TX_XGXS_INTR) {
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				&bar0->xgxs_int_mask);
		/* Tx XGXS (10G serdes interface) alarms */
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				flag, &bar0->xgxs_txgxs_err_mask);
	}

	if (mask & RX_DMA_INTR) {
		gen_int_mask |= RXDMA_INT_M;
		/* RxDMA sub-block summary interrupts */
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				flag, &bar0->rxdma_int_mask);
		/* RC (receive controller) alarms */
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		/* PRC PCI-X abort / data-parity alarms, per ring */
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				&bar0->prc_pcix_err_mask);
		/* RPA (receive protocol assist) alarms */
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				&bar0->rpa_err_mask);
		/* RDA (receive descriptor arbiter) alarms */
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
				flag, &bar0->rda_err_mask);
		/* RTI (receive traffic interrupt) alarms */
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				flag, &bar0->rti_err_mask);
	}

	if (mask & RX_MAC_INTR) {
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				&bar0->mac_int_mask);
		interruptible = RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				RMAC_DOUBLE_ECC_ERR;
		/* Link-state change is an alarm only when faults are
		 * reported via the RMAC error timer (Xframe I). */
		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
		do_s2io_write_bits(interruptible,
				flag, &bar0->mac_rmac_err_mask);
	}

	if (mask & RX_XGXS_INTR)
	{
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				&bar0->xgxs_int_mask);
		/* Rx XGXS (10G serdes interface) alarms */
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				&bar0->xgxs_rxgxs_err_mask);
	}

	if (mask & MC_INTR) {
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
		/* Memory controller ECC / PLL alarms */
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				&bar0->mc_err_mask);
	}
	nic->general_int_mask = gen_int_mask;

	/* Remove this line when alarm interrupts are enabled */
	nic->general_int_mask = 0;
}
2038 /**
2039  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
2040  *  @nic: device private variable,
2041  *  @mask: A mask indicating which Intr block must be modified and,
2042  *  @flag: A flag indicating whether to enable or disable the Intrs.
2043  *  Description: This function will either disable or enable the interrupts
2044  *  depending on the flag argument. The mask argument can be used to
2045  *  enable/disable any Intr block.
2046  *  Return Value: NONE.
2047  */
2048
2049 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
2050 {
2051         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2052         register u64 temp64 = 0, intr_mask = 0;
2053
2054         intr_mask = nic->general_int_mask;
2055
2056         /*  Top level interrupt classification */
2057         /*  PIC Interrupts */
2058         if (mask & TX_PIC_INTR) {
2059                 /*  Enable PIC Intrs in the general intr mask register */
2060                 intr_mask |= TXPIC_INT_M;
2061                 if (flag == ENABLE_INTRS) {
2062                         /*
2063                          * If Hercules adapter enable GPIO otherwise
2064                          * disable all PCIX, Flash, MDIO, IIC and GPIO
2065                          * interrupts for now.
2066                          * TODO
2067                          */
2068                         if (s2io_link_fault_indication(nic) ==
2069                                         LINK_UP_DOWN_INTERRUPT ) {
2070                                 do_s2io_write_bits(PIC_INT_GPIO, flag,
2071                                                 &bar0->pic_int_mask);
2072                                 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2073                                                 &bar0->gpio_int_mask);
2074                         } else
2075                                 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2076                 } else if (flag == DISABLE_INTRS) {
2077                         /*
2078                          * Disable PIC Intrs in the general
2079                          * intr mask register
2080                          */
2081                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2082                 }
2083         }
2084
2085         /*  Tx traffic interrupts */
2086         if (mask & TX_TRAFFIC_INTR) {
2087                 intr_mask |= TXTRAFFIC_INT_M;
2088                 if (flag == ENABLE_INTRS) {
2089                         /*
2090                          * Enable all the Tx side interrupts
2091                          * writing 0 Enables all 64 TX interrupt levels
2092                          */
2093                         writeq(0x0, &bar0->tx_traffic_mask);
2094                 } else if (flag == DISABLE_INTRS) {
2095                         /*
2096                          * Disable Tx Traffic Intrs in the general intr mask
2097                          * register.
2098                          */
2099                         writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2100                 }
2101         }
2102
2103         /*  Rx traffic interrupts */
2104         if (mask & RX_TRAFFIC_INTR) {
2105                 intr_mask |= RXTRAFFIC_INT_M;
2106                 if (flag == ENABLE_INTRS) {
2107                         /* writing 0 Enables all 8 RX interrupt levels */
2108                         writeq(0x0, &bar0->rx_traffic_mask);
2109                 } else if (flag == DISABLE_INTRS) {
2110                         /*
2111                          * Disable Rx Traffic Intrs in the general intr mask
2112                          * register.
2113                          */
2114                         writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2115                 }
2116         }
2117
2118         temp64 = readq(&bar0->general_int_mask);
2119         if (flag == ENABLE_INTRS)
2120                 temp64 &= ~((u64) intr_mask);
2121         else
2122                 temp64 = DISABLE_ALL_INTRS;
2123         writeq(temp64, &bar0->general_int_mask);
2124
2125         nic->general_int_mask = readq(&bar0->general_int_mask);
2126 }
2127
2128 /**
2129  *  verify_pcc_quiescent- Checks for PCC quiescent state
2130  *  Return: 1 If PCC is quiescence
2131  *          0 If PCC is not quiescence
2132  */
2133 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2134 {
2135         int ret = 0, herc;
2136         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2137         u64 val64 = readq(&bar0->adapter_status);
2138
2139         herc = (sp->device_type == XFRAME_II_DEVICE);
2140
2141         if (flag == FALSE) {
2142                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2143                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2144                                 ret = 1;
2145                 } else {
2146                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2147                                 ret = 1;
2148                 }
2149         } else {
2150                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2151                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2152                              ADAPTER_STATUS_RMAC_PCC_IDLE))
2153                                 ret = 1;
2154                 } else {
2155                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2156                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2157                                 ret = 1;
2158                 }
2159         }
2160
2161         return ret;
2162 }
2163 /**
2164  *  verify_xena_quiescence - Checks whether the H/W is ready
2165  *  Description: Returns whether the H/W is ready to go or not. Depending
2166  *  on whether adapter enable bit was written or not the comparison
2167  *  differs and the calling function passes the input argument flag to
2168  *  indicate this.
2169  *  Return: 1 If xena is quiescence
2170  *          0 If Xena is not quiescence
2171  */
2172
2173 static int verify_xena_quiescence(struct s2io_nic *sp)
2174 {
2175         int  mode;
2176         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2177         u64 val64 = readq(&bar0->adapter_status);
2178         mode = s2io_verify_pci_mode(sp);
2179
2180         if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2181                 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2182                 return 0;
2183         }
2184         if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2185         DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2186                 return 0;
2187         }
2188         if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2189                 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2190                 return 0;
2191         }
2192         if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2193                 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2194                 return 0;
2195         }
2196         if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2197                 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2198                 return 0;
2199         }
2200         if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2201                 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2202                 return 0;
2203         }
2204         if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2205                 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2206                 return 0;
2207         }
2208         if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2209                 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2210                 return 0;
2211         }
2212
2213         /*
2214          * In PCI 33 mode, the P_PLL is not used, and therefore,
2215          * the the P_PLL_LOCK bit in the adapter_status register will
2216          * not be asserted.
2217          */
2218         if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2219                 sp->device_type == XFRAME_II_DEVICE && mode !=
2220                 PCI_MODE_PCI_33) {
2221                 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2222                 return 0;
2223         }
2224         if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2225                         ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2226                 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2227                 return 0;
2228         }
2229         return 1;
2230 }
2231
2232 /**
2233  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2234  * @sp: Pointer to device specifc structure
2235  * Description :
2236  * New procedure to clear mac address reading  problems on Alpha platforms
2237  *
2238  */
2239
2240 static void fix_mac_address(struct s2io_nic * sp)
2241 {
2242         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2243         u64 val64;
2244         int i = 0;
2245
2246         while (fix_mac[i] != END_SIGN) {
2247                 writeq(fix_mac[i++], &bar0->gpio_control);
2248                 udelay(10);
2249                 val64 = readq(&bar0->gpio_control);
2250         }
2251 }
2252
2253 /**
2254  *  start_nic - Turns the device on
2255  *  @nic : device private variable.
2256  *  Description:
2257  *  This function actually turns the device on. Before this  function is
2258  *  called,all Registers are configured from their reset states
2259  *  and shared memory is allocated but the NIC is still quiescent. On
2260  *  calling this function, the device interrupts are cleared and the NIC is
2261  *  literally switched on by writing into the adapter control register.
2262  *  Return Value:
2263  *  SUCCESS on success and -1 on failure.
2264  */
2265
static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*  PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Point each PRC at the first Rx block of its ring. */
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		/* Clear the whole backoff-interval field, then set 0x1000. */
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	if (vlan_tag_strip == 0) {
		/* Module parameter asked for VLAN tags to be left intact. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/*
	 * NOTE(review): the original comment here said "Enabling ECC
	 * Protection" but the code clears ADAPTER_ECC_EN - confirm the
	 * polarity of this bit against the Xframe register spec.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Don't see link state interrupts initially on some
		 * switches, so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		/* Magic write at raw offset 0x2700 per the SXE-002 errata. */
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
2371 /**
2372  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2373  */
2374 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2375                                         TxD *txdlp, int get_off)
2376 {
2377         struct s2io_nic *nic = fifo_data->nic;
2378         struct sk_buff *skb;
2379         struct TxD *txds;
2380         u16 j, frg_cnt;
2381
2382         txds = txdlp;
2383         if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2384                 pci_unmap_single(nic->pdev, (dma_addr_t)
2385                         txds->Buffer_Pointer, sizeof(u64),
2386                         PCI_DMA_TODEVICE);
2387                 txds++;
2388         }
2389
2390         skb = (struct sk_buff *) ((unsigned long)
2391                         txds->Host_Control);
2392         if (!skb) {
2393                 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2394                 return NULL;
2395         }
2396         pci_unmap_single(nic->pdev, (dma_addr_t)
2397                          txds->Buffer_Pointer,
2398                          skb->len - skb->data_len,
2399                          PCI_DMA_TODEVICE);
2400         frg_cnt = skb_shinfo(skb)->nr_frags;
2401         if (frg_cnt) {
2402                 txds++;
2403                 for (j = 0; j < frg_cnt; j++, txds++) {
2404                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2405                         if (!txds->Buffer_Pointer)
2406                                 break;
2407                         pci_unmap_page(nic->pdev, (dma_addr_t)
2408                                         txds->Buffer_Pointer,
2409                                        frag->size, PCI_DMA_TODEVICE);
2410                 }
2411         }
2412         memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2413         return(skb);
2414 }
2415
2416 /**
2417  *  free_tx_buffers - Free all queued Tx buffers
2418  *  @nic : device private variable.
2419  *  Description:
2420  *  Free all queued Tx buffers.
2421  *  Return Value: void
2422 */
2423
2424 static void free_tx_buffers(struct s2io_nic *nic)
2425 {
2426         struct net_device *dev = nic->dev;
2427         struct sk_buff *skb;
2428         struct TxD *txdp;
2429         int i, j;
2430         struct mac_info *mac_control;
2431         struct config_param *config;
2432         int cnt = 0;
2433
2434         mac_control = &nic->mac_control;
2435         config = &nic->config;
2436
2437         for (i = 0; i < config->tx_fifo_num; i++) {
2438                 unsigned long flags;
2439                 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags);
2440                 for (j = 0; j < config->tx_cfg[i].fifo_len; j++) {
2441                         txdp = (struct TxD *) \
2442                         mac_control->fifos[i].list_info[j].list_virt_addr;
2443                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2444                         if (skb) {
2445                                 nic->mac_control.stats_info->sw_stat.mem_freed
2446                                         += skb->truesize;
2447                                 dev_kfree_skb(skb);
2448                                 cnt++;
2449                         }
2450                 }
2451                 DBG_PRINT(INTR_DBG,
2452                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2453                           dev->name, cnt, i);
2454                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2455                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2456                 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock, flags);
2457         }
2458 }
2459
2460 /**
2461  *   stop_nic -  To stop the nic
2462  *   @nic ; device private variable.
2463  *   Description:
2464  *   This function does exactly the opposite of what the start_nic()
2465  *   function does. This function is called to stop the device.
2466  *   Return Value:
2467  *   void.
2468  */
2469
2470 static void stop_nic(struct s2io_nic *nic)
2471 {
2472         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2473         register u64 val64 = 0;
2474         u16 interruptible;
2475         struct mac_info *mac_control;
2476         struct config_param *config;
2477
2478         mac_control = &nic->mac_control;
2479         config = &nic->config;
2480
2481         /*  Disable all interrupts */
2482         en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2483         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2484         interruptible |= TX_PIC_INTR;
2485         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2486
2487         /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2488         val64 = readq(&bar0->adapter_control);
2489         val64 &= ~(ADAPTER_CNTL_EN);
2490         writeq(val64, &bar0->adapter_control);
2491 }
2492
2493 /**
2494  *  fill_rx_buffers - Allocates the Rx side skbs
2495  *  @ring_info: per ring structure
2496  *  @from_card_up: If this is true, we will map the buffer to get
2497  *     the dma address for buf0 and buf1 to give it to the card.
2498  *     Else we will sync the already mapped buffer to give it to the card.
2499  *  Description:
2500  *  The function allocates Rx side skbs and puts the physical
2501  *  address of these buffers into the RxD buffer pointers, so that the NIC
2502  *  can DMA the received frame into these locations.
2503  *  The NIC supports 3 receive modes, viz
2504  *  1. single buffer,
2505  *  2. three buffer and
2506  *  3. Five buffer modes.
2507  *  Each mode defines how many fragments the received frame will be split
2508  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2509  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2510  *  is split into 3 fragments. As of now only single buffer mode is
2511  *  supported.
2512  *   Return Value:
2513  *  SUCCESS on success or an appropriate -ve value on failure.
2514  */
2515
static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
{
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	u64 tmp;
	struct buffAdd *ba;
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	int rxd_index = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *stats = &ring->nic->mac_control.stats_info->sw_stat;

	/* Number of descriptors that still need a buffer on this ring. */
	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;

	block_no1 = ring->rx_curr_get_info.block_index;
	while (alloc_tab < alloc_cnt) {
		block_no = ring->rx_curr_put_info.block_index;

		off = ring->rx_curr_put_info.offset;

		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;

		/* NOTE(review): rxd_index is computed but never used below. */
		rxd_index = off + 1;
		if (block_no)
			rxd_index += (block_no * ring->rxd_count);

		/*
		 * The put pointer has caught up with the get pointer while
		 * the descriptor still holds an skb: ring is full, stop.
		 */
		if ((block_no == block_no1) &&
			(off == ring->rx_curr_get_info.offset) &&
			(rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put",
				ring->dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		/*
		 * Past the last usable RxD of this block - advance (and
		 * possibly wrap) the put pointer to the next block's first
		 * descriptor.
		 */
		if (off && (off == ring->rxd_count)) {
			ring->rx_curr_put_info.block_index++;
			if (ring->rx_curr_put_info.block_index ==
							ring->block_count)
				ring->rx_curr_put_info.block_index = 0;
			block_no = ring->rx_curr_put_info.block_index;
			off = 0;
			ring->rx_curr_put_info.offset = off;
			rxdp = ring->rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  ring->dev->name, rxdp);

		}

		/* Descriptor still owned by the NIC (3B mode): stop here. */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
			((ring->rxd_mode == RXD_MODE_3B) &&
				(rxdp->Control_2 & s2BIT(0)))) {
			ring->rx_curr_put_info.offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = ring->mtu + HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (ring->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if(!skb) {
			DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name);
			DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
			/*
			 * Hand any descriptors filled so far to the NIC
			 * before bailing out.
			 */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			stats->mem_alloc_fail_cnt++;

			return -ENOMEM ;
		}
		stats->mem_allocated += skb->truesize;

		if (ring->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1*)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr = pci_map_single
			    (ring->pdev, skb->data, size - NET_IP_ALIGN,
				PCI_DMA_FROMDEVICE);
			if(pci_dma_mapping_error(rxdp1->Buffer0_ptr))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
			rxdp->Host_Control = (unsigned long) (skb);
		} else if (ring->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3*)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &ring->ba[block_no][off];
			/*
			 * Round skb->data up to the next alignment boundary
			 * (assumes ALIGN_SIZE is a 2^n - 1 mask - TODO
			 * confirm against the header definition).
			 */
			skb_reserve(skb, BUF0_LEN);
			tmp = (u64)(unsigned long) skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			/*
			 * Map Buffer0 fresh at card-up; otherwise just sync
			 * the already-mapped buffer back to the device.
			 */
			if (from_card_up) {
				rxdp3->Buffer0_ptr =
				   pci_map_single(ring->pdev, ba->ba_0,
					BUF0_LEN, PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(rxdp3->Buffer0_ptr))
					goto pci_map_failed;
			} else
				pci_dma_sync_single_for_device(ring->pdev,
				(dma_addr_t) rxdp3->Buffer0_ptr,
				    BUF0_LEN, PCI_DMA_FROMDEVICE);

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			/* NOTE(review): this test is always true inside the
			 * enclosing RXD_MODE_3B branch. */
			if (ring->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = pci_map_single
				(ring->pdev, skb->data, ring->mtu + 4,
						PCI_DMA_FROMDEVICE);

				if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
					goto pci_map_failed;

				if (from_card_up) {
					rxdp3->Buffer1_ptr =
						pci_map_single(ring->pdev,
						ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);

					if (pci_dma_mapping_error
						(rxdp3->Buffer1_ptr)) {
						/* Undo the Buffer2 mapping
						 * of skb->data made above. */
						pci_unmap_single
							(ring->pdev,
						    (dma_addr_t)(unsigned long)
							skb->data,
							ring->mtu + 4,
							PCI_DMA_FROMDEVICE);
						goto pci_map_failed;
					}
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
								(ring->mtu + 4);
			}
			rxdp->Control_2 |= s2BIT(0);
			rxdp->Host_Control = (unsigned long) (skb);
		}
		/*
		 * Every (1 << rxsync_frequency)-th descriptor is held back
		 * as a sync point (see first_rxdp below); the rest are
		 * handed to the NIC immediately.
		 */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (ring->rxd_count + 1))
			off = 0;
		ring->rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			/* Release the previous sync point, then hold this
			 * descriptor back as the new one. */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		ring->rx_bufs_left += 1;
		alloc_tab++;
	}

      end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;
pci_map_failed:
	stats->pci_map_fail_cnt++;
	stats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2721
2722 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2723 {
2724         struct net_device *dev = sp->dev;
2725         int j;
2726         struct sk_buff *skb;
2727         struct RxD_t *rxdp;
2728         struct mac_info *mac_control;
2729         struct buffAdd *ba;
2730         struct RxD1 *rxdp1;
2731         struct RxD3 *rxdp3;
2732
2733         mac_control = &sp->mac_control;
2734         for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2735                 rxdp = mac_control->rings[ring_no].
2736                                 rx_blocks[blk].rxds[j].virt_addr;
2737                 skb = (struct sk_buff *)
2738                         ((unsigned long) rxdp->Host_Control);
2739                 if (!skb) {
2740                         continue;
2741                 }
2742                 if (sp->rxd_mode == RXD_MODE_1) {
2743                         rxdp1 = (struct RxD1*)rxdp;
2744                         pci_unmap_single(sp->pdev, (dma_addr_t)
2745                                 rxdp1->Buffer0_ptr,
2746                                 dev->mtu +
2747                                 HEADER_ETHERNET_II_802_3_SIZE
2748                                 + HEADER_802_2_SIZE +
2749                                 HEADER_SNAP_SIZE,
2750                                 PCI_DMA_FROMDEVICE);
2751                         memset(rxdp, 0, sizeof(struct RxD1));
2752                 } else if(sp->rxd_mode == RXD_MODE_3B) {
2753                         rxdp3 = (struct RxD3*)rxdp;
2754                         ba = &mac_control->rings[ring_no].
2755                                 ba[blk][j];
2756                         pci_unmap_single(sp->pdev, (dma_addr_t)
2757                                 rxdp3->Buffer0_ptr,
2758                                 BUF0_LEN,
2759                                 PCI_DMA_FROMDEVICE);
2760                         pci_unmap_single(sp->pdev, (dma_addr_t)
2761                                 rxdp3->Buffer1_ptr,
2762                                 BUF1_LEN,
2763                                 PCI_DMA_FROMDEVICE);
2764                         pci_unmap_single(sp->pdev, (dma_addr_t)
2765                                 rxdp3->Buffer2_ptr,
2766                                 dev->mtu + 4,
2767                                 PCI_DMA_FROMDEVICE);
2768                         memset(rxdp, 0, sizeof(struct RxD3));
2769                 }
2770                 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2771                 dev_kfree_skb(skb);
2772                 mac_control->rings[ring_no].rx_bufs_left -= 1;
2773         }
2774 }
2775
2776 /**
2777  *  free_rx_buffers - Frees all Rx buffers
2778  *  @sp: device private variable.
2779  *  Description:
2780  *  This function will free all Rx buffers allocated by host.
2781  *  Return Value:
2782  *  NONE.
2783  */
2784
2785 static void free_rx_buffers(struct s2io_nic *sp)
2786 {
2787         struct net_device *dev = sp->dev;
2788         int i, blk = 0, buf_cnt = 0;
2789         struct mac_info *mac_control;
2790         struct config_param *config;
2791
2792         mac_control = &sp->mac_control;
2793         config = &sp->config;
2794
2795         for (i = 0; i < config->rx_ring_num; i++) {
2796                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2797                         free_rxd_blk(sp,i,blk);
2798
2799                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2800                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2801                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2802                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2803                 mac_control->rings[i].rx_bufs_left = 0;
2804                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2805                           dev->name, buf_cnt, i);
2806         }
2807 }
2808
2809 static int s2io_chk_rx_buffers(struct ring_info *ring)
2810 {
2811         if (fill_rx_buffers(ring, 0) == -ENOMEM) {
2812                 DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
2813                 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
2814         }
2815         return 0;
2816 }
2817
2818 /**
2819  * s2io_poll - Rx interrupt handler for NAPI support
2820  * @napi : pointer to the napi structure.
2821  * @budget : The number of packets that were budgeted to be processed
2822  * during  one pass through the 'Poll" function.
2823  * Description:
2824  * Comes into picture only if NAPI support has been incorporated. It does
2825  * the same thing that rx_intr_handler does, but not in a interrupt context
2826  * also It will process only a given number of packets.
2827  * Return value:
2828  * 0 on success and 1 if there are No Rx packets to be processed.
2829  */
2830
2831 static int s2io_poll_msix(struct napi_struct *napi, int budget)
2832 {
2833         struct ring_info *ring = container_of(napi, struct ring_info, napi);
2834         struct net_device *dev = ring->dev;
2835         struct config_param *config;
2836         struct mac_info *mac_control;
2837         int pkts_processed = 0;
2838         u8 __iomem *addr = NULL;
2839         u8 val8 = 0;
2840         struct s2io_nic *nic = dev->priv;
2841         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2842         int budget_org = budget;
2843
2844         config = &nic->config;
2845         mac_control = &nic->mac_control;
2846
2847         if (unlikely(!is_s2io_card_up(nic)))
2848                 return 0;
2849
2850         pkts_processed = rx_intr_handler(ring, budget);
2851         s2io_chk_rx_buffers(ring);
2852
2853         if (pkts_processed < budget_org) {
2854                 netif_rx_complete(dev, napi);
2855                 /*Re Enable MSI-Rx Vector*/
2856                 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2857                 addr += 7 - ring->ring_no;
2858                 val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2859                 writeb(val8, addr);
2860                 val8 = readb(addr);
2861         }
2862         return pkts_processed;
2863 }
2864 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2865 {
2866         struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2867         struct ring_info *ring;
2868         struct net_device *dev = nic->dev;
2869         struct config_param *config;
2870         struct mac_info *mac_control;
2871         int pkts_processed = 0;
2872         int ring_pkts_processed, i;
2873         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2874         int budget_org = budget;
2875
2876         config = &nic->config;
2877         mac_control = &nic->mac_control;
2878
2879         if (unlikely(!is_s2io_card_up(nic)))
2880                 return 0;
2881
2882         for (i = 0; i < config->rx_ring_num; i++) {
2883                 ring = &mac_control->rings[i];
2884                 ring_pkts_processed = rx_intr_handler(ring, budget);
2885                 s2io_chk_rx_buffers(ring);
2886                 pkts_processed += ring_pkts_processed;
2887                 budget -= ring_pkts_processed;
2888                 if (budget <= 0)
2889                         break;
2890         }
2891         if (pkts_processed < budget_org) {
2892                 netif_rx_complete(dev, napi);
2893                 /* Re enable the Rx interrupts for the ring */
2894                 writeq(0, &bar0->rx_traffic_mask);
2895                 readl(&bar0->rx_traffic_mask);
2896         }
2897         return pkts_processed;
2898 }
2899
2900 #ifdef CONFIG_NET_POLL_CONTROLLER
2901 /**
2902  * s2io_netpoll - netpoll event handler entry point
2903  * @dev : pointer to the device structure.
2904  * Description:
2905  *      This function will be called by upper layer to check for events on the
2906  * interface in situations where interrupts are disabled. It is used for
2907  * specific in-kernel networking tasks, such as remote consoles and kernel
2908  * debugging over the network (example netdump in RedHat).
2909  */
2910 static void s2io_netpoll(struct net_device *dev)
2911 {
2912         struct s2io_nic *nic = dev->priv;
2913         struct mac_info *mac_control;
2914         struct config_param *config;
2915         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2916         u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2917         int i;
2918
2919         if (pci_channel_offline(nic->pdev))
2920                 return;
2921
2922         disable_irq(dev->irq);
2923
2924         mac_control = &nic->mac_control;
2925         config = &nic->config;
2926
2927         writeq(val64, &bar0->rx_traffic_int);
2928         writeq(val64, &bar0->tx_traffic_int);
2929
2930         /* we need to free up the transmitted skbufs or else netpoll will
2931          * run out of skbs and will fail and eventually netpoll application such
2932          * as netdump will fail.
2933          */
2934         for (i = 0; i < config->tx_fifo_num; i++)
2935                 tx_intr_handler(&mac_control->fifos[i]);
2936
2937         /* check for received packet and indicate up to network */
2938         for (i = 0; i < config->rx_ring_num; i++)
2939                 rx_intr_handler(&mac_control->rings[i], 0);
2940
2941         for (i = 0; i < config->rx_ring_num; i++) {
2942                 if (fill_rx_buffers(&mac_control->rings[i], 0) == -ENOMEM) {
2943                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2944                         DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2945                         break;
2946                 }
2947         }
2948         enable_irq(dev->irq);
2949         return;
2950 }
2951 #endif
2952
2953 /**
2954  *  rx_intr_handler - Rx interrupt handler
2955  *  @ring_info: per ring structure.
2956  *  @budget: budget for napi processing.
2957  *  Description:
2958  *  If the interrupt is because of a received frame or if the
2959  *  receive ring contains fresh as yet un-processed frames,this function is
2960  *  called. It picks out the RxD at which place the last Rx processing had
2961  *  stopped and sends the skb to the OSM's Rx handler and then increments
2962  *  the offset.
2963  *  Return Value:
2964  *  No. of napi packets processed.
2965  */
static int rx_intr_handler(struct ring_info *ring_data, int budget)
{
        int get_block, put_block;
        struct rx_curr_get_info get_info, put_info;
        struct RxD_t *rxdp;
        struct sk_buff *skb;
        int pkt_cnt = 0, napi_pkts = 0;
        int i;
        struct RxD1* rxdp1;
        struct RxD3* rxdp3;

        /* Work from local copies of the get/put cursors; the put side is
         * advanced elsewhere while we consume completed descriptors. */
        get_info = ring_data->rx_curr_get_info;
        get_block = get_info.block_index;
        memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
        put_block = put_info.block_index;
        rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;

        /* Walk descriptors the NIC has handed back to the host */
        while (RXD_IS_UP2DT(rxdp)) {
                /*
                 * If we are right behind the put index, the ring is
                 * full - stop processing here.
                 */
                if ((get_block == put_block) &&
                    (get_info.offset + 1) == put_info.offset) {
                        DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
                                ring_data->dev->name);
                        break;
                }
                skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
                if (skb == NULL) {
                        /* Descriptor without an skb: state is corrupt, bail */
                        DBG_PRINT(ERR_DBG, "%s: The skb is ",
                                  ring_data->dev->name);
                        DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
                        return 0;
                }
                /* Unmap (or sync) DMA buffers according to the RxD mode */
                if (ring_data->rxd_mode == RXD_MODE_1) {
                        rxdp1 = (struct RxD1*)rxdp;
                        pci_unmap_single(ring_data->pdev, (dma_addr_t)
                                rxdp1->Buffer0_ptr,
                                ring_data->mtu +
                                HEADER_ETHERNET_II_802_3_SIZE +
                                HEADER_802_2_SIZE +
                                HEADER_SNAP_SIZE,
                                PCI_DMA_FROMDEVICE);
                } else if (ring_data->rxd_mode == RXD_MODE_3B) {
                        rxdp3 = (struct RxD3*)rxdp;
                        /* Buffer0 (header) is only synced; Buffer2 (payload,
                         * mtu + 4 bytes) is fully unmapped. */
                        pci_dma_sync_single_for_cpu(ring_data->pdev, (dma_addr_t)
                                rxdp3->Buffer0_ptr,
                                BUF0_LEN, PCI_DMA_FROMDEVICE);
                        pci_unmap_single(ring_data->pdev, (dma_addr_t)
                                rxdp3->Buffer2_ptr,
                                ring_data->mtu + 4,
                                PCI_DMA_FROMDEVICE);
                }
                prefetch(skb->data);
                rx_osm_handler(ring_data, rxdp);
                /* Advance the get cursor, wrapping to the next block at the
                 * end of the current one. */
                get_info.offset++;
                ring_data->rx_curr_get_info.offset = get_info.offset;
                rxdp = ring_data->rx_blocks[get_block].
                                rxds[get_info.offset].virt_addr;
                if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
                        get_info.offset = 0;
                        ring_data->rx_curr_get_info.offset = get_info.offset;
                        get_block++;
                        if (get_block == ring_data->block_count)
                                get_block = 0;
                        ring_data->rx_curr_get_info.block_index = get_block;
                        rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
                }

                /* Under NAPI, stop once the budget is exhausted */
                if (ring_data->nic->config.napi) {
                        budget--;
                        napi_pkts++;
                        if (!budget)
                                break;
                }
                pkt_cnt++;
                /* Module-parameter cap on packets per invocation */
                if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
                        break;
        }
        if (ring_data->lro) {
                /* Clear all LRO sessions before exiting */
                for (i=0; i<MAX_LRO_SESSIONS; i++) {
                        struct lro *lro = &ring_data->lro0_n[i];
                        if (lro->in_use) {
                                update_L3L4_header(ring_data->nic, lro);
                                queue_rx_frame(lro->parent, lro->vlan_tag);
                                clear_lro_session(lro);
                        }
                }
        }
        return(napi_pkts);
}
3059
3060 /**
3061  *  tx_intr_handler - Transmit interrupt handler
3062  *  @nic : device private variable
3063  *  Description:
3064  *  If an interrupt was raised to indicate DMA complete of the
3065  *  Tx packet, this function is called. It identifies the last TxD
3066  *  whose buffer was freed and frees all skbs whose data have already
3067  *  DMA'ed into the NICs internal memory.
3068  *  Return Value:
3069  *  NONE
3070  */
3071
static void tx_intr_handler(struct fifo_info *fifo_data)
{
        struct s2io_nic *nic = fifo_data->nic;
        struct tx_curr_get_info get_info, put_info;
        struct sk_buff *skb = NULL;
        struct TxD *txdlp;
        int pkt_cnt = 0;
        unsigned long flags = 0;
        u8 err_mask;

        /* Best-effort: if the xmit path holds the fifo lock, just return;
         * completions will be reaped on a later invocation. */
        if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
                        return;

        get_info = fifo_data->tx_curr_get_info;
        memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
        txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
            list_virt_addr;
        /* Reap descriptors the NIC has released (ownership bit clear),
         * stopping at the put cursor or an empty descriptor. */
        while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
               (get_info.offset != put_info.offset) &&
               (txdlp->Host_Control)) {
                /* Check for TxD errors */
                if (txdlp->Control_1 & TXD_T_CODE) {
                        unsigned long long err;
                        err = txdlp->Control_1 & TXD_T_CODE;
                        if (err & 0x1) {
                                nic->mac_control.stats_info->sw_stat.
                                                parity_err_cnt++;
                        }

                        /* update t_code statistics; the transfer code sits in
                         * the upper bits of Control_1 (shifted down by 48) */
                        err_mask = err >> 48;
                        switch(err_mask) {
                                case 2:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_buf_abort_cnt++;
                                break;

                                case 3:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_desc_abort_cnt++;
                                break;

                                case 7:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_parity_err_cnt++;
                                break;

                                case 10:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_link_loss_cnt++;
                                break;

                                case 15:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_list_proc_err_cnt++;
                                break;
                        }
                }

                /* Recover the skb for this descriptor list and free it */
                skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
                if (skb == NULL) {
                        spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
                        DBG_PRINT(ERR_DBG, "%s: Null skb ",
                        __FUNCTION__);
                        DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
                        return;
                }
                pkt_cnt++;

                /* Updating the statistics block */
                nic->stats.tx_bytes += skb->len;
                nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
                dev_kfree_skb_irq(skb);

                /* Advance the get cursor, wrapping at fifo_len + 1 */
                get_info.offset++;
                if (get_info.offset == get_info.fifo_len + 1)
                        get_info.offset = 0;
                txdlp = (struct TxD *) fifo_data->list_info
                    [get_info.offset].list_virt_addr;
                fifo_data->tx_curr_get_info.offset =
                    get_info.offset;
        }

        /* Wake the (multi)queue if we freed enough descriptors */
        s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);

        spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
}
3159
3160 /**
3161  *  s2io_mdio_write - Function to write in to MDIO registers
3162  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3163  *  @addr     : address value
3164  *  @value    : data value
3165  *  @dev      : pointer to net_device structure
3166  *  Description:
3167  *  This function is used to write values to the MDIO registers
3168  *  NONE
3169  */
3170 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3171 {
3172         u64 val64 = 0x0;
3173         struct s2io_nic *sp = dev->priv;
3174         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3175
3176         //address transaction
3177         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3178                         | MDIO_MMD_DEV_ADDR(mmd_type)
3179                         | MDIO_MMS_PRT_ADDR(0x0);
3180         writeq(val64, &bar0->mdio_control);
3181         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3182         writeq(val64, &bar0->mdio_control);
3183         udelay(100);
3184
3185         //Data transaction
3186         val64 = 0x0;
3187         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3188                         | MDIO_MMD_DEV_ADDR(mmd_type)
3189                         | MDIO_MMS_PRT_ADDR(0x0)
3190                         | MDIO_MDIO_DATA(value)
3191                         | MDIO_OP(MDIO_OP_WRITE_TRANS);
3192         writeq(val64, &bar0->mdio_control);
3193         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3194         writeq(val64, &bar0->mdio_control);
3195         udelay(100);
3196
3197         val64 = 0x0;
3198         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3199         | MDIO_MMD_DEV_ADDR(mmd_type)
3200         | MDIO_MMS_PRT_ADDR(0x0)
3201         | MDIO_OP(MDIO_OP_READ_TRANS);
3202         writeq(val64, &bar0->mdio_control);
3203         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3204         writeq(val64, &bar0->mdio_control);
3205         udelay(100);
3206
3207 }
3208
3209 /**
 *  s2io_mdio_read - Function to read from the MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to read values from the MDIO registers
3216  *  NONE
3217  */
3218 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3219 {
3220         u64 val64 = 0x0;
3221         u64 rval64 = 0x0;
3222         struct s2io_nic *sp = dev->priv;
3223         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3224
3225         /* address transaction */
3226         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3227                         | MDIO_MMD_DEV_ADDR(mmd_type)
3228                         | MDIO_MMS_PRT_ADDR(0x0);
3229         writeq(val64, &bar0->mdio_control);
3230         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3231         writeq(val64, &bar0->mdio_control);
3232         udelay(100);
3233
3234         /* Data transaction */
3235         val64 = 0x0;
3236         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3237                         | MDIO_MMD_DEV_ADDR(mmd_type)
3238                         | MDIO_MMS_PRT_ADDR(0x0)
3239                         | MDIO_OP(MDIO_OP_READ_TRANS);
3240         writeq(val64, &bar0->mdio_control);
3241         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3242         writeq(val64, &bar0->mdio_control);
3243         udelay(100);
3244
3245         /* Read the value from regs */
3246         rval64 = readq(&bar0->mdio_control);
3247         rval64 = rval64 & 0xFFFF0000;
3248         rval64 = rval64 >> 16;
3249         return rval64;
3250 }
3251 /**
3252  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
 *  @counter      : counter value to be updated
3254  *  @flag         : flag to indicate the status
3255  *  @type         : counter type
3256  *  Description:
3257  *  This function is to check the status of the xpak counters value
3258  *  NONE
3259  */
3260
3261 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3262 {
3263         u64 mask = 0x3;
3264         u64 val64;
3265         int i;
3266         for(i = 0; i <index; i++)
3267                 mask = mask << 0x2;
3268
3269         if(flag > 0)
3270         {
3271                 *counter = *counter + 1;
3272                 val64 = *regs_stat & mask;
3273                 val64 = val64 >> (index * 0x2);
3274                 val64 = val64 + 1;
3275                 if(val64 == 3)
3276                 {
3277                         switch(type)
3278                         {
3279                         case 1:
3280                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3281                                           "service. Excessive temperatures may "
3282                                           "result in premature transceiver "
3283                                           "failure \n");
3284                         break;
3285                         case 2:
3286                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3287                                           "service Excessive bias currents may "
3288                                           "indicate imminent laser diode "
3289                                           "failure \n");
3290                         break;
3291                         case 3:
3292                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3293                                           "service Excessive laser output "
3294                                           "power may saturate far-end "
3295                                           "receiver\n");
3296                         break;
3297                         default:
3298                                 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3299                                           "type \n");
3300                         }
3301                         val64 = 0x0;
3302                 }
3303                 val64 = val64 << (index * 0x2);
3304                 *regs_stat = (*regs_stat & (~mask)) | (val64);
3305
3306         } else {
3307                 *regs_stat = *regs_stat & (~mask);
3308         }
3309 }
3310
3311 /**
3312  *  s2io_updt_xpak_counter - Function to update the xpak counters
3313  *  @dev         : pointer to net_device struct
3314  *  Description:
 *  This function is to update the status of the xpak counters value
3316  *  NONE
3317  */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
        u16 flag  = 0x0;
        u16 type  = 0x0;
        u16 val16 = 0x0;
        u64 val64 = 0x0;
        u64 addr  = 0x0;

        struct s2io_nic *sp = dev->priv;
        struct stat_block *stat_info = sp->mac_control.stats_info;

        /* Check the communication with the MDIO slave */
        addr = 0x0000;
        val64 = 0x0;
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
        if((val64 == 0xFFFF) || (val64 == 0x0000))
        {
                DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
                          "Returned %llx\n", (unsigned long long)val64);
                return;
        }

        /* Check for the expected value of 0x2040 at PMA address 0x0000 */
        if(val64 != 0x2040)
        {
                DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
                DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
                          (unsigned long long)val64);
                return;
        }

        /* Loading the DOM register to MDIO register
         * (val16 is 0 here; presumably a trigger write - TODO confirm) */
        addr = 0xA100;
        s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

        /* Reading the Alarm flags (register 0xA070) */
        addr = 0xA070;
        val64 = 0x0;
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

        /* Bit 7: transceiver temperature high alarm (tracked counter) */
        flag = CHECKBIT(val64, 0x7);
        type = 1;
        s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
                                &stat_info->xpak_stat.xpak_regs_stat,
                                0x0, flag, type);

        if(CHECKBIT(val64, 0x6))
                stat_info->xpak_stat.alarm_transceiver_temp_low++;

        /* Bit 3: laser bias current high alarm (tracked counter) */
        flag = CHECKBIT(val64, 0x3);
        type = 2;
        s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
                                &stat_info->xpak_stat.xpak_regs_stat,
                                0x2, flag, type);

        if(CHECKBIT(val64, 0x2))
                stat_info->xpak_stat.alarm_laser_bias_current_low++;

        /* Bit 1: laser output power high alarm (tracked counter) */
        flag = CHECKBIT(val64, 0x1);
        type = 3;
        s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
                                &stat_info->xpak_stat.xpak_regs_stat,
                                0x4, flag, type);

        if(CHECKBIT(val64, 0x0))
                stat_info->xpak_stat.alarm_laser_output_power_low++;

        /* Reading the Warning flags (register 0xA074); each set bit bumps
         * the corresponding warning counter. */
        addr = 0xA074;
        val64 = 0x0;
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

        if(CHECKBIT(val64, 0x7))
                stat_info->xpak_stat.warn_transceiver_temp_high++;

        if(CHECKBIT(val64, 0x6))
                stat_info->xpak_stat.warn_transceiver_temp_low++;

        if(CHECKBIT(val64, 0x3))
                stat_info->xpak_stat.warn_laser_bias_current_high++;

        if(CHECKBIT(val64, 0x2))
                stat_info->xpak_stat.warn_laser_bias_current_low++;

        if(CHECKBIT(val64, 0x1))
                stat_info->xpak_stat.warn_laser_output_power_high++;

        if(CHECKBIT(val64, 0x0))
                stat_info->xpak_stat.warn_laser_output_power_low++;
}
3409
3410 /**
3411  *  wait_for_cmd_complete - waits for a command to complete.
 *  @addr : address of the register to poll.
 *  @busy_bit : bit mask identifying the busy bit within the register.
 *  @bit_state : state to wait for (S2IO_BIT_RESET or S2IO_BIT_SET).
3414  *  Description: Function that waits for a command to Write into RMAC
3415  *  ADDR DATA registers to be completed and returns either success or
3416  *  error depending on whether the command was complete or not.
3417  *  Return value:
3418  *   SUCCESS on success and FAILURE on failure.
3419  */
3420
3421 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3422                                 int bit_state)
3423 {
3424         int ret = FAILURE, cnt = 0, delay = 1;
3425         u64 val64;
3426
3427         if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3428                 return FAILURE;
3429
3430         do {
3431                 val64 = readq(addr);
3432                 if (bit_state == S2IO_BIT_RESET) {
3433                         if (!(val64 & busy_bit)) {
3434                                 ret = SUCCESS;
3435                                 break;
3436                         }
3437                 } else {
3438                         if (!(val64 & busy_bit)) {
3439                                 ret = SUCCESS;
3440                                 break;
3441                         }
3442                 }
3443
3444                 if(in_interrupt())
3445                         mdelay(delay);
3446                 else
3447                         msleep(delay);
3448
3449                 if (++cnt >= 10)
3450                         delay = 50;
3451         } while (cnt < 20);
3452         return ret;
3453 }
3454 /*
3455  * check_pci_device_id - Checks if the device id is supported
3456  * @id : device id
3457  * Description: Function to check if the pci device id is supported by driver.
3458  * Return value: Actual device id if supported else PCI_ANY_ID
3459  */
3460 static u16 check_pci_device_id(u16 id)
3461 {
3462         switch (id) {
3463         case PCI_DEVICE_ID_HERC_WIN:
3464         case PCI_DEVICE_ID_HERC_UNI:
3465                 return XFRAME_II_DEVICE;
3466         case PCI_DEVICE_ID_S2IO_UNI:
3467         case PCI_DEVICE_ID_S2IO_WIN:
3468                 return XFRAME_I_DEVICE;
3469         default:
3470                 return PCI_ANY_ID;
3471         }
3472 }
3473
3474 /**
3475  *  s2io_reset - Resets the card.
3476  *  @sp : private member of the device structure.
3477  *  Description: Function to Reset the card. This function then also
3478  *  restores the previously saved PCI configuration space registers as
3479  *  the card reset also resets the configuration space.
3480  *  Return value:
3481  *  void.
3482  */
3483
static void s2io_reset(struct s2io_nic * sp)
{
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64;
        u16 subid, pci_cmd;
        int i;
        u16 val16;
        unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
        unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;

        DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
                        __FUNCTION__, sp->dev->name);

        /* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
        pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

        /* Issue the software reset; CX4 cards need extra settle time */
        val64 = SW_RESET_ALL;
        writeq(val64, &bar0->sw_reset);
        if (strstr(sp->product_name, "CX4")) {
                msleep(750);
        }
        msleep(250);
        /* Retry restoring PCI config space until the device id reads back
         * as a supported id, i.e. the device has come out of reset. */
        for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

                /* Restore the PCI state saved during initialization. */
                pci_restore_state(sp->pdev);
                pci_read_config_word(sp->pdev, 0x2, &val16);
                if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
                        break;
                msleep(200);
        }

        if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
                DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
        }

        /* Put back the saved PCI-X command register */
        pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

        s2io_init_pci(sp);

        /* Set swapper to enable I/O register access */
        s2io_set_swapper(sp);

        /* restore mac_addr entries */
        do_s2io_restore_unicast_mc(sp);

        /* Restore the MSIX table entries from local variables */
        restore_xmsi_data(sp);

        /* Clear certain PCI/PCI-X fields after reset */
        if (sp->device_type == XFRAME_II_DEVICE) {
                /* Clear "detected parity error" bit */
                pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

                /* Clearing PCIX Ecc status register */
                pci_write_config_dword(sp->pdev, 0x68, 0x7C);

                /* Clearing PCI_STATUS error reflected here */
                writeq(s2BIT(62), &bar0->txpic_int_reg);
        }

        /* Reset device statistics maintained by OS */
        memset(&sp->stats, 0, sizeof (struct net_device_stats));

        /* Snapshot the software stats that must survive the reset */
        up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
        down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
        up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
        down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
        reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
        mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
        mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
        watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
        /* save link up/down time/cnt, reset/memory/watchdog cnt */
        memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
        /* restore link up/down time/cnt, reset/memory/watchdog cnt */
        sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
        sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
        sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
        sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
        sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
        sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
        sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
        sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;

        /* SXE-002: Configure link and activity LED to turn it off */
        subid = sp->pdev->subsystem_device;
        if (((subid & 0xFF) >= 0x07) &&
            (sp->device_type == XFRAME_I_DEVICE)) {
                val64 = readq(&bar0->gpio_control);
                val64 |= 0x0000800000000000ULL;
                writeq(val64, &bar0->gpio_control);
                val64 = 0x0411040400000000ULL;
                writeq(val64, (void __iomem *)bar0 + 0x2700);
        }

        /*
         * Clear spurious ECC interrupts that would have occured on
         * XFRAME II cards after reset.
         */
        if (sp->device_type == XFRAME_II_DEVICE) {
                val64 = readq(&bar0->pcc_err_reg);
                writeq(val64, &bar0->pcc_err_reg);
        }

        sp->device_enabled_once = FALSE;
}
3590
/**
 *  s2io_set_swapper - to set the swapper control on the card
 *  @sp : private member of the device structure,
 *  pointer to the s2io_nic structure.
 *  Description: Function to set the swapper control on the card
 *  correctly depending on the 'endianness' of the system.
 *  Return value:
 *  SUCCESS on success and FAILURE on failure.
 */

static int s2io_set_swapper(struct s2io_nic * sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		int i = 0;
		/*
		 * Read path is mis-swapped: try each candidate read-side
		 * swapper setting until the feedback register returns the
		 * expected signature.
		 */
		u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
				0x8100008181000081ULL,  /* FE=1, SE=0 */
				0x4200004242000042ULL,  /* FE=0, SE=1 */
				0};                     /* FE=0, SE=0 */

		while(i<4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
				dev->name);
			DBG_PRINT(ERR_DBG, "feedback read %llx\n",
				(unsigned long long) val64);
			return FAILURE;
		}
		valr = value[i];
	} else {
		/* Read path already correct; remember the current setting. */
		valr = readq(&bar0->swapper_ctrl);
	}

	/* Now verify the write path by round-tripping a known pattern
	 * through the xmsi_address register. */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if(val64 != valt) {
		int i = 0;
		/* Write-side candidates; each is OR-ed with the read-side
		 * setting (valr) determined above. */
		u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
				0x0081810000818100ULL,  /* FE=1, SE=0 */
				0x0042420000424200ULL,  /* FE=0, SE=1 */
				0};                     /* FE=0, SE=0 */

		while(i<4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if(val64 == valt)
				break;
			i++;
		}
		if(i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
			DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	/* Preserve only the hardware-maintained top bits; the per-path
	 * enables are rebuilt below according to host endianness. */
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef  __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_R_SE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXD_W_SE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_R_SE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXD_W_SE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect; report and bail out. */
		DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
			  dev->name);
		DBG_PRINT(ERR_DBG, "feedback read %llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	return SUCCESS;
}
3728
3729 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3730 {
3731         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3732         u64 val64;
3733         int ret = 0, cnt = 0;
3734
3735         do {
3736                 val64 = readq(&bar0->xmsi_access);
3737                 if (!(val64 & s2BIT(15)))
3738                         break;
3739                 mdelay(1);
3740                 cnt++;
3741         } while(cnt < 5);
3742         if (cnt == 5) {
3743                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3744                 ret = 1;
3745         }
3746
3747         return ret;
3748 }
3749
3750 static void restore_xmsi_data(struct s2io_nic *nic)
3751 {
3752         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3753         u64 val64;
3754         int i, msix_index;
3755
3756
3757         if (nic->device_type == XFRAME_I_DEVICE)
3758                 return;
3759
3760         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3761                 msix_index = (i) ? ((i-1) * 8 + 1): 0;
3762                 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3763                 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3764                 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3765                 writeq(val64, &bar0->xmsi_access);
3766                 if (wait_for_msix_trans(nic, msix_index)) {
3767                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3768                         continue;
3769                 }
3770         }
3771 }
3772
3773 static void store_xmsi_data(struct s2io_nic *nic)
3774 {
3775         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3776         u64 val64, addr, data;
3777         int i, msix_index;
3778
3779         if (nic->device_type == XFRAME_I_DEVICE)
3780                 return;
3781
3782         /* Store and display */
3783         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3784                 msix_index = (i) ? ((i-1) * 8 + 1): 0;
3785                 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3786                 writeq(val64, &bar0->xmsi_access);
3787                 if (wait_for_msix_trans(nic, msix_index)) {
3788                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3789                         continue;
3790                 }
3791                 addr = readq(&bar0->xmsi_address);
3792                 data = readq(&bar0->xmsi_data);
3793                 if (addr && data) {
3794                         nic->msix_info[i].addr = addr;
3795                         nic->msix_info[i].data = data;
3796                 }
3797         }
3798 }
3799
/*
 * Allocate the driver's MSI-X bookkeeping tables, program Rx interrupt
 * steering (rx_mat) and enable MSI-X on the PCI function.
 * Entry 0 is the alarm/Tx vector shared by all fifos; entries 1.. map
 * one Rx ring each onto hardware MSI-X indices 1, 9, 17, ... (spaced 8
 * apart).  Returns 0 on success, -ENOMEM on any failure.
 */
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 rx_mat;
	u16 msi_control; /* Temp variable */
	int ret, i, j, msix_indx = 1;

	nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry),
			       GFP_KERNEL);
	if (!nic->entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
			__FUNCTION__);
		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
		return -ENOMEM;
	}
	nic->mac_control.stats_info->sw_stat.mem_allocated
		+= (nic->num_entries * sizeof(struct msix_entry));

	memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry));

	nic->s2io_entries =
		kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry),
				   GFP_KERNEL);
	if (!nic->s2io_entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			__FUNCTION__);
		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
		/* Undo the first allocation and its stats accounting. */
		kfree(nic->entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (nic->num_entries * sizeof(struct msix_entry));
		return -ENOMEM;
	}
	nic->mac_control.stats_info->sw_stat.mem_allocated
		+= (nic->num_entries * sizeof(struct s2io_msix_entry));
	memset(nic->s2io_entries, 0,
		nic->num_entries * sizeof(struct s2io_msix_entry));

	/* Entry 0: alarm/Tx vector, serviced on behalf of every fifo. */
	nic->entries[0].entry = 0;
	nic->s2io_entries[0].entry = 0;
	nic->s2io_entries[0].in_use = MSIX_FLG;
	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
	nic->s2io_entries[0].arg = &nic->mac_control.fifos;

	/* Remaining entries use hardware MSI-X indices 1, 9, 17, ... */
	for (i = 1; i < nic->num_entries; i++) {
		nic->entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].arg = NULL;
		nic->s2io_entries[i].in_use = 0;
	}

	/* Steer each Rx ring's interrupt to its dedicated vector. */
	rx_mat = readq(&bar0->rx_mat);
	for (j = 0; j < nic->config.rx_ring_num; j++) {
		rx_mat |= RX_MAT_SET(j, msix_indx);
		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
		nic->s2io_entries[j+1].in_use = MSIX_FLG;
		msix_indx += 8;
	}
	writeq(rx_mat, &bar0->rx_mat);
	readq(&bar0->rx_mat); /* flush the write */

	ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
	/* We fail init if error or we get less vectors than min required */
	if (ret) {
		DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
		kfree(nic->entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (nic->num_entries * sizeof(struct msix_entry));
		kfree(nic->s2io_entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (nic->num_entries * sizeof(struct s2io_msix_entry));
		nic->entries = NULL;
		nic->s2io_entries = NULL;
		return -ENOMEM;
	}

	/*
	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
	 * in the herc NIC. (Temp change, needs to be removed later)
	 */
	pci_read_config_word(nic->pdev, 0x42, &msi_control);
	msi_control |= 0x1; /* Enable MSI */
	pci_write_config_word(nic->pdev, 0x42, msi_control);

	return 0;
}
3886
/* Handle software interrupt used during MSI(X) test */
static irqreturn_t s2io_test_intr(int irq, void *dev_id)
{
	struct s2io_nic *sp = dev_id;

	/* Flag the interrupt as seen and wake s2io_test_msi(). */
	sp->msi_detected = 1;
	wake_up(&sp->msi_wait);

	return IRQ_HANDLED;
}
3897
/* Test interrupt path by forcing a software IRQ */
static int s2io_test_msi(struct s2io_nic *sp)
{
	struct pci_dev *pdev = sp->pdev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int err;
	u64 val64, saved64;

	/* Hook the test handler on vector 1 (the first ring vector). */
	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
			sp->name, sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
		       sp->dev->name, pci_name(pdev), pdev->irq);
		return err;
	}

	init_waitqueue_head (&sp->msi_wait);
	sp->msi_detected = 0;

	/* Arm the scheduled-interrupt timer to fire once through MSI 1. */
	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
	val64 |= SCHED_INT_CTRL_ONE_SHOT;
	val64 |= SCHED_INT_CTRL_TIMER_EN;
	val64 |= SCHED_INT_CTRL_INT2MSI(1);
	writeq(val64, &bar0->scheduled_int_ctrl);

	/* Give the interrupt up to HZ/10 (100ms at HZ=1000) to arrive. */
	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

	if (!sp->msi_detected) {
		/* MSI(X) test failed, go back to INTx mode */
		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
			"using MSI(X) during test\n", sp->dev->name,
			pci_name(pdev));

		err = -EOPNOTSUPP;
	}

	free_irq(sp->entries[1].vector, sp);

	/* Restore the scheduler control register to its saved value. */
	writeq(saved64, &bar0->scheduled_int_ctrl);

	return err;
}
3940
3941 static void remove_msix_isr(struct s2io_nic *sp)
3942 {
3943         int i;
3944         u16 msi_control;
3945
3946         for (i = 0; i < sp->num_entries; i++) {
3947                 if (sp->s2io_entries[i].in_use ==
3948                         MSIX_REGISTERED_SUCCESS) {
3949                         int vector = sp->entries[i].vector;
3950                         void *arg = sp->s2io_entries[i].arg;
3951                         free_irq(vector, arg);
3952                 }
3953         }
3954
3955         kfree(sp->entries);
3956         kfree(sp->s2io_entries);
3957         sp->entries = NULL;
3958         sp->s2io_entries = NULL;
3959
3960         pci_read_config_word(sp->pdev, 0x42, &msi_control);
3961         msi_control &= 0xFFFE; /* Disable MSI */
3962         pci_write_config_word(sp->pdev, 0x42, msi_control);
3963
3964         pci_disable_msix(sp->pdev);
3965 }
3966
3967 static void remove_inta_isr(struct s2io_nic *sp)
3968 {
3969         struct net_device *dev = sp->dev;
3970
3971         free_irq(sp->pdev->irq, dev);
3972 }
3973
3974 /* ********************************************************* *
3975  * Functions defined below concern the OS part of the driver *
3976  * ********************************************************* */
3977
/**
 *  s2io_open - open entry point of the driver
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function is the open entry point of the driver. It mainly calls a
 *  function to allocate Rx buffers and inserts them into the buffer
 *  descriptors and then enables the Rx part of the NIC.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *   file on failure.
 */

static int s2io_open(struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	int err = 0;

	/*
	 * Make sure you have link off by default every time
	 * Nic is initialized
	 */
	netif_carrier_off(dev);
	sp->last_link_state = 0;

	/* Initialize H/W and enable interrupts */
	err = s2io_card_up(sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		goto hw_init_failed;
	}

	/* Program the station MAC address into the hardware. */
	if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
		s2io_card_down(sp);
		err = -ENODEV;
		goto hw_init_failed;
	}
	s2io_start_all_tx_queue(sp);
	return 0;

hw_init_failed:
	/* Release MSI-X tables allocated by s2io_enable_msi_x(), with
	 * matching stats accounting. */
	if (sp->config.intr_type == MSI_X) {
		if (sp->entries) {
			kfree(sp->entries);
			sp->mac_control.stats_info->sw_stat.mem_freed
			+= (sp->num_entries * sizeof(struct msix_entry));
		}
		if (sp->s2io_entries) {
			kfree(sp->s2io_entries);
			sp->mac_control.stats_info->sw_stat.mem_freed
			+= (sp->num_entries * sizeof(struct s2io_msix_entry));
		}
	}
	return err;
}
4034
/**
 *  s2io_close - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver. It needs to undo exactly
 *  whatever was done by the open entry point, thus it's usually referred to
 *  as the close function. Among other things this function mainly stops the
 *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
4047
4048 static int s2io_close(struct net_device *dev)
4049 {
4050         struct s2io_nic *sp = dev->priv;
4051         struct config_param *config = &sp->config;
4052         u64 tmp64;
4053         int offset;
4054
4055         /* Return if the device is already closed               *
4056         *  Can happen when s2io_card_up failed in change_mtu    *
4057         */
4058         if (!is_s2io_card_up(sp))
4059                 return 0;
4060
4061         s2io_stop_all_tx_queue(sp);
4062         /* delete all populated mac entries */
4063         for (offset = 1; offset < config->max_mc_addr; offset++) {
4064                 tmp64 = do_s2io_read_unicast_mc(sp, offset);
4065                 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
4066                         do_s2io_delete_unicast_mc(sp, tmp64);
4067         }
4068
4069         s2io_card_down(sp);
4070
4071         return 0;
4072 }
4073
/**
 *  s2io_xmit - Tx entry point of the driver
 *  @skb : the socket buffer containing the Tx data.
 *  @dev : device pointer.
 *  Description :
 *  This function is the Tx entry point of the driver. S2IO NIC supports
 *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
 *  NOTE: when device cant queue the pkt,just the trans_start variable will
 *  not be updated.
 *  Return value:
 *  0 on success & 1 on failure.
 */

static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags = 0;
	u16 vlan_tag = 0;
	struct fifo_info *fifo = NULL;
	struct mac_info *mac_control;
	struct config_param *config;
	int do_spin_lock = 1;
	int offload_type;
	int enable_per_list_interrupt = 0;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	mac_control = &sp->mac_control;
	config = &sp->config;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

	/* Drop zero-length packets outright. */
	if (unlikely(skb->len <= 0)) {
		DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
		dev_kfree_skb_any(skb);
		return 0;
	}

	/* Drop packets while the card is down for reset. */
	if (!is_s2io_card_up(sp)) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		dev_kfree_skb(skb);
		return 0;
	}

	queue = 0;
	if (sp->vlgrp && vlan_tx_tag_present(skb))
		vlan_tag = vlan_tx_tag_get(skb);
	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
		/* Default steering: hash TCP/UDP port numbers to pick a
		 * fifo within the protocol's group. */
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip;
			struct tcphdr *th;
			ip = ip_hdr(skb);

			/* Only unfragmented/first fragments carry a valid
			 * transport header. */
			if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
				th = (struct tcphdr *)(((unsigned char *)ip) +
						ip->ihl*4);

				if (ip->protocol == IPPROTO_TCP) {
					queue_len = sp->total_tcp_fifos;
					queue = (ntohs(th->source) +
							ntohs(th->dest)) &
					    sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
				} else if (ip->protocol == IPPROTO_UDP) {
					/* NOTE: port fields are read through
					 * the tcphdr cast; source/dest lie at
					 * the same offsets in udphdr. */
					queue_len = sp->total_udp_fifos;
					queue = (ntohs(th->source) +
							ntohs(th->dest)) &
					    sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
					queue += sp->udp_fifo_idx;
					if (skb->len > 1024)
						enable_per_list_interrupt = 1;
					do_spin_lock = 0;
				}
			}
		}
	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
		/* get fifo number based on skb->priority value */
		queue = config->fifo_mapping
					[skb->priority & (MAX_TX_FIFOS - 1)];
	fifo = &mac_control->fifos[queue];

	/* UDP path uses trylock so a contended fifo just reports LOCKED. */
	if (do_spin_lock)
		spin_lock_irqsave(&fifo->tx_lock, flags);
	else {
		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
			return NETDEV_TX_LOCKED;
	}

	/* Back off if this fifo's queue has been stopped. */
	if (sp->config.multiq) {
		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
		if (netif_queue_stopped(dev)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	}

	put_off = (u16) fifo->tx_curr_put_info.offset;
	get_off = (u16) fifo->tx_curr_get_info.offset;
	txdp = (struct TxD *) fifo->list_info[put_off].list_virt_addr;

	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (txdp->Host_Control ||
		   ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		s2io_stop_tx_queue(sp, fifo->fifo_no);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&fifo->tx_lock, flags);
		return 0;
	}

	/* Fill offload (LSO/checksum), ownership and VLAN fields of TxD0. */
	offload_type = s2io_offload_type(skb);
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |=
		    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
		     TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
	if (enable_per_list_interrupt)
		if (put_off & (queue_len >> 5))
			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
	if (vlan_tag) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	frg_len = skb->len - skb->data_len;
	if (offload_type == SKB_GSO_UDP) {
		/* UFO: TxD0 carries an 8-byte in-band fragment id buffer. */
		int ufo_size;

		ufo_size = s2io_udp_mss(skb);
		ufo_size &= ~7;
		txdp->Control_1 |= TXD_UFO_EN;
		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
		/* both variants do cpu_to_be64(be32_to_cpu(...)) */
		fifo->ufo_in_band_v[put_off] =
				(__force u64)skb_shinfo(skb)->ip6_frag_id;
#else
		fifo->ufo_in_band_v[put_off] =
				(__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
		txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
					fifo->ufo_in_band_v,
					sizeof(u64), PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(txdp->Buffer_Pointer))
			goto pci_map_failed;
		txdp++;
	}

	/* Map the linear part of the skb. */
	txdp->Buffer_Pointer = pci_map_single
	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(txdp->Buffer_Pointer))
		goto pci_map_failed;

	txdp->Host_Control = (unsigned long) skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
	if (offload_type == SKB_GSO_UDP)
		txdp->Control_1 |= TXD_UFO_EN;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!frag->size)
			continue;
		txdp++;
		txdp->Buffer_Pointer = (u64) pci_map_page
		    (sp->pdev, frag->page, frag->page_offset,
		     frag->size, PCI_DMA_TODEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
		if (offload_type == SKB_GSO_UDP)
			txdp->Control_1 |= TXD_UFO_EN;
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	if (offload_type == SKB_GSO_UDP)
		frg_cnt++; /* as Txd0 was used for inband header */

	/* Hand the descriptor list to the hardware fifo. */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = fifo->list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	mmiowb();

	/* Advance the put pointer, wrapping at the end of the fifo. */
	put_off++;
	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	fifo->tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		s2io_stop_tx_queue(sp, fifo->fifo_no);
	}
	mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&fifo->tx_lock, flags);

	/* MSI-X: reclaim completed descriptors immediately. */
	if (sp->config.intr_type == MSI_X)
		tx_intr_handler(fifo);

	return 0;
pci_map_failed:
	stats->pci_map_fail_cnt++;
	s2io_stop_tx_queue(sp, fifo->fifo_no);
	stats->mem_freed += skb->truesize;
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&fifo->tx_lock, flags);
	return 0;
}
4315
4316 static void
4317 s2io_alarm_handle(unsigned long data)
4318 {
4319         struct s2io_nic *sp = (struct s2io_nic *)data;
4320         struct net_device *dev = sp->dev;
4321
4322         s2io_handle_errors(dev);
4323         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4324 }
4325
/*
 * Per-ring MSI-X handler.  In NAPI mode it masks this ring's vector and
 * schedules the poll; otherwise it processes the ring inline.
 */
static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
{
	struct ring_info *ring = (struct ring_info *)dev_id;
	struct s2io_nic *sp = ring->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	struct net_device *dev = sp->dev;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_HANDLED;

	if (sp->config.napi) {
		u8 __iomem *addr = NULL;
		u8 val8 = 0;

		/* Mask this ring's MSI-X vector until the NAPI poll runs;
		 * byte position within xmsi_mask_reg selects the ring. */
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		addr += (7 - ring->ring_no);
		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
		writeb(val8, addr);
		val8 = readb(addr); /* flush the write */
		netif_rx_schedule(dev, &ring->napi);
	} else {
		/* Non-NAPI: drain the ring and replenish Rx buffers now. */
		rx_intr_handler(ring, 0);
		s2io_chk_rx_buffers(ring);
	}

	return IRQ_HANDLED;
}
4353
/*
 * MSI-X vector 0 handler: services alarm (TXPIC) and Tx-traffic
 * interrupts for all fifos.  @dev_id points at the fifo_info array.
 */
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
	int i;
	struct fifo_info *fifos = (struct fifo_info *)dev_id;
	struct s2io_nic *sp = fifos->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	struct config_param *config  = &sp->config;
	u64 reason;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_NONE;

	reason = readq(&bar0->general_int_status);
	if (unlikely(reason == S2IO_MINUS_ONE))
		/* Nothing much can be done. Get out */
		return IRQ_HANDLED;

	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
		/* Mask all interrupts while servicing. */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		/* Reclaim completed Tx descriptors on every fifo. */
		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&fifos[i]);

		/* Restore the saved interrupt mask and flush. */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);
		return IRQ_HANDLED;
	}
	/* The interrupt was not raised by us */
	return IRQ_NONE;
}
4390
/*
 * s2io_txpic_intr_handle - handle TXPIC (GPIO/link state) interrupts.
 * @sp: device private structure.
 *
 * Inspects the GPIO interrupt register for link up/down indications,
 * updates the adapter control/LED state and the driver's link status,
 * and re-programs the GPIO interrupt mask accordingly.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64;

        val64 = readq(&bar0->pic_int_status);
        if (val64 & PIC_INT_GPIO) {
                val64 = readq(&bar0->gpio_int_reg);
                if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
                    (val64 & GPIO_INT_REG_LINK_UP)) {
                        /*
                         * This is unstable state so clear both up/down
                         * interrupt and adapter to re-evaluate the link state.
                         */
                        val64 |=  GPIO_INT_REG_LINK_DOWN;
                        val64 |= GPIO_INT_REG_LINK_UP;
                        writeq(val64, &bar0->gpio_int_reg);
                        val64 = readq(&bar0->gpio_int_mask);
                        /* Unmask both link-up and link-down interrupts */
                        val64 &= ~(GPIO_INT_MASK_LINK_UP |
                                   GPIO_INT_MASK_LINK_DOWN);
                        writeq(val64, &bar0->gpio_int_mask);
                }
                else if (val64 & GPIO_INT_REG_LINK_UP) {
                        /* NOTE(review): adapter_status read value is discarded;
                         * presumably the read itself latches state — confirm */
                        val64 = readq(&bar0->adapter_status);
                                /* Enable Adapter */
                        val64 = readq(&bar0->adapter_control);
                        val64 |= ADAPTER_CNTL_EN;
                        writeq(val64, &bar0->adapter_control);
                        val64 |= ADAPTER_LED_ON;
                        writeq(val64, &bar0->adapter_control);
                        if (!sp->device_enabled_once)
                                sp->device_enabled_once = 1;

                        s2io_link(sp, LINK_UP);
                        /*
                         * unmask link down interrupt and mask link-up
                         * intr
                         */
                        val64 = readq(&bar0->gpio_int_mask);
                        val64 &= ~GPIO_INT_MASK_LINK_DOWN;
                        val64 |= GPIO_INT_MASK_LINK_UP;
                        writeq(val64, &bar0->gpio_int_mask);

                }else if (val64 & GPIO_INT_REG_LINK_DOWN) {
                        val64 = readq(&bar0->adapter_status);
                        s2io_link(sp, LINK_DOWN);
                        /* Link is down so unmask link up interrupt */
                        val64 = readq(&bar0->gpio_int_mask);
                        val64 &= ~GPIO_INT_MASK_LINK_UP;
                        val64 |= GPIO_INT_MASK_LINK_DOWN;
                        writeq(val64, &bar0->gpio_int_mask);

                        /* turn off LED */
                        val64 = readq(&bar0->adapter_control);
                        val64 = val64 &(~ADAPTER_LED_ON);
                        writeq(val64, &bar0->adapter_control);
                }
        }
        /* Final read-back; presumably a posted-write flush — TODO confirm */
        val64 = readq(&bar0->gpio_int_mask);
}
4451
4452 /**
 *  do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4454  *  @value: alarm bits
4455  *  @addr: address value
4456  *  @cnt: counter variable
4457  *  Description: Check for alarm and increment the counter
4458  *  Return Value:
4459  *  1 - if alarm bit set
4460  *  0 - if alarm bit is not set
4461  */
4462 static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4463                           unsigned long long *cnt)
4464 {
4465         u64 val64;
4466         val64 = readq(addr);
4467         if ( val64 & value ) {
4468                 writeq(val64, addr);
4469                 (*cnt)++;
4470                 return 1;
4471         }
4472         return 0;
4473
4474 }
4475
4476 /**
4477  *  s2io_handle_errors - Xframe error indication handler
4478  *  @nic: device private variable
4479  *  Description: Handle alarms such as loss of link, single or
4480  *  double ECC errors, critical and serious errors.
4481  *  Return Value:
4482  *  NONE
4483  */
static void s2io_handle_errors(void * dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct s2io_nic *sp = dev->priv;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 temp64 = 0,val64=0;
        int i = 0;

        struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
        struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;

        if (!is_s2io_card_up(sp))
                return;

        if (pci_channel_offline(sp->pdev))
                return;

        /* Ring-full counters are re-read from hardware on every pass */
        memset(&sw_stat->ring_full_cnt, 0,
                sizeof(sw_stat->ring_full_cnt));

        /* Handling the XPAK counters update */
        if(stats->xpak_timer_count < 72000) {
                /* waiting for an hour */
                stats->xpak_timer_count++;
        } else {
                s2io_updt_xpak_counter(dev);
                /* reset the count to zero */
                stats->xpak_timer_count = 0;
        }

        /* Handling link status change error Intr */
        if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
                val64 = readq(&bar0->mac_rmac_err_reg);
                /* write-back clears the latched error bits */
                writeq(val64, &bar0->mac_rmac_err_reg);
                if (val64 & RMAC_LINK_STATE_CHANGE_INT)
                        schedule_work(&sp->set_link_task);
        }

        /* In case of a serious error, the device will be Reset. */
        if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
                                &sw_stat->serious_err_cnt))
                goto reset;

        /* Check for data parity error */
        if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
                                &sw_stat->parity_err_cnt))
                goto reset;

        /* Check for ring full counter */
        if (sp->device_type == XFRAME_II_DEVICE) {
                /* Each bump register packs four 16-bit per-ring counters */
                val64 = readq(&bar0->ring_bump_counter1);
                for (i=0; i<4; i++) {
                        temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
                        temp64 >>= 64 - ((i+1)*16);
                        sw_stat->ring_full_cnt[i] += temp64;
                }

                val64 = readq(&bar0->ring_bump_counter2);
                for (i=0; i<4; i++) {
                        temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
                        temp64 >>= 64 - ((i+1)*16);
                         sw_stat->ring_full_cnt[i+4] += temp64;
                }
        }

        /* Tx DMA alarm sources; fatal alarms reset, recoverable only count */
        val64 = readq(&bar0->txdma_int_status);
        /*check for pfc_err*/
        if (val64 & TXDMA_PFC_INT) {
                if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
                                PFC_MISC_0_ERR | PFC_MISC_1_ERR|
                                PFC_PCIX_ERR, &bar0->pfc_err_reg,
                                &sw_stat->pfc_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
                                &sw_stat->pfc_err_cnt);
        }

        /*check for tda_err*/
        if (val64 & TXDMA_TDA_INT) {
                if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
                                TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
                                &sw_stat->tda_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
                                &bar0->tda_err_reg, &sw_stat->tda_err_cnt);
        }
        /*check for pcc_err*/
        if (val64 & TXDMA_PCC_INT) {
                if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
                                | PCC_N_SERR | PCC_6_COF_OV_ERR
                                | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
                                | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
                                | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
                                &sw_stat->pcc_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
                                &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
        }

        /*check for tti_err*/
        if (val64 & TXDMA_TTI_INT) {
                if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
                                &sw_stat->tti_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
                                &bar0->tti_err_reg, &sw_stat->tti_err_cnt);
        }

        /*check for lso_err*/
        if (val64 & TXDMA_LSO_INT) {
                if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
                                | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
                                &bar0->lso_err_reg, &sw_stat->lso_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
                                &bar0->lso_err_reg, &sw_stat->lso_err_cnt);
        }

        /*check for tpa_err*/
        if (val64 & TXDMA_TPA_INT) {
                if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
                        &sw_stat->tpa_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
                        &sw_stat->tpa_err_cnt);
        }

        /*check for sm_err*/
        if (val64 & TXDMA_SM_INT) {
                if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
                        &sw_stat->sm_err_cnt))
                        goto reset;
        }

        /* Tx MAC alarms */
        val64 = readq(&bar0->mac_int_status);
        if (val64 & MAC_INT_STATUS_TMAC_INT) {
                if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
                                &bar0->mac_tmac_err_reg,
                                &sw_stat->mac_tmac_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
                                | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
                                &bar0->mac_tmac_err_reg,
                                &sw_stat->mac_tmac_err_cnt);
        }

        /* Tx XGXS alarms */
        val64 = readq(&bar0->xgxs_int_status);
        if (val64 & XGXS_INT_STATUS_TXGXS) {
                if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
                                &bar0->xgxs_txgxs_err_reg,
                                &sw_stat->xgxs_txgxs_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
                                &bar0->xgxs_txgxs_err_reg,
                                &sw_stat->xgxs_txgxs_err_cnt);
        }

        /* Rx DMA alarm sources */
        val64 = readq(&bar0->rxdma_int_status);
        if (val64 & RXDMA_INT_RC_INT_M) {
                if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
                                | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
                                &bar0->rc_err_reg, &sw_stat->rc_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
                                | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
                                &sw_stat->rc_err_cnt);
                if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
                                | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
                                &sw_stat->prc_pcix_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
                                | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
                                &sw_stat->prc_pcix_err_cnt);
        }

        if (val64 & RXDMA_INT_RPA_INT_M) {
                if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
                                &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
                                &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
        }

        if (val64 & RXDMA_INT_RDA_INT_M) {
                if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
                                | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
                                | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
                                &bar0->rda_err_reg, &sw_stat->rda_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
                                | RDA_MISC_ERR | RDA_PCIX_ERR,
                                &bar0->rda_err_reg, &sw_stat->rda_err_cnt);
        }

        if (val64 & RXDMA_INT_RTI_INT_M) {
                if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
                                &sw_stat->rti_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
                                &bar0->rti_err_reg, &sw_stat->rti_err_cnt);
        }

        /* Rx MAC alarms */
        val64 = readq(&bar0->mac_int_status);
        if (val64 & MAC_INT_STATUS_RMAC_INT) {
                if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
                                &bar0->mac_rmac_err_reg,
                                &sw_stat->mac_rmac_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
                                RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
                                &sw_stat->mac_rmac_err_cnt);
        }

        /* Rx XGXS alarms */
        val64 = readq(&bar0->xgxs_int_status);
        if (val64 & XGXS_INT_STATUS_RXGXS) {
                if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
                                &bar0->xgxs_rxgxs_err_reg,
                                &sw_stat->xgxs_rxgxs_err_cnt))
                        goto reset;
        }

        /* Memory controller alarms */
        val64 = readq(&bar0->mc_int_status);
        if(val64 & MC_INT_STATUS_MC_INT) {
                if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
                                &sw_stat->mc_err_cnt))
                        goto reset;

                /* Handling Ecc errors */
                if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
                        writeq(val64, &bar0->mc_err_reg);
                        if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
                                sw_stat->double_ecc_errs++;
                                if (sp->device_type != XFRAME_II_DEVICE) {
                                        /*
                                         * Reset XframeI only if critical error
                                         */
                                        if (val64 &
                                                (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
                                                MC_ERR_REG_MIRI_ECC_DB_ERR_1))
                                                                goto reset;
                                        }
                        } else
                                sw_stat->single_ecc_errs++;
                }
        }
        return;

reset:
        /* Fatal alarm: stop Tx and schedule an adapter reset */
        s2io_stop_all_tx_queue(sp);
        schedule_work(&sp->rst_timer_task);
        sw_stat->soft_reset_cnt++;
        return;
}
4737
4738 /**
4739  *  s2io_isr - ISR handler of the device .
4740  *  @irq: the irq of the device.
4741  *  @dev_id: a void pointer to the dev structure of the NIC.
4742  *  Description:  This function is the ISR handler of the device. It
4743  *  identifies the reason for the interrupt and calls the relevant
 *  service routines. As a contingency measure, this ISR allocates the
4745  *  recv buffers, if their numbers are below the panic value which is
4746  *  presently set to 25% of the original number of rcv buffers allocated.
4747  *  Return value:
4748  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4749  *   IRQ_NONE: will be returned if interrupt is not from our device
4750  */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct s2io_nic *sp = dev->priv;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        int i;
        u64 reason = 0;
        struct mac_info *mac_control;
        struct config_param *config;

        /* Pretend we handled any irq's from a disconnected card */
        if (pci_channel_offline(sp->pdev))
                return IRQ_NONE;

        if (!is_s2io_card_up(sp))
                return IRQ_NONE;

        mac_control = &sp->mac_control;
        config = &sp->config;

        /*
         * Identify the cause for interrupt and call the appropriate
         * interrupt handler. Causes for the interrupt could be;
         * 1. Rx of packet.
         * 2. Tx complete.
         * 3. Link down.
         */
        reason = readq(&bar0->general_int_status);

        if (unlikely(reason == S2IO_MINUS_ONE) ) {
                /* Nothing much can be done. Get out */
                return IRQ_HANDLED;
        }

        if (reason & (GEN_INTR_RXTRAFFIC |
                GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
        {
                /* Mask all interrupts while the causes are serviced */
                writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

                if (config->napi) {
                        if (reason & GEN_INTR_RXTRAFFIC) {
                                /* Defer Rx processing to the NAPI poll
                                 * routine; keep Rx masked until it re-arms */
                                netif_rx_schedule(dev, &sp->napi);
                                writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
                                writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
                                readl(&bar0->rx_traffic_int);
                        }
                } else {
                        /*
                         * rx_traffic_int reg is an R1 register, writing all 1's
                         * will ensure that the actual interrupt causing bit
                         * get's cleared and hence a read can be avoided.
                         */
                        if (reason & GEN_INTR_RXTRAFFIC)
                                writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

                        for (i = 0; i < config->rx_ring_num; i++)
                                rx_intr_handler(&mac_control->rings[i], 0);
                }

                /*
                 * tx_traffic_int reg is an R1 register, writing all 1's
                 * will ensure that the actual interrupt causing bit get's
                 * cleared and hence a read can be avoided.
                 */
                if (reason & GEN_INTR_TXTRAFFIC)
                        writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

                for (i = 0; i < config->tx_fifo_num; i++)
                        tx_intr_handler(&mac_control->fifos[i]);

                if (reason & GEN_INTR_TXPIC)
                        s2io_txpic_intr_handle(sp);

                /*
                 * Reallocate the buffers from the interrupt handler itself.
                 */
                if (!config->napi) {
                        for (i = 0; i < config->rx_ring_num; i++)
                                s2io_chk_rx_buffers(&mac_control->rings[i]);
                }
                /* Restore the saved mask; the readl flushes the posted write */
                writeq(sp->general_int_mask, &bar0->general_int_mask);
                readl(&bar0->general_int_status);

                return IRQ_HANDLED;

        }
        else if (!reason) {
                /* The interrupt was not raised by us */
                return IRQ_NONE;
        }

        return IRQ_HANDLED;
}
4844
/**
 * s2io_updt_stats - Trigger an immediate hardware statistics update
 * @sp: device private structure
 */
4848 static void s2io_updt_stats(struct s2io_nic *sp)
4849 {
4850         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4851         u64 val64;
4852         int cnt = 0;
4853
4854         if (is_s2io_card_up(sp)) {
4855                 /* Apprx 30us on a 133 MHz bus */
4856                 val64 = SET_UPDT_CLICKS(10) |
4857                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4858                 writeq(val64, &bar0->stat_cfg);
4859                 do {
4860                         udelay(100);
4861                         val64 = readq(&bar0->stat_cfg);
4862                         if (!(val64 & s2BIT(0)))
4863                                 break;
4864                         cnt++;
4865                         if (cnt == 5)
4866                                 break; /* Updt failed */
4867                 } while(1);
4868         }
4869 }
4870
4871 /**
4872  *  s2io_get_stats - Updates the device statistics structure.
4873  *  @dev : pointer to the device structure.
4874  *  Description:
4875  *  This function updates the device statistics structure in the s2io_nic
4876  *  structure and returns a pointer to the same.
4877  *  Return value:
4878  *  pointer to the updated net_device_stats structure.
4879  */
4880
4881 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4882 {
4883         struct s2io_nic *sp = dev->priv;
4884         struct mac_info *mac_control;
4885         struct config_param *config;
4886         int i;
4887
4888
4889         mac_control = &sp->mac_control;
4890         config = &sp->config;
4891
4892         /* Configure Stats for immediate updt */
4893         s2io_updt_stats(sp);
4894
4895         sp->stats.tx_packets =
4896                 le32_to_cpu(mac_control->stats_info->tmac_frms);
4897         sp->stats.tx_errors =
4898                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4899         sp->stats.rx_errors =
4900                 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4901         sp->stats.multicast =
4902                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4903         sp->stats.rx_length_errors =
4904                 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4905
4906         /* collect per-ring rx_packets and rx_bytes */
4907         sp->stats.rx_packets = sp->stats.rx_bytes = 0;
4908         for (i = 0; i < config->rx_ring_num; i++) {
4909                 sp->stats.rx_packets += mac_control->rings[i].rx_packets;
4910                 sp->stats.rx_bytes += mac_control->rings[i].rx_bytes;
4911         }
4912
4913         return (&sp->stats);
4914 }
4915
4916 /**
4917  *  s2io_set_multicast - entry point for multicast address enable/disable.
4918  *  @dev : pointer to the device structure
4919  *  Description:
4920  *  This function is a driver entry point which gets called by the kernel
4921  *  whenever multicast addresses must be enabled/disabled. This also gets
 *  called to set/reset promiscuous mode. Depending on the device flag, we
4923  *  determine, if multicast address must be enabled or if promiscuous mode
4924  *  is to be disabled etc.
4925  *  Return value:
4926  *  void.
4927  */
4928
static void s2io_set_multicast(struct net_device *dev)
{
        int i, j, prev_cnt;
        struct dev_mc_list *mclist;
        struct s2io_nic *sp = dev->priv;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
            0xfeffffffffffULL;
        u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
        void __iomem *add;
        struct config_param *config = &sp->config;

        if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
                /*  Enable all Multicast addresses */
                writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
                       &bar0->rmac_addr_data0_mem);
                writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
                       &bar0->rmac_addr_data1_mem);
                /* Write-enable + strobe commits the address/mask pair into
                 * the last multicast CAM slot */
                val64 = RMAC_ADDR_CMD_MEM_WE |
                    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                    RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
                writeq(val64, &bar0->rmac_addr_cmd_mem);
                /* Wait till command completes */
                wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET);

                sp->m_cast_flg = 1;
                sp->all_multi_pos = config->max_mc_addr - 1;
        } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
                /*  Disable all Multicast addresses */
                writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
                       &bar0->rmac_addr_data0_mem);
                writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
                       &bar0->rmac_addr_data1_mem);
                val64 = RMAC_ADDR_CMD_MEM_WE |
                    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                    RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
                writeq(val64, &bar0->rmac_addr_cmd_mem);
                /* Wait till command completes */
                wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET);

                sp->m_cast_flg = 0;
                sp->all_multi_pos = 0;
        }

        if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
                /*  Put the NIC into promiscuous mode */
                add = &bar0->mac_cfg;
                val64 = readq(&bar0->mac_cfg);
                val64 |= MAC_CFG_RMAC_PROM_ENABLE;

                /* mac_cfg is written 32 bits at a time, each half-write
                 * preceded by the RMAC configuration key */
                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) val64, add);
                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) (val64 >> 32), (add + 4));

                /* In promiscuous mode keep VLAN tags unless forced on */
                if (vlan_tag_strip != 1) {
                        val64 = readq(&bar0->rx_pa_cfg);
                        val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
                        writeq(val64, &bar0->rx_pa_cfg);
                        vlan_strip_flag = 0;
                }

                val64 = readq(&bar0->mac_cfg);
                sp->promisc_flg = 1;
                DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
                          dev->name);
        } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
                /*  Remove the NIC from promiscuous mode */
                add = &bar0->mac_cfg;
                val64 = readq(&bar0->mac_cfg);
                val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;

                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) val64, add);
                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) (val64 >> 32), (add + 4));

                if (vlan_tag_strip != 0) {
                        val64 = readq(&bar0->rx_pa_cfg);
                        val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
                        writeq(val64, &bar0->rx_pa_cfg);
                        vlan_strip_flag = 1;
                }

                val64 = readq(&bar0->mac_cfg);
                sp->promisc_flg = 0;
                DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
                          dev->name);
        }

        /*  Update individual M_CAST address list */
        if ((!sp->m_cast_flg) && dev->mc_count) {
                if (dev->mc_count >
                    (config->max_mc_addr - config->max_mac_addr)) {
                        DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
                                  dev->name);
                        DBG_PRINT(ERR_DBG, "can be added, please enable ");
                        DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
                        return;
                }

                prev_cnt = sp->mc_addr_count;
                sp->mc_addr_count = dev->mc_count;

                /* Clear out the previous list of Mc in the H/W. */
                for (i = 0; i < prev_cnt; i++) {
                        writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
                               &bar0->rmac_addr_data0_mem);
                        writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
                                &bar0->rmac_addr_data1_mem);
                        val64 = RMAC_ADDR_CMD_MEM_WE |
                            RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                            RMAC_ADDR_CMD_MEM_OFFSET
                            (config->mc_start_offset + i);
                        writeq(val64, &bar0->rmac_addr_cmd_mem);

                        /* Wait for command completes */
                        if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET)) {
                                DBG_PRINT(ERR_DBG, "%s: Adding ",
                                          dev->name);
                                DBG_PRINT(ERR_DBG, "Multicasts failed\n");
                                return;
                        }
                }

                /* Create the new Rx filter list and update the same in H/W. */
                for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
                     i++, mclist = mclist->next) {
                        memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
                               ETH_ALEN);
                        mac_addr = 0;
                        /* Pack the 6-byte MAC into the low 48 bits of
                         * mac_addr, most significant byte first */
                        for (j = 0; j < ETH_ALEN; j++) {
                                mac_addr |= mclist->dmi_addr[j];
                                mac_addr <<= 8;
                        }
                        mac_addr >>= 8;
                        writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
                               &bar0->rmac_addr_data0_mem);
                        writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
                                &bar0->rmac_addr_data1_mem);
                        val64 = RMAC_ADDR_CMD_MEM_WE |
                            RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                            RMAC_ADDR_CMD_MEM_OFFSET
                            (i + config->mc_start_offset);
                        writeq(val64, &bar0->rmac_addr_cmd_mem);

                        /* Wait for command completes */
                        if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET)) {
                                DBG_PRINT(ERR_DBG, "%s: Adding ",
                                          dev->name);
                                DBG_PRINT(ERR_DBG, "Multicasts failed\n");
                                return;
                        }
                }
        }
}
5093
5094 /* read from CAM unicast & multicast addresses and store it in
5095  * def_mac_addr structure
5096  */
5097 void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5098 {
5099         int offset;
5100         u64 mac_addr = 0x0;
5101         struct config_param *config = &sp->config;
5102
5103         /* store unicast & multicast mac addresses */
5104         for (offset = 0; offset < config->max_mc_addr; offset++) {
5105                 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5106                 /* if read fails disable the entry */
5107                 if (mac_addr == FAILURE)
5108                         mac_addr = S2IO_DISABLE_MAC_ENTRY;
5109                 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5110         }
5111 }
5112
5113 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5114 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5115 {
5116         int offset;
5117         struct config_param *config = &sp->config;
5118         /* restore unicast mac address */
5119         for (offset = 0; offset < config->max_mac_addr; offset++)
5120                 do_s2io_prog_unicast(sp->dev,
5121                         sp->def_mac_addr[offset].mac_addr);
5122
5123         /* restore multicast mac address */
5124         for (offset = config->mc_start_offset;
5125                 offset < config->max_mc_addr; offset++)
5126                 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5127 }
5128
5129 /* add a multicast MAC address to CAM */
5130 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5131 {
5132         int i;
5133         u64 mac_addr = 0;
5134         struct config_param *config = &sp->config;
5135
5136         for (i = 0; i < ETH_ALEN; i++) {
5137                 mac_addr <<= 8;
5138                 mac_addr |= addr[i];
5139         }
5140         if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5141                 return SUCCESS;
5142
5143         /* check if the multicast mac already preset in CAM */
5144         for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5145                 u64 tmp64;
5146                 tmp64 = do_s2io_read_unicast_mc(sp, i);
5147                 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5148                         break;
5149
5150                 if (tmp64 == mac_addr)
5151                         return SUCCESS;
5152         }
5153         if (i == config->max_mc_addr) {
5154                 DBG_PRINT(ERR_DBG,
5155                         "CAM full no space left for multicast MAC\n");
5156                 return FAILURE;
5157         }
5158         /* Update the internal structure with this new mac address */
5159         do_s2io_copy_mac_addr(sp, i, mac_addr);
5160
5161         return (do_s2io_add_mac(sp, mac_addr, i));
5162 }
5163
5164 /* add MAC address to CAM */
5165 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5166 {
5167         u64 val64;
5168         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5169
5170         writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5171                 &bar0->rmac_addr_data0_mem);
5172
5173         val64 =
5174                 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5175                 RMAC_ADDR_CMD_MEM_OFFSET(off);
5176         writeq(val64, &bar0->rmac_addr_cmd_mem);
5177
5178         /* Wait till command completes */
5179         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5180                 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5181                 S2IO_BIT_RESET)) {
5182                 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5183                 return FAILURE;
5184         }
5185         return SUCCESS;
5186 }
5187 /* deletes a specified unicast/multicast mac entry from CAM */
5188 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5189 {
5190         int offset;
5191         u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5192         struct config_param *config = &sp->config;
5193
5194         for (offset = 1;
5195                 offset < config->max_mc_addr; offset++) {
5196                 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5197                 if (tmp64 == addr) {
5198                         /* disable the entry by writing  0xffffffffffffULL */
5199                         if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5200                                 return FAILURE;
5201                         /* store the new mac list from CAM */
5202                         do_s2io_store_unicast_mc(sp);
5203                         return SUCCESS;
5204                 }
5205         }
5206         DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5207                         (unsigned long long)addr);
5208         return FAILURE;
5209 }
5210
5211 /* read mac entries from CAM */
5212 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5213 {
5214         u64 tmp64 = 0xffffffffffff0000ULL, val64;
5215         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5216
5217         /* read mac addr */
5218         val64 =
5219                 RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5220                 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5221         writeq(val64, &bar0->rmac_addr_cmd_mem);
5222
5223         /* Wait till command completes */
5224         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5225                 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5226                 S2IO_BIT_RESET)) {
5227                 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5228                 return FAILURE;
5229         }
5230         tmp64 = readq(&bar0->rmac_addr_data0_mem);
5231         return (tmp64 >> 16);
5232 }
5233
5234 /**
5235  * s2io_set_mac_addr driver entry point
5236  */
5237
5238 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5239 {
5240         struct sockaddr *addr = p;
5241
5242         if (!is_valid_ether_addr(addr->sa_data))
5243                 return -EINVAL;
5244
5245         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5246
5247         /* store the MAC address in CAM */
5248         return (do_s2io_prog_unicast(dev, dev->dev_addr));
5249 }
5250 /**
5251  *  do_s2io_prog_unicast - Programs the Xframe mac address
5252  *  @dev : pointer to the device structure.
5253  *  @addr: a uchar pointer to the new mac address which is to be set.
5254  *  Description : This procedure will program the Xframe to receive
5255  *  frames with new Mac Address
5256  *  Return value: SUCCESS on success and an appropriate (-)ve integer
5257  *  as defined in errno.h file on failure.
5258  */
5259
5260 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5261 {
5262         struct s2io_nic *sp = dev->priv;
5263         register u64 mac_addr = 0, perm_addr = 0;
5264         int i;
5265         u64 tmp64;
5266         struct config_param *config = &sp->config;
5267
5268         /*
5269         * Set the new MAC address as the new unicast filter and reflect this
5270         * change on the device address registered with the OS. It will be
5271         * at offset 0.
5272         */
5273         for (i = 0; i < ETH_ALEN; i++) {
5274                 mac_addr <<= 8;
5275                 mac_addr |= addr[i];
5276                 perm_addr <<= 8;
5277                 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5278         }
5279
5280         /* check if the dev_addr is different than perm_addr */
5281         if (mac_addr == perm_addr)
5282                 return SUCCESS;
5283
5284         /* check if the mac already preset in CAM */
5285         for (i = 1; i < config->max_mac_addr; i++) {
5286                 tmp64 = do_s2io_read_unicast_mc(sp, i);
5287                 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5288                         break;
5289
5290                 if (tmp64 == mac_addr) {
5291                         DBG_PRINT(INFO_DBG,
5292                         "MAC addr:0x%llx already present in CAM\n",
5293                         (unsigned long long)mac_addr);
5294                         return SUCCESS;
5295                 }
5296         }
5297         if (i == config->max_mac_addr) {
5298                 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5299                 return FAILURE;
5300         }
5301         /* Update the internal structure with this new mac address */
5302         do_s2io_copy_mac_addr(sp, i, mac_addr);
5303         return (do_s2io_add_mac(sp, mac_addr, i));
5304 }
5305
5306 /**
5307  * s2io_ethtool_sset - Sets different link parameters.
5308  * @sp : private member of the device structure, which is a pointer to the  * s2io_nic structure.
5309  * @info: pointer to the structure with parameters given by ethtool to set
5310  * link information.
5311  * Description:
5312  * The function sets different link parameters provided by the user onto
5313  * the NIC.
5314  * Return value:
5315  * 0 on success.
5316 */
5317
5318 static int s2io_ethtool_sset(struct net_device *dev,
5319                              struct ethtool_cmd *info)
5320 {
5321         struct s2io_nic *sp = dev->priv;
5322         if ((info->autoneg == AUTONEG_ENABLE) ||
5323             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
5324                 return -EINVAL;
5325         else {
5326                 s2io_close(sp->dev);
5327                 s2io_open(sp->dev);
5328         }
5329
5330         return 0;
5331 }
5332
5333 /**
5334  * s2io_ethtol_gset - Return link specific information.
5335  * @sp : private member of the device structure, pointer to the
5336  *      s2io_nic structure.
5337  * @info : pointer to the structure with parameters given by ethtool
5338  * to return link information.
5339  * Description:
5340  * Returns link specific information like speed, duplex etc.. to ethtool.
5341  * Return value :
5342  * return 0 on success.
5343  */
5344
5345 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5346 {
5347         struct s2io_nic *sp = dev->priv;
5348         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5349         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5350         info->port = PORT_FIBRE;
5351
5352         /* info->transceiver */
5353         info->transceiver = XCVR_EXTERNAL;
5354
5355         if (netif_carrier_ok(sp->dev)) {
5356                 info->speed = 10000;
5357                 info->duplex = DUPLEX_FULL;
5358         } else {
5359                 info->speed = -1;
5360                 info->duplex = -1;
5361         }
5362
5363         info->autoneg = AUTONEG_DISABLE;
5364         return 0;
5365 }
5366
5367 /**
5368  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5369  * @sp : private member of the device structure, which is a pointer to the
5370  * s2io_nic structure.
5371  * @info : pointer to the structure with parameters given by ethtool to
5372  * return driver information.
5373  * Description:
5374  * Returns driver specefic information like name, version etc.. to ethtool.
5375  * Return value:
5376  *  void
5377  */
5378
5379 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5380                                   struct ethtool_drvinfo *info)
5381 {
5382         struct s2io_nic *sp = dev->priv;
5383
5384         strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5385         strncpy(info->version, s2io_driver_version, sizeof(info->version));
5386         strncpy(info->fw_version, "", sizeof(info->fw_version));
5387         strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5388         info->regdump_len = XENA_REG_SPACE;
5389         info->eedump_len = XENA_EEPROM_SPACE;
5390 }
5391
5392 /**
5393  *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
5394  *  @sp: private member of the device structure, which is a pointer to the
5395  *  s2io_nic structure.
5396  *  @regs : pointer to the structure with parameters given by ethtool for
5397  *  dumping the registers.
5398  *  @reg_space: The input argumnet into which all the registers are dumped.
5399  *  Description:
5400  *  Dumps the entire register space of xFrame NIC into the user given
5401  *  buffer area.
5402  * Return value :
5403  * void .
5404 */
5405
5406 static void s2io_ethtool_gregs(struct net_device *dev,
5407                                struct ethtool_regs *regs, void *space)
5408 {
5409         int i;
5410         u64 reg;
5411         u8 *reg_space = (u8 *) space;
5412         struct s2io_nic *sp = dev->priv;
5413
5414         regs->len = XENA_REG_SPACE;
5415         regs->version = sp->pdev->subsystem_device;
5416
5417         for (i = 0; i < regs->len; i += 8) {
5418                 reg = readq(sp->bar0 + i);
5419                 memcpy((reg_space + i), &reg, 8);
5420         }
5421 }
5422
5423 /**
5424  *  s2io_phy_id  - timer function that alternates adapter LED.
5425  *  @data : address of the private member of the device structure, which
5426  *  is a pointer to the s2io_nic structure, provided as an u32.
5427  * Description: This is actually the timer function that alternates the
5428  * adapter LED bit of the adapter control bit to set/reset every time on
5429  * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
5430  *  once every second.
5431 */
5432 static void s2io_phy_id(unsigned long data)
5433 {
5434         struct s2io_nic *sp = (struct s2io_nic *) data;
5435         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5436         u64 val64 = 0;
5437         u16 subid;
5438
5439         subid = sp->pdev->subsystem_device;
5440         if ((sp->device_type == XFRAME_II_DEVICE) ||
5441                    ((subid & 0xFF) >= 0x07)) {
5442                 val64 = readq(&bar0->gpio_control);
5443                 val64 ^= GPIO_CTRL_GPIO_0;
5444                 writeq(val64, &bar0->gpio_control);
5445         } else {
5446                 val64 = readq(&bar0->adapter_control);
5447                 val64 ^= ADAPTER_LED_ON;
5448                 writeq(val64, &bar0->adapter_control);
5449         }
5450
5451         mod_timer(&sp->id_timer, jiffies + HZ / 2);
5452 }
5453
5454 /**
5455  * s2io_ethtool_idnic - To physically identify the nic on the system.
5456  * @sp : private member of the device structure, which is a pointer to the
5457  * s2io_nic structure.
5458  * @id : pointer to the structure with identification parameters given by
5459  * ethtool.
5460  * Description: Used to physically identify the NIC on the system.
5461  * The Link LED will blink for a time specified by the user for
5462  * identification.
5463  * NOTE: The Link has to be Up to be able to blink the LED. Hence
5464  * identification is possible only if it's link is up.
5465  * Return value:
5466  * int , returns 0 on success
5467  */
5468
5469 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
5470 {
5471         u64 val64 = 0, last_gpio_ctrl_val;
5472         struct s2io_nic *sp = dev->priv;
5473         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5474         u16 subid;
5475
5476         subid = sp->pdev->subsystem_device;
5477         last_gpio_ctrl_val = readq(&bar0->gpio_control);
5478         if ((sp->device_type == XFRAME_I_DEVICE) &&
5479                 ((subid & 0xFF) < 0x07)) {
5480                 val64 = readq(&bar0->adapter_control);
5481                 if (!(val64 & ADAPTER_CNTL_EN)) {
5482                         printk(KERN_ERR
5483                                "Adapter Link down, cannot blink LED\n");
5484                         return -EFAULT;
5485                 }
5486         }
5487         if (sp->id_timer.function == NULL) {
5488                 init_timer(&sp->id_timer);
5489                 sp->id_timer.function = s2io_phy_id;
5490                 sp->id_timer.data = (unsigned long) sp;
5491         }
5492         mod_timer(&sp->id_timer, jiffies);
5493         if (data)
5494                 msleep_interruptible(data * HZ);
5495         else
5496                 msleep_interruptible(MAX_FLICKER_TIME);
5497         del_timer_sync(&sp->id_timer);
5498
5499         if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
5500                 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
5501                 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5502         }
5503
5504         return 0;
5505 }
5506
5507 static void s2io_ethtool_gringparam(struct net_device *dev,
5508                                     struct ethtool_ringparam *ering)
5509 {
5510         struct s2io_nic *sp = dev->priv;
5511         int i,tx_desc_count=0,rx_desc_count=0;
5512
5513         if (sp->rxd_mode == RXD_MODE_1)
5514                 ering->rx_max_pending = MAX_RX_DESC_1;
5515         else if (sp->rxd_mode == RXD_MODE_3B)
5516                 ering->rx_max_pending = MAX_RX_DESC_2;
5517
5518         ering->tx_max_pending = MAX_TX_DESC;
5519         for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5520                 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5521
5522         DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
5523         ering->tx_pending = tx_desc_count;
5524         rx_desc_count = 0;
5525         for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5526                 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5527
5528         ering->rx_pending = rx_desc_count;
5529
5530         ering->rx_mini_max_pending = 0;
5531         ering->rx_mini_pending = 0;
5532         if(sp->rxd_mode == RXD_MODE_1)
5533                 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5534         else if (sp->rxd_mode == RXD_MODE_3B)
5535                 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5536         ering->rx_jumbo_pending = rx_desc_count;
5537 }
5538
5539 /**
5540  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5541  * @sp : private member of the device structure, which is a pointer to the
5542  *      s2io_nic structure.
5543  * @ep : pointer to the structure with pause parameters given by ethtool.
5544  * Description:
5545  * Returns the Pause frame generation and reception capability of the NIC.
5546  * Return value:
5547  *  void
5548  */
5549 static void s2io_ethtool_getpause_data(struct net_device *dev,
5550                                        struct ethtool_pauseparam *ep)
5551 {
5552         u64 val64;
5553         struct s2io_nic *sp = dev->priv;
5554         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5555
5556         val64 = readq(&bar0->rmac_pause_cfg);
5557         if (val64 & RMAC_PAUSE_GEN_ENABLE)
5558                 ep->tx_pause = TRUE;
5559         if (val64 & RMAC_PAUSE_RX_ENABLE)
5560                 ep->rx_pause = TRUE;
5561         ep->autoneg = FALSE;
5562 }
5563
5564 /**
5565  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5566  * @sp : private member of the device structure, which is a pointer to the
5567  *      s2io_nic structure.
5568  * @ep : pointer to the structure with pause parameters given by ethtool.
5569  * Description:
5570  * It can be used to set or reset Pause frame generation or reception
5571  * support of the NIC.
5572  * Return value:
5573  * int, returns 0 on Success
5574  */
5575
5576 static int s2io_ethtool_setpause_data(struct net_device *dev,
5577                                struct ethtool_pauseparam *ep)
5578 {
5579         u64 val64;
5580         struct s2io_nic *sp = dev->priv;
5581         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5582
5583         val64 = readq(&bar0->rmac_pause_cfg);
5584         if (ep->tx_pause)
5585                 val64 |= RMAC_PAUSE_GEN_ENABLE;
5586         else
5587                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5588         if (ep->rx_pause)
5589                 val64 |= RMAC_PAUSE_RX_ENABLE;
5590         else
5591                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5592         writeq(val64, &bar0->rmac_pause_cfg);
5593         return 0;
5594 }
5595
/**
 * read_eeprom - reads 4 bytes of data from user given offset.
 * @sp : private member of the device structure, which is a pointer to the
 *      s2io_nic structure.
 * @off : offset at which the data must be written
 * @data : Its an output parameter where the data read at the given
 *      offset is stored.
 * Description:
 * Will read 4 bytes of data from the user given offset and return the
 * read data.
 * NOTE: Will allow to read only part of the EEPROM visible through the
 *   I2C bus.
 * NOTE: On Xframe II a NACK from the SPI controller makes this return 1,
 *   so callers should treat any non-zero return as failure (they do:
 *   every caller tests the result for truth, not for -1).
 * Return value:
 *  -1 on failure and 0 on success.
 */

/* I2C device id of the EEPROM on the Xframe I board. */
#define S2IO_DEV_ID             5
static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/*
		 * Xframe I: the EEPROM hangs off the I2C bus.  Kick off a
		 * 4-byte read (BYTE_CNT is count-1) at 'off'.
		 */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll up to 5 times (50ms apart) for completion. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/*
		 * Xframe II: the EEPROM is behind the SPI controller.
		 * Program the command first, then raise REQ in a second
		 * write to start it.
		 */
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Poll for NACK (failure) or DONE (data ready). */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				*data = readq(&bar0->spi_data);
				/* Only the low 3 bytes are valid data. */
				*data &= 0xffffff;
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5662
/**
 *  write_eeprom - actually writes the relevant part of the data value.
 *  @sp : private member of the device structure, which is a pointer to the
 *       s2io_nic structure.
 *  @off : offset at which the data must be written
 *  @data : The data that is to be written
 *  @cnt : Number of bytes of the data that are actually to be written into
 *  the Eeprom. (max of 3)
 * Description:
 *  Actually writes the relevant part of the data value into the Eeprom
 *  through the I2C bus.
 * Return value:
 *  0 on success, -1 on failure.
 */

static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/*
		 * Xframe I: issue an I2C write of 'cnt' bytes of 'data'
		 * (BYTE_CNT is count-1 encoded by the macro's caller).
		 */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll up to 5 times (50ms apart) for completion. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				/* NACK from the device means the write failed. */
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* SPI BYTECNT encodes 8 bytes as 0. */
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);

		/*
		 * Program the command first, then raise REQ in a second
		 * write to start it.
		 */
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Poll for NACK (failure) or DONE (write accepted). */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				/* NOTE: returns 1, not -1; callers test for non-zero. */
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5727 static void s2io_vpd_read(struct s2io_nic *nic)
5728 {
5729         u8 *vpd_data;
5730         u8 data;
5731         int i=0, cnt, fail = 0;
5732         int vpd_addr = 0x80;
5733
5734         if (nic->device_type == XFRAME_II_DEVICE) {
5735                 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5736                 vpd_addr = 0x80;
5737         }
5738         else {
5739                 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5740                 vpd_addr = 0x50;
5741         }
5742         strcpy(nic->serial_num, "NOT AVAILABLE");
5743
5744         vpd_data = kmalloc(256, GFP_KERNEL);
5745         if (!vpd_data) {
5746                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5747                 return;
5748         }
5749         nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5750
5751         for (i = 0; i < 256; i +=4 ) {
5752                 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5753                 pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5754                 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5755                 for (cnt = 0; cnt <5; cnt++) {
5756                         msleep(2);
5757                         pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5758                         if (data == 0x80)
5759                                 break;
5760                 }
5761                 if (cnt >= 5) {
5762                         DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5763                         fail = 1;
5764                         break;
5765                 }
5766                 pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5767                                       (u32 *)&vpd_data[i]);
5768         }
5769
5770         if(!fail) {
5771                 /* read serial number of adapter */
5772                 for (cnt = 0; cnt < 256; cnt++) {
5773                 if ((vpd_data[cnt] == 'S') &&
5774                         (vpd_data[cnt+1] == 'N') &&
5775                         (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5776                                 memset(nic->serial_num, 0, VPD_STRING_LEN);
5777                                 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5778                                         vpd_data[cnt+2]);
5779                                 break;
5780                         }
5781                 }
5782         }
5783
5784         if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5785                 memset(nic->product_name, 0, vpd_data[1]);
5786                 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5787         }
5788         kfree(vpd_data);
5789         nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5790 }
5791
5792 /**
5793  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5794  *  @sp : private member of the device structure, which is a pointer to the *       s2io_nic structure.
5795  *  @eeprom : pointer to the user level structure provided by ethtool,
5796  *  containing all relevant information.
5797  *  @data_buf : user defined value to be written into Eeprom.
5798  *  Description: Reads the values stored in the Eeprom at given offset
5799  *  for a given length. Stores these values int the input argument data
5800  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5801  *  Return value:
5802  *  int  0 on success
5803  */
5804
5805 static int s2io_ethtool_geeprom(struct net_device *dev,
5806                          struct ethtool_eeprom *eeprom, u8 * data_buf)
5807 {
5808         u32 i, valid;
5809         u64 data;
5810         struct s2io_nic *sp = dev->priv;
5811
5812         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5813
5814         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5815                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5816
5817         for (i = 0; i < eeprom->len; i += 4) {
5818                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5819                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5820                         return -EFAULT;
5821                 }
5822                 valid = INV(data);
5823                 memcpy((data_buf + i), &valid, 4);
5824         }
5825         return 0;
5826 }
5827
5828 /**
5829  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5830  *  @sp : private member of the device structure, which is a pointer to the
5831  *  s2io_nic structure.
5832  *  @eeprom : pointer to the user level structure provided by ethtool,
5833  *  containing all relevant information.
5834  *  @data_buf ; user defined value to be written into Eeprom.
5835  *  Description:
5836  *  Tries to write the user provided value in the Eeprom, at the offset
5837  *  given by the user.
5838  *  Return value:
5839  *  0 on success, -EFAULT on failure.
5840  */
5841
static int s2io_ethtool_seeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom,
				u8 * data_buf)
{
	int len = eeprom->len, cnt = 0;
	u64 valid = 0, data;
	struct s2io_nic *sp = dev->priv;

	/* Refuse the write unless userspace echoes back the magic cookie
	 * (vendor | device << 16) reported by s2io_ethtool_geeprom. */
	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
		DBG_PRINT(ERR_DBG,
			  "ETHTOOL_WRITE_EEPROM Err: Magic value ");
		DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
			  eeprom->magic);
		return -EFAULT;
	}

	/* Write the user buffer one byte at a time. */
	while (len) {
		data = (u32) data_buf[cnt] & 0x000000FF;
		if (data) {
			/* Non-zero bytes are shifted into bits 24-31 —
			 * presumably where write_eeprom expects the data
			 * byte for a single-byte (cnt == 0) transaction;
			 * confirm against write_eeprom's register layout. */
			valid = (u32) (data << 24);
		} else
			valid = data;

		/* Any single failed byte aborts the whole request. */
		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
			DBG_PRINT(ERR_DBG,
				  "ETHTOOL_WRITE_EEPROM Err: Cannot ");
			DBG_PRINT(ERR_DBG,
				  "write into the specified offset\n");
			return -EFAULT;
		}
		cnt++;
		len--;
	}

	return 0;
}
5878
5879 /**
5880  * s2io_register_test - reads and writes into all clock domains.
5881  * @sp : private member of the device structure, which is a pointer to the
5882  * s2io_nic structure.
 * @data : variable that returns the result of each of the tests conducted
 * by the driver.
5885  * Description:
5886  * Read and write into all clock domains. The NIC has 3 clock domains,
5887  * see that registers in all the three regions are accessible.
5888  * Return value:
5889  * 0 on success.
5890  */
5891
5892 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5893 {
5894         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5895         u64 val64 = 0, exp_val;
5896         int fail = 0;
5897
5898         val64 = readq(&bar0->pif_rd_swapper_fb);
5899         if (val64 != 0x123456789abcdefULL) {
5900                 fail = 1;
5901                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5902         }
5903
5904         val64 = readq(&bar0->rmac_pause_cfg);
5905         if (val64 != 0xc000ffff00000000ULL) {
5906                 fail = 1;
5907                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5908         }
5909
5910         val64 = readq(&bar0->rx_queue_cfg);
5911         if (sp->device_type == XFRAME_II_DEVICE)
5912                 exp_val = 0x0404040404040404ULL;
5913         else
5914                 exp_val = 0x0808080808080808ULL;
5915         if (val64 != exp_val) {
5916                 fail = 1;
5917                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5918         }
5919
5920         val64 = readq(&bar0->xgxs_efifo_cfg);
5921         if (val64 != 0x000000001923141EULL) {
5922                 fail = 1;
5923                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5924         }
5925
5926         val64 = 0x5A5A5A5A5A5A5A5AULL;
5927         writeq(val64, &bar0->xmsi_data);
5928         val64 = readq(&bar0->xmsi_data);
5929         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5930                 fail = 1;
5931                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5932         }
5933
5934         val64 = 0xA5A5A5A5A5A5A5A5ULL;
5935         writeq(val64, &bar0->xmsi_data);
5936         val64 = readq(&bar0->xmsi_data);
5937         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5938                 fail = 1;
5939                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5940         }
5941
5942         *data = fail;
5943         return fail;
5944 }
5945
5946 /**
5947  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5948  * @sp : private member of the device structure, which is a pointer to the
5949  * s2io_nic structure.
5950  * @data:variable that returns the result of each of the test conducted by
5951  * the driver.
5952  * Description:
5953  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5954  * register.
5955  * Return value:
5956  * 0 on success.
5957  */
5958
static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
{
	int fail = 0;
	u64 ret_data, org_4F0, org_7F0;
	u8 saved_4F0 = 0, saved_7F0 = 0;
	struct net_device *dev = sp->dev;

	/* Test Write Error at offset 0 */
	/* Note that SPI interface allows write access to all areas
	 * of EEPROM. Hence doing all negative testing only for Xframe I.
	 */
	/* Negative test: a write to offset 0 is expected to FAIL on
	 * Xframe I, so write_eeprom() returning success is a defect. */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0, 0, 3))
			fail = 1;

	/* Save current values at offsets 0x4F0 and 0x7F0 so they can be
	 * restored at the end; the saved_* flags remember whether the
	 * reads succeeded. */
	if (!read_eeprom(sp, 0x4F0, &org_4F0))
		saved_4F0 = 1;
	if (!read_eeprom(sp, 0x7F0, &org_7F0))
		saved_7F0 = 1;

	/* Positive test: write a known pattern at 0x4F0 and read it back. */
	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x4F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
			"Data written %llx Data read %llx\n",
			dev->name, (unsigned long long)0x12345,
			(unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);

	/* Test Write Request Error at offset 0x7c (negative, Xframe I only) */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0x07C, 0, 3))
			fail = 1;

	/* Positive test: write/read-back at offset 0x7f0. */
	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x7F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
			"Data written %llx Data read %llx\n",
			dev->name, (unsigned long long)0x12345,
			(unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);

	/* More negative tests on write-protected regions (Xframe I only). */
	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Test Write Error at offset 0x80 */
		if (!write_eeprom(sp, 0x080, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0xfc */
		if (!write_eeprom(sp, 0x0FC, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0x100 */
		if (!write_eeprom(sp, 0x100, 0, 3))
			fail = 1;

		/* Test Write Error at offset 4ec */
		if (!write_eeprom(sp, 0x4EC, 0, 3))
			fail = 1;
	}

	/* Restore values at offsets 0x4F0 and 0x7F0 */
	if (saved_4F0)
		write_eeprom(sp, 0x4F0, org_4F0, 3);
	if (saved_7F0)
		write_eeprom(sp, 0x7F0, org_7F0, 3);

	*data = fail;
	return fail;
}
6046
6047 /**
6048  * s2io_bist_test - invokes the MemBist test of the card .
6049  * @sp : private member of the device structure, which is a pointer to the
6050  * s2io_nic structure.
6051  * @data:variable that returns the result of each of the test conducted by
6052  * the driver.
6053  * Description:
6054  * This invokes the MemBist test of the card. We give around
6055  * 2 secs time for the Test to complete. If it's still not complete
6056  * within this peiod, we consider that the test failed.
6057  * Return value:
6058  * 0 on success and -1 on failure.
6059  */
6060
6061 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
6062 {
6063         u8 bist = 0;
6064         int cnt = 0, ret = -1;
6065
6066         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6067         bist |= PCI_BIST_START;
6068         pci_write_config_word(sp->pdev, PCI_BIST, bist);
6069
6070         while (cnt < 20) {
6071                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6072                 if (!(bist & PCI_BIST_START)) {
6073                         *data = (bist & PCI_BIST_CODE_MASK);
6074                         ret = 0;
6075                         break;
6076                 }
6077                 msleep(100);
6078                 cnt++;
6079         }
6080
6081         return ret;
6082 }
6083
6084 /**
 * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
6087  * s2io_nic structure.
6088  * @data: variable that returns the result of each of the test conducted by
6089  * the driver.
6090  * Description:
6091  * The function verifies the link state of the NIC and updates the input
6092  * argument 'data' appropriately.
6093  * Return value:
6094  * 0 on success.
6095  */
6096
6097 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
6098 {
6099         struct XENA_dev_config __iomem *bar0 = sp->bar0;
6100         u64 val64;
6101
6102         val64 = readq(&bar0->adapter_status);
6103         if(!(LINK_IS_UP(val64)))
6104                 *data = 1;
6105         else
6106                 *data = 0;
6107
6108         return *data;
6109 }
6110
6111 /**
6112  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6113  * @sp - private member of the device structure, which is a pointer to the
6114  * s2io_nic structure.
6115  * @data - variable that returns the result of each of the test
6116  * conducted by the driver.
6117  * Description:
6118  *  This is one of the offline test that tests the read and write
6119  *  access to the RldRam chip on the NIC.
6120  * Return value:
6121  *  0 on success.
6122  */
6123
static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* Disable ECC so test patterns are stored/read verbatim. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/* Put the memory controller into RLDRAM test mode. */
	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Two passes: the second uses the bit-inverted patterns (low 16
	 * bits are left untouched by the XOR mask in both passes). */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d2);

		/* Target address for the test transaction. */
		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Trigger the write phase and poll (up to 5 * 200ms). */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		/* Timed out waiting for the write phase; give up.  Note
		 * that test_fail stays 0 here — only a completed compare
		 * that misses MC_RLDRAM_TEST_PASS marks failure. */
		if (cnt == 5)
			break;

		/* Trigger the read/compare phase and poll (up to 5 * 500ms). */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		if (cnt == 5)
			break;

		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
6208
6209 /**
 *  s2io_ethtool_test - conducts 6 tests to determine the health of card.
6211  *  @sp : private member of the device structure, which is a pointer to the
6212  *  s2io_nic structure.
6213  *  @ethtest : pointer to a ethtool command specific structure that will be
6214  *  returned to the user.
6215  *  @data : variable that returns the result of each of the test
6216  * conducted by the driver.
6217  * Description:
6218  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
6219  *  the health of the card.
6220  * Return value:
6221  *  void
6222  */
6223
6224 static void s2io_ethtool_test(struct net_device *dev,
6225                               struct ethtool_test *ethtest,
6226                               uint64_t * data)
6227 {
6228         struct s2io_nic *sp = dev->priv;
6229         int orig_state = netif_running(sp->dev);
6230
6231         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6232                 /* Offline Tests. */
6233                 if (orig_state)
6234                         s2io_close(sp->dev);
6235
6236                 if (s2io_register_test(sp, &data[0]))
6237                         ethtest->flags |= ETH_TEST_FL_FAILED;
6238
6239                 s2io_reset(sp);
6240
6241                 if (s2io_rldram_test(sp, &data[3]))
6242                         ethtest->flags |= ETH_TEST_FL_FAILED;
6243
6244                 s2io_reset(sp);
6245
6246                 if (s2io_eeprom_test(sp, &data[1]))
6247                         ethtest->flags |= ETH_TEST_FL_FAILED;
6248
6249                 if (s2io_bist_test(sp, &data[4]))
6250                         ethtest->flags |= ETH_TEST_FL_FAILED;
6251
6252                 if (orig_state)
6253                         s2io_open(sp->dev);
6254
6255                 data[2] = 0;
6256         } else {
6257                 /* Online Tests. */
6258                 if (!orig_state) {
6259                         DBG_PRINT(ERR_DBG,
6260                                   "%s: is not up, cannot run test\n",
6261                                   dev->name);
6262                         data[0] = -1;
6263                         data[1] = -1;
6264                         data[2] = -1;
6265                         data[3] = -1;
6266                         data[4] = -1;
6267                 }
6268
6269                 if (s2io_link_test(sp, &data[2]))
6270                         ethtest->flags |= ETH_TEST_FL_FAILED;
6271
6272                 data[0] = 0;
6273                 data[1] = 0;
6274                 data[3] = 0;
6275                 data[4] = 0;
6276         }
6277 }
6278
static void s2io_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats,
				   u64 * tmp_stats)
{
	int i = 0, k;
	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Flatten the hardware statistics block plus the driver's software
	 * counters into tmp_stats[] positionally.  The order here must
	 * stay in lockstep with the driver's ethtool strings table —
	 * never reorder these entries independently. */
	s2io_updt_stats(sp);
	/* Transmit MAC statistics.  Many hardware counters are split into
	 * a 32-bit base and a 32-bit overflow register; each pair is
	 * recombined into a single 64-bit value here. */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
		le32_to_cpu(stat_info->tmac_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_bcst_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_any_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_vld_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_icmp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_rst_tcp);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
	tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_udp);
	/* Receive MAC statistics. */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_bcst_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
		<< 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
		 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_discarded_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
		 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_usized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_osized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_frag_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_jabber_frms);
	/* Received frame-size histogram. */
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_ip);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_icmp);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_udp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_err_drp_udp);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
	/* Per-queue receive frame and queue-full counters. */
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_pause_cnt);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_accepted_ip);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
	/* PCI/PCI-X bus transaction counters. */
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);

	/* Enhanced statistics exist only for Hercules */
	if(sp->device_type == XFRAME_II_DEVICE) {
		tmp_stats[i++] =
				le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
		tmp_stats[i++] =
				le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
		tmp_stats[i++] =
				le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
	}

	/* Software (driver-maintained) statistics. */
	tmp_stats[i++] = 0;
	tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
	tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
	tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
	tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
	for (k = 0; k < MAX_RX_RINGS; k++)
		tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
	/* XPAK transceiver alarm/warning counters. */
	tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
	tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
	tmp_stats[i++] = stat_info->sw_stat.sending_both;
	tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
	tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
	/* Average packets per aggregation, computed without a 64-bit
	 * divide (not available on all platforms). */
	if (stat_info->sw_stat.num_aggregations) {
		u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
		int count = 0;
		/*
		 * Since 64-bit divide does not work on all platforms,
		 * do repeated subtraction.
		 */
		while (tmp >= stat_info->sw_stat.num_aggregations) {
			tmp -= stat_info->sw_stat.num_aggregations;
			count++;
		}
		tmp_stats[i++] = count;
	}
	else
		tmp_stats[i++] = 0;
	tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
	tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
	tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
	tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
	tmp_stats[i++] = stat_info->sw_stat.mem_freed;
	tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
	tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
	tmp_stats[i++] = stat_info->sw_stat.link_up_time;
	tmp_stats[i++] = stat_info->sw_stat.link_down_time;

	/* Transmit-side error counters. */
	tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;

	/* Receive-side and per-block error counters. */
	tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
}
6554
6555 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6556 {
6557         return (XENA_REG_SPACE);
6558 }
6559
6560
6561 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6562 {
6563         struct s2io_nic *sp = dev->priv;
6564
6565         return (sp->rx_csum);
6566 }
6567
6568 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6569 {
6570         struct s2io_nic *sp = dev->priv;
6571
6572         if (data)
6573                 sp->rx_csum = 1;
6574         else
6575                 sp->rx_csum = 0;
6576
6577         return 0;
6578 }
6579
6580 static int s2io_get_eeprom_len(struct net_device *dev)
6581 {
6582         return (XENA_EEPROM_SPACE);
6583 }
6584
6585 static int s2io_get_sset_count(struct net_device *dev, int sset)
6586 {
6587         struct s2io_nic *sp = dev->priv;
6588
6589         switch (sset) {
6590         case ETH_SS_TEST:
6591                 return S2IO_TEST_LEN;
6592         case ETH_SS_STATS:
6593                 switch(sp->device_type) {
6594                 case XFRAME_I_DEVICE:
6595                         return XFRAME_I_STAT_LEN;
6596                 case XFRAME_II_DEVICE:
6597                         return XFRAME_II_STAT_LEN;
6598                 default:
6599                         return 0;
6600                 }
6601         default:
6602                 return -EOPNOTSUPP;
6603         }
6604 }
6605
6606 static void s2io_ethtool_get_strings(struct net_device *dev,
6607                                      u32 stringset, u8 * data)
6608 {
6609         int stat_size = 0;
6610         struct s2io_nic *sp = dev->priv;
6611
6612         switch (stringset) {
6613         case ETH_SS_TEST:
6614                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6615                 break;
6616         case ETH_SS_STATS:
6617                 stat_size = sizeof(ethtool_xena_stats_keys);
6618                 memcpy(data, &ethtool_xena_stats_keys,stat_size);
6619                 if(sp->device_type == XFRAME_II_DEVICE) {
6620                         memcpy(data + stat_size,
6621                                 &ethtool_enhanced_stats_keys,
6622                                 sizeof(ethtool_enhanced_stats_keys));
6623                         stat_size += sizeof(ethtool_enhanced_stats_keys);
6624                 }
6625
6626                 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6627                         sizeof(ethtool_driver_stats_keys));
6628         }
6629 }
6630
6631 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6632 {
6633         if (data)
6634                 dev->features |= NETIF_F_IP_CSUM;
6635         else
6636                 dev->features &= ~NETIF_F_IP_CSUM;
6637
6638         return 0;
6639 }
6640
6641 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6642 {
6643         return (dev->features & NETIF_F_TSO) != 0;
6644 }
6645 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6646 {
6647         if (data)
6648                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6649         else
6650                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6651
6652         return 0;
6653 }
6654
/* ethtool callbacks supported by this driver. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.get_rx_csum = s2io_ethtool_get_rx_csum,
	.set_rx_csum = s2io_ethtool_set_rx_csum,
	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_tso = s2io_ethtool_op_get_tso,
	.set_tso = s2io_ethtool_op_set_tso,
	.set_ufo = ethtool_op_set_ufo,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.phys_id = s2io_ethtool_idnic,
	.get_ethtool_stats = s2io_get_ethtool_stats,
	.get_sset_count = s2io_get_sset_count,
};
6681
6682 /**
6683  *  s2io_ioctl - Entry point for the Ioctl
6684  *  @dev :  Device pointer.
6685  *  @ifr :  An IOCTL specefic structure, that can contain a pointer to
6686  *  a proprietary structure used to pass information to the driver.
6687  *  @cmd :  This is used to distinguish between the different commands that
6688  *  can be passed to the IOCTL functions.
6689  *  Description:
6690  *  Currently there are no special functionality supported in IOCTL, hence
6691  *  function always return EOPNOTSUPPORTED
6692  */
6693
6694 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6695 {
6696         return -EOPNOTSUPP;
6697 }
6698
6699 /**
6700  *  s2io_change_mtu - entry point to change MTU size for the device.
6701  *   @dev : device pointer.
6702  *   @new_mtu : the new MTU size for the device.
6703  *   Description: A driver entry point to change MTU size for the device.
6704  *   Before changing the MTU the device must be stopped.
6705  *  Return value:
6706  *   0 on success and an appropriate (-)ve integer as defined in errno.h
6707  *   file on failure.
6708  */
6709
6710 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6711 {
6712         struct s2io_nic *sp = dev->priv;
6713         int ret = 0;
6714
6715         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6716                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6717                           dev->name);
6718                 return -EPERM;
6719         }
6720
6721         dev->mtu = new_mtu;
6722         if (netif_running(dev)) {
6723                 s2io_stop_all_tx_queue(sp);
6724                 s2io_card_down(sp);
6725                 ret = s2io_card_up(sp);
6726                 if (ret) {
6727                         DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6728                                   __FUNCTION__);
6729                         return ret;
6730                 }
6731                 s2io_wake_all_tx_queue(sp);
6732         } else { /* Device is down */
6733                 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6734                 u64 val64 = new_mtu;
6735
6736                 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6737         }
6738
6739         return ret;
6740 }
6741
6742 /**
6743  * s2io_set_link - Set the LInk status
6744  * @data: long pointer to device private structue
6745  * Description: Sets the link status for the adapter
6746  */
6747
6748 static void s2io_set_link(struct work_struct *work)
6749 {
6750         struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
6751         struct net_device *dev = nic->dev;
6752         struct XENA_dev_config __iomem *bar0 = nic->bar0;
6753         register u64 val64;
6754         u16 subid;
6755
6756         rtnl_lock();
6757
6758         if (!netif_running(dev))
6759                 goto out_unlock;
6760
6761         if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6762                 /* The card is being reset, no point doing anything */
6763                 goto out_unlock;
6764         }
6765
6766         subid = nic->pdev->subsystem_device;
6767         if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6768                 /*
6769                  * Allow a small delay for the NICs self initiated
6770                  * cleanup to complete.
6771                  */
6772                 msleep(100);
6773         }
6774
6775         val64 = readq(&bar0->adapter_status);
6776         if (LINK_IS_UP(val64)) {
6777                 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6778                         if (verify_xena_quiescence(nic)) {
6779                                 val64 = readq(&bar0->adapter_control);
6780                                 val64 |= ADAPTER_CNTL_EN;
6781                                 writeq(val64, &bar0->adapter_control);
6782                                 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6783                                         nic->device_type, subid)) {
6784                                         val64 = readq(&bar0->gpio_control);
6785                                         val64 |= GPIO_CTRL_GPIO_0;
6786                                         writeq(val64, &bar0->gpio_control);
6787                                         val64 = readq(&bar0->gpio_control);
6788                                 } else {
6789                                         val64 |= ADAPTER_LED_ON;
6790                                         writeq(val64, &bar0->adapter_control);
6791                                 }
6792                                 nic->device_enabled_once = TRUE;
6793                         } else {
6794                                 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6795                                 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6796                                 s2io_stop_all_tx_queue(nic);
6797                         }
6798                 }
6799                 val64 = readq(&bar0->adapter_control);
6800                 val64 |= ADAPTER_LED_ON;
6801                 writeq(val64, &bar0->adapter_control);
6802                 s2io_link(nic, LINK_UP);
6803         } else {
6804                 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6805                                                       subid)) {
6806                         val64 = readq(&bar0->gpio_control);
6807                         val64 &= ~GPIO_CTRL_GPIO_0;
6808                         writeq(val64, &bar0->gpio_control);
6809                         val64 = readq(&bar0->gpio_control);
6810                 }
6811                 /* turn off LED */
6812                 val64 = readq(&bar0->adapter_control);
6813                 val64 = val64 &(~ADAPTER_LED_ON);
6814                 writeq(val64, &bar0->adapter_control);
6815                 s2io_link(nic, LINK_DOWN);
6816         }
6817         clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6818
6819 out_unlock:
6820         rtnl_unlock();
6821 }
6822
/*
 * set_rxd_buffer_pointer - attach receive buffers to an RxD
 * @sp: device private structure
 * @rxdp: descriptor to fill
 * @ba: per-descriptor dummy-buffer addresses (used in 3-buffer mode)
 * @skb: in/out: previously allocated skb to reuse, or NULL to allocate
 * @temp0/@temp1/@temp2: in/out: cached DMA addresses of the skb's buffers,
 *	reused for subsequent descriptors that share the same skb
 * @size: buffer allocation size for the current ring mode
 *
 * Only descriptors whose Host_Control is 0 (no skb attached) are touched.
 * Returns 0 on success, -ENOMEM on allocation or DMA mapping failure;
 * on a partial mapping failure the already mapped buffers are unmapped.
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				struct buffAdd *ba,
				struct sk_buff **skb, u64 *temp0, u64 *temp1,
				u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frame are not going to be processed,
			 * using same mapped address for the Rxd
			 * buffer pointer
			 */
			rxdp1->Buffer0_ptr = *temp0;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM ;
			}
			sp->mac_control.stats_info->sw_stat.mem_allocated
				+= (*skb)->truesize;
			/* storing the mapped addr in a temp variable
			 * such it will be used for next rxd whose
			 * Host Control is NULL
			 */
			rxdp1->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, (*skb)->data,
					size - NET_IP_ALIGN,
					PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(rxdp1->Buffer0_ptr))
				goto memalloc_failed;
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
		/* Two buffer Mode */
		if (*skb) {
			/* Reuse the cached mappings of the shared skb. */
			rxdp3->Buffer2_ptr = *temp2;
			rxdp3->Buffer0_ptr = *temp0;
			rxdp3->Buffer1_ptr = *temp1;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM;
			}
			sp->mac_control.stats_info->sw_stat.mem_allocated
				+= (*skb)->truesize;
			/* Buffer 2 holds the frame payload. */
			rxdp3->Buffer2_ptr = *temp2 =
				pci_map_single(sp->pdev, (*skb)->data,
					       dev->mtu + 4,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
				goto memalloc_failed;
			rxdp3->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
						PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) {
				/* Unwind the buffer-2 mapping done above. */
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			rxdp3->Buffer1_ptr = *temp1 =
				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) {
				/* Unwind both earlier mappings. */
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer0_ptr,
					BUF0_LEN, PCI_DMA_FROMDEVICE);
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
		}
	}
	return 0;
	memalloc_failed:
		stats->pci_map_fail_cnt++;
		stats->mem_freed += (*skb)->truesize;
		dev_kfree_skb(*skb);
		return -ENOMEM;
}
6924
6925 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6926                                 int size)
6927 {
6928         struct net_device *dev = sp->dev;
6929         if (sp->rxd_mode == RXD_MODE_1) {
6930                 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6931         } else if (sp->rxd_mode == RXD_MODE_3B) {
6932                 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6933                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6934                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6935         }
6936 }
6937
6938 static  int rxd_owner_bit_reset(struct s2io_nic *sp)
6939 {
6940         int i, j, k, blk_cnt = 0, size;
6941         struct mac_info * mac_control = &sp->mac_control;
6942         struct config_param *config = &sp->config;
6943         struct net_device *dev = sp->dev;
6944         struct RxD_t *rxdp = NULL;
6945         struct sk_buff *skb = NULL;
6946         struct buffAdd *ba = NULL;
6947         u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6948
6949         /* Calculate the size based on ring mode */
6950         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6951                 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6952         if (sp->rxd_mode == RXD_MODE_1)
6953                 size += NET_IP_ALIGN;
6954         else if (sp->rxd_mode == RXD_MODE_3B)
6955                 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6956
6957         for (i = 0; i < config->rx_ring_num; i++) {
6958                 blk_cnt = config->rx_cfg[i].num_rxd /
6959                         (rxd_count[sp->rxd_mode] +1);
6960
6961                 for (j = 0; j < blk_cnt; j++) {
6962                         for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6963                                 rxdp = mac_control->rings[i].
6964                                         rx_blocks[j].rxds[k].virt_addr;
6965                                 if(sp->rxd_mode == RXD_MODE_3B)
6966                                         ba = &mac_control->rings[i].ba[j][k];
6967                                 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6968                                                        &skb,(u64 *)&temp0_64,
6969                                                        (u64 *)&temp1_64,
6970                                                        (u64 *)&temp2_64,
6971                                                         size) == -ENOMEM) {
6972                                         return 0;
6973                                 }
6974
6975                                 set_rxd_buffer_size(sp, rxdp, size);
6976                                 wmb();
6977                                 /* flip the Ownership bit to Hardware */
6978                                 rxdp->Control_1 |= RXD_OWN_XENA;
6979                         }
6980                 }
6981         }
6982         return 0;
6983
6984 }
6985
/*
 * s2io_add_isr - register the device's interrupt handler(s)
 * @sp: device private structure
 *
 * Tries MSI-X first (one vector per ring plus an alarm vector); on any
 * enable or registration failure it falls back to a single shared INTA
 * handler.  Returns 0 on success, -1 if even INTA registration fails.
 */
static int s2io_add_isr(struct s2io_nic * sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->config.intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->config.intr_type = INTA;
	}

	/* Store the values of the MSIX table in the struct s2io_nic structure */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->config.intr_type == MSI_X) {
		int i, msix_rx_cnt = 0;

		for (i = 0; i < sp->num_entries; i++) {
			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
				if (sp->s2io_entries[i].type ==
					MSIX_RING_TYPE) {
					/* One handler per receive ring. */
					sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
						s2io_msix_ring_handle, 0,
						sp->desc[i],
						sp->s2io_entries[i].arg);
				} else if (sp->s2io_entries[i].type ==
					MSIX_ALARM_TYPE) {
					/* Alarm vector also covers Tx. */
					sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
					dev->name, i);
					err = request_irq(sp->entries[i].vector,
						s2io_msix_fifo_handle, 0,
						sp->desc[i],
						sp->s2io_entries[i].arg);

				}
				/* if either data or addr is zero print it. */
				if (!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG,
						"%s @Addr:0x%llx Data:0x%llx\n",
						sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long long)
						ntohl(sp->msix_info[i].data));
				} else
					msix_rx_cnt++;
				if (err) {
					/* Undo all prior MSI-X registrations
					 * and fall back to INTA. */
					remove_msix_isr(sp);

					DBG_PRINT(ERR_DBG,
						"%s:MSI-X-%d registration "
						"failed\n", dev->name, i);

					DBG_PRINT(ERR_DBG,
						"%s: Defaulting to INTA\n",
						dev->name);
					sp->config.intr_type = INTA;
					break;
				}
				sp->s2io_entries[i].in_use =
					MSIX_REGISTERED_SUCCESS;
			}
		}
		if (!err) {
			printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
				--msix_rx_cnt);
			DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled"
						" through alarm vector\n");
		}
	}
	if (sp->config.intr_type == INTA) {
		err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
				sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
7073 static void s2io_rem_isr(struct s2io_nic * sp)
7074 {
7075         if (sp->config.intr_type == MSI_X)
7076                 remove_msix_isr(sp);
7077         else
7078                 remove_inta_isr(sp);
7079 }
7080
/*
 * do_s2io_card_down - bring the adapter down
 * @sp: device private structure
 * @do_io: non-zero to also touch hardware registers (stop the NIC,
 *	replenish RxDs, wait for quiescence and reset); zero skips all
 *	register I/O and only tears down software state.
 */
static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	register u64 val64 = 0;
	struct config_param *config;
	config = &sp->config;

	if (!is_s2io_card_up(sp))
		return;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
		msleep(50);
	}
	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* Disable napi */
	if (sp->config.napi) {
		int off = 0;
		if (config->intr_type ==  MSI_X) {
			/* Per-ring napi contexts in MSI-X mode. */
			for (; off < sp->config.rx_ring_num; off++)
				napi_disable(&sp->mac_control.rings[off].napi);
			}
		else
			napi_disable(&sp->napi);
	}

	/* disable Tx and Rx traffic on the NIC */
	if (do_io)
		stop_nic(sp);

	s2io_rem_isr(sp);

	/* stop the tx queue, indicate link down */
	s2io_link(sp, LINK_DOWN);

	/* Check if the device is Quiescent and then Reset the NIC */
	while(do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffer to avoid the ring bump. Since there is
		 * no intention of processing the Rx frame at this point we
		 * are just setting the ownership bit of rxd in each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if(verify_pcc_quiescent(sp, sp->device_enabled_once))
			break;
		}

		msleep(50);
		cnt++;
		/* Give up after ~500ms without quiescence. */
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG,
				  "s2io_close:Device not Quiescent ");
			DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
				  (unsigned long long) val64);
			break;
		}
	}
	if (do_io)
		s2io_reset(sp);

	/* Free all Tx buffers */
	free_tx_buffers(sp);

	/* Free all Rx buffers */
	free_rx_buffers(sp);

	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
7157
/* Bring the card down, including all hardware register I/O. */
static void s2io_card_down(struct s2io_nic *sp)
{
	do_s2io_card_down(sp, 1);
}
7162
/*
 * s2io_card_up - bring the adapter up
 * @sp: device private structure
 *
 * Initializes the hardware, fills the receive rings, enables napi,
 * restores the receive mode, starts the NIC, registers the interrupt
 * handler(s) and enables interrupts.  Returns 0 on success or a
 * negative errno; on failure the card is reset and any allocated
 * receive buffers are freed.
 */
static int s2io_card_up(struct s2io_nic * sp)
{
	int i, ret = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	struct net_device *dev = (struct net_device *) sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	ret = init_nic(sp);
	if (ret != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		/* -EIO indicates a fatal H/W error; skip the reset then. */
		if (ret != -EIO)
			s2io_reset(sp);
		return ret;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].mtu = dev->mtu;
		ret = fill_rx_buffers(&mac_control->rings[i], 1);
		if (ret) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  mac_control->rings[i].rx_bufs_left);
	}

	/* Initialise napi */
	if (config->napi) {
		int i;
		if (config->intr_type ==  MSI_X) {
			/* One napi context per receive ring in MSI-X mode. */
			for (i = 0; i < sp->config.rx_ring_num; i++)
				napi_enable(&sp->mac_control.rings[i].napi);
		} else {
			napi_enable(&sp->napi);
		}
	}

	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos= 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	if (sp->lro) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use(if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		if (sp->config.intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	set_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/*  Enable select interrupts */
	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
	if (sp->config.intr_type != INTA) {
		/* MSI-X: Rx traffic interrupts come via the ring vectors. */
		interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	} else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}

	return 0;
}
7266
7267 /**
7268  * s2io_restart_nic - Resets the NIC.
 * @work : the rst_timer_task work_struct embedded in the device private
 * (struct s2io_nic) structure.
7270  * Description:
7271  * This function is scheduled to be run by the s2io_tx_watchdog
7272  * function after 0.5 secs to reset the NIC. The idea is to reduce
7273  * the run time of the watch dog routine which is run holding a
7274  * spin lock.
7275  */
7276
7277 static void s2io_restart_nic(struct work_struct *work)
7278 {
7279         struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7280         struct net_device *dev = sp->dev;
7281
7282         rtnl_lock();
7283
7284         if (!netif_running(dev))
7285                 goto out_unlock;
7286
7287         s2io_card_down(sp);
7288         if (s2io_card_up(sp)) {
7289                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
7290                           dev->name);
7291         }
7292         s2io_wake_all_tx_queue(sp);
7293         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
7294                   dev->name);
7295 out_unlock:
7296         rtnl_unlock();
7297 }
7298
7299 /**
7300  *  s2io_tx_watchdog - Watchdog for transmit side.
7301  *  @dev : Pointer to net device structure
7302  *  Description:
7303  *  This function is triggered if the Tx Queue is stopped
7304  *  for a pre-defined amount of time when the Interface is still up.
7305  *  If the Interface is jammed in such a situation, the hardware is
7306  *  reset (by s2io_close) and restarted again (by s2io_open) to
7307  *  overcome any problem that might have been caused in the hardware.
7308  *  Return value:
7309  *  void
7310  */
7311
7312 static void s2io_tx_watchdog(struct net_device *dev)
7313 {
7314         struct s2io_nic *sp = dev->priv;
7315
7316         if (netif_carrier_ok(dev)) {
7317                 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
7318                 schedule_work(&sp->rst_timer_task);
7319                 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
7320         }
7321 }
7322
7323 /**
 *   rx_osm_handler - To perform some OS related operations on SKB.
 *   @ring_data : per-ring private data; the owning s2io_nic, net_device
 *   and the SKB for this descriptor are all reached through it.
 *   @rxdp : the receive descriptor (RxD) that has just completed.
7330  *   Description:
 *   This function is called by the Rx interrupt service routine to perform
7332  *   some OS related operations on the SKB before passing it to the upper
7333  *   layers. It mainly checks if the checksum is OK, if so adds it to the
7334  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
7335  *   to the upper layer. If the checksum is wrong, it increments the Rx
7336  *   packet error count, frees the SKB and returns error.
7337  *   Return value:
7338  *   SUCCESS on success and -1 on failure.
7339  */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
	struct s2io_nic *sp = ring_data->nic;
	struct net_device *dev = (struct net_device *) ring_data->dev;
	/* The skb address was stashed in the RxD's Host_Control field when
	 * the buffer was posted; recover it here. */
	struct sk_buff *skb = (struct sk_buff *)
		((unsigned long) rxdp->Host_Control);
	int ring_no = ring_data->ring_no;
	u16 l3_csum, l4_csum;
	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
	struct lro *lro;
	u8 err_mask;

	skb->dev = dev;

	if (err) {
		/* Check for parity error */
		if (err & 0x1) {
			sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
		}
		/* Extract the transfer code field (upper bits of Control_1)
		 * and bump the matching per-error software statistic. */
		err_mask = err >> 48;
		switch(err_mask) {
			case 1:
				sp->mac_control.stats_info->sw_stat.
				rx_parity_err_cnt++;
			break;

			case 2:
				sp->mac_control.stats_info->sw_stat.
				rx_abort_cnt++;
			break;

			case 3:
				sp->mac_control.stats_info->sw_stat.
				rx_parity_abort_cnt++;
			break;

			case 4:
				sp->mac_control.stats_info->sw_stat.
				rx_rda_fail_cnt++;
			break;

			case 5:
				sp->mac_control.stats_info->sw_stat.
				rx_unkn_prot_cnt++;
			break;

			case 6:
				sp->mac_control.stats_info->sw_stat.
				rx_fcs_err_cnt++;
			break;

			case 7:
				sp->mac_control.stats_info->sw_stat.
				rx_buf_size_err_cnt++;
			break;

			case 8:
				sp->mac_control.stats_info->sw_stat.
				rx_rxd_corrupt_cnt++;
			break;

			case 15:
				sp->mac_control.stats_info->sw_stat.
				rx_unkn_err_cnt++;
			break;
		}
		/*
		* Drop the packet if bad transfer code. Exception being
		* 0x5, which could be due to unsupported IPv6 extension header.
		* In this case, we let stack handle the packet.
		* Note that in this case, since checksum will be incorrect,
		* stack will validate the same.
		*/
		if (err_mask != 0x5) {
			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
				dev->name, err_mask);
			sp->stats.rx_crc_errors++;
			sp->mac_control.stats_info->sw_stat.mem_freed
				+= skb->truesize;
			dev_kfree_skb(skb);
			/* Buffer consumed; detach it from the descriptor. */
			ring_data->rx_bufs_left -= 1;
			rxdp->Host_Control = 0;
			return 0;
		}
	}

	/* Updating statistics */
	ring_data->rx_packets++;
	rxdp->Host_Control = 0;
	if (sp->rxd_mode == RXD_MODE_1) {
		/* 1-buffer mode: the whole frame lives in buffer 0. */
		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

		ring_data->rx_bytes += len;
		skb_put(skb, len);

	} else if (sp->rxd_mode == RXD_MODE_3B) {
		/* 3-buffer mode: copy buffer 0 into the skb headroom in
		 * front of the buffer 2 payload already in the skb. */
		int get_block = ring_data->rx_curr_get_info.block_index;
		int get_off = ring_data->rx_curr_get_info.offset;
		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
		unsigned char *buff = skb_push(skb, buf0_len);

		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
		ring_data->rx_bytes += buf0_len + buf2_len;
		memcpy(buff, ba->ba_0, buf0_len);
		skb_put(skb, buf2_len);
	}

	/* Trust the hardware checksum only for TCP/UDP frames (and, with
	 * LRO enabled, only non-fragmented ones) when rx_csum is on. */
	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!ring_data->lro) ||
	    (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
	    (sp->rx_csum)) {
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (ring_data->lro) {
				u32 tcp_len;
				u8 *tcp;
				int ret = 0;

				/* Try to aggregate this segment into an
				 * existing LRO session; the return code
				 * selects the disposition below. */
				ret = s2io_club_tcp_session(ring_data,
					skb->data, &tcp, &tcp_len, &lro,
					rxdp, sp);
				switch (ret) {
					case 3: /* Begin anew */
						lro->parent = skb;
						goto aggregate;
					case 1: /* Aggregate */
					{
						lro_append_pkt(sp, lro,
							skb, tcp_len);
						goto aggregate;
					}
					case 4: /* Flush session */
					{
						lro_append_pkt(sp, lro,
							skb, tcp_len);
						queue_rx_frame(lro->parent,
							lro->vlan_tag);
						clear_lro_session(lro);
						sp->mac_control.stats_info->
						    sw_stat.flush_max_pkts++;
						goto aggregate;
					}
					case 2: /* Flush both */
						lro->parent->data_len =
							lro->frags_len;
						sp->mac_control.stats_info->
						     sw_stat.sending_both++;
						queue_rx_frame(lro->parent,
							lro->vlan_tag);
						clear_lro_session(lro);
						/* Current skb sent up too. */
						goto send_up;
					case 0: /* sessions exceeded */
					case -1: /* non-TCP or not
						  * L2 aggregatable
						  */
					case 5: /*
						 * First pkt in session not
						 * L3/L4 aggregatable
						 */
						break;
					default:
						DBG_PRINT(ERR_DBG,
							"%s: Samadhana!!\n",
							 __FUNCTION__);
						BUG();
				}
			}
		} else {
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else
		skb->ip_summed = CHECKSUM_NONE;

	sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
send_up:
	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
	dev->last_rx = jiffies;
aggregate:
	/* One buffer handed to the stack (or held by LRO) either way. */
	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
	return SUCCESS;
}
7532
7533 /**
7534  *  s2io_link - stops/starts the Tx queue.
7535  *  @sp : private member of the device structure, which is a pointer to the
7536  *  s2io_nic structure.
 *  @link : indicates whether link is UP/DOWN.
7538  *  Description:
7539  *  This function stops/starts the Tx queue depending on whether the link
 *  status of the NIC is down or up. This is called by the Alarm
7541  *  interrupt handler whenever a link change interrupt comes up.
7542  *  Return value:
7543  *  void.
7544  */
7545
7546 static void s2io_link(struct s2io_nic * sp, int link)
7547 {
7548         struct net_device *dev = (struct net_device *) sp->dev;
7549
7550         if (link != sp->last_link_state) {
7551                 init_tti(sp, link);
7552                 if (link == LINK_DOWN) {
7553                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7554                         s2io_stop_all_tx_queue(sp);
7555                         netif_carrier_off(dev);
7556                         if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7557                         sp->mac_control.stats_info->sw_stat.link_up_time =
7558                                 jiffies - sp->start_time;
7559                         sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7560                 } else {
7561                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7562                         if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7563                         sp->mac_control.stats_info->sw_stat.link_down_time =
7564                                 jiffies - sp->start_time;
7565                         sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7566                         netif_carrier_on(dev);
7567                         s2io_wake_all_tx_queue(sp);
7568                 }
7569         }
7570         sp->last_link_state = link;
7571         sp->start_time = jiffies;
7572 }
7573
7574 /**
7575  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7576  *  @sp : private member of the device structure, which is a pointer to the
7577  *  s2io_nic structure.
7578  *  Description:
7579  *  This function initializes a few of the PCI and PCI-X configuration registers
7580  *  with recommended values.
7581  *  Return value:
7582  *  void
7583  */
7584
7585 static void s2io_init_pci(struct s2io_nic * sp)
7586 {
7587         u16 pci_cmd = 0, pcix_cmd = 0;
7588
7589         /* Enable Data Parity Error Recovery in PCI-X command register. */
7590         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7591                              &(pcix_cmd));
7592         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7593                               (pcix_cmd | 1));
7594         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7595                              &(pcix_cmd));
7596
7597         /* Set the PErr Response bit in PCI command register. */
7598         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7599         pci_write_config_word(sp->pdev, PCI_COMMAND,
7600                               (pci_cmd | PCI_COMMAND_PARITY));
7601         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7602 }
7603
7604 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7605         u8 *dev_multiq)
7606 {
7607         if ((tx_fifo_num > MAX_TX_FIFOS) ||
7608                 (tx_fifo_num < 1)) {
7609                 DBG_PRINT(ERR_DBG, "s2io: Requested number of tx fifos "
7610                         "(%d) not supported\n", tx_fifo_num);
7611
7612                 if (tx_fifo_num < 1)
7613                         tx_fifo_num = 1;
7614                 else
7615                         tx_fifo_num = MAX_TX_FIFOS;
7616
7617                 DBG_PRINT(ERR_DBG, "s2io: Default to %d ", tx_fifo_num);
7618                 DBG_PRINT(ERR_DBG, "tx fifos\n");
7619         }
7620
7621         if (multiq)
7622                 *dev_multiq = multiq;
7623
7624         if (tx_steering_type && (1 == tx_fifo_num)) {
7625                 if (tx_steering_type != TX_DEFAULT_STEERING)
7626                         DBG_PRINT(ERR_DBG,
7627                                 "s2io: Tx steering is not supported with "
7628                                 "one fifo. Disabling Tx steering.\n");
7629                 tx_steering_type = NO_STEERING;
7630         }
7631
7632         if ((tx_steering_type < NO_STEERING) ||
7633                 (tx_steering_type > TX_DEFAULT_STEERING)) {
7634                 DBG_PRINT(ERR_DBG, "s2io: Requested transmit steering not "
7635                          "supported\n");
7636                 DBG_PRINT(ERR_DBG, "s2io: Disabling transmit steering\n");
7637                 tx_steering_type = NO_STEERING;
7638         }
7639
7640         if (rx_ring_num > MAX_RX_RINGS) {
7641                 DBG_PRINT(ERR_DBG, "s2io: Requested number of rx rings not "
7642                          "supported\n");
7643                 DBG_PRINT(ERR_DBG, "s2io: Default to %d rx rings\n",
7644                         MAX_RX_RINGS);
7645                 rx_ring_num = MAX_RX_RINGS;
7646         }
7647
7648         if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7649                 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7650                           "Defaulting to INTA\n");
7651                 *dev_intr_type = INTA;
7652         }
7653
7654         if ((*dev_intr_type == MSI_X) &&
7655                         ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7656                         (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7657                 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7658                                         "Defaulting to INTA\n");
7659                 *dev_intr_type = INTA;
7660         }
7661
7662         if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7663                 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7664                 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7665                 rx_ring_mode = 1;
7666         }
7667         return SUCCESS;
7668 }
7669
7670 /**
7671  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7672  * or Traffic class respectively.
7673  * @nic: device private variable
7674  * Description: The function configures the receive steering to
7675  * desired receive ring.
7676  * Return Value:  SUCCESS on success and
7677  * '-1' on failure (endian settings incorrect).
7678  */
7679 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7680 {
7681         struct XENA_dev_config __iomem *bar0 = nic->bar0;
7682         register u64 val64 = 0;
7683
7684         if (ds_codepoint > 63)
7685                 return FAILURE;
7686
7687         val64 = RTS_DS_MEM_DATA(ring);
7688         writeq(val64, &bar0->rts_ds_mem_data);
7689
7690         val64 = RTS_DS_MEM_CTRL_WE |
7691                 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7692                 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7693
7694         writeq(val64, &bar0->rts_ds_mem_ctrl);
7695
7696         return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7697                                 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7698                                 S2IO_BIT_RESET);
7699 }
7700
7701 /**
7702  *  s2io_init_nic - Initialization of the adapter .
7703  *  @pdev : structure containing the PCI related information of the device.
7704  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7705  *  Description:
 *  The function initializes an adapter identified by the pci_dev structure.
7707  *  All OS related initialization including memory and device structure and
 *  initialization of the device private variable is done. Also the swapper
7709  *  control register is initialized to enable read and write into the I/O
7710  *  registers of the device.
7711  *  Return value:
7712  *  returns 0 on success and negative on failure.
7713  */
7714
7715 static int __devinit
7716 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7717 {
7718         struct s2io_nic *sp;
7719         struct net_device *dev;
7720         int i, j, ret;
7721         int dma_flag = FALSE;
7722         u32 mac_up, mac_down;
7723         u64 val64 = 0, tmp64 = 0;
7724         struct XENA_dev_config __iomem *bar0 = NULL;
7725         u16 subid;
7726         struct mac_info *mac_control;
7727         struct config_param *config;
7728         int mode;
7729         u8 dev_intr_type = intr_type;
7730         u8 dev_multiq = 0;
7731         DECLARE_MAC_BUF(mac);
7732
7733         ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7734         if (ret)
7735                 return ret;
7736
7737         if ((ret = pci_enable_device(pdev))) {
7738                 DBG_PRINT(ERR_DBG,
7739                           "s2io_init_nic: pci_enable_device failed\n");
7740                 return ret;
7741         }
7742
7743         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
7744                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7745                 dma_flag = TRUE;
7746                 if (pci_set_consistent_dma_mask
7747                     (pdev, DMA_64BIT_MASK)) {
7748                         DBG_PRINT(ERR_DBG,
7749                                   "Unable to obtain 64bit DMA for \
7750                                         consistent allocations\n");
7751                         pci_disable_device(pdev);
7752                         return -ENOMEM;
7753                 }
7754         } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
7755                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7756         } else {
7757                 pci_disable_device(pdev);
7758                 return -ENOMEM;
7759         }
7760         if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
7761                 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
7762                 pci_disable_device(pdev);
7763                 return -ENODEV;
7764         }
7765         if (dev_multiq)
7766                 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7767         else
7768                 dev = alloc_etherdev(sizeof(struct s2io_nic));
7769         if (dev == NULL) {
7770                 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7771                 pci_disable_device(pdev);
7772                 pci_release_regions(pdev);
7773                 return -ENODEV;
7774         }
7775
7776         pci_set_master(pdev);
7777         pci_set_drvdata(pdev, dev);
7778         SET_NETDEV_DEV(dev, &pdev->dev);
7779
7780         /*  Private member variable initialized to s2io NIC structure */
7781         sp = dev->priv;
7782         memset(sp, 0, sizeof(struct s2io_nic));
7783         sp->dev = dev;
7784         sp->pdev = pdev;
7785         sp->high_dma_flag = dma_flag;
7786         sp->device_enabled_once = FALSE;
7787         if (rx_ring_mode == 1)
7788                 sp->rxd_mode = RXD_MODE_1;
7789         if (rx_ring_mode == 2)
7790                 sp->rxd_mode = RXD_MODE_3B;
7791
7792         sp->config.intr_type = dev_intr_type;
7793
7794         if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7795                 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7796                 sp->device_type = XFRAME_II_DEVICE;
7797         else
7798                 sp->device_type = XFRAME_I_DEVICE;
7799
7800         sp->lro = lro_enable;
7801
7802         /* Initialize some PCI/PCI-X fields of the NIC. */
7803         s2io_init_pci(sp);
7804
7805         /*
7806          * Setting the device configuration parameters.
7807          * Most of these parameters can be specified by the user during
7808          * module insertion as they are module loadable parameters. If
7809          * these parameters are not not specified during load time, they
7810          * are initialized with default values.
7811          */
7812         mac_control = &sp->mac_control;
7813         config = &sp->config;
7814
7815         config->napi = napi;
7816         config->tx_steering_type = tx_steering_type;
7817
7818         /* Tx side parameters. */
7819         if (config->tx_steering_type == TX_PRIORITY_STEERING)
7820                 config->tx_fifo_num = MAX_TX_FIFOS;
7821         else
7822                 config->tx_fifo_num = tx_fifo_num;
7823
7824         /* Initialize the fifos used for tx steering */
7825         if (config->tx_fifo_num < 5) {
7826                         if (config->tx_fifo_num  == 1)
7827                                 sp->total_tcp_fifos = 1;
7828                         else
7829                                 sp->total_tcp_fifos = config->tx_fifo_num - 1;
7830                         sp->udp_fifo_idx = config->tx_fifo_num - 1;
7831                         sp->total_udp_fifos = 1;
7832                         sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7833         } else {
7834                 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7835                                                 FIFO_OTHER_MAX_NUM);
7836                 sp->udp_fifo_idx = sp->total_tcp_fifos;
7837                 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7838                 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7839         }
7840
7841         config->multiq = dev_multiq;
7842         for (i = 0; i < config->tx_fifo_num; i++) {
7843                 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7844                 config->tx_cfg[i].fifo_priority = i;
7845         }
7846
7847         /* mapping the QoS priority to the configured fifos */
7848         for (i = 0; i < MAX_TX_FIFOS; i++)
7849                 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7850
7851         /* map the hashing selector table to the configured fifos */
7852         for (i = 0; i < config->tx_fifo_num; i++)
7853                 sp->fifo_selector[i] = fifo_selector[i];
7854
7855
7856         config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7857         for (i = 0; i < config->tx_fifo_num; i++) {
7858                 config->tx_cfg[i].f_no_snoop =
7859                     (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7860                 if (config->tx_cfg[i].fifo_len < 65) {
7861                         config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7862                         break;
7863                 }
7864         }
7865         /* + 2 because one Txd for skb->data and one Txd for UFO */
7866         config->max_txds = MAX_SKB_FRAGS + 2;
7867
7868         /* Rx side parameters. */
7869         config->rx_ring_num = rx_ring_num;
7870         for (i = 0; i < config->rx_ring_num; i++) {
7871                 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7872                     (rxd_count[sp->rxd_mode] + 1);
7873                 config->rx_cfg[i].ring_priority = i;
7874                 mac_control->rings[i].rx_bufs_left = 0;
7875                 mac_control->rings[i].rxd_mode = sp->rxd_mode;
7876                 mac_control->rings[i].rxd_count = rxd_count[sp->rxd_mode];
7877                 mac_control->rings[i].pdev = sp->pdev;
7878                 mac_control->rings[i].dev = sp->dev;
7879         }
7880
7881         for (i = 0; i < rx_ring_num; i++) {
7882                 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7883                 config->rx_cfg[i].f_no_snoop =
7884                     (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7885         }
7886
7887         /*  Setting Mac Control parameters */
7888         mac_control->rmac_pause_time = rmac_pause_time;
7889         mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7890         mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7891
7892
7893         /*  initialize the shared memory used by the NIC and the host */
7894         if (init_shared_mem(sp)) {
7895                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7896                           dev->name);
7897                 ret = -ENOMEM;
7898                 goto mem_alloc_failed;
7899         }
7900
7901         sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7902                                      pci_resource_len(pdev, 0));
7903         if (!sp->bar0) {
7904                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7905                           dev->name);
7906                 ret = -ENOMEM;
7907                 goto bar0_remap_failed;
7908         }
7909
7910         sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7911                                      pci_resource_len(pdev, 2));
7912         if (!sp->bar1) {
7913                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7914                           dev->name);
7915                 ret = -ENOMEM;
7916                 goto bar1_remap_failed;
7917         }
7918
7919         dev->irq = pdev->irq;
7920         dev->base_addr = (unsigned long) sp->bar0;
7921
7922         /* Initializing the BAR1 address as the start of the FIFO pointer. */
7923         for (j = 0; j < MAX_TX_FIFOS; j++) {
7924                 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7925                     (sp->bar1 + (j * 0x00020000));
7926         }
7927
7928         /*  Driver entry points */
7929         dev->open = &s2io_open;
7930         dev->stop = &s2io_close;
7931         dev->hard_start_xmit = &s2io_xmit;
7932         dev->get_stats = &s2io_get_stats;
7933         dev->set_multicast_list = &s2io_set_multicast;
7934         dev->do_ioctl = &s2io_ioctl;
7935         dev->set_mac_address = &s2io_set_mac_addr;
7936         dev->change_mtu = &s2io_change_mtu;
7937         SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7938         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7939         dev->vlan_rx_register = s2io_vlan_rx_register;
7940         dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
7941
7942         /*
7943          * will use eth_mac_addr() for  dev->set_mac_address
7944          * mac address will be set every time dev->open() is called
7945          */
7946 #ifdef CONFIG_NET_POLL_CONTROLLER
7947         dev->poll_controller = s2io_netpoll;
7948 #endif
7949
7950         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7951         if (sp->high_dma_flag == TRUE)
7952                 dev->features |= NETIF_F_HIGHDMA;
7953         dev->features |= NETIF_F_TSO;
7954         dev->features |= NETIF_F_TSO6;
7955         if ((sp->device_type & XFRAME_II_DEVICE) && (ufo))  {
7956                 dev->features |= NETIF_F_UFO;
7957                 dev->features |= NETIF_F_HW_CSUM;
7958         }
7959         dev->tx_timeout = &s2io_tx_watchdog;
7960         dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7961         INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7962         INIT_WORK(&sp->set_link_task, s2io_set_link);
7963
7964         pci_save_state(sp->pdev);
7965
7966         /* Setting swapper control on the NIC, for proper reset operation */
7967         if (s2io_set_swapper(sp)) {
7968                 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7969                           dev->name);
7970                 ret = -EAGAIN;
7971                 goto set_swap_failed;
7972         }
7973
7974         /* Verify if the Herc works on the slot its placed into */
7975         if (sp->device_type & XFRAME_II_DEVICE) {
7976                 mode = s2io_verify_pci_mode(sp);
7977                 if (mode < 0) {
7978                         DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7979                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7980                         ret = -EBADSLT;
7981                         goto set_swap_failed;
7982                 }
7983         }
7984
7985         if (sp->config.intr_type == MSI_X) {
7986                 sp->num_entries = config->rx_ring_num + 1;
7987                 ret = s2io_enable_msi_x(sp);
7988
7989                 if (!ret) {
7990                         ret = s2io_test_msi(sp);
7991                         /* rollback MSI-X, will re-enable during add_isr() */
7992                         remove_msix_isr(sp);
7993                 }
7994                 if (ret) {
7995
7996                         DBG_PRINT(ERR_DBG,
7997                           "%s: MSI-X requested but failed to enable\n",
7998                           dev->name);
7999                         sp->config.intr_type = INTA;
8000                 }
8001         }
8002
8003         if (config->intr_type ==  MSI_X) {
8004                 for (i = 0; i < config->rx_ring_num ; i++)
8005                         netif_napi_add(dev, &mac_control->rings[i].napi,
8006                                 s2io_poll_msix, 64);
8007         } else {
8008                 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
8009         }
8010
8011         /* Not needed for Herc */
8012         if (sp->device_type & XFRAME_I_DEVICE) {
8013                 /*
8014                  * Fix for all "FFs" MAC address problems observed on
8015                  * Alpha platforms
8016                  */
8017                 fix_mac_address(sp);
8018                 s2io_reset(sp);
8019         }
8020
8021         /*
8022          * MAC address initialization.
8023          * For now only one mac address will be read and used.
8024          */
8025         bar0 = sp->bar0;
8026         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
8027             RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
8028         writeq(val64, &bar0->rmac_addr_cmd_mem);
8029         wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
8030                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
8031         tmp64 = readq(&bar0->rmac_addr_data0_mem);
8032         mac_down = (u32) tmp64;
8033         mac_up = (u32) (tmp64 >> 32);
8034
8035         sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
8036         sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
8037         sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
8038         sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
8039         sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
8040         sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
8041
8042         /*  Set the factory defined MAC address initially   */
8043         dev->addr_len = ETH_ALEN;
8044         memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
8045         memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
8046
8047         /* initialize number of multicast & unicast MAC entries variables */
8048         if (sp->device_type == XFRAME_I_DEVICE) {
8049                 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
8050                 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
8051                 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
8052         } else if (sp->device_type == XFRAME_II_DEVICE) {
8053                 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
8054                 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
8055                 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
8056         }
8057
8058         /* store mac addresses from CAM to s2io_nic structure */
8059         do_s2io_store_unicast_mc(sp);
8060
8061         /* Configure MSIX vector for number of rings configured plus one */
8062         if ((sp->device_type == XFRAME_II_DEVICE) &&
8063                 (config->intr_type == MSI_X))
8064                 sp->num_entries = config->rx_ring_num + 1;
8065
8066          /* Store the values of the MSIX table in the s2io_nic structure */
8067         store_xmsi_data(sp);
8068         /* reset Nic and bring it to known state */
8069         s2io_reset(sp);
8070
8071         /*
8072          * Initialize link state flags
8073          * and the card state parameter
8074          */
8075         sp->state = 0;
8076
8077         /* Initialize spinlocks */
8078         for (i = 0; i < sp->config.tx_fifo_num; i++)
8079                 spin_lock_init(&mac_control->fifos[i].tx_lock);
8080
8081         /*
8082          * SXE-002: Configure link and activity LED to init state
8083          * on driver load.
8084          */
8085         subid = sp->pdev->subsystem_device;
8086         if ((subid & 0xFF) >= 0x07) {
8087                 val64 = readq(&bar0->gpio_control);
8088                 val64 |= 0x0000800000000000ULL;
8089                 writeq(val64, &bar0->gpio_control);
8090                 val64 = 0x0411040400000000ULL;
8091                 writeq(val64, (void __iomem *) bar0 + 0x2700);
8092                 val64 = readq(&bar0->gpio_control);
8093         }
8094
8095         sp->rx_csum = 1;        /* Rx chksum verify enabled by default */
8096
8097         if (register_netdev(dev)) {
8098                 DBG_PRINT(ERR_DBG, "Device registration failed\n");
8099                 ret = -ENODEV;
8100                 goto register_failed;
8101         }
8102         s2io_vpd_read(sp);
8103         DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
8104         DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
8105                   sp->product_name, pdev->revision);
8106         DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8107                   s2io_driver_version);
8108         DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n",
8109                   dev->name, print_mac(mac, dev->dev_addr));
8110         DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
8111         if (sp->device_type & XFRAME_II_DEVICE) {
8112                 mode = s2io_print_pci_mode(sp);
8113                 if (mode < 0) {
8114                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
8115                         ret = -EBADSLT;
8116                         unregister_netdev(dev);
8117                         goto set_swap_failed;
8118                 }
8119         }
8120         switch(sp->rxd_mode) {
8121                 case RXD_MODE_1:
8122                     DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8123                                                 dev->name);
8124                     break;
8125                 case RXD_MODE_3B:
8126                     DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8127                                                 dev->name);
8128                     break;
8129         }
8130
8131         switch (sp->config.napi) {
8132         case 0:
8133                 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8134                 break;
8135         case 1:
8136                 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8137                 break;
8138         }
8139
8140         DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8141                 sp->config.tx_fifo_num);
8142
8143         DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8144                   sp->config.rx_ring_num);
8145
8146         switch(sp->config.intr_type) {
8147                 case INTA:
8148                     DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8149                     break;
8150                 case MSI_X:
8151                     DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8152                     break;
8153         }
8154         if (sp->config.multiq) {
8155         for (i = 0; i < sp->config.tx_fifo_num; i++)
8156                 mac_control->fifos[i].multiq = config->multiq;
8157                 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8158                         dev->name);
8159         } else
8160                 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8161                         dev->name);
8162
8163         switch (sp->config.tx_steering_type) {
8164         case NO_STEERING:
8165                 DBG_PRINT(ERR_DBG, "%s: No steering enabled for"
8166                         " transmit\n", dev->name);
8167                         break;
8168         case TX_PRIORITY_STEERING:
8169                 DBG_PRINT(ERR_DBG, "%s: Priority steering enabled for"
8170                         " transmit\n", dev->name);
8171                 break;
8172         case TX_DEFAULT_STEERING:
8173                 DBG_PRINT(ERR_DBG, "%s: Default steering enabled for"
8174                         " transmit\n", dev->name);
8175         }
8176
8177         if (sp->lro)
8178                 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8179                           dev->name);
8180         if (ufo)
8181                 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
8182                                         " enabled\n", dev->name);
8183         /* Initialize device name */
8184         sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
8185
8186         /*
8187          * Make Link state as off at this point, when the Link change
8188          * interrupt comes the state will be automatically changed to
8189          * the right state.
8190          */
8191         netif_carrier_off(dev);
8192
8193         return 0;
8194
8195       register_failed:
8196       set_swap_failed:
8197         iounmap(sp->bar1);
8198       bar1_remap_failed:
8199         iounmap(sp->bar0);
8200       bar0_remap_failed:
8201       mem_alloc_failed:
8202         free_shared_mem(sp);
8203         pci_disable_device(pdev);
8204         pci_release_regions(pdev);
8205         pci_set_drvdata(pdev, NULL);
8206         free_netdev(dev);
8207
8208         return ret;
8209 }
8210
8211 /**
8212  * s2io_rem_nic - Free the PCI device
8213  * @pdev: structure containing the PCI related information of the device.
8214  * Description: This function is called by the Pci subsystem to release a
8215  * PCI device and free up all resource held up by the device. This could
8216  * be in response to a Hot plug event or when the driver is to be removed
8217  * from memory.
8218  */
8219
/* PCI remove callback: unregister the netdev and release every resource
 * acquired in probe, in reverse order of acquisition. */
static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
	struct net_device *dev =
	    (struct net_device *) pci_get_drvdata(pdev);
	struct s2io_nic *sp;

	/* drvdata can be NULL if probe failed before pci_set_drvdata(). */
	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
		return;
	}

	/* Wait for queued work (rst_timer_task / set_link_task) to finish
	 * before the structures they touch are freed below. */
	flush_scheduled_work();

	sp = dev->priv;
	unregister_netdev(dev);

	free_shared_mem(sp);
	iounmap(sp->bar0);
	iounmap(sp->bar1);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	pci_disable_device(pdev);
}
8244
8245 /**
8246  * s2io_starter - Entry point for the driver
8247  * Description: This function is the entry point for the driver. It verifies
8248  * the module loadable parameters and initializes PCI configuration space.
8249  */
8250
static int __init s2io_starter(void)
{
	/* Register with the PCI core; probe runs per matching device. */
	return pci_register_driver(&s2io_driver);
}
8255
8256 /**
8257  * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
8259  */
8260
static __exit void s2io_closer(void)
{
	/* Detach the driver from the PCI core; remove runs per device. */
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}
8266
/* Module load/unload entry points. */
module_init(s2io_starter);
module_exit(s2io_closer);
8269
8270 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8271                 struct tcphdr **tcp, struct RxD_t *rxdp,
8272                 struct s2io_nic *sp)
8273 {
8274         int ip_off;
8275         u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8276
8277         if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8278                 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
8279                           __FUNCTION__);
8280                 return -1;
8281         }
8282
8283         /* Checking for DIX type or DIX type with VLAN */
8284         if ((l2_type == 0)
8285                 || (l2_type == 4)) {
8286                 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8287                 /*
8288                  * If vlan stripping is disabled and the frame is VLAN tagged,
8289                  * shift the offset by the VLAN header size bytes.
8290                  */
8291                 if ((!vlan_strip_flag) &&
8292                         (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8293                         ip_off += HEADER_VLAN_SIZE;
8294         } else {
8295                 /* LLC, SNAP etc are considered non-mergeable */
8296                 return -1;
8297         }
8298
8299         *ip = (struct iphdr *)((u8 *)buffer + ip_off);
8300         ip_len = (u8)((*ip)->ihl);
8301         ip_len <<= 2;
8302         *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8303
8304         return 0;
8305 }
8306
8307 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8308                                   struct tcphdr *tcp)
8309 {
8310         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8311         if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
8312            (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
8313                 return -1;
8314         return 0;
8315 }
8316
8317 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8318 {
8319         return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
8320 }
8321
8322 static void initiate_new_session(struct lro *lro, u8 *l2h,
8323         struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag)
8324 {
8325         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8326         lro->l2h = l2h;
8327         lro->iph = ip;
8328         lro->tcph = tcp;
8329         lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8330         lro->tcp_ack = tcp->ack_seq;
8331         lro->sg_num = 1;
8332         lro->total_len = ntohs(ip->tot_len);
8333         lro->frags_len = 0;
8334         lro->vlan_tag = vlan_tag;
8335         /*
8336          * check if we saw TCP timestamp. Other consistency checks have
8337          * already been done.
8338          */
8339         if (tcp->doff == 8) {
8340                 __be32 *ptr;
8341                 ptr = (__be32 *)(tcp+1);
8342                 lro->saw_ts = 1;
8343                 lro->cur_tsval = ntohl(*(ptr+1));
8344                 lro->cur_tsecr = *(ptr+2);
8345         }
8346         lro->in_use = 1;
8347 }
8348
8349 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8350 {
8351         struct iphdr *ip = lro->iph;
8352         struct tcphdr *tcp = lro->tcph;
8353         __sum16 nchk;
8354         struct stat_block *statinfo = sp->mac_control.stats_info;
8355         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8356
8357         /* Update L3 header */
8358         ip->tot_len = htons(lro->total_len);
8359         ip->check = 0;
8360         nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
8361         ip->check = nchk;
8362
8363         /* Update L4 header */
8364         tcp->ack_seq = lro->tcp_ack;
8365         tcp->window = lro->window;
8366
8367         /* Update tsecr field if this session has timestamps enabled */
8368         if (lro->saw_ts) {
8369                 __be32 *ptr = (__be32 *)(tcp + 1);
8370                 *(ptr+2) = lro->cur_tsecr;
8371         }
8372
8373         /* Update counters required for calculation of
8374          * average no. of packets aggregated.
8375          */
8376         statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
8377         statinfo->sw_stat.num_aggregations++;
8378 }
8379
8380 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8381                 struct tcphdr *tcp, u32 l4_pyld)
8382 {
8383         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8384         lro->total_len += l4_pyld;
8385         lro->frags_len += l4_pyld;
8386         lro->tcp_next_seq += l4_pyld;
8387         lro->sg_num++;
8388
8389         /* Update ack seq no. and window ad(from this pkt) in LRO object */
8390         lro->tcp_ack = tcp->ack_seq;
8391         lro->window = tcp->window;
8392
8393         if (lro->saw_ts) {
8394                 __be32 *ptr;
8395                 /* Update tsecr and tsval from this packet */
8396                 ptr = (__be32 *)(tcp+1);
8397                 lro->cur_tsval = ntohl(*(ptr+1));
8398                 lro->cur_tsecr = *(ptr + 2);
8399         }
8400 }
8401
/*
 * verify_l3_l4_lro_capable - L3/L4-level checks before aggregating a
 * segment. @l_lro may be NULL (no existing session yet). Returns 0 when
 * the segment is mergeable, -1 when the LRO session must be flushed or
 * the packet sent up unmerged.
 */
static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
				    struct tcphdr *tcp, u32 tcp_pyld_len)
{
	u8 *ptr;

	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);

	if (!tcp_pyld_len) {
		/* Runt frame or a pure ack */
		return -1;
	}

	if (ip->ihl != 5) /* IP has options */
		return -1;

	/* If we see CE codepoint in IP header, packet is not mergeable */
	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
		return -1;

	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
	if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
				    tcp->ece || tcp->cwr || !tcp->ack) {
		/*
		 * Currently recognize only the ack control word and
		 * any other control field being set would result in
		 * flushing the LRO session
		 */
		return -1;
	}

	/*
	 * Allow only one TCP timestamp option. Don't aggregate if
	 * any other options are detected.
	 */
	if (tcp->doff != 5 && tcp->doff != 8)
		return -1;

	/* doff == 8 means exactly 12 option bytes: NOPs + timestamp. */
	if (tcp->doff == 8) {
		ptr = (u8 *)(tcp + 1);
		/* Skip NOP padding in front of the timestamp option. */
		while (*ptr == TCPOPT_NOP)
			ptr++;
		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
			return -1;

		/* Ensure timestamp value increases monotonically */
		if (l_lro)
			if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
				return -1;

		/* timestamp echo reply should be non-zero */
		if (*((__be32 *)(ptr+6)) == 0)
			return -1;
	}

	return 0;
}
8458
/*
 * s2io_club_tcp_session - match an incoming TCP segment against the
 * per-ring LRO session table and decide how to handle it.
 *
 * Return protocol (consumed by the rx path):
 *   negative : frame not L2-capable for LRO, send up as-is
 *   0 : all LRO slots busy, *lro set to NULL
 *   1 : segment aggregated into an existing session
 *   2 : flush the session (out-of-order seq, or L3/L4 check failed)
 *   3 : new session started in a free slot
 *   4 : aggregated and session reached lro_max_aggr_per_sess - flush
 *   5 : packet itself not aggregatable, send it up alone
 */
static int
s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
	u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp,
	struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;
	u16 vlan_tag = 0;

	/* Bail out early if the frame fails the L2-level checks. */
	if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
					 rxdp, sp))) {
		DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
			  ip->saddr, ip->daddr);
	} else
		return ret;

	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* First pass: look for an in-use session matching this 4-tuple. */
	for (i=0; i<MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &ring_data->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			/* Out-of-order segment: flush the session (ret 2). */
			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
					  "0x%x, actual 0x%x\n", __FUNCTION__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				sp->mac_control.stats_info->
				   sw_stat.outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
			return 5;
		}

		/* Second pass: claim a free slot for a new session. */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &ring_data->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
			  __FUNCTION__);
		*lro = NULL;
		return ret;
	}

	/* Act on the decision made above. */
	switch (ret) {
		case 3:
			initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
								vlan_tag);
			break;
		case 2:
			update_L3L4_header(sp, *lro);
			break;
		case 1:
			aggregate_new_rx(*lro, ip, tcph, *tcp_len);
			/* Session reached its aggregation limit: flush it. */
			if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
				update_L3L4_header(sp, *lro);
				ret = 4; /* Flush the LRO */
			}
			break;
		default:
			DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
				__FUNCTION__);
			break;
	}

	return ret;
}
8557
8558 static void clear_lro_session(struct lro *lro)
8559 {
8560         static u16 lro_struct_size = sizeof(struct lro);
8561
8562         memset(lro, 0, lro_struct_size);
8563 }
8564
8565 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8566 {
8567         struct net_device *dev = skb->dev;
8568         struct s2io_nic *sp = dev->priv;
8569
8570         skb->protocol = eth_type_trans(skb, dev);
8571         if (sp->vlgrp && vlan_tag
8572                 && (vlan_strip_flag)) {
8573                 /* Queueing the vlan frame to the upper layer */
8574                 if (sp->config.napi)
8575                         vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
8576                 else
8577                         vlan_hwaccel_rx(skb, sp->vlgrp, vlan_tag);
8578         } else {
8579                 if (sp->config.napi)
8580                         netif_receive_skb(skb);
8581                 else
8582                         netif_rx(skb);
8583         }
8584 }
8585
8586 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8587                            struct sk_buff *skb,
8588                            u32 tcp_len)
8589 {
8590         struct sk_buff *first = lro->parent;
8591
8592         first->len += tcp_len;
8593         first->data_len = lro->frags_len;
8594         skb_pull(skb, (skb->len - tcp_len));
8595         if (skb_shinfo(first)->frag_list)
8596                 lro->last_frag->next = skb;
8597         else
8598                 skb_shinfo(first)->frag_list = skb;
8599         first->truesize += skb->truesize;
8600         lro->last_frag = skb;
8601         sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8602         return;
8603 }
8604
8605 /**
8606  * s2io_io_error_detected - called when PCI error is detected
8607  * @pdev: Pointer to PCI device
8608  * @state: The current pci connection state
8609  *
8610  * This function is called after a PCI bus error affecting
8611  * this device has been detected.
8612  */
8613 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8614                                                pci_channel_state_t state)
8615 {
8616         struct net_device *netdev = pci_get_drvdata(pdev);
8617         struct s2io_nic *sp = netdev->priv;
8618
8619         netif_device_detach(netdev);
8620
8621         if (netif_running(netdev)) {
8622                 /* Bring down the card, while avoiding PCI I/O */
8623                 do_s2io_card_down(sp, 0);
8624         }
8625         pci_disable_device(pdev);
8626
8627         return PCI_ERS_RESULT_NEED_RESET;
8628 }
8629
8630 /**
8631  * s2io_io_slot_reset - called after the pci bus has been reset.
8632  * @pdev: Pointer to PCI device
8633  *
8634  * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
8636  * followed by fixups by BIOS, and has its config space
8637  * set up identically to what it was at cold boot.
8638  */
8639 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8640 {
8641         struct net_device *netdev = pci_get_drvdata(pdev);
8642         struct s2io_nic *sp = netdev->priv;
8643
8644         if (pci_enable_device(pdev)) {
8645                 printk(KERN_ERR "s2io: "
8646                        "Cannot re-enable PCI device after reset.\n");
8647                 return PCI_ERS_RESULT_DISCONNECT;
8648         }
8649
8650         pci_set_master(pdev);
8651         s2io_reset(sp);
8652
8653         return PCI_ERS_RESULT_RECOVERED;
8654 }
8655
8656 /**
8657  * s2io_io_resume - called when traffic can start flowing again.
8658  * @pdev: Pointer to PCI device
8659  *
8660  * This callback is called when the error recovery driver tells
8661  * us that its OK to resume normal operation.
8662  */
8663 static void s2io_io_resume(struct pci_dev *pdev)
8664 {
8665         struct net_device *netdev = pci_get_drvdata(pdev);
8666         struct s2io_nic *sp = netdev->priv;
8667
8668         if (netif_running(netdev)) {
8669                 if (s2io_card_up(sp)) {
8670                         printk(KERN_ERR "s2io: "
8671                                "Can't bring device back up after reset.\n");
8672                         return;
8673                 }
8674
8675                 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8676                         s2io_card_down(sp);
8677                         printk(KERN_ERR "s2io: "
8678                                "Can't resetore mac addr after reset.\n");
8679                         return;
8680                 }
8681         }
8682
8683         netif_device_attach(netdev);
8684         netif_tx_wake_all_queues(netdev);
8685 }