Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq
[linux-2.6] / drivers / net / s2io.c
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2007 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *              values are 1, 2.
36  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
41  * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
42  *     Possible values '1' for enable '0' for disable. Default is '0'
43  * lro_max_pkts: This parameter defines maximum number of packets can be
44  *     aggregated as a single large packet
45  * napi: This parameter used to enable/disable NAPI (polling Rx)
46  *     Possible values '1' for enable and '0' for disable. Default is '1'
47  * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48  *      Possible values '1' for enable and '0' for disable. Default is '0'
49  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50  *                 Possible values '1' for enable , '0' for disable.
51  *                 Default is '2' - which means disable in promisc mode
52  *                 and enable in non-promiscuous mode.
53  * multiq: This parameter used to enable/disable MULTIQUEUE support.
54  *      Possible values '1' for enable and '0' for disable. Default is '0'
55  ************************************************************************/
56
57 #include <linux/module.h>
58 #include <linux/types.h>
59 #include <linux/errno.h>
60 #include <linux/ioport.h>
61 #include <linux/pci.h>
62 #include <linux/dma-mapping.h>
63 #include <linux/kernel.h>
64 #include <linux/netdevice.h>
65 #include <linux/etherdevice.h>
66 #include <linux/skbuff.h>
67 #include <linux/init.h>
68 #include <linux/delay.h>
69 #include <linux/stddef.h>
70 #include <linux/ioctl.h>
71 #include <linux/timex.h>
72 #include <linux/ethtool.h>
73 #include <linux/workqueue.h>
74 #include <linux/if_vlan.h>
75 #include <linux/ip.h>
76 #include <linux/tcp.h>
77 #include <net/tcp.h>
78
79 #include <asm/system.h>
80 #include <asm/uaccess.h>
81 #include <asm/io.h>
82 #include <asm/div64.h>
83 #include <asm/irq.h>
84
85 /* local include */
86 #include "s2io.h"
87 #include "s2io-regs.h"
88
#define DRV_VERSION "2.0.26.24"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

/* Per-descriptor size, indexed by rxd_mode.
 * NOTE(review): presumably bytes per RxD for the two buffer modes
 * (RxD1 vs. RxD3) -- confirm against s2io.h. */
static int rxd_size[2] = {32,48};
/* RxDs per block minus one, indexed by rxd_mode: the code computes the
 * per-block descriptor count as rxd_count[mode] + 1 (see init_shared_mem). */
static int rxd_count[2] = {127,85};
97
98 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
99 {
100         int ret;
101
102         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
103                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
104
105         return ret;
106 }
107
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * Macro below identifies these cards given the subsystem_id.
 * Evaluates to 1 for an Xframe-I device whose subsystem id falls in
 * [0x600B, 0x600D] or [0x640B, 0x640D], else 0.
 * Arguments and the whole expansion are parenthesized so the macro is
 * safe inside larger expressions (the old definition had a bare ternary).
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)		\
	(((dev_type) == XFRAME_I_DEVICE) ?				\
		(((((subid) >= 0x600B) && ((subid) <= 0x600D)) ||	\
		  (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)

/* Link is up when neither the remote nor the local RMAC fault bit is set
 * in the adapter status register value. */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
120
121 static inline int is_s2io_card_up(const struct s2io_nic * sp)
122 {
123         return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
124 }
125
/* Ethtool related variables and Macros. */
/* Names of the ethtool self-test cases; the "(offline)"/"(online)" suffix
 * tells the user when each test may run. */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};
134
/* Names of the Xena (Xframe-I) hardware statistics reported via
 * "ethtool -S": Tx MAC, Rx MAC and PCI counters.
 * NOTE(review): order appears positional -- it presumably must match the
 * order the counters are copied out in the ethtool stats routine; verify
 * before reordering. */
static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"}
};
231
/* Additional hardware statistics available only on the enhanced
 * (Xframe-II) adapters; appended after the Xena keys for "ethtool -S". */
static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"}
};
250
/* Software (driver-maintained) statistics for "ethtool -S".
 * The first entry is a section header string, not a counter name. */
static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"parity_err_cnt"},
	{"serious_err_cnt"},
	{"soft_reset_cnt"},
	{"fifo_full_cnt"},
	{"ring_0_full_cnt"},
	{"ring_1_full_cnt"},
	{"ring_2_full_cnt"},
	{"ring_3_full_cnt"},
	{"ring_4_full_cnt"},
	{"ring_5_full_cnt"},
	{"ring_6_full_cnt"},
	{"ring_7_full_cnt"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
	{"mem_alloc_fail_cnt"},
	{"pci_map_fail_cnt"},
	{"watchdog_timer_cnt"},
	{"mem_allocated"},
	{"mem_freed"},
	{"link_up_cnt"},
	{"link_down_cnt"},
	{"link_up_time"},
	{"link_down_time"},
	{"tx_tcode_buf_abort_cnt"},
	{"tx_tcode_desc_abort_cnt"},
	{"tx_tcode_parity_err_cnt"},
	{"tx_tcode_link_loss_cnt"},
	{"tx_tcode_list_proc_err_cnt"},
	{"rx_tcode_parity_err_cnt"},
	{"rx_tcode_abort_cnt"},
	{"rx_tcode_parity_abort_cnt"},
	{"rx_tcode_rda_fail_cnt"},
	{"rx_tcode_unkn_prot_cnt"},
	{"rx_tcode_fcs_err_cnt"},
	{"rx_tcode_buf_size_err_cnt"},
	{"rx_tcode_rxd_corrupt_cnt"},
	{"rx_tcode_unkn_err_cnt"},
	{"tda_err_cnt"},
	{"pfc_err_cnt"},
	{"pcc_err_cnt"},
	{"tti_err_cnt"},
	{"tpa_err_cnt"},
	{"sm_err_cnt"},
	{"lso_err_cnt"},
	{"mac_tmac_err_cnt"},
	{"mac_rmac_err_cnt"},
	{"xgxs_txgxs_err_cnt"},
	{"xgxs_rxgxs_err_cnt"},
	{"rc_err_cnt"},
	{"prc_pcix_err_cnt"},
	{"rpa_err_cnt"},
	{"rda_err_cnt"},
	{"rti_err_cnt"},
	{"mc_err_cnt"}
};
325
/* Counts of the stat-key tables above. */
#define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)

/* Xframe-I exports the Xena + driver stats; Xframe-II adds the enhanced set. */
#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
/* Parenthesized so the macro stays a single term inside any expression
 * (the old definition expanded to an unparenthesized "a * b"). */
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
338
/*
 * Initialize a kernel timer, attach the handler and its argument, and arm
 * it to fire 'exp' jiffies from now.
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement and is safe inside an unbraced if/else body (the previous
 * definition was a bare sequence of statements).
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)		\
	do {							\
		init_timer(&timer);				\
		timer.function = handle;			\
		timer.data = (unsigned long)(arg);		\
		mod_timer(&timer, (jiffies + (exp)));		\
	} while (0)
345 /* copy mac addr to def_mac_addr array */
346 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
347 {
348         sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
349         sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
350         sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
351         sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
352         sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
353         sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
354 }
355 /* Add the vlan */
356 static void s2io_vlan_rx_register(struct net_device *dev,
357                                         struct vlan_group *grp)
358 {
359         int i;
360         struct s2io_nic *nic = dev->priv;
361         unsigned long flags[MAX_TX_FIFOS];
362         struct mac_info *mac_control = &nic->mac_control;
363         struct config_param *config = &nic->config;
364
365         for (i = 0; i < config->tx_fifo_num; i++)
366                 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
367
368         nic->vlgrp = grp;
369         for (i = config->tx_fifo_num - 1; i >= 0; i--)
370                 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
371                                 flags[i]);
372 }
373
/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not.
 * Zero-initialized (BSS), i.e. stripping is treated as off until the
 * driver programs RX_PA_CFG. */
static int vlan_strip_flag;
376
377 /* Unregister the vlan */
378 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
379 {
380         int i;
381         struct s2io_nic *nic = dev->priv;
382         unsigned long flags[MAX_TX_FIFOS];
383         struct mac_info *mac_control = &nic->mac_control;
384         struct config_param *config = &nic->config;
385
386         for (i = 0; i < config->tx_fifo_num; i++)
387                 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
388
389         if (nic->vlgrp)
390                 vlan_group_set_device(nic->vlgrp, vid, NULL);
391
392         for (i = config->tx_fifo_num - 1; i >= 0; i--)
393                 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
394                         flags[i]);
395 }
396
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

/* Terminator for the register write sequences below. */
#define END_SIGN	0x0

/* DTX configuration sequence for the Xframe-II (Herc) adapter: alternating
 * "set address" / "write data" command pairs, terminated by END_SIGN. */
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};
423
/* DTX configuration sequence for the Xframe-I (Xena) adapter: alternating
 * "set address" / "write data" command pairs, terminated by END_SIGN. */
static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};
439
/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines. Register command sequence, terminated by END_SIGN.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};
461
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
/* Number of Tx FIFOs used by the driver (see header comment: tx_fifo_num). */
S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
/* Number of receive rings (see header comment: rx_ring_num). */
S2IO_PARM_INT(rx_ring_num, 1);
/* MULTIQUEUE support: '1' enable, '0' disable (default). */
S2IO_PARM_INT(multiq, 0);
/* Rx descriptor buffer mode for all rings; valid values are 1 and 2. */
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */
static unsigned int lro_enable;
module_param_named(lro, lro_enable, uint, 0);

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

/* NAPI (polling Rx): '1' enable (default), '0' disable. */
S2IO_PARM_INT(napi, 1);
/* UDP Fragmentation Offload: '1' enable, '0' disable (default). */
S2IO_PARM_INT(ufo, 0);
/* VLAN tag stripping; default NO_STRIP_IN_PROMISC = strip only when the
 * interface is not in promiscuous mode (see header comment). */
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-FIFO Tx descriptor counts; fifo 0 gets a distinct default. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
/* Per-ring Rx block counts. */
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
/* Per-ring RTS frame lengths; 0 = unset (presumably falls back to a
 * device default -- TODO confirm in the init path). */
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
509
/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 * Each entry matches one vendor/device pair with any subsystem ids.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}	/* terminating entry */
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);
527
/* PCI error recovery callbacks (AER); the handlers are defined later in
 * this file. */
static struct pci_error_handlers s2io_err_handler = {
	.error_detected = s2io_io_error_detected,
	.slot_reset = s2io_io_slot_reset,
	.resume = s2io_io_resume,
};
533
/* PCI driver registration record; probe/remove are defined later in
 * this file. */
static struct pci_driver s2io_driver = {
      .name = "S2IO",
      .id_table = s2io_tbl,
      .probe = s2io_init_nic,
      .remove = __devexit_p(s2io_rem_nic),
      .err_handler = &s2io_err_handler,
};
541
/* A simplifier macro used both by init and free shared_mem Fns().
 * Computes ceil(len / per_each). Arguments are fully parenthesized; the
 * old definition expanded them bare, so an expression argument such as
 * "a + b" for per_each produced a wrong result. */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
544
/* netqueue manipulation helper functions */
/*
 * Stop transmission on every Tx queue of the device.
 * With multiqueue support compiled in and enabled, each subqueue is
 * stopped individually; otherwise every fifo's software state is marked
 * stopped and the single netdev queue is halted.
 */
static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
{
	int i;
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	if (sp->config.multiq) {
		for (i = 0; i < sp->config.tx_fifo_num; i++)
			netif_stop_subqueue(sp->dev, i);
	} else
#endif
	{
		for (i = 0; i < sp->config.tx_fifo_num; i++)
			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
		netif_stop_queue(sp->dev);
	}
}
561
/*
 * Stop transmission on one Tx fifo. With multiqueue enabled only that
 * subqueue is stopped; otherwise the fifo's software state is marked
 * stopped and the single netdev queue is halted.
 */
static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	if (sp->config.multiq)
		netif_stop_subqueue(sp->dev, fifo_no);
	else
#endif
	{
		sp->mac_control.fifos[fifo_no].queue_state =
			FIFO_QUEUE_STOP;
		netif_stop_queue(sp->dev);
	}
}
575
/*
 * Start transmission on every Tx queue of the device (counterpart of
 * s2io_stop_all_tx_queue). In single-queue mode every fifo's software
 * state is marked started before the netdev queue is enabled.
 */
static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
{
	int i;
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	if (sp->config.multiq) {
		for (i = 0; i < sp->config.tx_fifo_num; i++)
			netif_start_subqueue(sp->dev, i);
	} else
#endif
	{
		for (i = 0; i < sp->config.tx_fifo_num; i++)
			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
		netif_start_queue(sp->dev);
	}
}
591
/*
 * Start transmission on one Tx fifo (counterpart of s2io_stop_tx_queue).
 */
static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	if (sp->config.multiq)
		netif_start_subqueue(sp->dev, fifo_no);
	else
#endif
	{
		sp->mac_control.fifos[fifo_no].queue_state =
			FIFO_QUEUE_START;
		netif_start_queue(sp->dev);
	}
}
605
/*
 * Wake every Tx queue of the device. Unlike start, wake also reschedules
 * the queue's tx softirq if it had been stopped. In single-queue mode
 * every fifo's software state is marked started first.
 */
static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
{
	int i;
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	if (sp->config.multiq) {
		for (i = 0; i < sp->config.tx_fifo_num; i++)
			netif_wake_subqueue(sp->dev, i);
	} else
#endif
	{
		for (i = 0; i < sp->config.tx_fifo_num; i++)
			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
		netif_wake_queue(sp->dev);
	}
}
621
622 static inline void s2io_wake_tx_queue(
623         struct fifo_info *fifo, int cnt, u8 multiq)
624 {
625
626 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
627         if (multiq) {
628                 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
629                         netif_wake_subqueue(fifo->dev, fifo->fifo_no);
630         } else
631 #endif
632         if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
633                 if (netif_queue_stopped(fifo->dev)) {
634                         fifo->queue_state = FIFO_QUEUE_START;
635                         netif_wake_queue(fifo->dev);
636                 }
637         }
638 }
639
640 /**
641  * init_shared_mem - Allocation and Initialization of Memory
642  * @nic: Device private variable.
643  * Description: The function allocates all the memory areas shared
644  * between the NIC and the driver. This includes Tx descriptors,
645  * Rx descriptors and the statistics block.
646  */
647
648 static int init_shared_mem(struct s2io_nic *nic)
649 {
650         u32 size;
651         void *tmp_v_addr, *tmp_v_addr_next;
652         dma_addr_t tmp_p_addr, tmp_p_addr_next;
653         struct RxD_block *pre_rxd_blk = NULL;
654         int i, j, blk_cnt;
655         int lst_size, lst_per_page;
656         struct net_device *dev = nic->dev;
657         unsigned long tmp;
658         struct buffAdd *ba;
659
660         struct mac_info *mac_control;
661         struct config_param *config;
662         unsigned long long mem_allocated = 0;
663
664         mac_control = &nic->mac_control;
665         config = &nic->config;
666
667
668         /* Allocation and initialization of TXDLs in FIOFs */
669         size = 0;
670         for (i = 0; i < config->tx_fifo_num; i++) {
671                 size += config->tx_cfg[i].fifo_len;
672         }
673         if (size > MAX_AVAILABLE_TXDS) {
674                 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
675                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
676                 return -EINVAL;
677         }
678
679         size = 0;
680         for (i = 0; i < config->tx_fifo_num; i++) {
681                 size = config->tx_cfg[i].fifo_len;
682                 /*
683                  * Legal values are from 2 to 8192
684                  */
685                 if (size < 2) {
686                         DBG_PRINT(ERR_DBG, "s2io: Invalid fifo len (%d)", size);
687                         DBG_PRINT(ERR_DBG, "for fifo %d\n", i);
688                         DBG_PRINT(ERR_DBG, "s2io: Legal values for fifo len"
689                                 "are 2 to 8192\n");
690                         return -EINVAL;
691                 }
692         }
693
694         lst_size = (sizeof(struct TxD) * config->max_txds);
695         lst_per_page = PAGE_SIZE / lst_size;
696
697         for (i = 0; i < config->tx_fifo_num; i++) {
698                 int fifo_len = config->tx_cfg[i].fifo_len;
699                 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
700                 mac_control->fifos[i].list_info = kzalloc(list_holder_size,
701                                                           GFP_KERNEL);
702                 if (!mac_control->fifos[i].list_info) {
703                         DBG_PRINT(INFO_DBG,
704                                   "Malloc failed for list_info\n");
705                         return -ENOMEM;
706                 }
707                 mem_allocated += list_holder_size;
708         }
709         for (i = 0; i < config->tx_fifo_num; i++) {
710                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
711                                                 lst_per_page);
712                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
713                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
714                     config->tx_cfg[i].fifo_len - 1;
715                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
716                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
717                     config->tx_cfg[i].fifo_len - 1;
718                 mac_control->fifos[i].fifo_no = i;
719                 mac_control->fifos[i].nic = nic;
720                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
721                 mac_control->fifos[i].dev = dev;
722
723                 for (j = 0; j < page_num; j++) {
724                         int k = 0;
725                         dma_addr_t tmp_p;
726                         void *tmp_v;
727                         tmp_v = pci_alloc_consistent(nic->pdev,
728                                                      PAGE_SIZE, &tmp_p);
729                         if (!tmp_v) {
730                                 DBG_PRINT(INFO_DBG,
731                                           "pci_alloc_consistent ");
732                                 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
733                                 return -ENOMEM;
734                         }
735                         /* If we got a zero DMA address(can happen on
736                          * certain platforms like PPC), reallocate.
737                          * Store virtual address of page we don't want,
738                          * to be freed later.
739                          */
740                         if (!tmp_p) {
741                                 mac_control->zerodma_virt_addr = tmp_v;
742                                 DBG_PRINT(INIT_DBG,
743                                 "%s: Zero DMA address for TxDL. ", dev->name);
744                                 DBG_PRINT(INIT_DBG,
745                                 "Virtual address %p\n", tmp_v);
746                                 tmp_v = pci_alloc_consistent(nic->pdev,
747                                                      PAGE_SIZE, &tmp_p);
748                                 if (!tmp_v) {
749                                         DBG_PRINT(INFO_DBG,
750                                           "pci_alloc_consistent ");
751                                         DBG_PRINT(INFO_DBG, "failed for TxDL\n");
752                                         return -ENOMEM;
753                                 }
754                                 mem_allocated += PAGE_SIZE;
755                         }
756                         while (k < lst_per_page) {
757                                 int l = (j * lst_per_page) + k;
758                                 if (l == config->tx_cfg[i].fifo_len)
759                                         break;
760                                 mac_control->fifos[i].list_info[l].list_virt_addr =
761                                     tmp_v + (k * lst_size);
762                                 mac_control->fifos[i].list_info[l].list_phy_addr =
763                                     tmp_p + (k * lst_size);
764                                 k++;
765                         }
766                 }
767         }
768
769         for (i = 0; i < config->tx_fifo_num; i++) {
770                 size = config->tx_cfg[i].fifo_len;
771                 mac_control->fifos[i].ufo_in_band_v
772                         = kcalloc(size, sizeof(u64), GFP_KERNEL);
773                 if (!mac_control->fifos[i].ufo_in_band_v)
774                         return -ENOMEM;
775                 mem_allocated += (size * sizeof(u64));
776         }
777
778         /* Allocation and initialization of RXDs in Rings */
779         size = 0;
780         for (i = 0; i < config->rx_ring_num; i++) {
781                 if (config->rx_cfg[i].num_rxd %
782                     (rxd_count[nic->rxd_mode] + 1)) {
783                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
784                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
785                                   i);
786                         DBG_PRINT(ERR_DBG, "RxDs per Block");
787                         return FAILURE;
788                 }
789                 size += config->rx_cfg[i].num_rxd;
790                 mac_control->rings[i].block_count =
791                         config->rx_cfg[i].num_rxd /
792                         (rxd_count[nic->rxd_mode] + 1 );
793                 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
794                         mac_control->rings[i].block_count;
795         }
796         if (nic->rxd_mode == RXD_MODE_1)
797                 size = (size * (sizeof(struct RxD1)));
798         else
799                 size = (size * (sizeof(struct RxD3)));
800
801         for (i = 0; i < config->rx_ring_num; i++) {
802                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
803                 mac_control->rings[i].rx_curr_get_info.offset = 0;
804                 mac_control->rings[i].rx_curr_get_info.ring_len =
805                     config->rx_cfg[i].num_rxd - 1;
806                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
807                 mac_control->rings[i].rx_curr_put_info.offset = 0;
808                 mac_control->rings[i].rx_curr_put_info.ring_len =
809                     config->rx_cfg[i].num_rxd - 1;
810                 mac_control->rings[i].nic = nic;
811                 mac_control->rings[i].ring_no = i;
812                 mac_control->rings[i].lro = lro_enable;
813
814                 blk_cnt = config->rx_cfg[i].num_rxd /
815                                 (rxd_count[nic->rxd_mode] + 1);
816                 /*  Allocating all the Rx blocks */
817                 for (j = 0; j < blk_cnt; j++) {
818                         struct rx_block_info *rx_blocks;
819                         int l;
820
821                         rx_blocks = &mac_control->rings[i].rx_blocks[j];
822                         size = SIZE_OF_BLOCK; //size is always page size
823                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
824                                                           &tmp_p_addr);
825                         if (tmp_v_addr == NULL) {
826                                 /*
827                                  * In case of failure, free_shared_mem()
828                                  * is called, which should free any
829                                  * memory that was alloced till the
830                                  * failure happened.
831                                  */
832                                 rx_blocks->block_virt_addr = tmp_v_addr;
833                                 return -ENOMEM;
834                         }
835                         mem_allocated += size;
836                         memset(tmp_v_addr, 0, size);
837                         rx_blocks->block_virt_addr = tmp_v_addr;
838                         rx_blocks->block_dma_addr = tmp_p_addr;
839                         rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
840                                                   rxd_count[nic->rxd_mode],
841                                                   GFP_KERNEL);
842                         if (!rx_blocks->rxds)
843                                 return -ENOMEM;
844                         mem_allocated +=
845                         (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
846                         for (l=0; l<rxd_count[nic->rxd_mode];l++) {
847                                 rx_blocks->rxds[l].virt_addr =
848                                         rx_blocks->block_virt_addr +
849                                         (rxd_size[nic->rxd_mode] * l);
850                                 rx_blocks->rxds[l].dma_addr =
851                                         rx_blocks->block_dma_addr +
852                                         (rxd_size[nic->rxd_mode] * l);
853                         }
854                 }
855                 /* Interlinking all Rx Blocks */
856                 for (j = 0; j < blk_cnt; j++) {
857                         tmp_v_addr =
858                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
859                         tmp_v_addr_next =
860                                 mac_control->rings[i].rx_blocks[(j + 1) %
861                                               blk_cnt].block_virt_addr;
862                         tmp_p_addr =
863                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
864                         tmp_p_addr_next =
865                                 mac_control->rings[i].rx_blocks[(j + 1) %
866                                               blk_cnt].block_dma_addr;
867
868                         pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
869                         pre_rxd_blk->reserved_2_pNext_RxD_block =
870                             (unsigned long) tmp_v_addr_next;
871                         pre_rxd_blk->pNext_RxD_Blk_physical =
872                             (u64) tmp_p_addr_next;
873                 }
874         }
875         if (nic->rxd_mode == RXD_MODE_3B) {
876                 /*
877                  * Allocation of Storages for buffer addresses in 2BUFF mode
878                  * and the buffers as well.
879                  */
880                 for (i = 0; i < config->rx_ring_num; i++) {
881                         blk_cnt = config->rx_cfg[i].num_rxd /
882                            (rxd_count[nic->rxd_mode]+ 1);
883                         mac_control->rings[i].ba =
884                                 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
885                                      GFP_KERNEL);
886                         if (!mac_control->rings[i].ba)
887                                 return -ENOMEM;
888                         mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
889                         for (j = 0; j < blk_cnt; j++) {
890                                 int k = 0;
891                                 mac_control->rings[i].ba[j] =
892                                         kmalloc((sizeof(struct buffAdd) *
893                                                 (rxd_count[nic->rxd_mode] + 1)),
894                                                 GFP_KERNEL);
895                                 if (!mac_control->rings[i].ba[j])
896                                         return -ENOMEM;
897                                 mem_allocated += (sizeof(struct buffAdd) *  \
898                                         (rxd_count[nic->rxd_mode] + 1));
899                                 while (k != rxd_count[nic->rxd_mode]) {
900                                         ba = &mac_control->rings[i].ba[j][k];
901
902                                         ba->ba_0_org = (void *) kmalloc
903                                             (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
904                                         if (!ba->ba_0_org)
905                                                 return -ENOMEM;
906                                         mem_allocated +=
907                                                 (BUF0_LEN + ALIGN_SIZE);
908                                         tmp = (unsigned long)ba->ba_0_org;
909                                         tmp += ALIGN_SIZE;
910                                         tmp &= ~((unsigned long) ALIGN_SIZE);
911                                         ba->ba_0 = (void *) tmp;
912
913                                         ba->ba_1_org = (void *) kmalloc
914                                             (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
915                                         if (!ba->ba_1_org)
916                                                 return -ENOMEM;
917                                         mem_allocated
918                                                 += (BUF1_LEN + ALIGN_SIZE);
919                                         tmp = (unsigned long) ba->ba_1_org;
920                                         tmp += ALIGN_SIZE;
921                                         tmp &= ~((unsigned long) ALIGN_SIZE);
922                                         ba->ba_1 = (void *) tmp;
923                                         k++;
924                                 }
925                         }
926                 }
927         }
928
929         /* Allocation and initialization of Statistics block */
930         size = sizeof(struct stat_block);
931         mac_control->stats_mem = pci_alloc_consistent
932             (nic->pdev, size, &mac_control->stats_mem_phy);
933
934         if (!mac_control->stats_mem) {
935                 /*
936                  * In case of failure, free_shared_mem() is called, which
937                  * should free any memory that was alloced till the
938                  * failure happened.
939                  */
940                 return -ENOMEM;
941         }
942         mem_allocated += size;
943         mac_control->stats_mem_sz = size;
944
945         tmp_v_addr = mac_control->stats_mem;
946         mac_control->stats_info = (struct stat_block *) tmp_v_addr;
947         memset(tmp_v_addr, 0, size);
948         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
949                   (unsigned long long) tmp_p_addr);
950         mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
951         return SUCCESS;
952 }
953
954 /**
955  * free_shared_mem - Free the allocated Memory
956  * @nic:  Device private variable.
957  * Description: This function is to free all memory locations allocated by
958  * the init_shared_mem() function and return it to the kernel.
959  */
960
static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	struct mac_info *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;
	struct net_device *dev;
	int page_num = 0;

	if (!nic)
		return;

	dev = nic->dev;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*
	 * Size of one TxD list and how many lists fit in a page.  These
	 * must mirror the values computed in init_shared_mem(), since the
	 * frees below assume the same page layout as the allocations.
	 */
	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	/* Free the per-fifo TxD list pages (DMA-coherent). */
	for (i = 0; i < config->tx_fifo_num; i++) {
		page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
							lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			/*
			 * list_info == NULL means init_shared_mem() failed
			 * before allocating anything for this fifo.
			 * NOTE(review): the early return also skips the Rx,
			 * UFO and stats teardown below — presumably nothing
			 * after this point was allocated either; confirm
			 * against init_shared_mem()'s failure ordering.
			 */
			if (!mac_control->fifos[i].list_info)
				return;
			/*
			 * A NULL virt addr marks the first page that was
			 * never allocated for this fifo; later pages cannot
			 * exist, so stop scanning this fifo.
			 */
			if (!mac_control->fifos[i].list_info[mem_blks].
				 list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_virt_addr,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_phy_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed
						+= PAGE_SIZE;
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				"%s: Freeing TxDL with zero DMA addr. ",
				dev->name);
			DBG_PRINT(INIT_DBG, "Virtual address %p\n",
				mac_control->zerodma_virt_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed
						+= PAGE_SIZE;
		}
		kfree(mac_control->fifos[i].list_info);
		nic->mac_control.stats_info->sw_stat.mem_freed +=
		(nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
	}

	/* Free the Rx block DMA areas and their rxd bookkeeping arrays. */
	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt = mac_control->rings[i].block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
				block_virt_addr;
			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
				block_dma_addr;
			/* First never-allocated block ends this ring. */
			if (tmp_v_addr == NULL)
				break;
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed += size;
			kfree(mac_control->rings[i].rx_blocks[j].rxds);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
			( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
		}
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
			    (rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				/* Skip blocks whose ba[] never got built. */
				if (!mac_control->rings[i].ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba =
						&mac_control->rings[i].ba[j][k];
					/* ba_0_org/ba_1_org are the original
					 * (unaligned) kmalloc pointers; the
					 * aligned ba_0/ba_1 must not be
					 * freed. */
					kfree(ba->ba_0_org);
					nic->mac_control.stats_info->sw_stat.\
					mem_freed += (BUF0_LEN + ALIGN_SIZE);
					kfree(ba->ba_1_org);
					nic->mac_control.stats_info->sw_stat.\
					mem_freed += (BUF1_LEN + ALIGN_SIZE);
					k++;
				}
				kfree(mac_control->rings[i].ba[j]);
				nic->mac_control.stats_info->sw_stat.mem_freed +=
					(sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1));
			}
			kfree(mac_control->rings[i].ba);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
			(sizeof(struct buffAdd *) * blk_cnt);
		}
	}

	/* Release the per-fifo ufo_in_band_v shadow arrays. */
	for (i = 0; i < nic->config.tx_fifo_num; i++) {
		if (mac_control->fifos[i].ufo_in_band_v) {
			nic->mac_control.stats_info->sw_stat.mem_freed
				+= (config->tx_cfg[i].fifo_len * sizeof(u64));
			kfree(mac_control->fifos[i].ufo_in_band_v);
		}
	}

	/* Finally drop the statistics block itself.  mem_freed is updated
	 * before the free because stats_info lives inside stats_mem. */
	if (mac_control->stats_mem) {
		nic->mac_control.stats_info->sw_stat.mem_freed +=
			mac_control->stats_mem_sz;
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
}
1090
/**
 * s2io_verify_pci_mode - read the adapter's PCI/PCI-X operating mode.
 * Return: the mode extracted with GET_PCI_MODE(), or -1 if the hardware
 * reports an unknown mode.
 */
1094
1095 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1096 {
1097         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1098         register u64 val64 = 0;
1099         int     mode;
1100
1101         val64 = readq(&bar0->pci_mode);
1102         mode = (u8)GET_PCI_MODE(val64);
1103
1104         if ( val64 & PCI_MODE_UNKNOWN_MODE)
1105                 return -1;      /* Unknown PCI mode */
1106         return mode;
1107 }
1108
1109 #define NEC_VENID   0x1033
1110 #define NEC_DEVID   0x0125
1111 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1112 {
1113         struct pci_dev *tdev = NULL;
1114         while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1115                 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1116                         if (tdev->bus == s2io_pdev->bus->parent) {
1117                                 pci_dev_put(tdev);
1118                                 return 1;
1119                         }
1120                 }
1121         }
1122         return 0;
1123 }
1124
1125 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
/**
 * s2io_print_pci_mode - log the bus type, width and speed the adapter is on.
 * Also records the effective bus speed in config->bus_speed.
 * Return: the PCI mode on success, -1 for an unknown or unsupported mode.
 */
1129 static int s2io_print_pci_mode(struct s2io_nic *nic)
1130 {
1131         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1132         register u64 val64 = 0;
1133         int     mode;
1134         struct config_param *config = &nic->config;
1135
1136         val64 = readq(&bar0->pci_mode);
1137         mode = (u8)GET_PCI_MODE(val64);
1138
1139         if ( val64 & PCI_MODE_UNKNOWN_MODE)
1140                 return -1;      /* Unknown PCI mode */
1141
1142         config->bus_speed = bus_speed[mode];
1143
1144         if (s2io_on_nec_bridge(nic->pdev)) {
1145                 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1146                                                         nic->dev->name);
1147                 return mode;
1148         }
1149
1150         if (val64 & PCI_MODE_32_BITS) {
1151                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1152         } else {
1153                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1154         }
1155
1156         switch(mode) {
1157                 case PCI_MODE_PCI_33:
1158                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
1159                         break;
1160                 case PCI_MODE_PCI_66:
1161                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
1162                         break;
1163                 case PCI_MODE_PCIX_M1_66:
1164                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
1165                         break;
1166                 case PCI_MODE_PCIX_M1_100:
1167                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
1168                         break;
1169                 case PCI_MODE_PCIX_M1_133:
1170                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1171                         break;
1172                 case PCI_MODE_PCIX_M2_66:
1173                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1174                         break;
1175                 case PCI_MODE_PCIX_M2_100:
1176                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1177                         break;
1178                 case PCI_MODE_PCIX_M2_133:
1179                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1180                         break;
1181                 default:
1182                         return -1;      /* Unsupported bus speed */
1183         }
1184
1185         return mode;
1186 }
1187
1188 /**
1189  *  init_tti - Initialization transmit traffic interrupt scheme
1190  *  @nic: device private variable
1191  *  @link: link status (UP/DOWN) used to enable/disable continuous
1192  *  transmit interrupts
1193  *  Description: The function configures transmit traffic interrupts
1194  *  Return Value:  SUCCESS on success and
1195  *  '-1' on failure
1196  */
1197
static int init_tti(struct s2io_nic *nic, int link)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int i;
	struct config_param *config;

	config = &nic->config;

	/* Program one TTI (Tx traffic interrupt) entry per fifo:
	 * build data1/data2, then strobe the command register and
	 * wait for the write to complete before the next entry. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		/*
		 * TTI Initialization. Default Tx timer gets us about
		 * 250 interrupts per sec. Continuous interrupts are enabled
		 * by default.
		 */
		if (nic->device_type == XFRAME_II_DEVICE) {
			/* Xframe II: scale the timer with the detected
			 * bus speed (set by s2io_print_pci_mode()). */
			int count = (nic->config.bus_speed * 125)/2;
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
		} else
			/* Xframe I: fixed timer value. */
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);

		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
				TTI_DATA1_MEM_TX_URNG_B(0x10) |
				TTI_DATA1_MEM_TX_URNG_C(0x30) |
				TTI_DATA1_MEM_TX_TIMER_AC_EN;
		/* Continuous interrupts only on fifo 0, only when the
		 * module parameter allows it and the link is up. */
		if (i == 0)
			if (use_continuous_tx_intrs && (link == LINK_UP))
				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
		writeq(val64, &bar0->tti_data1_mem);

		if (nic->config.intr_type == MSI_X) {
			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
				TTI_DATA2_MEM_TX_UFC_B(0x100) |
				TTI_DATA2_MEM_TX_UFC_C(0x200) |
				TTI_DATA2_MEM_TX_UFC_D(0x300);
		} else {
			/* With default Tx steering, the UDP fifo range
			 * [udp_fifo_idx, udp_fifo_idx + total_udp_fifos)
			 * gets higher utilization-based thresholds. */
			if ((nic->config.tx_steering_type ==
				TX_DEFAULT_STEERING) &&
				(config->tx_fifo_num > 1) &&
				(i >= nic->udp_fifo_idx) &&
				(i < (nic->udp_fifo_idx +
				nic->total_udp_fifos)))
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
					TTI_DATA2_MEM_TX_UFC_B(0x80) |
					TTI_DATA2_MEM_TX_UFC_C(0x100) |
					TTI_DATA2_MEM_TX_UFC_D(0x120);
			else
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
					TTI_DATA2_MEM_TX_UFC_B(0x20) |
					TTI_DATA2_MEM_TX_UFC_C(0x40) |
					TTI_DATA2_MEM_TX_UFC_D(0x80);
		}

		writeq(val64, &bar0->tti_data2_mem);

		/* Commit the entry at offset i and wait for the strobe
		 * bit to clear; a timeout aborts initialization. */
		val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD |
				TTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->tti_command_mem);

		if (wait_for_cmd_complete(&bar0->tti_command_mem,
			TTI_CMD_MEM_STROBE_NEW_CMD, S2IO_BIT_RESET) != SUCCESS)
			return FAILURE;
	}

	return SUCCESS;
}
1264
1265 /**
1266  *  init_nic - Initialization of hardware
1267  *  @nic: device private variable
1268  *  Description: The function sequentially configures every block
1269  *  of the H/W from their reset values.
1270  *  Return Value:  SUCCESS on success and
1271  *  '-1' on failure (endian settings incorrect).
1272  */
1273
1274 static int init_nic(struct s2io_nic *nic)
1275 {
1276         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1277         struct net_device *dev = nic->dev;
1278         register u64 val64 = 0;
1279         void __iomem *add;
1280         u32 time;
1281         int i, j;
1282         struct mac_info *mac_control;
1283         struct config_param *config;
1284         int dtx_cnt = 0;
1285         unsigned long long mem_share;
1286         int mem_size;
1287
1288         mac_control = &nic->mac_control;
1289         config = &nic->config;
1290
1291         /* to set the swapper controle on the card */
1292         if(s2io_set_swapper(nic)) {
1293                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1294                 return -EIO;
1295         }
1296
1297         /*
1298          * Herc requires EOI to be removed from reset before XGXS, so..
1299          */
1300         if (nic->device_type & XFRAME_II_DEVICE) {
1301                 val64 = 0xA500000000ULL;
1302                 writeq(val64, &bar0->sw_reset);
1303                 msleep(500);
1304                 val64 = readq(&bar0->sw_reset);
1305         }
1306
1307         /* Remove XGXS from reset state */
1308         val64 = 0;
1309         writeq(val64, &bar0->sw_reset);
1310         msleep(500);
1311         val64 = readq(&bar0->sw_reset);
1312
1313         /* Ensure that it's safe to access registers by checking
1314          * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1315          */
1316         if (nic->device_type == XFRAME_II_DEVICE) {
1317                 for (i = 0; i < 50; i++) {
1318                         val64 = readq(&bar0->adapter_status);
1319                         if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1320                                 break;
1321                         msleep(10);
1322                 }
1323                 if (i == 50)
1324                         return -ENODEV;
1325         }
1326
1327         /*  Enable Receiving broadcasts */
1328         add = &bar0->mac_cfg;
1329         val64 = readq(&bar0->mac_cfg);
1330         val64 |= MAC_RMAC_BCAST_ENABLE;
1331         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1332         writel((u32) val64, add);
1333         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1334         writel((u32) (val64 >> 32), (add + 4));
1335
1336         /* Read registers in all blocks */
1337         val64 = readq(&bar0->mac_int_mask);
1338         val64 = readq(&bar0->mc_int_mask);
1339         val64 = readq(&bar0->xgxs_int_mask);
1340
1341         /*  Set MTU */
1342         val64 = dev->mtu;
1343         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1344
1345         if (nic->device_type & XFRAME_II_DEVICE) {
1346                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1347                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1348                                           &bar0->dtx_control, UF);
1349                         if (dtx_cnt & 0x1)
1350                                 msleep(1); /* Necessary!! */
1351                         dtx_cnt++;
1352                 }
1353         } else {
1354                 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1355                         SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1356                                           &bar0->dtx_control, UF);
1357                         val64 = readq(&bar0->dtx_control);
1358                         dtx_cnt++;
1359                 }
1360         }
1361
1362         /*  Tx DMA Initialization */
1363         val64 = 0;
1364         writeq(val64, &bar0->tx_fifo_partition_0);
1365         writeq(val64, &bar0->tx_fifo_partition_1);
1366         writeq(val64, &bar0->tx_fifo_partition_2);
1367         writeq(val64, &bar0->tx_fifo_partition_3);
1368
1369
1370         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1371                 val64 |=
1372                     vBIT(config->tx_cfg[i].fifo_len - 1, ((j * 32) + 19),
1373                          13) | vBIT(config->tx_cfg[i].fifo_priority,
1374                                     ((j * 32) + 5), 3);
1375
1376                 if (i == (config->tx_fifo_num - 1)) {
1377                         if (i % 2 == 0)
1378                                 i++;
1379                 }
1380
1381                 switch (i) {
1382                 case 1:
1383                         writeq(val64, &bar0->tx_fifo_partition_0);
1384                         val64 = 0;
1385                         j = 0;
1386                         break;
1387                 case 3:
1388                         writeq(val64, &bar0->tx_fifo_partition_1);
1389                         val64 = 0;
1390                         j = 0;
1391                         break;
1392                 case 5:
1393                         writeq(val64, &bar0->tx_fifo_partition_2);
1394                         val64 = 0;
1395                         j = 0;
1396                         break;
1397                 case 7:
1398                         writeq(val64, &bar0->tx_fifo_partition_3);
1399                         val64 = 0;
1400                         j = 0;
1401                         break;
1402                 default:
1403                         j++;
1404                         break;
1405                 }
1406         }
1407
1408         /*
1409          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1410          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1411          */
1412         if ((nic->device_type == XFRAME_I_DEVICE) &&
1413                 (nic->pdev->revision < 4))
1414                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1415
1416         val64 = readq(&bar0->tx_fifo_partition_0);
1417         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1418                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1419
1420         /*
1421          * Initialization of Tx_PA_CONFIG register to ignore packet
1422          * integrity checking.
1423          */
1424         val64 = readq(&bar0->tx_pa_cfg);
1425         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1426             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1427         writeq(val64, &bar0->tx_pa_cfg);
1428
1429         /* Rx DMA intialization. */
1430         val64 = 0;
1431         for (i = 0; i < config->rx_ring_num; i++) {
1432                 val64 |=
1433                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1434                          3);
1435         }
1436         writeq(val64, &bar0->rx_queue_priority);
1437
1438         /*
1439          * Allocating equal share of memory to all the
1440          * configured Rings.
1441          */
1442         val64 = 0;
1443         if (nic->device_type & XFRAME_II_DEVICE)
1444                 mem_size = 32;
1445         else
1446                 mem_size = 64;
1447
1448         for (i = 0; i < config->rx_ring_num; i++) {
1449                 switch (i) {
1450                 case 0:
1451                         mem_share = (mem_size / config->rx_ring_num +
1452                                      mem_size % config->rx_ring_num);
1453                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1454                         continue;
1455                 case 1:
1456                         mem_share = (mem_size / config->rx_ring_num);
1457                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1458                         continue;
1459                 case 2:
1460                         mem_share = (mem_size / config->rx_ring_num);
1461                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1462                         continue;
1463                 case 3:
1464                         mem_share = (mem_size / config->rx_ring_num);
1465                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1466                         continue;
1467                 case 4:
1468                         mem_share = (mem_size / config->rx_ring_num);
1469                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1470                         continue;
1471                 case 5:
1472                         mem_share = (mem_size / config->rx_ring_num);
1473                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1474                         continue;
1475                 case 6:
1476                         mem_share = (mem_size / config->rx_ring_num);
1477                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1478                         continue;
1479                 case 7:
1480                         mem_share = (mem_size / config->rx_ring_num);
1481                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1482                         continue;
1483                 }
1484         }
1485         writeq(val64, &bar0->rx_queue_cfg);
1486
1487         /*
1488          * Filling Tx round robin registers
1489          * as per the number of FIFOs for equal scheduling priority
1490          */
1491         switch (config->tx_fifo_num) {
1492         case 1:
1493                 val64 = 0x0;
1494                 writeq(val64, &bar0->tx_w_round_robin_0);
1495                 writeq(val64, &bar0->tx_w_round_robin_1);
1496                 writeq(val64, &bar0->tx_w_round_robin_2);
1497                 writeq(val64, &bar0->tx_w_round_robin_3);
1498                 writeq(val64, &bar0->tx_w_round_robin_4);
1499                 break;
1500         case 2:
1501                 val64 = 0x0001000100010001ULL;
1502                 writeq(val64, &bar0->tx_w_round_robin_0);
1503                 writeq(val64, &bar0->tx_w_round_robin_1);
1504                 writeq(val64, &bar0->tx_w_round_robin_2);
1505                 writeq(val64, &bar0->tx_w_round_robin_3);
1506                 val64 = 0x0001000100000000ULL;
1507                 writeq(val64, &bar0->tx_w_round_robin_4);
1508                 break;
1509         case 3:
1510                 val64 = 0x0001020001020001ULL;
1511                 writeq(val64, &bar0->tx_w_round_robin_0);
1512                 val64 = 0x0200010200010200ULL;
1513                 writeq(val64, &bar0->tx_w_round_robin_1);
1514                 val64 = 0x0102000102000102ULL;
1515                 writeq(val64, &bar0->tx_w_round_robin_2);
1516                 val64 = 0x0001020001020001ULL;
1517                 writeq(val64, &bar0->tx_w_round_robin_3);
1518                 val64 = 0x0200010200000000ULL;
1519                 writeq(val64, &bar0->tx_w_round_robin_4);
1520                 break;
1521         case 4:
1522                 val64 = 0x0001020300010203ULL;
1523                 writeq(val64, &bar0->tx_w_round_robin_0);
1524                 writeq(val64, &bar0->tx_w_round_robin_1);
1525                 writeq(val64, &bar0->tx_w_round_robin_2);
1526                 writeq(val64, &bar0->tx_w_round_robin_3);
1527                 val64 = 0x0001020300000000ULL;
1528                 writeq(val64, &bar0->tx_w_round_robin_4);
1529                 break;
1530         case 5:
1531                 val64 = 0x0001020304000102ULL;
1532                 writeq(val64, &bar0->tx_w_round_robin_0);
1533                 val64 = 0x0304000102030400ULL;
1534                 writeq(val64, &bar0->tx_w_round_robin_1);
1535                 val64 = 0x0102030400010203ULL;
1536                 writeq(val64, &bar0->tx_w_round_robin_2);
1537                 val64 = 0x0400010203040001ULL;
1538                 writeq(val64, &bar0->tx_w_round_robin_3);
1539                 val64 = 0x0203040000000000ULL;
1540                 writeq(val64, &bar0->tx_w_round_robin_4);
1541                 break;
1542         case 6:
1543                 val64 = 0x0001020304050001ULL;
1544                 writeq(val64, &bar0->tx_w_round_robin_0);
1545                 val64 = 0x0203040500010203ULL;
1546                 writeq(val64, &bar0->tx_w_round_robin_1);
1547                 val64 = 0x0405000102030405ULL;
1548                 writeq(val64, &bar0->tx_w_round_robin_2);
1549                 val64 = 0x0001020304050001ULL;
1550                 writeq(val64, &bar0->tx_w_round_robin_3);
1551                 val64 = 0x0203040500000000ULL;
1552                 writeq(val64, &bar0->tx_w_round_robin_4);
1553                 break;
1554         case 7:
1555                 val64 = 0x0001020304050600ULL;
1556                 writeq(val64, &bar0->tx_w_round_robin_0);
1557                 val64 = 0x0102030405060001ULL;
1558                 writeq(val64, &bar0->tx_w_round_robin_1);
1559                 val64 = 0x0203040506000102ULL;
1560                 writeq(val64, &bar0->tx_w_round_robin_2);
1561                 val64 = 0x0304050600010203ULL;
1562                 writeq(val64, &bar0->tx_w_round_robin_3);
1563                 val64 = 0x0405060000000000ULL;
1564                 writeq(val64, &bar0->tx_w_round_robin_4);
1565                 break;
1566         case 8:
1567                 val64 = 0x0001020304050607ULL;
1568                 writeq(val64, &bar0->tx_w_round_robin_0);
1569                 writeq(val64, &bar0->tx_w_round_robin_1);
1570                 writeq(val64, &bar0->tx_w_round_robin_2);
1571                 writeq(val64, &bar0->tx_w_round_robin_3);
1572                 val64 = 0x0001020300000000ULL;
1573                 writeq(val64, &bar0->tx_w_round_robin_4);
1574                 break;
1575         }
1576
1577         /* Enable all configured Tx FIFO partitions */
1578         val64 = readq(&bar0->tx_fifo_partition_0);
1579         val64 |= (TX_FIFO_PARTITION_EN);
1580         writeq(val64, &bar0->tx_fifo_partition_0);
1581
1582         /* Filling the Rx round robin registers as per the
1583          * number of Rings and steering based on QoS with
1584          * equal priority.
1585          */
1586         switch (config->rx_ring_num) {
1587         case 1:
1588                 val64 = 0x0;
1589                 writeq(val64, &bar0->rx_w_round_robin_0);
1590                 writeq(val64, &bar0->rx_w_round_robin_1);
1591                 writeq(val64, &bar0->rx_w_round_robin_2);
1592                 writeq(val64, &bar0->rx_w_round_robin_3);
1593                 writeq(val64, &bar0->rx_w_round_robin_4);
1594
1595                 val64 = 0x8080808080808080ULL;
1596                 writeq(val64, &bar0->rts_qos_steering);
1597                 break;
1598         case 2:
1599                 val64 = 0x0001000100010001ULL;
1600                 writeq(val64, &bar0->rx_w_round_robin_0);
1601                 writeq(val64, &bar0->rx_w_round_robin_1);
1602                 writeq(val64, &bar0->rx_w_round_robin_2);
1603                 writeq(val64, &bar0->rx_w_round_robin_3);
1604                 val64 = 0x0001000100000000ULL;
1605                 writeq(val64, &bar0->rx_w_round_robin_4);
1606
1607                 val64 = 0x8080808040404040ULL;
1608                 writeq(val64, &bar0->rts_qos_steering);
1609                 break;
1610         case 3:
1611                 val64 = 0x0001020001020001ULL;
1612                 writeq(val64, &bar0->rx_w_round_robin_0);
1613                 val64 = 0x0200010200010200ULL;
1614                 writeq(val64, &bar0->rx_w_round_robin_1);
1615                 val64 = 0x0102000102000102ULL;
1616                 writeq(val64, &bar0->rx_w_round_robin_2);
1617                 val64 = 0x0001020001020001ULL;
1618                 writeq(val64, &bar0->rx_w_round_robin_3);
1619                 val64 = 0x0200010200000000ULL;
1620                 writeq(val64, &bar0->rx_w_round_robin_4);
1621
1622                 val64 = 0x8080804040402020ULL;
1623                 writeq(val64, &bar0->rts_qos_steering);
1624                 break;
1625         case 4:
1626                 val64 = 0x0001020300010203ULL;
1627                 writeq(val64, &bar0->rx_w_round_robin_0);
1628                 writeq(val64, &bar0->rx_w_round_robin_1);
1629                 writeq(val64, &bar0->rx_w_round_robin_2);
1630                 writeq(val64, &bar0->rx_w_round_robin_3);
1631                 val64 = 0x0001020300000000ULL;
1632                 writeq(val64, &bar0->rx_w_round_robin_4);
1633
1634                 val64 = 0x8080404020201010ULL;
1635                 writeq(val64, &bar0->rts_qos_steering);
1636                 break;
1637         case 5:
1638                 val64 = 0x0001020304000102ULL;
1639                 writeq(val64, &bar0->rx_w_round_robin_0);
1640                 val64 = 0x0304000102030400ULL;
1641                 writeq(val64, &bar0->rx_w_round_robin_1);
1642                 val64 = 0x0102030400010203ULL;
1643                 writeq(val64, &bar0->rx_w_round_robin_2);
1644                 val64 = 0x0400010203040001ULL;
1645                 writeq(val64, &bar0->rx_w_round_robin_3);
1646                 val64 = 0x0203040000000000ULL;
1647                 writeq(val64, &bar0->rx_w_round_robin_4);
1648
1649                 val64 = 0x8080404020201008ULL;
1650                 writeq(val64, &bar0->rts_qos_steering);
1651                 break;
1652         case 6:
1653                 val64 = 0x0001020304050001ULL;
1654                 writeq(val64, &bar0->rx_w_round_robin_0);
1655                 val64 = 0x0203040500010203ULL;
1656                 writeq(val64, &bar0->rx_w_round_robin_1);
1657                 val64 = 0x0405000102030405ULL;
1658                 writeq(val64, &bar0->rx_w_round_robin_2);
1659                 val64 = 0x0001020304050001ULL;
1660                 writeq(val64, &bar0->rx_w_round_robin_3);
1661                 val64 = 0x0203040500000000ULL;
1662                 writeq(val64, &bar0->rx_w_round_robin_4);
1663
1664                 val64 = 0x8080404020100804ULL;
1665                 writeq(val64, &bar0->rts_qos_steering);
1666                 break;
1667         case 7:
1668                 val64 = 0x0001020304050600ULL;
1669                 writeq(val64, &bar0->rx_w_round_robin_0);
1670                 val64 = 0x0102030405060001ULL;
1671                 writeq(val64, &bar0->rx_w_round_robin_1);
1672                 val64 = 0x0203040506000102ULL;
1673                 writeq(val64, &bar0->rx_w_round_robin_2);
1674                 val64 = 0x0304050600010203ULL;
1675                 writeq(val64, &bar0->rx_w_round_robin_3);
1676                 val64 = 0x0405060000000000ULL;
1677                 writeq(val64, &bar0->rx_w_round_robin_4);
1678
1679                 val64 = 0x8080402010080402ULL;
1680                 writeq(val64, &bar0->rts_qos_steering);
1681                 break;
1682         case 8:
1683                 val64 = 0x0001020304050607ULL;
1684                 writeq(val64, &bar0->rx_w_round_robin_0);
1685                 writeq(val64, &bar0->rx_w_round_robin_1);
1686                 writeq(val64, &bar0->rx_w_round_robin_2);
1687                 writeq(val64, &bar0->rx_w_round_robin_3);
1688                 val64 = 0x0001020300000000ULL;
1689                 writeq(val64, &bar0->rx_w_round_robin_4);
1690
1691                 val64 = 0x8040201008040201ULL;
1692                 writeq(val64, &bar0->rts_qos_steering);
1693                 break;
1694         }
1695
1696         /* UDP Fix */
1697         val64 = 0;
1698         for (i = 0; i < 8; i++)
1699                 writeq(val64, &bar0->rts_frm_len_n[i]);
1700
1701         /* Set the default rts frame length for the rings configured */
1702         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1703         for (i = 0 ; i < config->rx_ring_num ; i++)
1704                 writeq(val64, &bar0->rts_frm_len_n[i]);
1705
1706         /* Set the frame length for the configured rings
1707          * desired by the user
1708          */
1709         for (i = 0; i < config->rx_ring_num; i++) {
1710                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1711                  * specified frame length steering.
1712                  * If the user provides the frame length then program
1713                  * the rts_frm_len register for those values or else
1714                  * leave it as it is.
1715                  */
1716                 if (rts_frm_len[i] != 0) {
1717                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1718                                 &bar0->rts_frm_len_n[i]);
1719                 }
1720         }
1721
1722         /* Disable differentiated services steering logic */
1723         for (i = 0; i < 64; i++) {
1724                 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1725                         DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1726                                 dev->name);
1727                         DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1728                         return -ENODEV;
1729                 }
1730         }
1731
1732         /* Program statistics memory */
1733         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1734
1735         if (nic->device_type == XFRAME_II_DEVICE) {
1736                 val64 = STAT_BC(0x320);
1737                 writeq(val64, &bar0->stat_byte_cnt);
1738         }
1739
1740         /*
1741          * Initializing the sampling rate for the device to calculate the
1742          * bandwidth utilization.
1743          */
1744         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1745             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1746         writeq(val64, &bar0->mac_link_util);
1747
1748         /*
1749          * Initializing the Transmit and Receive Traffic Interrupt
1750          * Scheme.
1751          */
1752
1753         /* Initialize TTI */
1754         if (SUCCESS != init_tti(nic, nic->last_link_state))
1755                 return -ENODEV;
1756
1757         /* RTI Initialization */
1758         if (nic->device_type == XFRAME_II_DEVICE) {
1759                 /*
1760                  * Programmed to generate Apprx 500 Intrs per
1761                  * second
1762                  */
1763                 int count = (nic->config.bus_speed * 125)/4;
1764                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1765         } else
1766                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1767         val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1768                  RTI_DATA1_MEM_RX_URNG_B(0x10) |
1769                  RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1770
1771         writeq(val64, &bar0->rti_data1_mem);
1772
1773         val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1774                 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1775         if (nic->config.intr_type == MSI_X)
1776             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1777                         RTI_DATA2_MEM_RX_UFC_D(0x40));
1778         else
1779             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1780                         RTI_DATA2_MEM_RX_UFC_D(0x80));
1781         writeq(val64, &bar0->rti_data2_mem);
1782
1783         for (i = 0; i < config->rx_ring_num; i++) {
1784                 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1785                                 | RTI_CMD_MEM_OFFSET(i);
1786                 writeq(val64, &bar0->rti_command_mem);
1787
1788                 /*
1789                  * Once the operation completes, the Strobe bit of the
1790                  * command register will be reset. We poll for this
1791                  * particular condition. We wait for a maximum of 500ms
1792                  * for the operation to complete, if it's not complete
1793                  * by then we return error.
1794                  */
1795                 time = 0;
1796                 while (TRUE) {
1797                         val64 = readq(&bar0->rti_command_mem);
1798                         if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1799                                 break;
1800
1801                         if (time > 10) {
1802                                 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1803                                           dev->name);
1804                                 return -ENODEV;
1805                         }
1806                         time++;
1807                         msleep(50);
1808                 }
1809         }
1810
1811         /*
1812          * Initializing proper values as Pause threshold into all
1813          * the 8 Queues on Rx side.
1814          */
1815         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1816         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1817
1818         /* Disable RMAC PAD STRIPPING */
1819         add = &bar0->mac_cfg;
1820         val64 = readq(&bar0->mac_cfg);
1821         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1822         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1823         writel((u32) (val64), add);
1824         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1825         writel((u32) (val64 >> 32), (add + 4));
1826         val64 = readq(&bar0->mac_cfg);
1827
1828         /* Enable FCS stripping by adapter */
1829         add = &bar0->mac_cfg;
1830         val64 = readq(&bar0->mac_cfg);
1831         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1832         if (nic->device_type == XFRAME_II_DEVICE)
1833                 writeq(val64, &bar0->mac_cfg);
1834         else {
1835                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1836                 writel((u32) (val64), add);
1837                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1838                 writel((u32) (val64 >> 32), (add + 4));
1839         }
1840
1841         /*
1842          * Set the time value to be inserted in the pause frame
1843          * generated by xena.
1844          */
1845         val64 = readq(&bar0->rmac_pause_cfg);
1846         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1847         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1848         writeq(val64, &bar0->rmac_pause_cfg);
1849
1850         /*
1851          * Set the Threshold Limit for Generating the pause frame
1852          * If the amount of data in any Queue exceeds ratio of
1853          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1854          * pause frame is generated
1855          */
1856         val64 = 0;
1857         for (i = 0; i < 4; i++) {
1858                 val64 |=
1859                     (((u64) 0xFF00 | nic->mac_control.
1860                       mc_pause_threshold_q0q3)
1861                      << (i * 2 * 8));
1862         }
1863         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1864
1865         val64 = 0;
1866         for (i = 0; i < 4; i++) {
1867                 val64 |=
1868                     (((u64) 0xFF00 | nic->mac_control.
1869                       mc_pause_threshold_q4q7)
1870                      << (i * 2 * 8));
1871         }
1872         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1873
1874         /*
1875          * TxDMA will stop Read request if the number of read split has
1876          * exceeded the limit pointed by shared_splits
1877          */
1878         val64 = readq(&bar0->pic_control);
1879         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1880         writeq(val64, &bar0->pic_control);
1881
1882         if (nic->config.bus_speed == 266) {
1883                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1884                 writeq(0x0, &bar0->read_retry_delay);
1885                 writeq(0x0, &bar0->write_retry_delay);
1886         }
1887
1888         /*
1889          * Programming the Herc to split every write transaction
1890          * that does not start on an ADB to reduce disconnects.
1891          */
1892         if (nic->device_type == XFRAME_II_DEVICE) {
1893                 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1894                         MISC_LINK_STABILITY_PRD(3);
1895                 writeq(val64, &bar0->misc_control);
1896                 val64 = readq(&bar0->pic_control2);
1897                 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1898                 writeq(val64, &bar0->pic_control2);
1899         }
1900         if (strstr(nic->product_name, "CX4")) {
1901                 val64 = TMAC_AVG_IPG(0x17);
1902                 writeq(val64, &bar0->tmac_avg_ipg);
1903         }
1904
1905         return SUCCESS;
1906 }
1907 #define LINK_UP_DOWN_INTERRUPT          1
1908 #define MAC_RMAC_ERR_TIMER              2
1909
1910 static int s2io_link_fault_indication(struct s2io_nic *nic)
1911 {
1912         if (nic->config.intr_type != INTA)
1913                 return MAC_RMAC_ERR_TIMER;
1914         if (nic->device_type == XFRAME_II_DEVICE)
1915                 return LINK_UP_DOWN_INTERRUPT;
1916         else
1917                 return MAC_RMAC_ERR_TIMER;
1918 }
1919
1920 /**
1921  *  do_s2io_write_bits -  update alarm bits in alarm register
1922  *  @value: alarm bits
1923  *  @flag: interrupt status
1924  *  @addr: address value
1925  *  Description: update alarm bits in alarm register
1926  *  Return Value:
1927  *  NONE.
1928  */
1929 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1930 {
1931         u64 temp64;
1932
1933         temp64 = readq(addr);
1934
1935         if(flag == ENABLE_INTRS)
1936                 temp64 &= ~((u64) value);
1937         else
1938                 temp64 |= ((u64) value);
1939         writeq(temp64, addr);
1940 }
1941
/*
 * en_dis_err_alarms - enable or disable error/alarm interrupt sources
 * @nic: device private structure
 * @mask: bitmask selecting which interrupt blocks to modify
 *        (TX_DMA_INTR, TX_MAC_INTR, TX_XGXS_INTR, RX_DMA_INTR,
 *         RX_MAC_INTR, RX_XGXS_INTR, MC_INTR)
 * @flag: ENABLE_INTRS or DISABLE_INTRS
 *
 * For every block selected in @mask, programs that block's alarm mask
 * registers via do_s2io_write_bits() (which clears mask bits on enable
 * and sets them on disable) and accumulates the matching top-level bits
 * into nic->general_int_mask for later use by en_dis_able_nic_intrs().
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        register u64 gen_int_mask = 0;

        /* Tx DMA alarms: TDA, PFC, PCC, TTI, LSO, TPA and SM sub-blocks */
        if (mask & TX_DMA_INTR) {

                gen_int_mask |= TXDMA_INT_M;

                do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
                                TXDMA_PCC_INT | TXDMA_TTI_INT |
                                TXDMA_LSO_INT | TXDMA_TPA_INT |
                                TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

                do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
                                PFC_MISC_0_ERR | PFC_MISC_1_ERR |
                                PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
                                &bar0->pfc_err_mask);

                do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
                                TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
                                TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

                do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
                                PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
                                PCC_N_SERR | PCC_6_COF_OV_ERR |
                                PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
                                PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
                                PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);

                do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
                                TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

                do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
                                LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
                                LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
                                flag, &bar0->lso_err_mask);

                do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
                                flag, &bar0->tpa_err_mask);

                do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);

        }

        /* Tx MAC (TMAC) alarms */
        if (mask & TX_MAC_INTR) {
                gen_int_mask |= TXMAC_INT_M;
                do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
                                &bar0->mac_int_mask);
                do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
                                TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
                                TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
                                flag, &bar0->mac_tmac_err_mask);
        }

        /* Tx-side XGXS alarms */
        if (mask & TX_XGXS_INTR) {
                gen_int_mask |= TXXGXS_INT_M;
                do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
                                &bar0->xgxs_int_mask);
                do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
                                TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
                                flag, &bar0->xgxs_txgxs_err_mask);
        }

        /* Rx DMA alarms: RC, PRC, RPA, RDA and RTI sub-blocks */
        if (mask & RX_DMA_INTR) {
                gen_int_mask |= RXDMA_INT_M;
                do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
                                RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
                                flag, &bar0->rxdma_int_mask);
                do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
                                RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
                                RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
                                RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
                do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
                                PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
                                PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
                                &bar0->prc_pcix_err_mask);
                do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
                                RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
                                &bar0->rpa_err_mask);
                do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
                                RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
                                RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
                                RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
                                flag, &bar0->rda_err_mask);
                do_s2io_write_bits(RTI_SM_ERR_ALARM |
                                RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
                                flag, &bar0->rti_err_mask);
        }

        /* Rx MAC (RMAC) alarms, including link state change */
        if (mask & RX_MAC_INTR) {
                gen_int_mask |= RXMAC_INT_M;
                do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
                                &bar0->mac_int_mask);
                do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
                                RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
                                RMAC_DOUBLE_ECC_ERR |
                                RMAC_LINK_STATE_CHANGE_INT,
                                flag, &bar0->mac_rmac_err_mask);
        }

        /* Rx-side XGXS alarms */
        if (mask & RX_XGXS_INTR)
        {
                gen_int_mask |= RXXGXS_INT_M;
                do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
                                &bar0->xgxs_int_mask);
                do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
                                &bar0->xgxs_rxgxs_err_mask);
        }

        /* Memory controller alarms: state machine, ECC and PLL lock */
        if (mask & MC_INTR) {
                gen_int_mask |= MC_INT_M;
                do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
                do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
                                MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
                                &bar0->mc_err_mask);
        }
        nic->general_int_mask = gen_int_mask;

        /* Remove this line when alarm interrupts are enabled */
        nic->general_int_mask = 0;
}
2064 /**
2065  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
2066  *  @nic: device private variable,
2067  *  @mask: A mask indicating which Intr block must be modified and,
2068  *  @flag: A flag indicating whether to enable or disable the Intrs.
2069  *  Description: This function will either disable or enable the interrupts
2070  *  depending on the flag argument. The mask argument can be used to
2071  *  enable/disable any Intr block.
2072  *  Return Value: NONE.
2073  */
2074
2075 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
2076 {
2077         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2078         register u64 temp64 = 0, intr_mask = 0;
2079
2080         intr_mask = nic->general_int_mask;
2081
2082         /*  Top level interrupt classification */
2083         /*  PIC Interrupts */
2084         if (mask & TX_PIC_INTR) {
2085                 /*  Enable PIC Intrs in the general intr mask register */
2086                 intr_mask |= TXPIC_INT_M;
2087                 if (flag == ENABLE_INTRS) {
2088                         /*
2089                          * If Hercules adapter enable GPIO otherwise
2090                          * disable all PCIX, Flash, MDIO, IIC and GPIO
2091                          * interrupts for now.
2092                          * TODO
2093                          */
2094                         if (s2io_link_fault_indication(nic) ==
2095                                         LINK_UP_DOWN_INTERRUPT ) {
2096                                 do_s2io_write_bits(PIC_INT_GPIO, flag,
2097                                                 &bar0->pic_int_mask);
2098                                 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2099                                                 &bar0->gpio_int_mask);
2100                         } else
2101                                 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2102                 } else if (flag == DISABLE_INTRS) {
2103                         /*
2104                          * Disable PIC Intrs in the general
2105                          * intr mask register
2106                          */
2107                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2108                 }
2109         }
2110
2111         /*  Tx traffic interrupts */
2112         if (mask & TX_TRAFFIC_INTR) {
2113                 intr_mask |= TXTRAFFIC_INT_M;
2114                 if (flag == ENABLE_INTRS) {
2115                         /*
2116                          * Enable all the Tx side interrupts
2117                          * writing 0 Enables all 64 TX interrupt levels
2118                          */
2119                         writeq(0x0, &bar0->tx_traffic_mask);
2120                 } else if (flag == DISABLE_INTRS) {
2121                         /*
2122                          * Disable Tx Traffic Intrs in the general intr mask
2123                          * register.
2124                          */
2125                         writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2126                 }
2127         }
2128
2129         /*  Rx traffic interrupts */
2130         if (mask & RX_TRAFFIC_INTR) {
2131                 intr_mask |= RXTRAFFIC_INT_M;
2132                 if (flag == ENABLE_INTRS) {
2133                         /* writing 0 Enables all 8 RX interrupt levels */
2134                         writeq(0x0, &bar0->rx_traffic_mask);
2135                 } else if (flag == DISABLE_INTRS) {
2136                         /*
2137                          * Disable Rx Traffic Intrs in the general intr mask
2138                          * register.
2139                          */
2140                         writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2141                 }
2142         }
2143
2144         temp64 = readq(&bar0->general_int_mask);
2145         if (flag == ENABLE_INTRS)
2146                 temp64 &= ~((u64) intr_mask);
2147         else
2148                 temp64 = DISABLE_ALL_INTRS;
2149         writeq(temp64, &bar0->general_int_mask);
2150
2151         nic->general_int_mask = readq(&bar0->general_int_mask);
2152 }
2153
2154 /**
2155  *  verify_pcc_quiescent- Checks for PCC quiescent state
2156  *  Return: 1 If PCC is quiescence
2157  *          0 If PCC is not quiescence
2158  */
2159 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2160 {
2161         int ret = 0, herc;
2162         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2163         u64 val64 = readq(&bar0->adapter_status);
2164
2165         herc = (sp->device_type == XFRAME_II_DEVICE);
2166
2167         if (flag == FALSE) {
2168                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2169                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2170                                 ret = 1;
2171                 } else {
2172                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2173                                 ret = 1;
2174                 }
2175         } else {
2176                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2177                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2178                              ADAPTER_STATUS_RMAC_PCC_IDLE))
2179                                 ret = 1;
2180                 } else {
2181                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2182                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2183                                 ret = 1;
2184                 }
2185         }
2186
2187         return ret;
2188 }
2189 /**
2190  *  verify_xena_quiescence - Checks whether the H/W is ready
2191  *  Description: Returns whether the H/W is ready to go or not. Depending
2192  *  on whether adapter enable bit was written or not the comparison
2193  *  differs and the calling function passes the input argument flag to
2194  *  indicate this.
2195  *  Return: 1 If xena is quiescence
2196  *          0 If Xena is not quiescence
2197  */
2198
2199 static int verify_xena_quiescence(struct s2io_nic *sp)
2200 {
2201         int  mode;
2202         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2203         u64 val64 = readq(&bar0->adapter_status);
2204         mode = s2io_verify_pci_mode(sp);
2205
2206         if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2207                 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2208                 return 0;
2209         }
2210         if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2211         DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2212                 return 0;
2213         }
2214         if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2215                 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2216                 return 0;
2217         }
2218         if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2219                 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2220                 return 0;
2221         }
2222         if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2223                 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2224                 return 0;
2225         }
2226         if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2227                 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2228                 return 0;
2229         }
2230         if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2231                 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2232                 return 0;
2233         }
2234         if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2235                 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2236                 return 0;
2237         }
2238
2239         /*
2240          * In PCI 33 mode, the P_PLL is not used, and therefore,
2241          * the the P_PLL_LOCK bit in the adapter_status register will
2242          * not be asserted.
2243          */
2244         if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2245                 sp->device_type == XFRAME_II_DEVICE && mode !=
2246                 PCI_MODE_PCI_33) {
2247                 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2248                 return 0;
2249         }
2250         if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2251                         ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2252                 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2253                 return 0;
2254         }
2255         return 1;
2256 }
2257
2258 /**
2259  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2260  * @sp: Pointer to device specifc structure
2261  * Description :
2262  * New procedure to clear mac address reading  problems on Alpha platforms
2263  *
2264  */
2265
2266 static void fix_mac_address(struct s2io_nic * sp)
2267 {
2268         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2269         u64 val64;
2270         int i = 0;
2271
2272         while (fix_mac[i] != END_SIGN) {
2273                 writeq(fix_mac[i++], &bar0->gpio_control);
2274                 udelay(10);
2275                 val64 = readq(&bar0->gpio_control);
2276         }
2277 }
2278
/**
 *  start_nic - Turns the device on
 *  @nic : device private variable.
 *  Description:
 *  This function actually turns the device on. Before this  function is
 *  called,all Registers are configured from their reset states
 *  and shared memory is allocated but the NIC is still quiescent. On
 *  calling this function, the device interrupts are cleared and the NIC is
 *  literally switched on by writing into the adapter control register.
 *  Return Value:
 *  SUCCESS on success and -1 on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*  PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Point the PRC at the first RxD block of this ring. */
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		/* Replace the RxD backoff interval with a fixed 0x1000. */
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Honour the vlan_tag_strip module parameter. */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	/* NOTE(review): this clears ADAPTER_ECC_EN, which reads as
	 * *disabling* ECC, contradicting the comment above -- confirm
	 * against the Xena register specification before changing.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initally on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		/* Magic LED programming value written at raw offset 0x2700
		 * (register not named in the XENA_dev_config layout).
		 */
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
2397 /**
2398  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2399  */
2400 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2401                                         TxD *txdlp, int get_off)
2402 {
2403         struct s2io_nic *nic = fifo_data->nic;
2404         struct sk_buff *skb;
2405         struct TxD *txds;
2406         u16 j, frg_cnt;
2407
2408         txds = txdlp;
2409         if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2410                 pci_unmap_single(nic->pdev, (dma_addr_t)
2411                         txds->Buffer_Pointer, sizeof(u64),
2412                         PCI_DMA_TODEVICE);
2413                 txds++;
2414         }
2415
2416         skb = (struct sk_buff *) ((unsigned long)
2417                         txds->Host_Control);
2418         if (!skb) {
2419                 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2420                 return NULL;
2421         }
2422         pci_unmap_single(nic->pdev, (dma_addr_t)
2423                          txds->Buffer_Pointer,
2424                          skb->len - skb->data_len,
2425                          PCI_DMA_TODEVICE);
2426         frg_cnt = skb_shinfo(skb)->nr_frags;
2427         if (frg_cnt) {
2428                 txds++;
2429                 for (j = 0; j < frg_cnt; j++, txds++) {
2430                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2431                         if (!txds->Buffer_Pointer)
2432                                 break;
2433                         pci_unmap_page(nic->pdev, (dma_addr_t)
2434                                         txds->Buffer_Pointer,
2435                                        frag->size, PCI_DMA_TODEVICE);
2436                 }
2437         }
2438         memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2439         return(skb);
2440 }
2441
2442 /**
2443  *  free_tx_buffers - Free all queued Tx buffers
2444  *  @nic : device private variable.
2445  *  Description:
2446  *  Free all queued Tx buffers.
2447  *  Return Value: void
2448 */
2449
2450 static void free_tx_buffers(struct s2io_nic *nic)
2451 {
2452         struct net_device *dev = nic->dev;
2453         struct sk_buff *skb;
2454         struct TxD *txdp;
2455         int i, j;
2456         struct mac_info *mac_control;
2457         struct config_param *config;
2458         int cnt = 0;
2459
2460         mac_control = &nic->mac_control;
2461         config = &nic->config;
2462
2463         for (i = 0; i < config->tx_fifo_num; i++) {
2464                 unsigned long flags;
2465                 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags);
2466                 for (j = 0; j < config->tx_cfg[i].fifo_len; j++) {
2467                         txdp = (struct TxD *) \
2468                         mac_control->fifos[i].list_info[j].list_virt_addr;
2469                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2470                         if (skb) {
2471                                 nic->mac_control.stats_info->sw_stat.mem_freed
2472                                         += skb->truesize;
2473                                 dev_kfree_skb(skb);
2474                                 cnt++;
2475                         }
2476                 }
2477                 DBG_PRINT(INTR_DBG,
2478                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2479                           dev->name, cnt, i);
2480                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2481                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2482                 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock, flags);
2483         }
2484 }
2485
2486 /**
2487  *   stop_nic -  To stop the nic
2488  *   @nic ; device private variable.
2489  *   Description:
2490  *   This function does exactly the opposite of what the start_nic()
2491  *   function does. This function is called to stop the device.
2492  *   Return Value:
2493  *   void.
2494  */
2495
2496 static void stop_nic(struct s2io_nic *nic)
2497 {
2498         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2499         register u64 val64 = 0;
2500         u16 interruptible;
2501         struct mac_info *mac_control;
2502         struct config_param *config;
2503
2504         mac_control = &nic->mac_control;
2505         config = &nic->config;
2506
2507         /*  Disable all interrupts */
2508         en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2509         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2510         interruptible |= TX_PIC_INTR;
2511         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2512
2513         /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2514         val64 = readq(&bar0->adapter_control);
2515         val64 &= ~(ADAPTER_CNTL_EN);
2516         writeq(val64, &bar0->adapter_control);
2517 }
2518
/**
 *  fill_rx_buffers - Allocates the Rx side skbs
 *  @ring_info: per ring structure
 *  Description:
 *  The function allocates Rx side skbs and puts the physical
 *  address of these buffers into the RxD buffer pointers, so that the NIC
 *  can DMA the received frame into these locations.
 *  The NIC supports 3 receive modes, viz
 *  1. single buffer,
 *  2. three buffer and
 *  3. Five buffer modes.
 *  Each mode defines how many fragments the received frame will be split
 *  up into by the NIC. The frame is split into L3 header, L4 Header,
 *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 *  is split into 3 fragments. As of now only single buffer mode is
 *  supported.
 *   Return Value:
 *  SUCCESS on success or an appropriate -ve value on failure.
 */

static int fill_rx_buffers(struct ring_info *ring)
{
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	u64 tmp;
	struct buffAdd *ba;
	/* First RxD of the current batch; ownership is handed to the
	 * adapter only after the whole batch is set up (see "end:").
	 */
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	int rxd_index = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *stats = &ring->nic->mac_control.stats_info->sw_stat;

	/* Number of descriptors that currently lack a buffer. */
	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;

	block_no1 = ring->rx_curr_get_info.block_index;
	while (alloc_tab < alloc_cnt) {
		block_no = ring->rx_curr_put_info.block_index;

		off = ring->rx_curr_put_info.offset;

		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;

		/* NOTE(review): rxd_index is computed but never read in
		 * this function -- looks like dead code; confirm before
		 * removing.
		 */
		rxd_index = off + 1;
		if (block_no)
			rxd_index += (block_no * ring->rxd_count);

		/* Ring full: put pointer caught up with get pointer on a
		 * descriptor that still holds an skb.
		 */
		if ((block_no == block_no1) &&
			(off == ring->rx_curr_get_info.offset) &&
			(rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put",
				ring->dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		/* Wrap to the next block when past the last RxD slot. */
		if (off && (off == ring->rxd_count)) {
			ring->rx_curr_put_info.block_index++;
			if (ring->rx_curr_put_info.block_index ==
							ring->block_count)
				ring->rx_curr_put_info.block_index = 0;
			block_no = ring->rx_curr_put_info.block_index;
			off = 0;
			ring->rx_curr_put_info.offset = off;
			rxdp = ring->rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  ring->dev->name, rxdp);

		}

		/* Stop if the adapter still owns this descriptor (in 3B
		 * mode, only when its buffers are also still posted).
		 */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
			((ring->rxd_mode == RXD_MODE_3B) &&
				(rxdp->Control_2 & s2BIT(0)))) {
			ring->rx_curr_put_info.offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = ring->mtu + HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (ring->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if(!skb) {
			DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name);
			DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
			/* Hand the partially-filled batch to the adapter
			 * before bailing out.
			 */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			stats->mem_alloc_fail_cnt++;

			return -ENOMEM ;
		}
		stats->mem_allocated += skb->truesize;

		if (ring->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1*)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr = pci_map_single
			    (ring->pdev, skb->data, size - NET_IP_ALIGN,
				PCI_DMA_FROMDEVICE);
			if( (rxdp1->Buffer0_ptr == 0) ||
				(rxdp1->Buffer0_ptr ==
				DMA_ERROR_CODE))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
			rxdp->Host_Control = (unsigned long) (skb);
		} else if (ring->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3*)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &ring->ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Round skb->data up to the next ALIGN_SIZE+1
			 * boundary for the hardware's alignment rules.
			 */
			tmp = (u64)(unsigned long) skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			/* Map Buffer0 once, then just sync it thereafter. */
			if (!(rxdp3->Buffer0_ptr))
				rxdp3->Buffer0_ptr =
				   pci_map_single(ring->pdev, ba->ba_0,
					BUF0_LEN, PCI_DMA_FROMDEVICE);
			else
				pci_dma_sync_single_for_device(ring->pdev,
				(dma_addr_t) rxdp3->Buffer0_ptr,
				    BUF0_LEN, PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer0_ptr == 0) ||
				(rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
				goto pci_map_failed;

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (ring->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = pci_map_single
				(ring->pdev, skb->data, ring->mtu + 4,
						PCI_DMA_FROMDEVICE);

				if( (rxdp3->Buffer2_ptr == 0) ||
					(rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
					goto pci_map_failed;

				if (!rxdp3->Buffer1_ptr)
					rxdp3->Buffer1_ptr =
						pci_map_single(ring->pdev,
						ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);

				if( (rxdp3->Buffer1_ptr == 0) ||
					(rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
					/* Undo the Buffer2 mapping made above. */
					pci_unmap_single
						(ring->pdev,
						(dma_addr_t)(unsigned long)
						skb->data,
						ring->mtu + 4,
						PCI_DMA_FROMDEVICE);
					goto pci_map_failed;
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
								(ring->mtu + 4);
			}
			rxdp->Control_2 |= s2BIT(0);
			rxdp->Host_Control = (unsigned long) (skb);
		}
		/* Non-batch-leader descriptors get ownership immediately;
		 * batch leaders are deferred (see below and "end:").
		 */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (ring->rxd_count + 1))
			off = 0;
		ring->rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			/* New batch: release the previous leader first. */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		ring->rx_bufs_left += 1;
		alloc_tab++;
	}

      end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;
pci_map_failed:
	stats->pci_map_fail_cnt++;
	stats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2747
2748 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2749 {
2750         struct net_device *dev = sp->dev;
2751         int j;
2752         struct sk_buff *skb;
2753         struct RxD_t *rxdp;
2754         struct mac_info *mac_control;
2755         struct buffAdd *ba;
2756         struct RxD1 *rxdp1;
2757         struct RxD3 *rxdp3;
2758
2759         mac_control = &sp->mac_control;
2760         for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2761                 rxdp = mac_control->rings[ring_no].
2762                                 rx_blocks[blk].rxds[j].virt_addr;
2763                 skb = (struct sk_buff *)
2764                         ((unsigned long) rxdp->Host_Control);
2765                 if (!skb) {
2766                         continue;
2767                 }
2768                 if (sp->rxd_mode == RXD_MODE_1) {
2769                         rxdp1 = (struct RxD1*)rxdp;
2770                         pci_unmap_single(sp->pdev, (dma_addr_t)
2771                                 rxdp1->Buffer0_ptr,
2772                                 dev->mtu +
2773                                 HEADER_ETHERNET_II_802_3_SIZE
2774                                 + HEADER_802_2_SIZE +
2775                                 HEADER_SNAP_SIZE,
2776                                 PCI_DMA_FROMDEVICE);
2777                         memset(rxdp, 0, sizeof(struct RxD1));
2778                 } else if(sp->rxd_mode == RXD_MODE_3B) {
2779                         rxdp3 = (struct RxD3*)rxdp;
2780                         ba = &mac_control->rings[ring_no].
2781                                 ba[blk][j];
2782                         pci_unmap_single(sp->pdev, (dma_addr_t)
2783                                 rxdp3->Buffer0_ptr,
2784                                 BUF0_LEN,
2785                                 PCI_DMA_FROMDEVICE);
2786                         pci_unmap_single(sp->pdev, (dma_addr_t)
2787                                 rxdp3->Buffer1_ptr,
2788                                 BUF1_LEN,
2789                                 PCI_DMA_FROMDEVICE);
2790                         pci_unmap_single(sp->pdev, (dma_addr_t)
2791                                 rxdp3->Buffer2_ptr,
2792                                 dev->mtu + 4,
2793                                 PCI_DMA_FROMDEVICE);
2794                         memset(rxdp, 0, sizeof(struct RxD3));
2795                 }
2796                 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2797                 dev_kfree_skb(skb);
2798                 mac_control->rings[ring_no].rx_bufs_left -= 1;
2799         }
2800 }
2801
2802 /**
2803  *  free_rx_buffers - Frees all Rx buffers
2804  *  @sp: device private variable.
2805  *  Description:
2806  *  This function will free all Rx buffers allocated by host.
2807  *  Return Value:
2808  *  NONE.
2809  */
2810
2811 static void free_rx_buffers(struct s2io_nic *sp)
2812 {
2813         struct net_device *dev = sp->dev;
2814         int i, blk = 0, buf_cnt = 0;
2815         struct mac_info *mac_control;
2816         struct config_param *config;
2817
2818         mac_control = &sp->mac_control;
2819         config = &sp->config;
2820
2821         for (i = 0; i < config->rx_ring_num; i++) {
2822                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2823                         free_rxd_blk(sp,i,blk);
2824
2825                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2826                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2827                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2828                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2829                 mac_control->rings[i].rx_bufs_left = 0;
2830                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2831                           dev->name, buf_cnt, i);
2832         }
2833 }
2834
2835 static int s2io_chk_rx_buffers(struct ring_info *ring)
2836 {
2837         if (fill_rx_buffers(ring) == -ENOMEM) {
2838                 DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
2839                 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
2840         }
2841         return 0;
2842 }
2843
2844 /**
2845  * s2io_poll - Rx interrupt handler for NAPI support
2846  * @napi : pointer to the napi structure.
2847  * @budget : The number of packets that were budgeted to be processed
2848  * during  one pass through the 'Poll" function.
2849  * Description:
2850  * Comes into picture only if NAPI support has been incorporated. It does
2851  * the same thing that rx_intr_handler does, but not in a interrupt context
2852  * also It will process only a given number of packets.
2853  * Return value:
2854  * 0 on success and 1 if there are No Rx packets to be processed.
2855  */
2856
2857 static int s2io_poll_msix(struct napi_struct *napi, int budget)
2858 {
2859         struct ring_info *ring = container_of(napi, struct ring_info, napi);
2860         struct net_device *dev = ring->dev;
2861         struct config_param *config;
2862         struct mac_info *mac_control;
2863         int pkts_processed = 0;
2864         u8 *addr = NULL, val8 = 0;
2865         struct s2io_nic *nic = dev->priv;
2866         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2867         int budget_org = budget;
2868
2869         config = &nic->config;
2870         mac_control = &nic->mac_control;
2871
2872         if (unlikely(!is_s2io_card_up(nic)))
2873                 return 0;
2874
2875         pkts_processed = rx_intr_handler(ring, budget);
2876         s2io_chk_rx_buffers(ring);
2877
2878         if (pkts_processed < budget_org) {
2879                 netif_rx_complete(dev, napi);
2880                 /*Re Enable MSI-Rx Vector*/
2881                 addr = (u8 *)&bar0->xmsi_mask_reg;
2882                 addr += 7 - ring->ring_no;
2883                 val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2884                 writeb(val8, addr);
2885                 val8 = readb(addr);
2886         }
2887         return pkts_processed;
2888 }
2889 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2890 {
2891         struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2892         struct ring_info *ring;
2893         struct net_device *dev = nic->dev;
2894         struct config_param *config;
2895         struct mac_info *mac_control;
2896         int pkts_processed = 0;
2897         int ring_pkts_processed, i;
2898         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2899         int budget_org = budget;
2900
2901         config = &nic->config;
2902         mac_control = &nic->mac_control;
2903
2904         if (unlikely(!is_s2io_card_up(nic)))
2905                 return 0;
2906
2907         for (i = 0; i < config->rx_ring_num; i++) {
2908                 ring = &mac_control->rings[i];
2909                 ring_pkts_processed = rx_intr_handler(ring, budget);
2910                 s2io_chk_rx_buffers(ring);
2911                 pkts_processed += ring_pkts_processed;
2912                 budget -= ring_pkts_processed;
2913                 if (budget <= 0)
2914                         break;
2915         }
2916         if (pkts_processed < budget_org) {
2917                 netif_rx_complete(dev, napi);
2918                 /* Re enable the Rx interrupts for the ring */
2919                 writeq(0, &bar0->rx_traffic_mask);
2920                 readl(&bar0->rx_traffic_mask);
2921         }
2922         return pkts_processed;
2923 }
2924
2925 #ifdef CONFIG_NET_POLL_CONTROLLER
2926 /**
2927  * s2io_netpoll - netpoll event handler entry point
2928  * @dev : pointer to the device structure.
2929  * Description:
2930  *      This function will be called by upper layer to check for events on the
2931  * interface in situations where interrupts are disabled. It is used for
2932  * specific in-kernel networking tasks, such as remote consoles and kernel
2933  * debugging over the network (example netdump in RedHat).
2934  */
2935 static void s2io_netpoll(struct net_device *dev)
2936 {
2937         struct s2io_nic *nic = dev->priv;
2938         struct mac_info *mac_control;
2939         struct config_param *config;
2940         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2941         u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2942         int i;
2943
2944         if (pci_channel_offline(nic->pdev))
2945                 return;
2946
2947         disable_irq(dev->irq);
2948
2949         mac_control = &nic->mac_control;
2950         config = &nic->config;
2951
2952         writeq(val64, &bar0->rx_traffic_int);
2953         writeq(val64, &bar0->tx_traffic_int);
2954
2955         /* we need to free up the transmitted skbufs or else netpoll will
2956          * run out of skbs and will fail and eventually netpoll application such
2957          * as netdump will fail.
2958          */
2959         for (i = 0; i < config->tx_fifo_num; i++)
2960                 tx_intr_handler(&mac_control->fifos[i]);
2961
2962         /* check for received packet and indicate up to network */
2963         for (i = 0; i < config->rx_ring_num; i++)
2964                 rx_intr_handler(&mac_control->rings[i], 0);
2965
2966         for (i = 0; i < config->rx_ring_num; i++) {
2967                 if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
2968                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2969                         DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2970                         break;
2971                 }
2972         }
2973         enable_irq(dev->irq);
2974         return;
2975 }
2976 #endif
2977
2978 /**
2979  *  rx_intr_handler - Rx interrupt handler
2980  *  @ring_info: per ring structure.
2981  *  @budget: budget for napi processing.
2982  *  Description:
2983  *  If the interrupt is because of a received frame or if the
2984  *  receive ring contains fresh as yet un-processed frames,this function is
2985  *  called. It picks out the RxD at which place the last Rx processing had
2986  *  stopped and sends the skb to the OSM's Rx handler and then increments
2987  *  the offset.
2988  *  Return Value:
2989  *  No. of napi packets processed.
2990  */
2991 static int rx_intr_handler(struct ring_info *ring_data, int budget)
2992 {
2993         int get_block, put_block;
2994         struct rx_curr_get_info get_info, put_info;
2995         struct RxD_t *rxdp;
2996         struct sk_buff *skb;
2997         int pkt_cnt = 0, napi_pkts = 0;
2998         int i;
2999         struct RxD1* rxdp1;
3000         struct RxD3* rxdp3;
3001
3002         get_info = ring_data->rx_curr_get_info;
3003         get_block = get_info.block_index;
3004         memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
3005         put_block = put_info.block_index;
3006         rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
3007
3008         while (RXD_IS_UP2DT(rxdp)) {
3009                 /*
3010                  * If your are next to put index then it's
3011                  * FIFO full condition
3012                  */
3013                 if ((get_block == put_block) &&
3014                     (get_info.offset + 1) == put_info.offset) {
3015                         DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
3016                                 ring_data->dev->name);
3017                         break;
3018                 }
3019                 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
3020                 if (skb == NULL) {
3021                         DBG_PRINT(ERR_DBG, "%s: The skb is ",
3022                                   ring_data->dev->name);
3023                         DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
3024                         return 0;
3025                 }
3026                 if (ring_data->rxd_mode == RXD_MODE_1) {
3027                         rxdp1 = (struct RxD1*)rxdp;
3028                         pci_unmap_single(ring_data->pdev, (dma_addr_t)
3029                                 rxdp1->Buffer0_ptr,
3030                                 ring_data->mtu +
3031                                 HEADER_ETHERNET_II_802_3_SIZE +
3032                                 HEADER_802_2_SIZE +
3033                                 HEADER_SNAP_SIZE,
3034                                 PCI_DMA_FROMDEVICE);
3035                 } else if (ring_data->rxd_mode == RXD_MODE_3B) {
3036                         rxdp3 = (struct RxD3*)rxdp;
3037                         pci_dma_sync_single_for_cpu(ring_data->pdev, (dma_addr_t)
3038                                 rxdp3->Buffer0_ptr,
3039                                 BUF0_LEN, PCI_DMA_FROMDEVICE);
3040                         pci_unmap_single(ring_data->pdev, (dma_addr_t)
3041                                 rxdp3->Buffer2_ptr,
3042                                 ring_data->mtu + 4,
3043                                 PCI_DMA_FROMDEVICE);
3044                 }
3045                 prefetch(skb->data);
3046                 rx_osm_handler(ring_data, rxdp);
3047                 get_info.offset++;
3048                 ring_data->rx_curr_get_info.offset = get_info.offset;
3049                 rxdp = ring_data->rx_blocks[get_block].
3050                                 rxds[get_info.offset].virt_addr;
3051                 if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
3052                         get_info.offset = 0;
3053                         ring_data->rx_curr_get_info.offset = get_info.offset;
3054                         get_block++;
3055                         if (get_block == ring_data->block_count)
3056                                 get_block = 0;
3057                         ring_data->rx_curr_get_info.block_index = get_block;
3058                         rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
3059                 }
3060
3061                 if (ring_data->nic->config.napi) {
3062                         budget--;
3063                         napi_pkts++;
3064                         if (!budget)
3065                                 break;
3066                 }
3067                 pkt_cnt++;
3068                 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
3069                         break;
3070         }
3071         if (ring_data->lro) {
3072                 /* Clear all LRO sessions before exiting */
3073                 for (i=0; i<MAX_LRO_SESSIONS; i++) {
3074                         struct lro *lro = &ring_data->lro0_n[i];
3075                         if (lro->in_use) {
3076                                 update_L3L4_header(ring_data->nic, lro);
3077                                 queue_rx_frame(lro->parent, lro->vlan_tag);
3078                                 clear_lro_session(lro);
3079                         }
3080                 }
3081         }
3082         return(napi_pkts);
3083 }
3084
3085 /**
3086  *  tx_intr_handler - Transmit interrupt handler
3087  *  @nic : device private variable
3088  *  Description:
3089  *  If an interrupt was raised to indicate DMA complete of the
3090  *  Tx packet, this function is called. It identifies the last TxD
3091  *  whose buffer was freed and frees all skbs whose data have already
3092  *  DMA'ed into the NICs internal memory.
3093  *  Return Value:
3094  *  NONE
3095  */
3096
3097 static void tx_intr_handler(struct fifo_info *fifo_data)
3098 {
3099         struct s2io_nic *nic = fifo_data->nic;
3100         struct tx_curr_get_info get_info, put_info;
3101         struct sk_buff *skb = NULL;
3102         struct TxD *txdlp;
3103         int pkt_cnt = 0;
3104         unsigned long flags = 0;
3105         u8 err_mask;
3106
3107         if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3108                         return;
3109
3110         get_info = fifo_data->tx_curr_get_info;
3111         memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3112         txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
3113             list_virt_addr;
3114         while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3115                (get_info.offset != put_info.offset) &&
3116                (txdlp->Host_Control)) {
3117                 /* Check for TxD errors */
3118                 if (txdlp->Control_1 & TXD_T_CODE) {
3119                         unsigned long long err;
3120                         err = txdlp->Control_1 & TXD_T_CODE;
3121                         if (err & 0x1) {
3122                                 nic->mac_control.stats_info->sw_stat.
3123                                                 parity_err_cnt++;
3124                         }
3125
3126                         /* update t_code statistics */
3127                         err_mask = err >> 48;
3128                         switch(err_mask) {
3129                                 case 2:
3130                                         nic->mac_control.stats_info->sw_stat.
3131                                                         tx_buf_abort_cnt++;
3132                                 break;
3133
3134                                 case 3:
3135                                         nic->mac_control.stats_info->sw_stat.
3136                                                         tx_desc_abort_cnt++;
3137                                 break;
3138
3139                                 case 7:
3140                                         nic->mac_control.stats_info->sw_stat.
3141                                                         tx_parity_err_cnt++;
3142                                 break;
3143
3144                                 case 10:
3145                                         nic->mac_control.stats_info->sw_stat.
3146                                                         tx_link_loss_cnt++;
3147                                 break;
3148
3149                                 case 15:
3150                                         nic->mac_control.stats_info->sw_stat.
3151                                                         tx_list_proc_err_cnt++;
3152                                 break;
3153                         }
3154                 }
3155
3156                 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3157                 if (skb == NULL) {
3158                         spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3159                         DBG_PRINT(ERR_DBG, "%s: Null skb ",
3160                         __FUNCTION__);
3161                         DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
3162                         return;
3163                 }
3164                 pkt_cnt++;
3165
3166                 /* Updating the statistics block */
3167                 nic->stats.tx_bytes += skb->len;
3168                 nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
3169                 dev_kfree_skb_irq(skb);
3170
3171                 get_info.offset++;
3172                 if (get_info.offset == get_info.fifo_len + 1)
3173                         get_info.offset = 0;
3174                 txdlp = (struct TxD *) fifo_data->list_info
3175                     [get_info.offset].list_virt_addr;
3176                 fifo_data->tx_curr_get_info.offset =
3177                     get_info.offset;
3178         }
3179
3180         s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3181
3182         spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3183 }
3184
3185 /**
3186  *  s2io_mdio_write - Function to write in to MDIO registers
3187  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3188  *  @addr     : address value
3189  *  @value    : data value
3190  *  @dev      : pointer to net_device structure
3191  *  Description:
3192  *  This function is used to write values to the MDIO registers
3193  *  NONE
3194  */
3195 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3196 {
3197         u64 val64 = 0x0;
3198         struct s2io_nic *sp = dev->priv;
3199         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3200
3201         //address transaction
3202         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3203                         | MDIO_MMD_DEV_ADDR(mmd_type)
3204                         | MDIO_MMS_PRT_ADDR(0x0);
3205         writeq(val64, &bar0->mdio_control);
3206         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3207         writeq(val64, &bar0->mdio_control);
3208         udelay(100);
3209
3210         //Data transaction
3211         val64 = 0x0;
3212         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3213                         | MDIO_MMD_DEV_ADDR(mmd_type)
3214                         | MDIO_MMS_PRT_ADDR(0x0)
3215                         | MDIO_MDIO_DATA(value)
3216                         | MDIO_OP(MDIO_OP_WRITE_TRANS);
3217         writeq(val64, &bar0->mdio_control);
3218         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3219         writeq(val64, &bar0->mdio_control);
3220         udelay(100);
3221
3222         val64 = 0x0;
3223         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3224         | MDIO_MMD_DEV_ADDR(mmd_type)
3225         | MDIO_MMS_PRT_ADDR(0x0)
3226         | MDIO_OP(MDIO_OP_READ_TRANS);
3227         writeq(val64, &bar0->mdio_control);
3228         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3229         writeq(val64, &bar0->mdio_control);
3230         udelay(100);
3231
3232 }
3233
3234 /**
3235  *  s2io_mdio_read - Function to write in to MDIO registers
3236  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3237  *  @addr     : address value
3238  *  @dev      : pointer to net_device structure
3239  *  Description:
3240  *  This function is used to read values to the MDIO registers
3241  *  NONE
3242  */
3243 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3244 {
3245         u64 val64 = 0x0;
3246         u64 rval64 = 0x0;
3247         struct s2io_nic *sp = dev->priv;
3248         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3249
3250         /* address transaction */
3251         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3252                         | MDIO_MMD_DEV_ADDR(mmd_type)
3253                         | MDIO_MMS_PRT_ADDR(0x0);
3254         writeq(val64, &bar0->mdio_control);
3255         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3256         writeq(val64, &bar0->mdio_control);
3257         udelay(100);
3258
3259         /* Data transaction */
3260         val64 = 0x0;
3261         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3262                         | MDIO_MMD_DEV_ADDR(mmd_type)
3263                         | MDIO_MMS_PRT_ADDR(0x0)
3264                         | MDIO_OP(MDIO_OP_READ_TRANS);
3265         writeq(val64, &bar0->mdio_control);
3266         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3267         writeq(val64, &bar0->mdio_control);
3268         udelay(100);
3269
3270         /* Read the value from regs */
3271         rval64 = readq(&bar0->mdio_control);
3272         rval64 = rval64 & 0xFFFF0000;
3273         rval64 = rval64 >> 16;
3274         return rval64;
3275 }
3276 /**
3277  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
3278  *  @counter      : couter value to be updated
3279  *  @flag         : flag to indicate the status
3280  *  @type         : counter type
3281  *  Description:
3282  *  This function is to check the status of the xpak counters value
3283  *  NONE
3284  */
3285
3286 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3287 {
3288         u64 mask = 0x3;
3289         u64 val64;
3290         int i;
3291         for(i = 0; i <index; i++)
3292                 mask = mask << 0x2;
3293
3294         if(flag > 0)
3295         {
3296                 *counter = *counter + 1;
3297                 val64 = *regs_stat & mask;
3298                 val64 = val64 >> (index * 0x2);
3299                 val64 = val64 + 1;
3300                 if(val64 == 3)
3301                 {
3302                         switch(type)
3303                         {
3304                         case 1:
3305                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3306                                           "service. Excessive temperatures may "
3307                                           "result in premature transceiver "
3308                                           "failure \n");
3309                         break;
3310                         case 2:
3311                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3312                                           "service Excessive bias currents may "
3313                                           "indicate imminent laser diode "
3314                                           "failure \n");
3315                         break;
3316                         case 3:
3317                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3318                                           "service Excessive laser output "
3319                                           "power may saturate far-end "
3320                                           "receiver\n");
3321                         break;
3322                         default:
3323                                 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3324                                           "type \n");
3325                         }
3326                         val64 = 0x0;
3327                 }
3328                 val64 = val64 << (index * 0x2);
3329                 *regs_stat = (*regs_stat & (~mask)) | (val64);
3330
3331         } else {
3332                 *regs_stat = *regs_stat & (~mask);
3333         }
3334 }
3335
3336 /**
3337  *  s2io_updt_xpak_counter - Function to update the xpak counters
3338  *  @dev         : pointer to net_device struct
3339  *  Description:
3340  *  This function is to upate the status of the xpak counters value
3341  *  NONE
3342  */
3343 static void s2io_updt_xpak_counter(struct net_device *dev)
3344 {
3345         u16 flag  = 0x0;
3346         u16 type  = 0x0;
3347         u16 val16 = 0x0;
3348         u64 val64 = 0x0;
3349         u64 addr  = 0x0;
3350
3351         struct s2io_nic *sp = dev->priv;
3352         struct stat_block *stat_info = sp->mac_control.stats_info;
3353
3354         /* Check the communication with the MDIO slave */
3355         addr = 0x0000;
3356         val64 = 0x0;
3357         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3358         if((val64 == 0xFFFF) || (val64 == 0x0000))
3359         {
3360                 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3361                           "Returned %llx\n", (unsigned long long)val64);
3362                 return;
3363         }
3364
3365         /* Check for the expecte value of 2040 at PMA address 0x0000 */
3366         if(val64 != 0x2040)
3367         {
3368                 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3369                 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3370                           (unsigned long long)val64);
3371                 return;
3372         }
3373
3374         /* Loading the DOM register to MDIO register */
3375         addr = 0xA100;
3376         s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3377         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3378
3379         /* Reading the Alarm flags */
3380         addr = 0xA070;
3381         val64 = 0x0;
3382         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3383
3384         flag = CHECKBIT(val64, 0x7);
3385         type = 1;
3386         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3387                                 &stat_info->xpak_stat.xpak_regs_stat,
3388                                 0x0, flag, type);
3389
3390         if(CHECKBIT(val64, 0x6))
3391                 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3392
3393         flag = CHECKBIT(val64, 0x3);
3394         type = 2;
3395         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3396                                 &stat_info->xpak_stat.xpak_regs_stat,
3397                                 0x2, flag, type);
3398
3399         if(CHECKBIT(val64, 0x2))
3400                 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3401
3402         flag = CHECKBIT(val64, 0x1);
3403         type = 3;
3404         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3405                                 &stat_info->xpak_stat.xpak_regs_stat,
3406                                 0x4, flag, type);
3407
3408         if(CHECKBIT(val64, 0x0))
3409                 stat_info->xpak_stat.alarm_laser_output_power_low++;
3410
3411         /* Reading the Warning flags */
3412         addr = 0xA074;
3413         val64 = 0x0;
3414         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3415
3416         if(CHECKBIT(val64, 0x7))
3417                 stat_info->xpak_stat.warn_transceiver_temp_high++;
3418
3419         if(CHECKBIT(val64, 0x6))
3420                 stat_info->xpak_stat.warn_transceiver_temp_low++;
3421
3422         if(CHECKBIT(val64, 0x3))
3423                 stat_info->xpak_stat.warn_laser_bias_current_high++;
3424
3425         if(CHECKBIT(val64, 0x2))
3426                 stat_info->xpak_stat.warn_laser_bias_current_low++;
3427
3428         if(CHECKBIT(val64, 0x1))
3429                 stat_info->xpak_stat.warn_laser_output_power_high++;
3430
3431         if(CHECKBIT(val64, 0x0))
3432                 stat_info->xpak_stat.warn_laser_output_power_low++;
3433 }
3434
3435 /**
3436  *  wait_for_cmd_complete - waits for a command to complete.
3437  *  @sp : private member of the device structure, which is a pointer to the
3438  *  s2io_nic structure.
3439  *  Description: Function that waits for a command to Write into RMAC
3440  *  ADDR DATA registers to be completed and returns either success or
3441  *  error depending on whether the command was complete or not.
3442  *  Return value:
3443  *   SUCCESS on success and FAILURE on failure.
3444  */
3445
3446 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3447                                 int bit_state)
3448 {
3449         int ret = FAILURE, cnt = 0, delay = 1;
3450         u64 val64;
3451
3452         if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3453                 return FAILURE;
3454
3455         do {
3456                 val64 = readq(addr);
3457                 if (bit_state == S2IO_BIT_RESET) {
3458                         if (!(val64 & busy_bit)) {
3459                                 ret = SUCCESS;
3460                                 break;
3461                         }
3462                 } else {
3463                         if (!(val64 & busy_bit)) {
3464                                 ret = SUCCESS;
3465                                 break;
3466                         }
3467                 }
3468
3469                 if(in_interrupt())
3470                         mdelay(delay);
3471                 else
3472                         msleep(delay);
3473
3474                 if (++cnt >= 10)
3475                         delay = 50;
3476         } while (cnt < 20);
3477         return ret;
3478 }
3479 /*
3480  * check_pci_device_id - Checks if the device id is supported
3481  * @id : device id
3482  * Description: Function to check if the pci device id is supported by driver.
3483  * Return value: Actual device id if supported else PCI_ANY_ID
3484  */
3485 static u16 check_pci_device_id(u16 id)
3486 {
3487         switch (id) {
3488         case PCI_DEVICE_ID_HERC_WIN:
3489         case PCI_DEVICE_ID_HERC_UNI:
3490                 return XFRAME_II_DEVICE;
3491         case PCI_DEVICE_ID_S2IO_UNI:
3492         case PCI_DEVICE_ID_S2IO_WIN:
3493                 return XFRAME_I_DEVICE;
3494         default:
3495                 return PCI_ANY_ID;
3496         }
3497 }
3498
3499 /**
3500  *  s2io_reset - Resets the card.
3501  *  @sp : private member of the device structure.
3502  *  Description: Function to Reset the card. This function then also
3503  *  restores the previously saved PCI configuration space registers as
3504  *  the card reset also resets the configuration space.
3505  *  Return value:
3506  *  void.
3507  */
3508
3509 static void s2io_reset(struct s2io_nic * sp)
3510 {
3511         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3512         u64 val64;
3513         u16 subid, pci_cmd;
3514         int i;
3515         u16 val16;
3516         unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3517         unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3518
3519         DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3520                         __FUNCTION__, sp->dev->name);
3521
3522         /* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3523         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3524
3525         val64 = SW_RESET_ALL;
3526         writeq(val64, &bar0->sw_reset);
3527         if (strstr(sp->product_name, "CX4")) {
3528                 msleep(750);
3529         }
3530         msleep(250);
3531         for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3532
3533                 /* Restore the PCI state saved during initialization. */
3534                 pci_restore_state(sp->pdev);
3535                 pci_read_config_word(sp->pdev, 0x2, &val16);
3536                 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3537                         break;
3538                 msleep(200);
3539         }
3540
3541         if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3542                 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3543         }
3544
3545         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3546
3547         s2io_init_pci(sp);
3548
3549         /* Set swapper to enable I/O register access */
3550         s2io_set_swapper(sp);
3551
3552         /* restore mac_addr entries */
3553         do_s2io_restore_unicast_mc(sp);
3554
3555         /* Restore the MSIX table entries from local variables */
3556         restore_xmsi_data(sp);
3557
3558         /* Clear certain PCI/PCI-X fields after reset */
3559         if (sp->device_type == XFRAME_II_DEVICE) {
3560                 /* Clear "detected parity error" bit */
3561                 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3562
3563                 /* Clearing PCIX Ecc status register */
3564                 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3565
3566                 /* Clearing PCI_STATUS error reflected here */
3567                 writeq(s2BIT(62), &bar0->txpic_int_reg);
3568         }
3569
3570         /* Reset device statistics maintained by OS */
3571         memset(&sp->stats, 0, sizeof (struct net_device_stats));
3572
3573         up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
3574         down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
3575         up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
3576         down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
3577         reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
3578         mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
3579         mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
3580         watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
3581         /* save link up/down time/cnt, reset/memory/watchdog cnt */
3582         memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
3583         /* restore link up/down time/cnt, reset/memory/watchdog cnt */
3584         sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
3585         sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
3586         sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
3587         sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
3588         sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
3589         sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
3590         sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
3591         sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;
3592
3593         /* SXE-002: Configure link and activity LED to turn it off */
3594         subid = sp->pdev->subsystem_device;
3595         if (((subid & 0xFF) >= 0x07) &&
3596             (sp->device_type == XFRAME_I_DEVICE)) {
3597                 val64 = readq(&bar0->gpio_control);
3598                 val64 |= 0x0000800000000000ULL;
3599                 writeq(val64, &bar0->gpio_control);
3600                 val64 = 0x0411040400000000ULL;
3601                 writeq(val64, (void __iomem *)bar0 + 0x2700);
3602         }
3603
3604         /*
3605          * Clear spurious ECC interrupts that would have occured on
3606          * XFRAME II cards after reset.
3607          */
3608         if (sp->device_type == XFRAME_II_DEVICE) {
3609                 val64 = readq(&bar0->pcc_err_reg);
3610                 writeq(val64, &bar0->pcc_err_reg);
3611         }
3612
3613         sp->device_enabled_once = FALSE;
3614 }
3615
3616 /**
3617  *  s2io_set_swapper - to set the swapper controle on the card
3618  *  @sp : private member of the device structure,
3619  *  pointer to the s2io_nic structure.
3620  *  Description: Function to set the swapper control on the card
3621  *  correctly depending on the 'endianness' of the system.
3622  *  Return value:
3623  *  SUCCESS on success and FAILURE on failure.
3624  */
3625
static int s2io_set_swapper(struct s2io_nic * sp)
{
        struct net_device *dev = sp->dev;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64, valt, valr;

        /*
         * Set proper endian settings and verify the same by reading
         * the PIF Feed-back register.
         */

        val64 = readq(&bar0->pif_rd_swapper_fb);
        if (val64 != 0x0123456789ABCDEFULL) {
                /* Read path is mis-swapped: probe the four FE/SE read-side
                 * swapper combinations until the feedback pattern reads
                 * back correctly. */
                int i = 0;
                u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
                                0x8100008181000081ULL,  /* FE=1, SE=0 */
                                0x4200004242000042ULL,  /* FE=0, SE=1 */
                                0};                     /* FE=0, SE=0 */

                while(i<4) {
                        writeq(value[i], &bar0->swapper_ctrl);
                        val64 = readq(&bar0->pif_rd_swapper_fb);
                        if (val64 == 0x0123456789ABCDEFULL)
                                break;
                        i++;
                }
                if (i == 4) {
                        /* No combination produced a correct read; the card
                         * cannot be used on this host. */
                        DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
                                dev->name);
                        DBG_PRINT(ERR_DBG, "feedback read %llx\n",
                                (unsigned long long) val64);
                        return FAILURE;
                }
                valr = value[i];
        } else {
                /* Reads already work; keep the current swapper setting. */
                valr = readq(&bar0->swapper_ctrl);
        }

        /* Verify the write path by writing a known pattern to the XMSI
         * address register and reading it back. */
        valt = 0x0123456789ABCDEFULL;
        writeq(valt, &bar0->xmsi_address);
        val64 = readq(&bar0->xmsi_address);

        if(val64 != valt) {
                /* Write path mis-swapped: probe write-side swapper bits,
                 * OR-ing in the read setting (valr) found above so reads
                 * keep working during the probe. */
                int i = 0;
                u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
                                0x0081810000818100ULL,  /* FE=1, SE=0 */
                                0x0042420000424200ULL,  /* FE=0, SE=1 */
                                0};                     /* FE=0, SE=0 */

                while(i<4) {
                        writeq((value[i] | valr), &bar0->swapper_ctrl);
                        writeq(valt, &bar0->xmsi_address);
                        val64 = readq(&bar0->xmsi_address);
                        if(val64 == valt)
                                break;
                        i++;
                }
                if(i == 4) {
                        unsigned long long x = val64;
                        DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
                        DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
                        return FAILURE;
                }
        }
        /* Keep only the upper 16 bits (the PIF swapper setting just
         * established) and program the per-path enables below. */
        val64 = readq(&bar0->swapper_ctrl);
        val64 &= 0xFFFF000000000000ULL;

#ifdef  __BIG_ENDIAN
        /*
         * The device by default set to a big endian format, so a
         * big endian driver need not set anything.
         */
        val64 |= (SWAPPER_CTRL_TXP_FE |
                 SWAPPER_CTRL_TXP_SE |
                 SWAPPER_CTRL_TXD_R_FE |
                 SWAPPER_CTRL_TXD_W_FE |
                 SWAPPER_CTRL_TXF_R_FE |
                 SWAPPER_CTRL_RXD_R_FE |
                 SWAPPER_CTRL_RXD_W_FE |
                 SWAPPER_CTRL_RXF_W_FE |
                 SWAPPER_CTRL_XMSI_FE |
                 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
        if (sp->config.intr_type == INTA)
                val64 |= SWAPPER_CTRL_XMSI_SE;
        writeq(val64, &bar0->swapper_ctrl);
#else
        /*
         * Initially we enable all bits to make it accessible by the
         * driver, then we selectively enable only those bits that
         * we want to set.
         */
        val64 |= (SWAPPER_CTRL_TXP_FE |
                 SWAPPER_CTRL_TXP_SE |
                 SWAPPER_CTRL_TXD_R_FE |
                 SWAPPER_CTRL_TXD_R_SE |
                 SWAPPER_CTRL_TXD_W_FE |
                 SWAPPER_CTRL_TXD_W_SE |
                 SWAPPER_CTRL_TXF_R_FE |
                 SWAPPER_CTRL_RXD_R_FE |
                 SWAPPER_CTRL_RXD_R_SE |
                 SWAPPER_CTRL_RXD_W_FE |
                 SWAPPER_CTRL_RXD_W_SE |
                 SWAPPER_CTRL_RXF_W_FE |
                 SWAPPER_CTRL_XMSI_FE |
                 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
        if (sp->config.intr_type == INTA)
                val64 |= SWAPPER_CTRL_XMSI_SE;
        writeq(val64, &bar0->swapper_ctrl);
#endif
        val64 = readq(&bar0->swapper_ctrl);

        /*
         * Verifying if endian settings are accurate by reading a
         * feedback register.
         */
        val64 = readq(&bar0->pif_rd_swapper_fb);
        if (val64 != 0x0123456789ABCDEFULL) {
                /* Endian settings are incorrect, calls for another dekko. */
                DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
                          dev->name);
                DBG_PRINT(ERR_DBG, "feedback read %llx\n",
                          (unsigned long long) val64);
                return FAILURE;
        }

        return SUCCESS;
}
3753
3754 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3755 {
3756         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3757         u64 val64;
3758         int ret = 0, cnt = 0;
3759
3760         do {
3761                 val64 = readq(&bar0->xmsi_access);
3762                 if (!(val64 & s2BIT(15)))
3763                         break;
3764                 mdelay(1);
3765                 cnt++;
3766         } while(cnt < 5);
3767         if (cnt == 5) {
3768                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3769                 ret = 1;
3770         }
3771
3772         return ret;
3773 }
3774
3775 static void restore_xmsi_data(struct s2io_nic *nic)
3776 {
3777         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3778         u64 val64;
3779         int i, msix_index;
3780
3781
3782         if (nic->device_type == XFRAME_I_DEVICE)
3783                 return;
3784
3785         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3786                 msix_index = (i) ? ((i-1) * 8 + 1): 0;
3787                 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3788                 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3789                 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3790                 writeq(val64, &bar0->xmsi_access);
3791                 if (wait_for_msix_trans(nic, msix_index)) {
3792                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3793                         continue;
3794                 }
3795         }
3796 }
3797
3798 static void store_xmsi_data(struct s2io_nic *nic)
3799 {
3800         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3801         u64 val64, addr, data;
3802         int i, msix_index;
3803
3804         if (nic->device_type == XFRAME_I_DEVICE)
3805                 return;
3806
3807         /* Store and display */
3808         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3809                 msix_index = (i) ? ((i-1) * 8 + 1): 0;
3810                 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3811                 writeq(val64, &bar0->xmsi_access);
3812                 if (wait_for_msix_trans(nic, msix_index)) {
3813                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3814                         continue;
3815                 }
3816                 addr = readq(&bar0->xmsi_address);
3817                 data = readq(&bar0->xmsi_data);
3818                 if (addr && data) {
3819                         nic->msix_info[i].addr = addr;
3820                         nic->msix_info[i].data = data;
3821                 }
3822         }
3823 }
3824
3825 static int s2io_enable_msi_x(struct s2io_nic *nic)
3826 {
3827         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3828         u64 rx_mat;
3829         u16 msi_control; /* Temp variable */
3830         int ret, i, j, msix_indx = 1;
3831
3832         nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry),
3833                                GFP_KERNEL);
3834         if (!nic->entries) {
3835                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3836                         __FUNCTION__);
3837                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3838                 return -ENOMEM;
3839         }
3840         nic->mac_control.stats_info->sw_stat.mem_allocated
3841                 += (nic->num_entries * sizeof(struct msix_entry));
3842
3843         memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry));
3844
3845         nic->s2io_entries =
3846                 kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry),
3847                                    GFP_KERNEL);
3848         if (!nic->s2io_entries) {
3849                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3850                         __FUNCTION__);
3851                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3852                 kfree(nic->entries);
3853                 nic->mac_control.stats_info->sw_stat.mem_freed
3854                         += (nic->num_entries * sizeof(struct msix_entry));
3855                 return -ENOMEM;
3856         }
3857          nic->mac_control.stats_info->sw_stat.mem_allocated
3858                 += (nic->num_entries * sizeof(struct s2io_msix_entry));
3859         memset(nic->s2io_entries, 0,
3860                 nic->num_entries * sizeof(struct s2io_msix_entry));
3861
3862         nic->entries[0].entry = 0;
3863         nic->s2io_entries[0].entry = 0;
3864         nic->s2io_entries[0].in_use = MSIX_FLG;
3865         nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3866         nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3867
3868         for (i = 1; i < nic->num_entries; i++) {
3869                 nic->entries[i].entry = ((i - 1) * 8) + 1;
3870                 nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3871                 nic->s2io_entries[i].arg = NULL;
3872                 nic->s2io_entries[i].in_use = 0;
3873         }
3874
3875         rx_mat = readq(&bar0->rx_mat);
3876         for (j = 0; j < nic->config.rx_ring_num; j++) {
3877                 rx_mat |= RX_MAT_SET(j, msix_indx);
3878                 nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3879                 nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3880                 nic->s2io_entries[j+1].in_use = MSIX_FLG;
3881                 msix_indx += 8;
3882         }
3883         writeq(rx_mat, &bar0->rx_mat);
3884         readq(&bar0->rx_mat);
3885
3886         ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
3887         /* We fail init if error or we get less vectors than min required */
3888         if (ret) {
3889                 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3890                 kfree(nic->entries);
3891                 nic->mac_control.stats_info->sw_stat.mem_freed
3892                         += (nic->num_entries * sizeof(struct msix_entry));
3893                 kfree(nic->s2io_entries);
3894                 nic->mac_control.stats_info->sw_stat.mem_freed
3895                         += (nic->num_entries * sizeof(struct s2io_msix_entry));
3896                 nic->entries = NULL;
3897                 nic->s2io_entries = NULL;
3898                 return -ENOMEM;
3899         }
3900
3901         /*
3902          * To enable MSI-X, MSI also needs to be enabled, due to a bug
3903          * in the herc NIC. (Temp change, needs to be removed later)
3904          */
3905         pci_read_config_word(nic->pdev, 0x42, &msi_control);
3906         msi_control |= 0x1; /* Enable MSI */
3907         pci_write_config_word(nic->pdev, 0x42, msi_control);
3908
3909         return 0;
3910 }
3911
3912 /* Handle software interrupt used during MSI(X) test */
3913 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3914 {
3915         struct s2io_nic *sp = dev_id;
3916
3917         sp->msi_detected = 1;
3918         wake_up(&sp->msi_wait);
3919
3920         return IRQ_HANDLED;
3921 }
3922
/* Test interrupt path by forcing a software IRQ */
static int s2io_test_msi(struct s2io_nic *sp)
{
        struct pci_dev *pdev = sp->pdev;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        int err;
        u64 val64, saved64;

        /* Hook the test handler onto MSI-X vector 1. */
        err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
                        sp->name, sp);
        if (err) {
                DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
                       sp->dev->name, pci_name(pdev), pdev->irq);
                return err;
        }

        init_waitqueue_head (&sp->msi_wait);
        sp->msi_detected = 0;

        /* Arm the scheduled-interrupt timer, one-shot, routed to MSI 1. */
        saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
        val64 |= SCHED_INT_CTRL_ONE_SHOT;
        val64 |= SCHED_INT_CTRL_TIMER_EN;
        val64 |= SCHED_INT_CTRL_INT2MSI(1);
        writeq(val64, &bar0->scheduled_int_ctrl);

        /* Wait up to HZ/10 (100ms) for s2io_test_intr() to fire. */
        wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

        if (!sp->msi_detected) {
                /* MSI(X) test failed, go back to INTx mode */
                DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
                        "using MSI(X) during test\n", sp->dev->name,
                        pci_name(pdev));

                err = -EOPNOTSUPP;
        }

        free_irq(sp->entries[1].vector, sp);

        /* Restore the original scheduled-interrupt control value. */
        writeq(saved64, &bar0->scheduled_int_ctrl);

        return err;
}
3965
3966 static void remove_msix_isr(struct s2io_nic *sp)
3967 {
3968         int i;
3969         u16 msi_control;
3970
3971         for (i = 0; i < sp->num_entries; i++) {
3972                 if (sp->s2io_entries[i].in_use ==
3973                         MSIX_REGISTERED_SUCCESS) {
3974                         int vector = sp->entries[i].vector;
3975                         void *arg = sp->s2io_entries[i].arg;
3976                         free_irq(vector, arg);
3977                 }
3978         }
3979
3980         kfree(sp->entries);
3981         kfree(sp->s2io_entries);
3982         sp->entries = NULL;
3983         sp->s2io_entries = NULL;
3984
3985         pci_read_config_word(sp->pdev, 0x42, &msi_control);
3986         msi_control &= 0xFFFE; /* Disable MSI */
3987         pci_write_config_word(sp->pdev, 0x42, msi_control);
3988
3989         pci_disable_msix(sp->pdev);
3990 }
3991
3992 static void remove_inta_isr(struct s2io_nic *sp)
3993 {
3994         struct net_device *dev = sp->dev;
3995
3996         free_irq(sp->pdev->irq, dev);
3997 }
3998
3999 /* ********************************************************* *
4000  * Functions defined below concern the OS part of the driver *
4001  * ********************************************************* */
4002
4003 /**
4004  *  s2io_open - open entry point of the driver
4005  *  @dev : pointer to the device structure.
4006  *  Description:
4007  *  This function is the open entry point of the driver. It mainly calls a
4008  *  function to allocate Rx buffers and inserts them into the buffer
4009  *  descriptors and then enables the Rx part of the NIC.
4010  *  Return value:
4011  *  0 on success and an appropriate (-)ve integer as defined in errno.h
4012  *   file on failure.
4013  */
4014
4015 static int s2io_open(struct net_device *dev)
4016 {
4017         struct s2io_nic *sp = dev->priv;
4018         int err = 0;
4019
4020         /*
4021          * Make sure you have link off by default every time
4022          * Nic is initialized
4023          */
4024         netif_carrier_off(dev);
4025         sp->last_link_state = 0;
4026
4027         /* Initialize H/W and enable interrupts */
4028         err = s2io_card_up(sp);
4029         if (err) {
4030                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4031                           dev->name);
4032                 goto hw_init_failed;
4033         }
4034
4035         if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
4036                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
4037                 s2io_card_down(sp);
4038                 err = -ENODEV;
4039                 goto hw_init_failed;
4040         }
4041         s2io_start_all_tx_queue(sp);
4042         return 0;
4043
4044 hw_init_failed:
4045         if (sp->config.intr_type == MSI_X) {
4046                 if (sp->entries) {
4047                         kfree(sp->entries);
4048                         sp->mac_control.stats_info->sw_stat.mem_freed
4049                         += (sp->num_entries * sizeof(struct msix_entry));
4050                 }
4051                 if (sp->s2io_entries) {
4052                         kfree(sp->s2io_entries);
4053                         sp->mac_control.stats_info->sw_stat.mem_freed
4054                         += (sp->num_entries * sizeof(struct s2io_msix_entry));
4055                 }
4056         }
4057         return err;
4058 }
4059
4060 /**
4061  *  s2io_close -close entry point of the driver
4062  *  @dev : device pointer.
4063  *  Description:
4064  *  This is the stop entry point of the driver. It needs to undo exactly
4065  *  whatever was done by the open entry point,thus it's usually referred to
4066  *  as the close function.Among other things this function mainly stops the
4067  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
4068  *  Return value:
4069  *  0 on success and an appropriate (-)ve integer as defined in errno.h
4070  *  file on failure.
4071  */
4072
4073 static int s2io_close(struct net_device *dev)
4074 {
4075         struct s2io_nic *sp = dev->priv;
4076         struct config_param *config = &sp->config;
4077         u64 tmp64;
4078         int offset;
4079
4080         /* Return if the device is already closed               *
4081         *  Can happen when s2io_card_up failed in change_mtu    *
4082         */
4083         if (!is_s2io_card_up(sp))
4084                 return 0;
4085
4086         s2io_stop_all_tx_queue(sp);
4087         /* delete all populated mac entries */
4088         for (offset = 1; offset < config->max_mc_addr; offset++) {
4089                 tmp64 = do_s2io_read_unicast_mc(sp, offset);
4090                 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
4091                         do_s2io_delete_unicast_mc(sp, tmp64);
4092         }
4093
4094         s2io_card_down(sp);
4095
4096         return 0;
4097 }
4098
4099 /**
 *  s2io_xmit - Tx entry point of the driver
4101  *  @skb : the socket buffer containing the Tx data.
4102  *  @dev : device pointer.
4103  *  Description :
4104  *  This function is the Tx entry point of the driver. S2IO NIC supports
4105  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
 *  NOTE: when the device can't queue the pkt, just the trans_start variable
 *  will not be updated.
4108  *  Return value:
4109  *  0 on success & 1 on failure.
4110  */
4111
static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct s2io_nic *sp = dev->priv;
        u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
        register u64 val64;
        struct TxD *txdp;
        struct TxFIFO_element __iomem *tx_fifo;
        unsigned long flags = 0;
        u16 vlan_tag = 0;
        struct fifo_info *fifo = NULL;
        struct mac_info *mac_control;
        struct config_param *config;
        int do_spin_lock = 1;
        int offload_type;
        int enable_per_list_interrupt = 0;
        struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

        mac_control = &sp->mac_control;
        config = &sp->config;

        DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

        /* Drop zero-length packets outright. */
        if (unlikely(skb->len <= 0)) {
                DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
                dev_kfree_skb_any(skb);
                return 0;
        }

        /* Card is going down for reset: silently drop the packet. */
        if (!is_s2io_card_up(sp)) {
                DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
                          dev->name);
                dev_kfree_skb(skb);
                return 0;
        }

        queue = 0;
        if (sp->vlgrp && vlan_tx_tag_present(skb))
                vlan_tag = vlan_tx_tag_get(skb);
        if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
                /* Default steering: hash the TCP/UDP port pair of
                 * unfragmented IPv4 packets onto a FIFO. */
                if (skb->protocol == htons(ETH_P_IP)) {
                        struct iphdr *ip;
                        struct tcphdr *th;
                        ip = ip_hdr(skb);

                        if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
                                /* NOTE(review): th is formed before checking
                                 * ip->protocol; for UDP the source/dest port
                                 * fields sit at the same offsets, so the
                                 * access below is still correct. */
                                th = (struct tcphdr *)(((unsigned char *)ip) +
                                                ip->ihl*4);

                                if (ip->protocol == IPPROTO_TCP) {
                                        queue_len = sp->total_tcp_fifos;
                                        queue = (ntohs(th->source) +
                                                        ntohs(th->dest)) &
                                            sp->fifo_selector[queue_len - 1];
                                        if (queue >= queue_len)
                                                queue = queue_len - 1;
                                } else if (ip->protocol == IPPROTO_UDP) {
                                        queue_len = sp->total_udp_fifos;
                                        queue = (ntohs(th->source) +
                                                        ntohs(th->dest)) &
                                            sp->fifo_selector[queue_len - 1];
                                        if (queue >= queue_len)
                                                queue = queue_len - 1;
                                        queue += sp->udp_fifo_idx;
                                        if (skb->len > 1024)
                                                enable_per_list_interrupt = 1;
                                        /* UDP path uses trylock below */
                                        do_spin_lock = 0;
                                }
                        }
                }
        } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
                /* get fifo number based on skb->priority value */
                queue = config->fifo_mapping
                                        [skb->priority & (MAX_TX_FIFOS - 1)];
        fifo = &mac_control->fifos[queue];

        if (do_spin_lock)
                spin_lock_irqsave(&fifo->tx_lock, flags);
        else {
                /* Contended lock: tell the stack to retry the packet. */
                if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
                        return NETDEV_TX_LOCKED;
        }

#ifdef CONFIG_NETDEVICES_MULTIQUEUE
        if (sp->config.multiq) {
                if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
                        spin_unlock_irqrestore(&fifo->tx_lock, flags);
                        return NETDEV_TX_BUSY;
                }
        } else
#endif
        if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
                if (netif_queue_stopped(dev)) {
                        spin_unlock_irqrestore(&fifo->tx_lock, flags);
                        return NETDEV_TX_BUSY;
                }
        }

        put_off = (u16) fifo->tx_curr_put_info.offset;
        get_off = (u16) fifo->tx_curr_get_info.offset;
        txdp = (struct TxD *) fifo->list_info[put_off].list_virt_addr;

        queue_len = fifo->tx_curr_put_info.fifo_len + 1;
        /* Avoid "put" pointer going beyond "get" pointer */
        if (txdp->Host_Control ||
                   ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
                DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
                s2io_stop_tx_queue(sp, fifo->fifo_no);
                dev_kfree_skb(skb);
                spin_unlock_irqrestore(&fifo->tx_lock, flags);
                return 0;
        }

        offload_type = s2io_offload_type(skb);
        if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
                /* TCP segmentation offload: program the MSS. */
                txdp->Control_1 |= TXD_TCP_LSO_EN;
                txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
        }
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                /* Hardware checksum offload for IPv4/TCP/UDP. */
                txdp->Control_2 |=
                    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
                     TXD_TX_CKO_UDP_EN);
        }
        txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
        txdp->Control_1 |= TXD_LIST_OWN_XENA;
        txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
        if (enable_per_list_interrupt)
                if (put_off & (queue_len >> 5))
                        txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
        if (vlan_tag) {
                txdp->Control_2 |= TXD_VLAN_ENABLE;
                txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
        }

        frg_len = skb->len - skb->data_len;
        if (offload_type == SKB_GSO_UDP) {
                /* UDP fragmentation offload: TxD0 carries an 8-byte
                 * in-band header holding the IPv6 fragment id. */
                int ufo_size;

                ufo_size = s2io_udp_mss(skb);
                ufo_size &= ~7;
                txdp->Control_1 |= TXD_UFO_EN;
                txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
                txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
                /* both variants do cpu_to_be64(be32_to_cpu(...)) */
                fifo->ufo_in_band_v[put_off] =
                                (__force u64)skb_shinfo(skb)->ip6_frag_id;
#else
                fifo->ufo_in_band_v[put_off] =
                                (__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
                txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
                txdp->Buffer_Pointer = pci_map_single(sp->pdev,
                                        fifo->ufo_in_band_v,
                                        sizeof(u64), PCI_DMA_TODEVICE);
                if((txdp->Buffer_Pointer == 0) ||
                        (txdp->Buffer_Pointer == DMA_ERROR_CODE))
                        goto pci_map_failed;
                txdp++;
        }

        /* Map the linear part of the skb for DMA. */
        txdp->Buffer_Pointer = pci_map_single
            (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
        if((txdp->Buffer_Pointer == 0) ||
                (txdp->Buffer_Pointer == DMA_ERROR_CODE))
                goto pci_map_failed;

        txdp->Host_Control = (unsigned long) skb;
        txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
        if (offload_type == SKB_GSO_UDP)
                txdp->Control_1 |= TXD_UFO_EN;

        frg_cnt = skb_shinfo(skb)->nr_frags;
        /* For fragmented SKB. */
        for (i = 0; i < frg_cnt; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                /* A '0' length fragment will be ignored */
                if (!frag->size)
                        continue;
                txdp++;
                txdp->Buffer_Pointer = (u64) pci_map_page
                    (sp->pdev, frag->page, frag->page_offset,
                     frag->size, PCI_DMA_TODEVICE);
                txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
                if (offload_type == SKB_GSO_UDP)
                        txdp->Control_1 |= TXD_UFO_EN;
        }
        txdp->Control_1 |= TXD_GATHER_CODE_LAST;

        if (offload_type == SKB_GSO_UDP)
                frg_cnt++; /* as Txd0 was used for inband header */

        /* Hand the descriptor list to the hardware FIFO. */
        tx_fifo = mac_control->tx_FIFO_start[queue];
        val64 = fifo->list_info[put_off].list_phy_addr;
        writeq(val64, &tx_fifo->TxDL_Pointer);

        val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
                 TX_FIFO_LAST_LIST);
        if (offload_type)
                val64 |= TX_FIFO_SPECIAL_FUNC;

        writeq(val64, &tx_fifo->List_Control);

        mmiowb();

        /* Advance the put pointer, wrapping at the end of the ring. */
        put_off++;
        if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
                put_off = 0;
        fifo->tx_curr_put_info.offset = put_off;

        /* Avoid "put" pointer going beyond "get" pointer */
        if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
                sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
                DBG_PRINT(TX_DBG,
                          "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
                          put_off, get_off);
                s2io_stop_tx_queue(sp, fifo->fifo_no);
        }
        mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
        dev->trans_start = jiffies;
        spin_unlock_irqrestore(&fifo->tx_lock, flags);

        if (sp->config.intr_type == MSI_X)
                tx_intr_handler(fifo);

        return 0;
pci_map_failed:
        stats->pci_map_fail_cnt++;
        s2io_stop_tx_queue(sp, fifo->fifo_no);
        stats->mem_freed += skb->truesize;
        dev_kfree_skb(skb);
        spin_unlock_irqrestore(&fifo->tx_lock, flags);
        return 0;
}
4345
4346 static void
4347 s2io_alarm_handle(unsigned long data)
4348 {
4349         struct s2io_nic *sp = (struct s2io_nic *)data;
4350         struct net_device *dev = sp->dev;
4351
4352         s2io_handle_errors(dev);
4353         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4354 }
4355
/* MSI-X interrupt handler for a single RX ring. */
static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
{
        struct ring_info *ring = (struct ring_info *)dev_id;
        struct s2io_nic *sp = ring->nic;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        struct net_device *dev = sp->dev;

        if (unlikely(!is_s2io_card_up(sp)))
                return IRQ_HANDLED;

        if (sp->config.napi) {
                u8 *addr = NULL, val8 = 0;

                /* Mask this ring's vector in the XMSI mask register and
                 * defer the work to NAPI polling; the readb flushes the
                 * posted write before scheduling. */
                addr = (u8 *)&bar0->xmsi_mask_reg;
                addr += (7 - ring->ring_no);
                val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
                writeb(val8, addr);
                val8 = readb(addr);
                netif_rx_schedule(dev, &ring->napi);
        } else {
                /* Non-NAPI: process the ring and replenish RX buffers now. */
                rx_intr_handler(ring, 0);
                s2io_chk_rx_buffers(ring);
        }

        return IRQ_HANDLED;
}
4382
/*
 * MSI-X interrupt handler shared by all TX FIFOs: masks general
 * interrupts, services TX completions on every fifo, then restores
 * the saved interrupt mask.
 */
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
        int i;
        struct fifo_info *fifos = (struct fifo_info *)dev_id;
        struct s2io_nic *sp = fifos->nic;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        struct config_param *config  = &sp->config;
        u64 reason;

        if (unlikely(!is_s2io_card_up(sp)))
                return IRQ_NONE;

        /* All-ones status means the read itself failed (card gone). */
        reason = readq(&bar0->general_int_status);
        if (unlikely(reason == S2IO_MINUS_ONE))
                /* Nothing much can be done. Get out */
                return IRQ_HANDLED;

        /* Mask everything while TX completions are processed. */
        writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

        if (reason & GEN_INTR_TXTRAFFIC)
                writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

        for (i = 0; i < config->tx_fifo_num; i++)
                tx_intr_handler(&fifos[i]);

        /* Restore the mask; the readl flushes the posted write. */
        writeq(sp->general_int_mask, &bar0->general_int_mask);
        readl(&bar0->general_int_status);

        return IRQ_HANDLED;
}
4413
/*
 * s2io_txpic_intr_handle - service PIC (GPIO) interrupts: link up/down.
 * @sp: device private structure.
 *
 * Reads pic_int_status and, for GPIO-sourced interrupts, inspects
 * gpio_int_reg to decide between three cases: both up+down latched
 * (unstable link), link-up, or link-down.  Adjusts the GPIO interrupt
 * mask so that only the transition of interest remains unmasked, and
 * drives the adapter enable/LED bits accordingly.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64;

        val64 = readq(&bar0->pic_int_status);
        if (val64 & PIC_INT_GPIO) {
                val64 = readq(&bar0->gpio_int_reg);
                if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
                    (val64 & GPIO_INT_REG_LINK_UP)) {
                        /*
                         * This is unstable state so clear both up/down
                         * interrupt and adapter to re-evaluate the link state.
                         * Writing 1s back clears the latched bits; then both
                         * link interrupts are unmasked again.
                         */
                        val64 |=  GPIO_INT_REG_LINK_DOWN;
                        val64 |= GPIO_INT_REG_LINK_UP;
                        writeq(val64, &bar0->gpio_int_reg);
                        val64 = readq(&bar0->gpio_int_mask);
                        val64 &= ~(GPIO_INT_MASK_LINK_UP |
                                   GPIO_INT_MASK_LINK_DOWN);
                        writeq(val64, &bar0->gpio_int_mask);
                }
                else if (val64 & GPIO_INT_REG_LINK_UP) {
                        /*
                         * NOTE(review): adapter_status is read but the value
                         * is discarded — presumably only the read side effect
                         * matters; confirm against hardware spec.
                         */
                        val64 = readq(&bar0->adapter_status);
                                /* Enable Adapter */
                        val64 = readq(&bar0->adapter_control);
                        val64 |= ADAPTER_CNTL_EN;
                        writeq(val64, &bar0->adapter_control);
                        val64 |= ADAPTER_LED_ON;
                        writeq(val64, &bar0->adapter_control);
                        if (!sp->device_enabled_once)
                                sp->device_enabled_once = 1;

                        s2io_link(sp, LINK_UP);
                        /*
                         * unmask link down interrupt and mask link-up
                         * intr
                         */
                        val64 = readq(&bar0->gpio_int_mask);
                        val64 &= ~GPIO_INT_MASK_LINK_DOWN;
                        val64 |= GPIO_INT_MASK_LINK_UP;
                        writeq(val64, &bar0->gpio_int_mask);

                }else if (val64 & GPIO_INT_REG_LINK_DOWN) {
                        /* value unused here too — see NOTE(review) above */
                        val64 = readq(&bar0->adapter_status);
                        s2io_link(sp, LINK_DOWN);
                        /* Link is down so unmask link up interrupt */
                        val64 = readq(&bar0->gpio_int_mask);
                        val64 &= ~GPIO_INT_MASK_LINK_UP;
                        val64 |= GPIO_INT_MASK_LINK_DOWN;
                        writeq(val64, &bar0->gpio_int_mask);

                        /* turn off LED */
                        val64 = readq(&bar0->adapter_control);
                        val64 = val64 &(~ADAPTER_LED_ON);
                        writeq(val64, &bar0->adapter_control);
                }
        }
        /* final read of gpio_int_mask; result unused (likely a flush) */
        val64 = readq(&bar0->gpio_int_mask);
}
4474
4475 /**
4476  *  do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4477  *  @value: alarm bits
4478  *  @addr: address value
4479  *  @cnt: counter variable
4480  *  Description: Check for alarm and increment the counter
4481  *  Return Value:
4482  *  1 - if alarm bit set
4483  *  0 - if alarm bit is not set
4484  */
4485 static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4486                           unsigned long long *cnt)
4487 {
4488         u64 val64;
4489         val64 = readq(addr);
4490         if ( val64 & value ) {
4491                 writeq(val64, addr);
4492                 (*cnt)++;
4493                 return 1;
4494         }
4495         return 0;
4496
4497 }
4498
4499 /**
4500  *  s2io_handle_errors - Xframe error indication handler
4501  *  @nic: device private variable
4502  *  Description: Handle alarms such as loss of link, single or
4503  *  double ECC errors, critical and serious errors.
4504  *  Return Value:
4505  *  NONE
4506  */
4507 static void s2io_handle_errors(void * dev_id)
4508 {
4509         struct net_device *dev = (struct net_device *) dev_id;
4510         struct s2io_nic *sp = dev->priv;
4511         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4512         u64 temp64 = 0,val64=0;
4513         int i = 0;
4514
4515         struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4516         struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4517
4518         if (!is_s2io_card_up(sp))
4519                 return;
4520
4521         if (pci_channel_offline(sp->pdev))
4522                 return;
4523
4524         memset(&sw_stat->ring_full_cnt, 0,
4525                 sizeof(sw_stat->ring_full_cnt));
4526
4527         /* Handling the XPAK counters update */
4528         if(stats->xpak_timer_count < 72000) {
4529                 /* waiting for an hour */
4530                 stats->xpak_timer_count++;
4531         } else {
4532                 s2io_updt_xpak_counter(dev);
4533                 /* reset the count to zero */
4534                 stats->xpak_timer_count = 0;
4535         }
4536
4537         /* Handling link status change error Intr */
4538         if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4539                 val64 = readq(&bar0->mac_rmac_err_reg);
4540                 writeq(val64, &bar0->mac_rmac_err_reg);
4541                 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4542                         schedule_work(&sp->set_link_task);
4543         }
4544
4545         /* In case of a serious error, the device will be Reset. */
4546         if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4547                                 &sw_stat->serious_err_cnt))
4548                 goto reset;
4549
4550         /* Check for data parity error */
4551         if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4552                                 &sw_stat->parity_err_cnt))
4553                 goto reset;
4554
4555         /* Check for ring full counter */
4556         if (sp->device_type == XFRAME_II_DEVICE) {
4557                 val64 = readq(&bar0->ring_bump_counter1);
4558                 for (i=0; i<4; i++) {
4559                         temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4560                         temp64 >>= 64 - ((i+1)*16);
4561                         sw_stat->ring_full_cnt[i] += temp64;
4562                 }
4563
4564                 val64 = readq(&bar0->ring_bump_counter2);
4565                 for (i=0; i<4; i++) {
4566                         temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4567                         temp64 >>= 64 - ((i+1)*16);
4568                          sw_stat->ring_full_cnt[i+4] += temp64;
4569                 }
4570         }
4571
4572         val64 = readq(&bar0->txdma_int_status);
4573         /*check for pfc_err*/
4574         if (val64 & TXDMA_PFC_INT) {
4575                 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
4576                                 PFC_MISC_0_ERR | PFC_MISC_1_ERR|
4577                                 PFC_PCIX_ERR, &bar0->pfc_err_reg,
4578                                 &sw_stat->pfc_err_cnt))
4579                         goto reset;
4580                 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
4581                                 &sw_stat->pfc_err_cnt);
4582         }
4583
4584         /*check for tda_err*/
4585         if (val64 & TXDMA_TDA_INT) {
4586                 if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
4587                                 TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
4588                                 &sw_stat->tda_err_cnt))
4589                         goto reset;
4590                 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4591                                 &bar0->tda_err_reg, &sw_stat->tda_err_cnt);
4592         }
4593         /*check for pcc_err*/
4594         if (val64 & TXDMA_PCC_INT) {
4595                 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
4596                                 | PCC_N_SERR | PCC_6_COF_OV_ERR
4597                                 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
4598                                 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
4599                                 | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
4600                                 &sw_stat->pcc_err_cnt))
4601                         goto reset;
4602                 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4603                                 &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
4604         }
4605
4606         /*check for tti_err*/
4607         if (val64 & TXDMA_TTI_INT) {
4608                 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
4609                                 &sw_stat->tti_err_cnt))
4610                         goto reset;
4611                 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4612                                 &bar0->tti_err_reg, &sw_stat->tti_err_cnt);
4613         }
4614
4615         /*check for lso_err*/
4616         if (val64 & TXDMA_LSO_INT) {
4617                 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
4618                                 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4619                                 &bar0->lso_err_reg, &sw_stat->lso_err_cnt))
4620                         goto reset;
4621                 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4622                                 &bar0->lso_err_reg, &sw_stat->lso_err_cnt);
4623         }
4624
4625         /*check for tpa_err*/
4626         if (val64 & TXDMA_TPA_INT) {
4627                 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
4628                         &sw_stat->tpa_err_cnt))
4629                         goto reset;
4630                 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
4631                         &sw_stat->tpa_err_cnt);
4632         }
4633
4634         /*check for sm_err*/
4635         if (val64 & TXDMA_SM_INT) {
4636                 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
4637                         &sw_stat->sm_err_cnt))
4638                         goto reset;
4639         }
4640
4641         val64 = readq(&bar0->mac_int_status);
4642         if (val64 & MAC_INT_STATUS_TMAC_INT) {
4643                 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4644                                 &bar0->mac_tmac_err_reg,
4645                                 &sw_stat->mac_tmac_err_cnt))
4646                         goto reset;
4647                 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
4648                                 | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
4649                                 &bar0->mac_tmac_err_reg,
4650                                 &sw_stat->mac_tmac_err_cnt);
4651         }
4652
4653         val64 = readq(&bar0->xgxs_int_status);
4654         if (val64 & XGXS_INT_STATUS_TXGXS) {
4655                 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4656                                 &bar0->xgxs_txgxs_err_reg,
4657                                 &sw_stat->xgxs_txgxs_err_cnt))
4658                         goto reset;
4659                 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4660                                 &bar0->xgxs_txgxs_err_reg,
4661                                 &sw_stat->xgxs_txgxs_err_cnt);
4662         }
4663
4664         val64 = readq(&bar0->rxdma_int_status);
4665         if (val64 & RXDMA_INT_RC_INT_M) {
4666                 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
4667                                 | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
4668                                 &bar0->rc_err_reg, &sw_stat->rc_err_cnt))
4669                         goto reset;
4670                 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
4671                                 | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4672                                 &sw_stat->rc_err_cnt);
4673                 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
4674                                 | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
4675                                 &sw_stat->prc_pcix_err_cnt))
4676                         goto reset;
4677                 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
4678                                 | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
4679                                 &sw_stat->prc_pcix_err_cnt);
4680         }
4681
4682         if (val64 & RXDMA_INT_RPA_INT_M) {
4683                 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4684                                 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
4685                         goto reset;
4686                 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4687                                 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
4688         }
4689
4690         if (val64 & RXDMA_INT_RDA_INT_M) {
4691                 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
4692                                 | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
4693                                 | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
4694                                 &bar0->rda_err_reg, &sw_stat->rda_err_cnt))
4695                         goto reset;
4696                 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
4697                                 | RDA_MISC_ERR | RDA_PCIX_ERR,
4698                                 &bar0->rda_err_reg, &sw_stat->rda_err_cnt);
4699         }
4700
4701         if (val64 & RXDMA_INT_RTI_INT_M) {
4702                 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
4703                                 &sw_stat->rti_err_cnt))
4704                         goto reset;
4705                 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4706                                 &bar0->rti_err_reg, &sw_stat->rti_err_cnt);
4707         }
4708
4709         val64 = readq(&bar0->mac_int_status);
4710         if (val64 & MAC_INT_STATUS_RMAC_INT) {
4711                 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4712                                 &bar0->mac_rmac_err_reg,
4713                                 &sw_stat->mac_rmac_err_cnt))
4714                         goto reset;
4715                 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
4716                                 RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
4717                                 &sw_stat->mac_rmac_err_cnt);
4718         }
4719
4720         val64 = readq(&bar0->xgxs_int_status);
4721         if (val64 & XGXS_INT_STATUS_RXGXS) {
4722                 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4723                                 &bar0->xgxs_rxgxs_err_reg,
4724                                 &sw_stat->xgxs_rxgxs_err_cnt))
4725                         goto reset;
4726         }
4727
4728         val64 = readq(&bar0->mc_int_status);
4729         if(val64 & MC_INT_STATUS_MC_INT) {
4730                 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
4731                                 &sw_stat->mc_err_cnt))
4732                         goto reset;
4733
4734                 /* Handling Ecc errors */
4735                 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4736                         writeq(val64, &bar0->mc_err_reg);
4737                         if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4738                                 sw_stat->double_ecc_errs++;
4739                                 if (sp->device_type != XFRAME_II_DEVICE) {
4740                                         /*
4741                                          * Reset XframeI only if critical error
4742                                          */
4743                                         if (val64 &
4744                                                 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4745                                                 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4746                                                                 goto reset;
4747                                         }
4748                         } else
4749                                 sw_stat->single_ecc_errs++;
4750                 }
4751         }
4752         return;
4753
4754 reset:
4755         s2io_stop_all_tx_queue(sp);
4756         schedule_work(&sp->rst_timer_task);
4757         sw_stat->soft_reset_cnt++;
4758         return;
4759 }
4760
4761 /**
4762  *  s2io_isr - ISR handler of the device .
4763  *  @irq: the irq of the device.
4764  *  @dev_id: a void pointer to the dev structure of the NIC.
4765  *  Description:  This function is the ISR handler of the device. It
4766  *  identifies the reason for the interrupt and calls the relevant
4767  *  service routines. As a contongency measure, this ISR allocates the
4768  *  recv buffers, if their numbers are below the panic value which is
4769  *  presently set to 25% of the original number of rcv buffers allocated.
4770  *  Return value:
4771  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4772  *   IRQ_NONE: will be returned if interrupt is not from our device
4773  */
4774 static irqreturn_t s2io_isr(int irq, void *dev_id)
4775 {
4776         struct net_device *dev = (struct net_device *) dev_id;
4777         struct s2io_nic *sp = dev->priv;
4778         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4779         int i;
4780         u64 reason = 0;
4781         struct mac_info *mac_control;
4782         struct config_param *config;
4783
4784         /* Pretend we handled any irq's from a disconnected card */
4785         if (pci_channel_offline(sp->pdev))
4786                 return IRQ_NONE;
4787
4788         if (!is_s2io_card_up(sp))
4789                 return IRQ_NONE;
4790
4791         mac_control = &sp->mac_control;
4792         config = &sp->config;
4793
4794         /*
4795          * Identify the cause for interrupt and call the appropriate
4796          * interrupt handler. Causes for the interrupt could be;
4797          * 1. Rx of packet.
4798          * 2. Tx complete.
4799          * 3. Link down.
4800          */
4801         reason = readq(&bar0->general_int_status);
4802
4803         if (unlikely(reason == S2IO_MINUS_ONE) ) {
4804                 /* Nothing much can be done. Get out */
4805                 return IRQ_HANDLED;
4806         }
4807
4808         if (reason & (GEN_INTR_RXTRAFFIC |
4809                 GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
4810         {
4811                 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4812
4813                 if (config->napi) {
4814                         if (reason & GEN_INTR_RXTRAFFIC) {
4815                                 netif_rx_schedule(dev, &sp->napi);
4816                                 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4817                                 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4818                                 readl(&bar0->rx_traffic_int);
4819                         }
4820                 } else {
4821                         /*
4822                          * rx_traffic_int reg is an R1 register, writing all 1's
4823                          * will ensure that the actual interrupt causing bit
4824                          * get's cleared and hence a read can be avoided.
4825                          */
4826                         if (reason & GEN_INTR_RXTRAFFIC)
4827                                 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4828
4829                         for (i = 0; i < config->rx_ring_num; i++)
4830                                 rx_intr_handler(&mac_control->rings[i], 0);
4831                 }
4832
4833                 /*
4834                  * tx_traffic_int reg is an R1 register, writing all 1's
4835                  * will ensure that the actual interrupt causing bit get's
4836                  * cleared and hence a read can be avoided.
4837                  */
4838                 if (reason & GEN_INTR_TXTRAFFIC)
4839                         writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4840
4841                 for (i = 0; i < config->tx_fifo_num; i++)
4842                         tx_intr_handler(&mac_control->fifos[i]);
4843
4844                 if (reason & GEN_INTR_TXPIC)
4845                         s2io_txpic_intr_handle(sp);
4846
4847                 /*
4848                  * Reallocate the buffers from the interrupt handler itself.
4849                  */
4850                 if (!config->napi) {
4851                         for (i = 0; i < config->rx_ring_num; i++)
4852                                 s2io_chk_rx_buffers(&mac_control->rings[i]);
4853                 }
4854                 writeq(sp->general_int_mask, &bar0->general_int_mask);
4855                 readl(&bar0->general_int_status);
4856
4857                 return IRQ_HANDLED;
4858
4859         }
4860         else if (!reason) {
4861                 /* The interrupt was not raised by us */
4862                 return IRQ_NONE;
4863         }
4864
4865         return IRQ_HANDLED;
4866 }
4867
4868 /**
4869  * s2io_updt_stats -
4870  */
4871 static void s2io_updt_stats(struct s2io_nic *sp)
4872 {
4873         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4874         u64 val64;
4875         int cnt = 0;
4876
4877         if (is_s2io_card_up(sp)) {
4878                 /* Apprx 30us on a 133 MHz bus */
4879                 val64 = SET_UPDT_CLICKS(10) |
4880                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4881                 writeq(val64, &bar0->stat_cfg);
4882                 do {
4883                         udelay(100);
4884                         val64 = readq(&bar0->stat_cfg);
4885                         if (!(val64 & s2BIT(0)))
4886                                 break;
4887                         cnt++;
4888                         if (cnt == 5)
4889                                 break; /* Updt failed */
4890                 } while(1);
4891         }
4892 }
4893
4894 /**
4895  *  s2io_get_stats - Updates the device statistics structure.
4896  *  @dev : pointer to the device structure.
4897  *  Description:
4898  *  This function updates the device statistics structure in the s2io_nic
4899  *  structure and returns a pointer to the same.
4900  *  Return value:
4901  *  pointer to the updated net_device_stats structure.
4902  */
4903
4904 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4905 {
4906         struct s2io_nic *sp = dev->priv;
4907         struct mac_info *mac_control;
4908         struct config_param *config;
4909         int i;
4910
4911
4912         mac_control = &sp->mac_control;
4913         config = &sp->config;
4914
4915         /* Configure Stats for immediate updt */
4916         s2io_updt_stats(sp);
4917
4918         sp->stats.tx_packets =
4919                 le32_to_cpu(mac_control->stats_info->tmac_frms);
4920         sp->stats.tx_errors =
4921                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4922         sp->stats.rx_errors =
4923                 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4924         sp->stats.multicast =
4925                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4926         sp->stats.rx_length_errors =
4927                 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4928
4929         /* collect per-ring rx_packets and rx_bytes */
4930         sp->stats.rx_packets = sp->stats.rx_bytes = 0;
4931         for (i = 0; i < config->rx_ring_num; i++) {
4932                 sp->stats.rx_packets += mac_control->rings[i].rx_packets;
4933                 sp->stats.rx_bytes += mac_control->rings[i].rx_bytes;
4934         }
4935
4936         return (&sp->stats);
4937 }
4938
4939 /**
4940  *  s2io_set_multicast - entry point for multicast address enable/disable.
4941  *  @dev : pointer to the device structure
4942  *  Description:
4943  *  This function is a driver entry point which gets called by the kernel
4944  *  whenever multicast addresses must be enabled/disabled. This also gets
4945  *  called to set/reset promiscuous mode. Depending on the deivce flag, we
4946  *  determine, if multicast address must be enabled or if promiscuous mode
4947  *  is to be disabled etc.
4948  *  Return value:
4949  *  void.
4950  */
4951
4952 static void s2io_set_multicast(struct net_device *dev)
4953 {
4954         int i, j, prev_cnt;
4955         struct dev_mc_list *mclist;
4956         struct s2io_nic *sp = dev->priv;
4957         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4958         u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4959             0xfeffffffffffULL;
4960         u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4961         void __iomem *add;
4962         struct config_param *config = &sp->config;
4963
4964         if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4965                 /*  Enable all Multicast addresses */
4966                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4967                        &bar0->rmac_addr_data0_mem);
4968                 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4969                        &bar0->rmac_addr_data1_mem);
4970                 val64 = RMAC_ADDR_CMD_MEM_WE |
4971                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4972                     RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4973                 writeq(val64, &bar0->rmac_addr_cmd_mem);
4974                 /* Wait till command completes */
4975                 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4976                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4977                                         S2IO_BIT_RESET);
4978
4979                 sp->m_cast_flg = 1;
4980                 sp->all_multi_pos = config->max_mc_addr - 1;
4981         } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4982                 /*  Disable all Multicast addresses */
4983                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4984                        &bar0->rmac_addr_data0_mem);
4985                 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4986                        &bar0->rmac_addr_data1_mem);
4987                 val64 = RMAC_ADDR_CMD_MEM_WE |
4988                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4989                     RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4990                 writeq(val64, &bar0->rmac_addr_cmd_mem);
4991                 /* Wait till command completes */
4992                 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4993                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4994                                         S2IO_BIT_RESET);
4995
4996                 sp->m_cast_flg = 0;
4997                 sp->all_multi_pos = 0;
4998         }
4999
5000         if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
5001                 /*  Put the NIC into promiscuous mode */
5002                 add = &bar0->mac_cfg;
5003                 val64 = readq(&bar0->mac_cfg);
5004                 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
5005
5006                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5007                 writel((u32) val64, add);
5008                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5009                 writel((u32) (val64 >> 32), (add + 4));
5010
5011                 if (vlan_tag_strip != 1) {
5012                         val64 = readq(&bar0->rx_pa_cfg);
5013                         val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
5014                         writeq(val64, &bar0->rx_pa_cfg);
5015                         vlan_strip_flag = 0;
5016                 }
5017
5018                 val64 = readq(&bar0->mac_cfg);
5019                 sp->promisc_flg = 1;
5020                 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
5021                           dev->name);
5022         } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
5023                 /*  Remove the NIC from promiscuous mode */
5024                 add = &bar0->mac_cfg;
5025                 val64 = readq(&bar0->mac_cfg);
5026                 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
5027
5028                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5029                 writel((u32) val64, add);
5030                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5031                 writel((u32) (val64 >> 32), (add + 4));
5032
5033                 if (vlan_tag_strip != 0) {
5034                         val64 = readq(&bar0->rx_pa_cfg);
5035                         val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5036                         writeq(val64, &bar0->rx_pa_cfg);
5037                         vlan_strip_flag = 1;
5038                 }
5039
5040                 val64 = readq(&bar0->mac_cfg);
5041                 sp->promisc_flg = 0;
5042                 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
5043                           dev->name);
5044         }
5045
5046         /*  Update individual M_CAST address list */
5047         if ((!sp->m_cast_flg) && dev->mc_count) {
5048                 if (dev->mc_count >
5049                     (config->max_mc_addr - config->max_mac_addr)) {
5050                         DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
5051                                   dev->name);
5052                         DBG_PRINT(ERR_DBG, "can be added, please enable ");
5053                         DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
5054                         return;
5055                 }
5056
5057                 prev_cnt = sp->mc_addr_count;
5058                 sp->mc_addr_count = dev->mc_count;
5059
5060                 /* Clear out the previous list of Mc in the H/W. */
5061                 for (i = 0; i < prev_cnt; i++) {
5062                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5063                                &bar0->rmac_addr_data0_mem);
5064                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5065                                 &bar0->rmac_addr_data1_mem);
5066                         val64 = RMAC_ADDR_CMD_MEM_WE |
5067                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5068                             RMAC_ADDR_CMD_MEM_OFFSET
5069                             (config->mc_start_offset + i);
5070                         writeq(val64, &bar0->rmac_addr_cmd_mem);
5071
5072                         /* Wait for command completes */
5073                         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5074                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5075                                         S2IO_BIT_RESET)) {
5076                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
5077                                           dev->name);
5078                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5079                                 return;
5080                         }
5081                 }
5082
5083                 /* Create the new Rx filter list and update the same in H/W. */
5084                 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
5085                      i++, mclist = mclist->next) {
5086                         memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
5087                                ETH_ALEN);
5088                         mac_addr = 0;
5089                         for (j = 0; j < ETH_ALEN; j++) {
5090                                 mac_addr |= mclist->dmi_addr[j];
5091                                 mac_addr <<= 8;
5092                         }
5093                         mac_addr >>= 8;
5094                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5095                                &bar0->rmac_addr_data0_mem);
5096                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5097                                 &bar0->rmac_addr_data1_mem);
5098                         val64 = RMAC_ADDR_CMD_MEM_WE |
5099                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5100                             RMAC_ADDR_CMD_MEM_OFFSET
5101                             (i + config->mc_start_offset);
5102                         writeq(val64, &bar0->rmac_addr_cmd_mem);
5103
5104                         /* Wait for command completes */
5105                         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5106                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5107                                         S2IO_BIT_RESET)) {
5108                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
5109                                           dev->name);
5110                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5111                                 return;
5112                         }
5113                 }
5114         }
5115 }
5116
5117 /* read from CAM unicast & multicast addresses and store it in
5118  * def_mac_addr structure
5119  */
5120 void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5121 {
5122         int offset;
5123         u64 mac_addr = 0x0;
5124         struct config_param *config = &sp->config;
5125
5126         /* store unicast & multicast mac addresses */
5127         for (offset = 0; offset < config->max_mc_addr; offset++) {
5128                 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5129                 /* if read fails disable the entry */
5130                 if (mac_addr == FAILURE)
5131                         mac_addr = S2IO_DISABLE_MAC_ENTRY;
5132                 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5133         }
5134 }
5135
5136 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5137 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5138 {
5139         int offset;
5140         struct config_param *config = &sp->config;
5141         /* restore unicast mac address */
5142         for (offset = 0; offset < config->max_mac_addr; offset++)
5143                 do_s2io_prog_unicast(sp->dev,
5144                         sp->def_mac_addr[offset].mac_addr);
5145
5146         /* restore multicast mac address */
5147         for (offset = config->mc_start_offset;
5148                 offset < config->max_mc_addr; offset++)
5149                 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5150 }
5151
5152 /* add a multicast MAC address to CAM */
5153 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5154 {
5155         int i;
5156         u64 mac_addr = 0;
5157         struct config_param *config = &sp->config;
5158
5159         for (i = 0; i < ETH_ALEN; i++) {
5160                 mac_addr <<= 8;
5161                 mac_addr |= addr[i];
5162         }
5163         if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5164                 return SUCCESS;
5165
5166         /* check if the multicast mac already preset in CAM */
5167         for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5168                 u64 tmp64;
5169                 tmp64 = do_s2io_read_unicast_mc(sp, i);
5170                 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5171                         break;
5172
5173                 if (tmp64 == mac_addr)
5174                         return SUCCESS;
5175         }
5176         if (i == config->max_mc_addr) {
5177                 DBG_PRINT(ERR_DBG,
5178                         "CAM full no space left for multicast MAC\n");
5179                 return FAILURE;
5180         }
5181         /* Update the internal structure with this new mac address */
5182         do_s2io_copy_mac_addr(sp, i, mac_addr);
5183
5184         return (do_s2io_add_mac(sp, mac_addr, i));
5185 }
5186
5187 /* add MAC address to CAM */
5188 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5189 {
5190         u64 val64;
5191         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5192
5193         writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5194                 &bar0->rmac_addr_data0_mem);
5195
5196         val64 =
5197                 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5198                 RMAC_ADDR_CMD_MEM_OFFSET(off);
5199         writeq(val64, &bar0->rmac_addr_cmd_mem);
5200
5201         /* Wait till command completes */
5202         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5203                 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5204                 S2IO_BIT_RESET)) {
5205                 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5206                 return FAILURE;
5207         }
5208         return SUCCESS;
5209 }
5210 /* deletes a specified unicast/multicast mac entry from CAM */
5211 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5212 {
5213         int offset;
5214         u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5215         struct config_param *config = &sp->config;
5216
5217         for (offset = 1;
5218                 offset < config->max_mc_addr; offset++) {
5219                 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5220                 if (tmp64 == addr) {
5221                         /* disable the entry by writing  0xffffffffffffULL */
5222                         if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5223                                 return FAILURE;
5224                         /* store the new mac list from CAM */
5225                         do_s2io_store_unicast_mc(sp);
5226                         return SUCCESS;
5227                 }
5228         }
5229         DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5230                         (unsigned long long)addr);
5231         return FAILURE;
5232 }
5233
5234 /* read mac entries from CAM */
5235 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5236 {
5237         u64 tmp64 = 0xffffffffffff0000ULL, val64;
5238         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5239
5240         /* read mac addr */
5241         val64 =
5242                 RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5243                 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5244         writeq(val64, &bar0->rmac_addr_cmd_mem);
5245
5246         /* Wait till command completes */
5247         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5248                 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5249                 S2IO_BIT_RESET)) {
5250                 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5251                 return FAILURE;
5252         }
5253         tmp64 = readq(&bar0->rmac_addr_data0_mem);
5254         return (tmp64 >> 16);
5255 }
5256
5257 /**
5258  * s2io_set_mac_addr driver entry point
5259  */
5260
5261 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5262 {
5263         struct sockaddr *addr = p;
5264
5265         if (!is_valid_ether_addr(addr->sa_data))
5266                 return -EINVAL;
5267
5268         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5269
5270         /* store the MAC address in CAM */
5271         return (do_s2io_prog_unicast(dev, dev->dev_addr));
5272 }
5273 /**
5274  *  do_s2io_prog_unicast - Programs the Xframe mac address
5275  *  @dev : pointer to the device structure.
5276  *  @addr: a uchar pointer to the new mac address which is to be set.
5277  *  Description : This procedure will program the Xframe to receive
5278  *  frames with new Mac Address
5279  *  Return value: SUCCESS on success and an appropriate (-)ve integer
5280  *  as defined in errno.h file on failure.
5281  */
5282
5283 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5284 {
5285         struct s2io_nic *sp = dev->priv;
5286         register u64 mac_addr = 0, perm_addr = 0;
5287         int i;
5288         u64 tmp64;
5289         struct config_param *config = &sp->config;
5290
5291         /*
5292         * Set the new MAC address as the new unicast filter and reflect this
5293         * change on the device address registered with the OS. It will be
5294         * at offset 0.
5295         */
5296         for (i = 0; i < ETH_ALEN; i++) {
5297                 mac_addr <<= 8;
5298                 mac_addr |= addr[i];
5299                 perm_addr <<= 8;
5300                 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5301         }
5302
5303         /* check if the dev_addr is different than perm_addr */
5304         if (mac_addr == perm_addr)
5305                 return SUCCESS;
5306
5307         /* check if the mac already preset in CAM */
5308         for (i = 1; i < config->max_mac_addr; i++) {
5309                 tmp64 = do_s2io_read_unicast_mc(sp, i);
5310                 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5311                         break;
5312
5313                 if (tmp64 == mac_addr) {
5314                         DBG_PRINT(INFO_DBG,
5315                         "MAC addr:0x%llx already present in CAM\n",
5316                         (unsigned long long)mac_addr);
5317                         return SUCCESS;
5318                 }
5319         }
5320         if (i == config->max_mac_addr) {
5321                 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5322                 return FAILURE;
5323         }
5324         /* Update the internal structure with this new mac address */
5325         do_s2io_copy_mac_addr(sp, i, mac_addr);
5326         return (do_s2io_add_mac(sp, mac_addr, i));
5327 }
5328
5329 /**
5330  * s2io_ethtool_sset - Sets different link parameters.
5331  * @sp : private member of the device structure, which is a pointer to the  * s2io_nic structure.
5332  * @info: pointer to the structure with parameters given by ethtool to set
5333  * link information.
5334  * Description:
5335  * The function sets different link parameters provided by the user onto
5336  * the NIC.
5337  * Return value:
5338  * 0 on success.
5339 */
5340
5341 static int s2io_ethtool_sset(struct net_device *dev,
5342                              struct ethtool_cmd *info)
5343 {
5344         struct s2io_nic *sp = dev->priv;
5345         if ((info->autoneg == AUTONEG_ENABLE) ||
5346             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
5347                 return -EINVAL;
5348         else {
5349                 s2io_close(sp->dev);
5350                 s2io_open(sp->dev);
5351         }
5352
5353         return 0;
5354 }
5355
5356 /**
5357  * s2io_ethtol_gset - Return link specific information.
5358  * @sp : private member of the device structure, pointer to the
5359  *      s2io_nic structure.
5360  * @info : pointer to the structure with parameters given by ethtool
5361  * to return link information.
5362  * Description:
5363  * Returns link specific information like speed, duplex etc.. to ethtool.
5364  * Return value :
5365  * return 0 on success.
5366  */
5367
5368 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5369 {
5370         struct s2io_nic *sp = dev->priv;
5371         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5372         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5373         info->port = PORT_FIBRE;
5374
5375         /* info->transceiver */
5376         info->transceiver = XCVR_EXTERNAL;
5377
5378         if (netif_carrier_ok(sp->dev)) {
5379                 info->speed = 10000;
5380                 info->duplex = DUPLEX_FULL;
5381         } else {
5382                 info->speed = -1;
5383                 info->duplex = -1;
5384         }
5385
5386         info->autoneg = AUTONEG_DISABLE;
5387         return 0;
5388 }
5389
5390 /**
5391  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5392  * @sp : private member of the device structure, which is a pointer to the
5393  * s2io_nic structure.
5394  * @info : pointer to the structure with parameters given by ethtool to
5395  * return driver information.
5396  * Description:
5397  * Returns driver specefic information like name, version etc.. to ethtool.
5398  * Return value:
5399  *  void
5400  */
5401
5402 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5403                                   struct ethtool_drvinfo *info)
5404 {
5405         struct s2io_nic *sp = dev->priv;
5406
5407         strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5408         strncpy(info->version, s2io_driver_version, sizeof(info->version));
5409         strncpy(info->fw_version, "", sizeof(info->fw_version));
5410         strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5411         info->regdump_len = XENA_REG_SPACE;
5412         info->eedump_len = XENA_EEPROM_SPACE;
5413 }
5414
5415 /**
5416  *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
5417  *  @sp: private member of the device structure, which is a pointer to the
5418  *  s2io_nic structure.
5419  *  @regs : pointer to the structure with parameters given by ethtool for
5420  *  dumping the registers.
5421  *  @reg_space: The input argumnet into which all the registers are dumped.
5422  *  Description:
5423  *  Dumps the entire register space of xFrame NIC into the user given
5424  *  buffer area.
5425  * Return value :
5426  * void .
5427 */
5428
5429 static void s2io_ethtool_gregs(struct net_device *dev,
5430                                struct ethtool_regs *regs, void *space)
5431 {
5432         int i;
5433         u64 reg;
5434         u8 *reg_space = (u8 *) space;
5435         struct s2io_nic *sp = dev->priv;
5436
5437         regs->len = XENA_REG_SPACE;
5438         regs->version = sp->pdev->subsystem_device;
5439
5440         for (i = 0; i < regs->len; i += 8) {
5441                 reg = readq(sp->bar0 + i);
5442                 memcpy((reg_space + i), &reg, 8);
5443         }
5444 }
5445
5446 /**
5447  *  s2io_phy_id  - timer function that alternates adapter LED.
5448  *  @data : address of the private member of the device structure, which
5449  *  is a pointer to the s2io_nic structure, provided as an u32.
5450  * Description: This is actually the timer function that alternates the
5451  * adapter LED bit of the adapter control bit to set/reset every time on
5452  * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
5453  *  once every second.
5454 */
5455 static void s2io_phy_id(unsigned long data)
5456 {
5457         struct s2io_nic *sp = (struct s2io_nic *) data;
5458         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5459         u64 val64 = 0;
5460         u16 subid;
5461
5462         subid = sp->pdev->subsystem_device;
5463         if ((sp->device_type == XFRAME_II_DEVICE) ||
5464                    ((subid & 0xFF) >= 0x07)) {
5465                 val64 = readq(&bar0->gpio_control);
5466                 val64 ^= GPIO_CTRL_GPIO_0;
5467                 writeq(val64, &bar0->gpio_control);
5468         } else {
5469                 val64 = readq(&bar0->adapter_control);
5470                 val64 ^= ADAPTER_LED_ON;
5471                 writeq(val64, &bar0->adapter_control);
5472         }
5473
5474         mod_timer(&sp->id_timer, jiffies + HZ / 2);
5475 }
5476
5477 /**
5478  * s2io_ethtool_idnic - To physically identify the nic on the system.
5479  * @sp : private member of the device structure, which is a pointer to the
5480  * s2io_nic structure.
5481  * @id : pointer to the structure with identification parameters given by
5482  * ethtool.
5483  * Description: Used to physically identify the NIC on the system.
5484  * The Link LED will blink for a time specified by the user for
5485  * identification.
5486  * NOTE: The Link has to be Up to be able to blink the LED. Hence
5487  * identification is possible only if it's link is up.
5488  * Return value:
5489  * int , returns 0 on success
5490  */
5491
5492 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
5493 {
5494         u64 val64 = 0, last_gpio_ctrl_val;
5495         struct s2io_nic *sp = dev->priv;
5496         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5497         u16 subid;
5498
5499         subid = sp->pdev->subsystem_device;
5500         last_gpio_ctrl_val = readq(&bar0->gpio_control);
5501         if ((sp->device_type == XFRAME_I_DEVICE) &&
5502                 ((subid & 0xFF) < 0x07)) {
5503                 val64 = readq(&bar0->adapter_control);
5504                 if (!(val64 & ADAPTER_CNTL_EN)) {
5505                         printk(KERN_ERR
5506                                "Adapter Link down, cannot blink LED\n");
5507                         return -EFAULT;
5508                 }
5509         }
5510         if (sp->id_timer.function == NULL) {
5511                 init_timer(&sp->id_timer);
5512                 sp->id_timer.function = s2io_phy_id;
5513                 sp->id_timer.data = (unsigned long) sp;
5514         }
5515         mod_timer(&sp->id_timer, jiffies);
5516         if (data)
5517                 msleep_interruptible(data * HZ);
5518         else
5519                 msleep_interruptible(MAX_FLICKER_TIME);
5520         del_timer_sync(&sp->id_timer);
5521
5522         if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
5523                 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
5524                 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5525         }
5526
5527         return 0;
5528 }
5529
5530 static void s2io_ethtool_gringparam(struct net_device *dev,
5531                                     struct ethtool_ringparam *ering)
5532 {
5533         struct s2io_nic *sp = dev->priv;
5534         int i,tx_desc_count=0,rx_desc_count=0;
5535
5536         if (sp->rxd_mode == RXD_MODE_1)
5537                 ering->rx_max_pending = MAX_RX_DESC_1;
5538         else if (sp->rxd_mode == RXD_MODE_3B)
5539                 ering->rx_max_pending = MAX_RX_DESC_2;
5540
5541         ering->tx_max_pending = MAX_TX_DESC;
5542         for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5543                 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5544
5545         DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
5546         ering->tx_pending = tx_desc_count;
5547         rx_desc_count = 0;
5548         for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5549                 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5550
5551         ering->rx_pending = rx_desc_count;
5552
5553         ering->rx_mini_max_pending = 0;
5554         ering->rx_mini_pending = 0;
5555         if(sp->rxd_mode == RXD_MODE_1)
5556                 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5557         else if (sp->rxd_mode == RXD_MODE_3B)
5558                 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5559         ering->rx_jumbo_pending = rx_desc_count;
5560 }
5561
5562 /**
5563  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5564  * @sp : private member of the device structure, which is a pointer to the
5565  *      s2io_nic structure.
5566  * @ep : pointer to the structure with pause parameters given by ethtool.
5567  * Description:
5568  * Returns the Pause frame generation and reception capability of the NIC.
5569  * Return value:
5570  *  void
5571  */
5572 static void s2io_ethtool_getpause_data(struct net_device *dev,
5573                                        struct ethtool_pauseparam *ep)
5574 {
5575         u64 val64;
5576         struct s2io_nic *sp = dev->priv;
5577         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5578
5579         val64 = readq(&bar0->rmac_pause_cfg);
5580         if (val64 & RMAC_PAUSE_GEN_ENABLE)
5581                 ep->tx_pause = TRUE;
5582         if (val64 & RMAC_PAUSE_RX_ENABLE)
5583                 ep->rx_pause = TRUE;
5584         ep->autoneg = FALSE;
5585 }
5586
5587 /**
5588  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5589  * @sp : private member of the device structure, which is a pointer to the
5590  *      s2io_nic structure.
5591  * @ep : pointer to the structure with pause parameters given by ethtool.
5592  * Description:
5593  * It can be used to set or reset Pause frame generation or reception
5594  * support of the NIC.
5595  * Return value:
5596  * int, returns 0 on Success
5597  */
5598
5599 static int s2io_ethtool_setpause_data(struct net_device *dev,
5600                                struct ethtool_pauseparam *ep)
5601 {
5602         u64 val64;
5603         struct s2io_nic *sp = dev->priv;
5604         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5605
5606         val64 = readq(&bar0->rmac_pause_cfg);
5607         if (ep->tx_pause)
5608                 val64 |= RMAC_PAUSE_GEN_ENABLE;
5609         else
5610                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5611         if (ep->rx_pause)
5612                 val64 |= RMAC_PAUSE_RX_ENABLE;
5613         else
5614                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5615         writeq(val64, &bar0->rmac_pause_cfg);
5616         return 0;
5617 }
5618
5619 /**
5620  * read_eeprom - reads 4 bytes of data from user given offset.
5621  * @sp : private member of the device structure, which is a pointer to the
5622  *      s2io_nic structure.
5623  * @off : offset at which the data must be written
5624  * @data : Its an output parameter where the data read at the given
5625  *      offset is stored.
5626  * Description:
5627  * Will read 4 bytes of data from the user given offset and return the
5628  * read data.
5629  * NOTE: Will allow to read only part of the EEPROM visible through the
5630  *   I2C bus.
5631  * Return value:
5632  *  -1 on failure and 0 on success.
5633  */
5634
5635 #define S2IO_DEV_ID             5
5636 static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
5637 {
5638         int ret = -1;
5639         u32 exit_cnt = 0;
5640         u64 val64;
5641         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5642
5643         if (sp->device_type == XFRAME_I_DEVICE) {
5644                 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5645                     I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
5646                     I2C_CONTROL_CNTL_START;
5647                 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5648
5649                 while (exit_cnt < 5) {
5650                         val64 = readq(&bar0->i2c_control);
5651                         if (I2C_CONTROL_CNTL_END(val64)) {
5652                                 *data = I2C_CONTROL_GET_DATA(val64);
5653                                 ret = 0;
5654                                 break;
5655                         }
5656                         msleep(50);
5657                         exit_cnt++;
5658                 }
5659         }
5660
5661         if (sp->device_type == XFRAME_II_DEVICE) {
5662                 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5663                         SPI_CONTROL_BYTECNT(0x3) |
5664                         SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5665                 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5666                 val64 |= SPI_CONTROL_REQ;
5667                 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5668                 while (exit_cnt < 5) {
5669                         val64 = readq(&bar0->spi_control);
5670                         if (val64 & SPI_CONTROL_NACK) {
5671                                 ret = 1;
5672                                 break;
5673                         } else if (val64 & SPI_CONTROL_DONE) {
5674                                 *data = readq(&bar0->spi_data);
5675                                 *data &= 0xffffff;
5676                                 ret = 0;
5677                                 break;
5678                         }
5679                         msleep(50);
5680                         exit_cnt++;
5681                 }
5682         }
5683         return ret;
5684 }
5685
5686 /**
5687  *  write_eeprom - actually writes the relevant part of the data value.
5688  *  @sp : private member of the device structure, which is a pointer to the
5689  *       s2io_nic structure.
5690  *  @off : offset at which the data must be written
5691  *  @data : The data that is to be written
5692  *  @cnt : Number of bytes of the data that are actually to be written into
5693  *  the Eeprom. (max of 3)
5694  * Description:
5695  *  Actually writes the relevant part of the data value into the Eeprom
5696  *  through the I2C bus.
5697  * Return value:
5698  *  0 on success, -1 on failure.
5699  */
5700
5701 static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
5702 {
5703         int exit_cnt = 0, ret = -1;
5704         u64 val64;
5705         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5706
5707         if (sp->device_type == XFRAME_I_DEVICE) {
5708                 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5709                     I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
5710                     I2C_CONTROL_CNTL_START;
5711                 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5712
5713                 while (exit_cnt < 5) {
5714                         val64 = readq(&bar0->i2c_control);
5715                         if (I2C_CONTROL_CNTL_END(val64)) {
5716                                 if (!(val64 & I2C_CONTROL_NACK))
5717                                         ret = 0;
5718                                 break;
5719                         }
5720                         msleep(50);
5721                         exit_cnt++;
5722                 }
5723         }
5724
5725         if (sp->device_type == XFRAME_II_DEVICE) {
5726                 int write_cnt = (cnt == 8) ? 0 : cnt;
5727                 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
5728
5729                 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5730                         SPI_CONTROL_BYTECNT(write_cnt) |
5731                         SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5732                 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5733                 val64 |= SPI_CONTROL_REQ;
5734                 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5735                 while (exit_cnt < 5) {
5736                         val64 = readq(&bar0->spi_control);
5737                         if (val64 & SPI_CONTROL_NACK) {
5738                                 ret = 1;
5739                                 break;
5740                         } else if (val64 & SPI_CONTROL_DONE) {
5741                                 ret = 0;
5742                                 break;
5743                         }
5744                         msleep(50);
5745                         exit_cnt++;
5746                 }
5747         }
5748         return ret;
5749 }
5750 static void s2io_vpd_read(struct s2io_nic *nic)
5751 {
5752         u8 *vpd_data;
5753         u8 data;
5754         int i=0, cnt, fail = 0;
5755         int vpd_addr = 0x80;
5756
5757         if (nic->device_type == XFRAME_II_DEVICE) {
5758                 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5759                 vpd_addr = 0x80;
5760         }
5761         else {
5762                 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5763                 vpd_addr = 0x50;
5764         }
5765         strcpy(nic->serial_num, "NOT AVAILABLE");
5766
5767         vpd_data = kmalloc(256, GFP_KERNEL);
5768         if (!vpd_data) {
5769                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5770                 return;
5771         }
5772         nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5773
5774         for (i = 0; i < 256; i +=4 ) {
5775                 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5776                 pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5777                 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5778                 for (cnt = 0; cnt <5; cnt++) {
5779                         msleep(2);
5780                         pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5781                         if (data == 0x80)
5782                                 break;
5783                 }
5784                 if (cnt >= 5) {
5785                         DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5786                         fail = 1;
5787                         break;
5788                 }
5789                 pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5790                                       (u32 *)&vpd_data[i]);
5791         }
5792
5793         if(!fail) {
5794                 /* read serial number of adapter */
5795                 for (cnt = 0; cnt < 256; cnt++) {
5796                 if ((vpd_data[cnt] == 'S') &&
5797                         (vpd_data[cnt+1] == 'N') &&
5798                         (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5799                                 memset(nic->serial_num, 0, VPD_STRING_LEN);
5800                                 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5801                                         vpd_data[cnt+2]);
5802                                 break;
5803                         }
5804                 }
5805         }
5806
5807         if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5808                 memset(nic->product_name, 0, vpd_data[1]);
5809                 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5810         }
5811         kfree(vpd_data);
5812         nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5813 }
5814
5815 /**
5816  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5817  *  @sp : private member of the device structure, which is a pointer to the *       s2io_nic structure.
5818  *  @eeprom : pointer to the user level structure provided by ethtool,
5819  *  containing all relevant information.
5820  *  @data_buf : user defined value to be written into Eeprom.
5821  *  Description: Reads the values stored in the Eeprom at given offset
5822  *  for a given length. Stores these values int the input argument data
5823  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5824  *  Return value:
5825  *  int  0 on success
5826  */
5827
5828 static int s2io_ethtool_geeprom(struct net_device *dev,
5829                          struct ethtool_eeprom *eeprom, u8 * data_buf)
5830 {
5831         u32 i, valid;
5832         u64 data;
5833         struct s2io_nic *sp = dev->priv;
5834
5835         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5836
5837         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5838                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5839
5840         for (i = 0; i < eeprom->len; i += 4) {
5841                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5842                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5843                         return -EFAULT;
5844                 }
5845                 valid = INV(data);
5846                 memcpy((data_buf + i), &valid, 4);
5847         }
5848         return 0;
5849 }
5850
5851 /**
5852  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5853  *  @sp : private member of the device structure, which is a pointer to the
5854  *  s2io_nic structure.
5855  *  @eeprom : pointer to the user level structure provided by ethtool,
5856  *  containing all relevant information.
5857  *  @data_buf ; user defined value to be written into Eeprom.
5858  *  Description:
5859  *  Tries to write the user provided value in the Eeprom, at the offset
5860  *  given by the user.
5861  *  Return value:
5862  *  0 on success, -EFAULT on failure.
5863  */
5864
5865 static int s2io_ethtool_seeprom(struct net_device *dev,
5866                                 struct ethtool_eeprom *eeprom,
5867                                 u8 * data_buf)
5868 {
5869         int len = eeprom->len, cnt = 0;
5870         u64 valid = 0, data;
5871         struct s2io_nic *sp = dev->priv;
5872
5873         if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5874                 DBG_PRINT(ERR_DBG,
5875                           "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5876                 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5877                           eeprom->magic);
5878                 return -EFAULT;
5879         }
5880
5881         while (len) {
5882                 data = (u32) data_buf[cnt] & 0x000000FF;
5883                 if (data) {
5884                         valid = (u32) (data << 24);
5885                 } else
5886                         valid = data;
5887
5888                 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5889                         DBG_PRINT(ERR_DBG,
5890                                   "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5891                         DBG_PRINT(ERR_DBG,
5892                                   "write into the specified offset\n");
5893                         return -EFAULT;
5894                 }
5895                 cnt++;
5896                 len--;
5897         }
5898
5899         return 0;
5900 }
5901
5902 /**
5903  * s2io_register_test - reads and writes into all clock domains.
5904  * @sp : private member of the device structure, which is a pointer to the
5905  * s2io_nic structure.
 * @data : variable that returns the result of each of the test conducted
5907  * by the driver.
5908  * Description:
5909  * Read and write into all clock domains. The NIC has 3 clock domains,
5910  * see that registers in all the three regions are accessible.
5911  * Return value:
5912  * 0 on success.
5913  */
5914
5915 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5916 {
5917         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5918         u64 val64 = 0, exp_val;
5919         int fail = 0;
5920
5921         val64 = readq(&bar0->pif_rd_swapper_fb);
5922         if (val64 != 0x123456789abcdefULL) {
5923                 fail = 1;
5924                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5925         }
5926
5927         val64 = readq(&bar0->rmac_pause_cfg);
5928         if (val64 != 0xc000ffff00000000ULL) {
5929                 fail = 1;
5930                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5931         }
5932
5933         val64 = readq(&bar0->rx_queue_cfg);
5934         if (sp->device_type == XFRAME_II_DEVICE)
5935                 exp_val = 0x0404040404040404ULL;
5936         else
5937                 exp_val = 0x0808080808080808ULL;
5938         if (val64 != exp_val) {
5939                 fail = 1;
5940                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5941         }
5942
5943         val64 = readq(&bar0->xgxs_efifo_cfg);
5944         if (val64 != 0x000000001923141EULL) {
5945                 fail = 1;
5946                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5947         }
5948
5949         val64 = 0x5A5A5A5A5A5A5A5AULL;
5950         writeq(val64, &bar0->xmsi_data);
5951         val64 = readq(&bar0->xmsi_data);
5952         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5953                 fail = 1;
5954                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5955         }
5956
5957         val64 = 0xA5A5A5A5A5A5A5A5ULL;
5958         writeq(val64, &bar0->xmsi_data);
5959         val64 = readq(&bar0->xmsi_data);
5960         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5961                 fail = 1;
5962                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5963         }
5964
5965         *data = fail;
5966         return fail;
5967 }
5968
5969 /**
5970  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5971  * @sp : private member of the device structure, which is a pointer to the
5972  * s2io_nic structure.
5973  * @data:variable that returns the result of each of the test conducted by
5974  * the driver.
5975  * Description:
5976  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5977  * register.
5978  * Return value:
5979  * 0 on success.
5980  */
5981
static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
{
	int fail = 0;
	u64 ret_data, org_4F0, org_7F0;
	u8 saved_4F0 = 0, saved_7F0 = 0;
	struct net_device *dev = sp->dev;

	/* Test Write Error at offset 0 */
	/* Note that SPI interface allows write access to all areas
	 * of EEPROM. Hence doing all negative testing only for Xframe I.
	 * Negative test: a write to a protected offset is expected to
	 * fail; if write_eeprom() reports success, the test has failed.
	 */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0, 0, 3))
			fail = 1;

	/* Save current values at offsets 0x4F0 and 0x7F0 so they can be
	 * restored after the destructive write tests below. */
	if (!read_eeprom(sp, 0x4F0, &org_4F0))
		saved_4F0 = 1;
	if (!read_eeprom(sp, 0x7F0, &org_7F0))
		saved_7F0 = 1;

	/* Test Write at offset 4f0: write a pattern and read it back. */
	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x4F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
			"Data written %llx Data read %llx\n",
			dev->name, (unsigned long long)0x12345,
			(unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data at 0x4F0 back to 0xFFFFFF (erased state) */
	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);

	/* Test Write Request Error at offset 0x7c (negative, Xframe I only) */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0x07C, 0, 3))
			fail = 1;

	/* Test Write Request at offset 0x7f0: second write/read-back pass. */
	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x7F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
			"Data written %llx Data read %llx\n",
			dev->name, (unsigned long long)0x12345,
			(unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data at 0x7F0 back to 0xFFFFFF (erased state) */
	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Remaining negative tests: writes to these protected
		 * offsets must fail on Xframe I. */
		/* Test Write Error at offset 0x80 */
		if (!write_eeprom(sp, 0x080, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0xfc */
		if (!write_eeprom(sp, 0x0FC, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0x100 */
		if (!write_eeprom(sp, 0x100, 0, 3))
			fail = 1;

		/* Test Write Error at offset 4ec */
		if (!write_eeprom(sp, 0x4EC, 0, 3))
			fail = 1;
	}

	/* Restore values at offsets 0x4F0 and 0x7F0 (only if the original
	 * contents were successfully read above). */
	if (saved_4F0)
		write_eeprom(sp, 0x4F0, org_4F0, 3);
	if (saved_7F0)
		write_eeprom(sp, 0x7F0, org_7F0, 3);

	/* 0 = pass, 1 = at least one sub-test failed. */
	*data = fail;
	return fail;
}
6069
6070 /**
6071  * s2io_bist_test - invokes the MemBist test of the card .
6072  * @sp : private member of the device structure, which is a pointer to the
6073  * s2io_nic structure.
6074  * @data:variable that returns the result of each of the test conducted by
6075  * the driver.
6076  * Description:
6077  * This invokes the MemBist test of the card. We give around
6078  * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
6080  * Return value:
6081  * 0 on success and -1 on failure.
6082  */
6083
6084 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
6085 {
6086         u8 bist = 0;
6087         int cnt = 0, ret = -1;
6088
6089         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6090         bist |= PCI_BIST_START;
6091         pci_write_config_word(sp->pdev, PCI_BIST, bist);
6092
6093         while (cnt < 20) {
6094                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6095                 if (!(bist & PCI_BIST_START)) {
6096                         *data = (bist & PCI_BIST_CODE_MASK);
6097                         ret = 0;
6098                         break;
6099                 }
6100                 msleep(100);
6101                 cnt++;
6102         }
6103
6104         return ret;
6105 }
6106
6107 /**
 * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
6110  * s2io_nic structure.
6111  * @data: variable that returns the result of each of the test conducted by
6112  * the driver.
6113  * Description:
6114  * The function verifies the link state of the NIC and updates the input
6115  * argument 'data' appropriately.
6116  * Return value:
6117  * 0 on success.
6118  */
6119
6120 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
6121 {
6122         struct XENA_dev_config __iomem *bar0 = sp->bar0;
6123         u64 val64;
6124
6125         val64 = readq(&bar0->adapter_status);
6126         if(!(LINK_IS_UP(val64)))
6127                 *data = 1;
6128         else
6129                 *data = 0;
6130
6131         return *data;
6132 }
6133
6134 /**
6135  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6136  * @sp - private member of the device structure, which is a pointer to the
6137  * s2io_nic structure.
6138  * @data - variable that returns the result of each of the test
6139  * conducted by the driver.
6140  * Description:
6141  *  This is one of the offline test that tests the read and write
6142  *  access to the RldRam chip on the NIC.
6143  * Return value:
6144  *  0 on success.
6145  */
6146
static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* Disable ECC so the test patterns are not silently "corrected". */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/* Put the RLDRAM memory controller into test mode. */
	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Two passes: the second pass repeats with the upper 48 bits of
	 * every data pattern inverted. */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d2);

		/* Target address for the test transaction. */
		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Start the write phase of the test. */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		/* Poll for write completion: up to 5 * 200ms = 1 second. */
		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		/* NOTE(review): on poll timeout we bail out with test_fail
		 * still 0, i.e. a hung controller is reported as a pass —
		 * confirm whether this is intentional. */
		if (cnt == 5)
			break;

		/* Start the read-back/compare phase. */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		/* Poll for read completion: up to 5 * 500ms = 2.5 seconds. */
		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		if (cnt == 5)
			break;

		/* The controller sets PASS only if read data matched. */
		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	/* 0 = pass, 1 = data mismatch on at least one pass. */
	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
6231
6232 /**
 *  s2io_ethtool_test - conducts 6 tests to determine the health of card.
6234  *  @sp : private member of the device structure, which is a pointer to the
6235  *  s2io_nic structure.
6236  *  @ethtest : pointer to a ethtool command specific structure that will be
6237  *  returned to the user.
6238  *  @data : variable that returns the result of each of the test
6239  * conducted by the driver.
6240  * Description:
6241  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
6242  *  the health of the card.
6243  * Return value:
6244  *  void
6245  */
6246
static void s2io_ethtool_test(struct net_device *dev,
			      struct ethtool_test *ethtest,
			      uint64_t * data)
{
	struct s2io_nic *sp = dev->priv;
	int orig_state = netif_running(sp->dev);

	/* Result slots: data[0]=register, data[1]=eeprom, data[2]=link,
	 * data[3]=rldram, data[4]=bist. */
	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline Tests: take the interface down first if it was
		 * running, and restore it afterwards. */
		if (orig_state)
			s2io_close(sp->dev);

		if (s2io_register_test(sp, &data[0]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		/* Reset between tests so each starts from a clean state. */
		s2io_reset(sp);

		if (s2io_rldram_test(sp, &data[3]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		if (s2io_eeprom_test(sp, &data[1]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (s2io_bist_test(sp, &data[4]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (orig_state)
			s2io_open(sp->dev);

		/* Link test is not run offline. */
		data[2] = 0;
	} else {
		/* Online Tests. */
		if (!orig_state) {
			DBG_PRINT(ERR_DBG,
				  "%s: is not up, cannot run test\n",
				  dev->name);
			/* NOTE(review): these -1 sentinels are overwritten
			 * by the unconditional assignments below, and the
			 * link test still runs on the down interface —
			 * confirm whether an early return was intended. */
			data[0] = -1;
			data[1] = -1;
			data[2] = -1;
			data[3] = -1;
			data[4] = -1;
		}

		if (s2io_link_test(sp, &data[2]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		/* Offline-only tests report 0 when running online. */
		data[0] = 0;
		data[1] = 0;
		data[3] = 0;
		data[4] = 0;
	}
}
6301
6302 static void s2io_get_ethtool_stats(struct net_device *dev,
6303                                    struct ethtool_stats *estats,
6304                                    u64 * tmp_stats)
6305 {
6306         int i = 0, k;
6307         struct s2io_nic *sp = dev->priv;
6308         struct stat_block *stat_info = sp->mac_control.stats_info;
6309
6310         s2io_updt_stats(sp);
6311         tmp_stats[i++] =
6312                 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
6313                 le32_to_cpu(stat_info->tmac_frms);
6314         tmp_stats[i++] =
6315                 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
6316                 le32_to_cpu(stat_info->tmac_data_octets);
6317         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
6318         tmp_stats[i++] =
6319                 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
6320                 le32_to_cpu(stat_info->tmac_mcst_frms);
6321         tmp_stats[i++] =
6322                 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
6323                 le32_to_cpu(stat_info->tmac_bcst_frms);
6324         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
6325         tmp_stats[i++] =
6326                 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
6327                 le32_to_cpu(stat_info->tmac_ttl_octets);
6328         tmp_stats[i++] =
6329                 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
6330                 le32_to_cpu(stat_info->tmac_ucst_frms);
6331         tmp_stats[i++] =
6332                 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
6333                 le32_to_cpu(stat_info->tmac_nucst_frms);
6334         tmp_stats[i++] =
6335                 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
6336                 le32_to_cpu(stat_info->tmac_any_err_frms);
6337         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
6338         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
6339         tmp_stats[i++] =
6340                 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
6341                 le32_to_cpu(stat_info->tmac_vld_ip);
6342         tmp_stats[i++] =
6343                 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
6344                 le32_to_cpu(stat_info->tmac_drop_ip);
6345         tmp_stats[i++] =
6346                 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
6347                 le32_to_cpu(stat_info->tmac_icmp);
6348         tmp_stats[i++] =
6349                 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
6350                 le32_to_cpu(stat_info->tmac_rst_tcp);
6351         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
6352         tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
6353                 le32_to_cpu(stat_info->tmac_udp);
6354         tmp_stats[i++] =
6355                 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
6356                 le32_to_cpu(stat_info->rmac_vld_frms);
6357         tmp_stats[i++] =
6358                 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
6359                 le32_to_cpu(stat_info->rmac_data_octets);
6360         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
6361         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
6362         tmp_stats[i++] =
6363                 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
6364                 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
6365         tmp_stats[i++] =
6366                 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
6367                 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
6368         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
6369         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
6370         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
6371         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
6372         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
6373         tmp_stats[i++] =
6374                 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
6375                 le32_to_cpu(stat_info->rmac_ttl_octets);
6376         tmp_stats[i++] =
6377                 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
6378                 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
6379         tmp_stats[i++] =
6380                 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
6381                  << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
6382         tmp_stats[i++] =
6383                 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
6384                 le32_to_cpu(stat_info->rmac_discarded_frms);
6385         tmp_stats[i++] =
6386                 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
6387                  << 32 | le32_to_cpu(stat_info->rmac_drop_events);
6388         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
6389         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
6390         tmp_stats[i++] =
6391                 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
6392                 le32_to_cpu(stat_info->rmac_usized_frms);
6393         tmp_stats[i++] =
6394                 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
6395                 le32_to_cpu(stat_info->rmac_osized_frms);
6396         tmp_stats[i++] =
6397                 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
6398                 le32_to_cpu(stat_info->rmac_frag_frms);
6399         tmp_stats[i++] =
6400                 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
6401                 le32_to_cpu(stat_info->rmac_jabber_frms);
6402         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
6403         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
6404         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
6405         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
6406         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
6407         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
6408         tmp_stats[i++] =
6409                 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
6410                 le32_to_cpu(stat_info->rmac_ip);
6411         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
6412         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
6413         tmp_stats[i++] =
6414                 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
6415                 le32_to_cpu(stat_info->rmac_drop_ip);
6416         tmp_stats[i++] =
6417                 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
6418                 le32_to_cpu(stat_info->rmac_icmp);
6419         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
6420         tmp_stats[i++] =
6421                 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
6422                 le32_to_cpu(stat_info->rmac_udp);
6423         tmp_stats[i++] =
6424                 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
6425                 le32_to_cpu(stat_info->rmac_err_drp_udp);
6426         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
6427         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
6428         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
6429         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
6430         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
6431         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
6432         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
6433         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
6434         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
6435         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
6436         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
6437         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
6438         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
6439         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
6440         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
6441         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
6442         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
6443         tmp_stats[i++] =
6444                 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
6445                 le32_to_cpu(stat_info->rmac_pause_cnt);
6446         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
6447         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
6448         tmp_stats[i++] =
6449                 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
6450                 le32_to_cpu(stat_info->rmac_accepted_ip);
6451         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
6452         tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
6453         tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
6454         tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
6455         tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
6456         tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
6457         tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
6458         tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
6459         tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
6460         tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
6461         tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
6462         tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
6463         tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
6464         tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
6465         tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
6466         tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
6467         tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
6468         tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
6469         tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
6470
6471         /* Enhanced statistics exist only for Hercules */
6472         if(sp->device_type == XFRAME_II_DEVICE) {
6473                 tmp_stats[i++] =
6474                                 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
6475                 tmp_stats[i++] =
6476                                 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
6477                 tmp_stats[i++] =
6478                                 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
6479                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
6480                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
6481                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
6482                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
6483                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
6484                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
6485                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
6486                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
6487                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
6488                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
6489                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
6490                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
6491                 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
6492         }
6493
6494         tmp_stats[i++] = 0;
6495         tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
6496         tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
6497         tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
6498         tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
6499         tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
6500         tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
6501         for (k = 0; k < MAX_RX_RINGS; k++)
6502                 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
6503         tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
6504         tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
6505         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
6506         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
6507         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
6508         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
6509         tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
6510         tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
6511         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
6512         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
6513         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
6514         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
6515         tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
6516         tmp_stats[i++] = stat_info->sw_stat.sending_both;
6517         tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
6518         tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
6519         if (stat_info->sw_stat.num_aggregations) {
6520                 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
6521                 int count = 0;
6522                 /*
6523                  * Since 64-bit divide does not work on all platforms,
6524                  * do repeated subtraction.
6525                  */
6526                 while (tmp >= stat_info->sw_stat.num_aggregations) {
6527                         tmp -= stat_info->sw_stat.num_aggregations;
6528                         count++;
6529                 }
6530                 tmp_stats[i++] = count;
6531         }
6532         else
6533                 tmp_stats[i++] = 0;
6534         tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
6535         tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
6536         tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
6537         tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
6538         tmp_stats[i++] = stat_info->sw_stat.mem_freed;
6539         tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
6540         tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
6541         tmp_stats[i++] = stat_info->sw_stat.link_up_time;
6542         tmp_stats[i++] = stat_info->sw_stat.link_down_time;
6543
6544         tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
6545         tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
6546         tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
6547         tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
6548         tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
6549
6550         tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
6551         tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
6552         tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
6553         tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
6554         tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
6555         tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
6556         tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
6557         tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
6558         tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
6559         tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
6560         tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
6561         tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
6562         tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
6563         tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
6564         tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
6565         tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
6566         tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
6567         tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
6568         tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
6569         tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
6570         tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
6571         tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
6572         tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
6573         tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
6574         tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
6575         tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
6576 }
6577
6578 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6579 {
6580         return (XENA_REG_SPACE);
6581 }
6582
6583
6584 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6585 {
6586         struct s2io_nic *sp = dev->priv;
6587
6588         return (sp->rx_csum);
6589 }
6590
6591 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6592 {
6593         struct s2io_nic *sp = dev->priv;
6594
6595         if (data)
6596                 sp->rx_csum = 1;
6597         else
6598                 sp->rx_csum = 0;
6599
6600         return 0;
6601 }
6602
6603 static int s2io_get_eeprom_len(struct net_device *dev)
6604 {
6605         return (XENA_EEPROM_SPACE);
6606 }
6607
6608 static int s2io_get_sset_count(struct net_device *dev, int sset)
6609 {
6610         struct s2io_nic *sp = dev->priv;
6611
6612         switch (sset) {
6613         case ETH_SS_TEST:
6614                 return S2IO_TEST_LEN;
6615         case ETH_SS_STATS:
6616                 switch(sp->device_type) {
6617                 case XFRAME_I_DEVICE:
6618                         return XFRAME_I_STAT_LEN;
6619                 case XFRAME_II_DEVICE:
6620                         return XFRAME_II_STAT_LEN;
6621                 default:
6622                         return 0;
6623                 }
6624         default:
6625                 return -EOPNOTSUPP;
6626         }
6627 }
6628
6629 static void s2io_ethtool_get_strings(struct net_device *dev,
6630                                      u32 stringset, u8 * data)
6631 {
6632         int stat_size = 0;
6633         struct s2io_nic *sp = dev->priv;
6634
6635         switch (stringset) {
6636         case ETH_SS_TEST:
6637                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6638                 break;
6639         case ETH_SS_STATS:
6640                 stat_size = sizeof(ethtool_xena_stats_keys);
6641                 memcpy(data, &ethtool_xena_stats_keys,stat_size);
6642                 if(sp->device_type == XFRAME_II_DEVICE) {
6643                         memcpy(data + stat_size,
6644                                 &ethtool_enhanced_stats_keys,
6645                                 sizeof(ethtool_enhanced_stats_keys));
6646                         stat_size += sizeof(ethtool_enhanced_stats_keys);
6647                 }
6648
6649                 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6650                         sizeof(ethtool_driver_stats_keys));
6651         }
6652 }
6653
6654 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6655 {
6656         if (data)
6657                 dev->features |= NETIF_F_IP_CSUM;
6658         else
6659                 dev->features &= ~NETIF_F_IP_CSUM;
6660
6661         return 0;
6662 }
6663
6664 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6665 {
6666         return (dev->features & NETIF_F_TSO) != 0;
6667 }
6668 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6669 {
6670         if (data)
6671                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6672         else
6673                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6674
6675         return 0;
6676 }
6677
/* ethtool entry points for this driver, hooked up via dev->ethtool_ops. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.get_rx_csum = s2io_ethtool_get_rx_csum,
	.set_rx_csum = s2io_ethtool_set_rx_csum,
	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_tso = s2io_ethtool_op_get_tso,
	.set_tso = s2io_ethtool_op_set_tso,
	.set_ufo = ethtool_op_set_ufo,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.phys_id = s2io_ethtool_idnic,
	.get_ethtool_stats = s2io_get_ethtool_stats,
	.get_sset_count = s2io_get_sset_count,
};
6704
6705 /**
6706  *  s2io_ioctl - Entry point for the Ioctl
6707  *  @dev :  Device pointer.
6708  *  @ifr :  An IOCTL specefic structure, that can contain a pointer to
6709  *  a proprietary structure used to pass information to the driver.
6710  *  @cmd :  This is used to distinguish between the different commands that
6711  *  can be passed to the IOCTL functions.
6712  *  Description:
6713  *  Currently there are no special functionality supported in IOCTL, hence
6714  *  function always return EOPNOTSUPPORTED
6715  */
6716
6717 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6718 {
6719         return -EOPNOTSUPP;
6720 }
6721
6722 /**
6723  *  s2io_change_mtu - entry point to change MTU size for the device.
6724  *   @dev : device pointer.
6725  *   @new_mtu : the new MTU size for the device.
6726  *   Description: A driver entry point to change MTU size for the device.
6727  *   Before changing the MTU the device must be stopped.
6728  *  Return value:
6729  *   0 on success and an appropriate (-)ve integer as defined in errno.h
6730  *   file on failure.
6731  */
6732
6733 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6734 {
6735         struct s2io_nic *sp = dev->priv;
6736         int ret = 0;
6737
6738         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6739                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6740                           dev->name);
6741                 return -EPERM;
6742         }
6743
6744         dev->mtu = new_mtu;
6745         if (netif_running(dev)) {
6746                 s2io_stop_all_tx_queue(sp);
6747                 s2io_card_down(sp);
6748                 ret = s2io_card_up(sp);
6749                 if (ret) {
6750                         DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6751                                   __FUNCTION__);
6752                         return ret;
6753                 }
6754                 s2io_wake_all_tx_queue(sp);
6755         } else { /* Device is down */
6756                 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6757                 u64 val64 = new_mtu;
6758
6759                 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6760         }
6761
6762         return ret;
6763 }
6764
6765 /**
6766  * s2io_set_link - Set the LInk status
6767  * @data: long pointer to device private structue
6768  * Description: Sets the link status for the adapter
6769  */
6770
6771 static void s2io_set_link(struct work_struct *work)
6772 {
6773         struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
6774         struct net_device *dev = nic->dev;
6775         struct XENA_dev_config __iomem *bar0 = nic->bar0;
6776         register u64 val64;
6777         u16 subid;
6778
6779         rtnl_lock();
6780
6781         if (!netif_running(dev))
6782                 goto out_unlock;
6783
6784         if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6785                 /* The card is being reset, no point doing anything */
6786                 goto out_unlock;
6787         }
6788
6789         subid = nic->pdev->subsystem_device;
6790         if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6791                 /*
6792                  * Allow a small delay for the NICs self initiated
6793                  * cleanup to complete.
6794                  */
6795                 msleep(100);
6796         }
6797
6798         val64 = readq(&bar0->adapter_status);
6799         if (LINK_IS_UP(val64)) {
6800                 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6801                         if (verify_xena_quiescence(nic)) {
6802                                 val64 = readq(&bar0->adapter_control);
6803                                 val64 |= ADAPTER_CNTL_EN;
6804                                 writeq(val64, &bar0->adapter_control);
6805                                 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6806                                         nic->device_type, subid)) {
6807                                         val64 = readq(&bar0->gpio_control);
6808                                         val64 |= GPIO_CTRL_GPIO_0;
6809                                         writeq(val64, &bar0->gpio_control);
6810                                         val64 = readq(&bar0->gpio_control);
6811                                 } else {
6812                                         val64 |= ADAPTER_LED_ON;
6813                                         writeq(val64, &bar0->adapter_control);
6814                                 }
6815                                 nic->device_enabled_once = TRUE;
6816                         } else {
6817                                 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6818                                 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6819                                 s2io_stop_all_tx_queue(nic);
6820                         }
6821                 }
6822                 val64 = readq(&bar0->adapter_control);
6823                 val64 |= ADAPTER_LED_ON;
6824                 writeq(val64, &bar0->adapter_control);
6825                 s2io_link(nic, LINK_UP);
6826         } else {
6827                 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6828                                                       subid)) {
6829                         val64 = readq(&bar0->gpio_control);
6830                         val64 &= ~GPIO_CTRL_GPIO_0;
6831                         writeq(val64, &bar0->gpio_control);
6832                         val64 = readq(&bar0->gpio_control);
6833                 }
6834                 /* turn off LED */
6835                 val64 = readq(&bar0->adapter_control);
6836                 val64 = val64 &(~ADAPTER_LED_ON);
6837                 writeq(val64, &bar0->adapter_control);
6838                 s2io_link(nic, LINK_DOWN);
6839         }
6840         clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6841
6842 out_unlock:
6843         rtnl_unlock();
6844 }
6845
/**
 * set_rxd_buffer_pointer - (re)attach receive buffers to a descriptor.
 * @sp: device private structure.
 * @rxdp: receive descriptor to fill.
 * @ba: buffer-address bookkeeping (used only in 2-buffer mode).
 * @skb: in/out skb pointer; once allocated, the same skb is reused for
 *       every later descriptor whose Host_Control is NULL.
 * @temp0: in/out cached DMA address of buffer 0.
 * @temp1: in/out cached DMA address of buffer 1 (2-buffer mode only).
 * @temp2: in/out cached DMA address of buffer 2 (2-buffer mode only).
 * @size: skb allocation size for this ring mode.
 *
 * Helper for rxd_owner_bit_reset(): frames will not actually be
 * processed, so a single skb and its DMA mappings may legitimately
 * back all descriptors in a ring.
 * Returns 0 on success, -ENOMEM on skb-allocation or DMA-map failure
 * (already-made mappings are undone before returning).
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				struct buffAdd *ba,
				struct sk_buff **skb, u64 *temp0, u64 *temp1,
				u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frame are not going to be processed,
			 * using same mapped address for the Rxd
			 * buffer pointer
			 */
			rxdp1->Buffer0_ptr = *temp0;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM ;
			}
			sp->mac_control.stats_info->sw_stat.mem_allocated
				+= (*skb)->truesize;
			/* storing the mapped addr in a temp variable
			 * such it will be used for next rxd whose
			 * Host Control is NULL
			 */
			rxdp1->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, (*skb)->data,
					size - NET_IP_ALIGN,
					PCI_DMA_FROMDEVICE);
			if( (rxdp1->Buffer0_ptr == 0) ||
				(rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
		/* Two buffer Mode */
		if (*skb) {
			/* reuse the mappings made for a previous descriptor */
			rxdp3->Buffer2_ptr = *temp2;
			rxdp3->Buffer0_ptr = *temp0;
			rxdp3->Buffer1_ptr = *temp1;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM;
			}
			sp->mac_control.stats_info->sw_stat.mem_allocated
				+= (*skb)->truesize;
			/* Buffer 2 carries the payload (mtu + 4) */
			rxdp3->Buffer2_ptr = *temp2 =
				pci_map_single(sp->pdev, (*skb)->data,
					       dev->mtu + 4,
					       PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer2_ptr == 0) ||
				(rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
				goto memalloc_failed;
			}
			/* Buffer 0 carries the Ethernet header */
			rxdp3->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
						PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer0_ptr == 0) ||
				(rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
				/* undo the Buffer2 mapping before bailing out */
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			rxdp3->Buffer1_ptr = *temp1 =
				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer1_ptr == 0) ||
				(rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
				/* undo both earlier mappings before bailing out */
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer0_ptr,
					BUF0_LEN, PCI_DMA_FROMDEVICE);
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
		}
	}
	return 0;
	memalloc_failed:
		/* common failure path: count it and release the skb */
		stats->pci_map_fail_cnt++;
		stats->mem_freed += (*skb)->truesize;
		dev_kfree_skb(*skb);
		return -ENOMEM;
}
6953
6954 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6955                                 int size)
6956 {
6957         struct net_device *dev = sp->dev;
6958         if (sp->rxd_mode == RXD_MODE_1) {
6959                 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6960         } else if (sp->rxd_mode == RXD_MODE_3B) {
6961                 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6962                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6963                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6964         }
6965 }
6966
6967 static  int rxd_owner_bit_reset(struct s2io_nic *sp)
6968 {
6969         int i, j, k, blk_cnt = 0, size;
6970         struct mac_info * mac_control = &sp->mac_control;
6971         struct config_param *config = &sp->config;
6972         struct net_device *dev = sp->dev;
6973         struct RxD_t *rxdp = NULL;
6974         struct sk_buff *skb = NULL;
6975         struct buffAdd *ba = NULL;
6976         u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6977
6978         /* Calculate the size based on ring mode */
6979         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6980                 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6981         if (sp->rxd_mode == RXD_MODE_1)
6982                 size += NET_IP_ALIGN;
6983         else if (sp->rxd_mode == RXD_MODE_3B)
6984                 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6985
6986         for (i = 0; i < config->rx_ring_num; i++) {
6987                 blk_cnt = config->rx_cfg[i].num_rxd /
6988                         (rxd_count[sp->rxd_mode] +1);
6989
6990                 for (j = 0; j < blk_cnt; j++) {
6991                         for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6992                                 rxdp = mac_control->rings[i].
6993                                         rx_blocks[j].rxds[k].virt_addr;
6994                                 if(sp->rxd_mode == RXD_MODE_3B)
6995                                         ba = &mac_control->rings[i].ba[j][k];
6996                                 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6997                                                        &skb,(u64 *)&temp0_64,
6998                                                        (u64 *)&temp1_64,
6999                                                        (u64 *)&temp2_64,
7000                                                         size) == ENOMEM) {
7001                                         return 0;
7002                                 }
7003
7004                                 set_rxd_buffer_size(sp, rxdp, size);
7005                                 wmb();
7006                                 /* flip the Ownership bit to Hardware */
7007                                 rxdp->Control_1 |= RXD_OWN_XENA;
7008                         }
7009                 }
7010         }
7011         return 0;
7012
7013 }
7014
/**
 * s2io_add_isr - enable MSI-X (if configured) and register interrupt handlers.
 * @sp: device private structure.
 *
 * Attempts MSI-X first when configured; on any enable or request_irq
 * failure it tears down what was registered and falls back to INTA.
 * Returns 0 on success, -1 if even the INTA registration fails.
 */
static int s2io_add_isr(struct s2io_nic * sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->config.intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		/* MSI-X enable failed: fall back to legacy interrupts */
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->config.intr_type = INTA;
	}

	/* Store the values of the MSIX table in the struct s2io_nic structure */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->config.intr_type == MSI_X) {
		int i, msix_rx_cnt = 0;

		for (i = 0; i < sp->num_entries; i++) {
			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
				/* Ring vectors get the Rx handler, the alarm
				 * vector gets the fifo/alarm handler. */
				if (sp->s2io_entries[i].type ==
					MSIX_RING_TYPE) {
					sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
						s2io_msix_ring_handle, 0,
						sp->desc[i],
						sp->s2io_entries[i].arg);
				} else if (sp->s2io_entries[i].type ==
					MSIX_ALARM_TYPE) {
					sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
					dev->name, i);
					err = request_irq(sp->entries[i].vector,
						s2io_msix_fifo_handle, 0,
						sp->desc[i],
						sp->s2io_entries[i].arg);

				}
				/* if either data or addr is zero print it. */
				if (!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG,
						"%s @Addr:0x%llx Data:0x%llx\n",
						sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long long)
						ntohl(sp->msix_info[i].data));
				} else
					msix_rx_cnt++;
				if (err) {
					/* Undo every vector registered so far
					 * and fall back to INTA below. */
					remove_msix_isr(sp);

					DBG_PRINT(ERR_DBG,
						"%s:MSI-X-%d registration "
						"failed\n", dev->name, i);

					DBG_PRINT(ERR_DBG,
						"%s: Defaulting to INTA\n",
						dev->name);
					sp->config.intr_type = INTA;
					break;
				}
				sp->s2io_entries[i].in_use =
					MSIX_REGISTERED_SUCCESS;
			}
		}
		if (!err) {
			printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
				--msix_rx_cnt);
			DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled"
						" through alarm vector\n");
		}
	}
	/* Either configured for INTA, or MSI-X registration fell back to it */
	if (sp->config.intr_type == INTA) {
		err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
				sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
7102 static void s2io_rem_isr(struct s2io_nic * sp)
7103 {
7104         if (sp->config.intr_type == MSI_X)
7105                 remove_msix_isr(sp);
7106         else
7107                 remove_inta_isr(sp);
7108 }
7109
/**
 * do_s2io_card_down - common teardown path for bringing the card down.
 * @sp: device private structure.
 * @do_io: non-zero when register I/O is safe (normal close); zero when
 *         the device may be inaccessible and only software state should
 *         be torn down.
 *
 * Stops the alarm timer, waits out a running link task, disables NAPI,
 * stops traffic, removes ISRs, waits for quiescence, resets the NIC
 * (when @do_io) and frees all Tx/Rx buffers.
 */
static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	register u64 val64 = 0;
	struct config_param *config;
	config = &sp->config;

	/* Nothing to do if the card is already down */
	if (!is_s2io_card_up(sp))
		return;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
		msleep(50);
	}
	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* Disable napi */
	if (sp->config.napi) {
		int off = 0;
		if (config->intr_type ==  MSI_X) {
			/* per-ring NAPI contexts in MSI-X mode */
			for (; off < sp->config.rx_ring_num; off++)
				napi_disable(&sp->mac_control.rings[off].napi);
			}
		else
			napi_disable(&sp->napi);
	}

	/* disable Tx and Rx traffic on the NIC */
	if (do_io)
		stop_nic(sp);

	s2io_rem_isr(sp);

	/* Check if the device is Quiescent and then Reset the NIC */
	while(do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffer to avoid the ring bump. Since there is
		 * no intention of processing the Rx frame at this pointwe are
		 * just settting the ownership bit of rxd in Each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if(verify_pcc_quiescent(sp, sp->device_enabled_once))
			break;
		}

		msleep(50);
		cnt++;
		/* give up after ~500ms of polling */
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG,
				  "s2io_close:Device not Quiescent ");
			DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
				  (unsigned long long) val64);
			break;
		}
	}
	if (do_io)
		s2io_reset(sp);

	/* Free all Tx buffers */
	free_tx_buffers(sp);

	/* Free all Rx buffers */
	free_rx_buffers(sp);

	/* release the link-task bit taken at the top of this function */
	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
7183
/**
 * s2io_card_down - bring the card down with register I/O enabled.
 * @sp: device private structure.
 *
 * Convenience wrapper around do_s2io_card_down() for the normal case
 * where the hardware is still accessible.
 */
static void s2io_card_down(struct s2io_nic * sp)
{
	do_s2io_card_down(sp, 1);
}
7188
/**
 * s2io_card_up - initialize the hardware and bring the interface up.
 * @sp: device private structure.
 *
 * Initializes the H/W registers, fills the Rx rings, enables NAPI,
 * restores the receive mode, starts the NIC, registers the ISR(s),
 * arms the alarm timer and enables interrupts. On any failure the
 * partially initialized state is torn down before returning.
 * Returns 0 on success or a negative errno on failure.
 */
static int s2io_card_up(struct s2io_nic * sp)
{
	int i, ret = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	struct net_device *dev = (struct net_device *) sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	ret = init_nic(sp);
	if (ret != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		/* -EIO means the device is unreachable; skip the reset */
		if (ret != -EIO)
			s2io_reset(sp);
		return ret;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].mtu = dev->mtu;
		ret = fill_rx_buffers(&mac_control->rings[i]);
		if (ret) {
			/* Undo H/W init and any rings already filled */
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  mac_control->rings[i].rx_bufs_left);
	}

	/* Initialise napi */
	if (config->napi) {
		int i;
		if (config->intr_type ==  MSI_X) {
			/* one NAPI context per Rx ring in MSI-X mode */
			for (i = 0; i < sp->config.rx_ring_num; i++)
				napi_enable(&sp->mac_control.rings[i].napi);
		} else {
			napi_enable(&sp->napi);
		}
	}

	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos= 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	if (sp->lro) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use(if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		/* MSI-X may be partially registered; remove it before reset */
		if (sp->config.intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Periodic alarm handler, fires every half second */
	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	/*  Enable select interrupts */
	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
	if (sp->config.intr_type != INTA)
		en_dis_able_nic_intrs(sp, TX_TRAFFIC_INTR, ENABLE_INTRS);
	else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}

	set_bit(__S2IO_STATE_CARD_UP, &sp->state);
	return 0;
}
7290
7291 /**
7292  * s2io_restart_nic - Resets the NIC.
7293  * @data : long pointer to the device private structure
7294  * Description:
7295  * This function is scheduled to be run by the s2io_tx_watchdog
7296  * function after 0.5 secs to reset the NIC. The idea is to reduce
7297  * the run time of the watch dog routine which is run holding a
7298  * spin lock.
7299  */
7300
7301 static void s2io_restart_nic(struct work_struct *work)
7302 {
7303         struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7304         struct net_device *dev = sp->dev;
7305
7306         rtnl_lock();
7307
7308         if (!netif_running(dev))
7309                 goto out_unlock;
7310
7311         s2io_card_down(sp);
7312         if (s2io_card_up(sp)) {
7313                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
7314                           dev->name);
7315         }
7316         s2io_wake_all_tx_queue(sp);
7317         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
7318                   dev->name);
7319 out_unlock:
7320         rtnl_unlock();
7321 }
7322
7323 /**
7324  *  s2io_tx_watchdog - Watchdog for transmit side.
7325  *  @dev : Pointer to net device structure
7326  *  Description:
7327  *  This function is triggered if the Tx Queue is stopped
7328  *  for a pre-defined amount of time when the Interface is still up.
7329  *  If the Interface is jammed in such a situation, the hardware is
7330  *  reset (by s2io_close) and restarted again (by s2io_open) to
7331  *  overcome any problem that might have been caused in the hardware.
7332  *  Return value:
7333  *  void
7334  */
7335
7336 static void s2io_tx_watchdog(struct net_device *dev)
7337 {
7338         struct s2io_nic *sp = dev->priv;
7339
7340         if (netif_carrier_ok(dev)) {
7341                 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
7342                 schedule_work(&sp->rst_timer_task);
7343                 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
7344         }
7345 }
7346
7347 /**
7348  *   rx_osm_handler - To perform some OS related operations on SKB.
7349  *   @sp: private member of the device structure,pointer to s2io_nic structure.
7350  *   @skb : the socket buffer pointer.
7351  *   @len : length of the packet
7352  *   @cksum : FCS checksum of the frame.
7353  *   @ring_no : the ring from which this RxD was extracted.
7354  *   Description:
7355  *   This function is called by the Rx interrupt serivce routine to perform
7356  *   some OS related operations on the SKB before passing it to the upper
7357  *   layers. It mainly checks if the checksum is OK, if so adds it to the
7358  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
7359  *   to the upper layer. If the checksum is wrong, it increments the Rx
7360  *   packet error count, frees the SKB and returns error.
7361  *   Return value:
7362  *   SUCCESS on success and -1 on failure.
7363  */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
	struct s2io_nic *sp = ring_data->nic;
	struct net_device *dev = (struct net_device *) ring_data->dev;
	/* The skb for this descriptor was stashed in Host_Control when the
	 * buffer was posted to the ring; recover it here. */
	struct sk_buff *skb = (struct sk_buff *)
		((unsigned long) rxdp->Host_Control);
	int ring_no = ring_data->ring_no;
	u16 l3_csum, l4_csum;
	/* Masked transfer code from Control_1; non-zero means an Rx error. */
	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
	struct lro *lro;
	u8 err_mask;

	skb->dev = dev;

	if (err) {
		/* Check for parity error */
		if (err & 0x1) {
			sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
		}
		/* Shift the masked T_CODE field down to its small integer
		 * value and bump the matching per-error-type counter. */
		err_mask = err >> 48;
		switch(err_mask) {
			case 1:
				sp->mac_control.stats_info->sw_stat.
				rx_parity_err_cnt++;
			break;

			case 2:
				sp->mac_control.stats_info->sw_stat.
				rx_abort_cnt++;
			break;

			case 3:
				sp->mac_control.stats_info->sw_stat.
				rx_parity_abort_cnt++;
			break;

			case 4:
				sp->mac_control.stats_info->sw_stat.
				rx_rda_fail_cnt++;
			break;

			case 5:
				sp->mac_control.stats_info->sw_stat.
				rx_unkn_prot_cnt++;
			break;

			case 6:
				sp->mac_control.stats_info->sw_stat.
				rx_fcs_err_cnt++;
			break;

			case 7:
				sp->mac_control.stats_info->sw_stat.
				rx_buf_size_err_cnt++;
			break;

			case 8:
				sp->mac_control.stats_info->sw_stat.
				rx_rxd_corrupt_cnt++;
			break;

			case 15:
				sp->mac_control.stats_info->sw_stat.
				rx_unkn_err_cnt++;
			break;
		}
		/*
		 * Drop the packet if bad transfer code. Exception being
		 * 0x5, which could be due to unsupported IPv6 extension header.
		 * In this case, we let stack handle the packet.
		 * Note that in this case, since checksum will be incorrect,
		 * stack will validate the same.
		 */
		if (err_mask != 0x5) {
			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
				dev->name, err_mask);
			sp->stats.rx_crc_errors++;
			/* Account the freed skb memory, release the buffer and
			 * clear Host_Control so the slot can be refilled. */
			sp->mac_control.stats_info->sw_stat.mem_freed
				+= skb->truesize;
			dev_kfree_skb(skb);
			ring_data->rx_bufs_left -= 1;
			rxdp->Host_Control = 0;
			return 0;
		}
	}

	/* Updating statistics */
	ring_data->rx_packets++;
	rxdp->Host_Control = 0;
	if (sp->rxd_mode == RXD_MODE_1) {
		/* 1-buffer mode: the whole frame sits in buffer 0. */
		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

		ring_data->rx_bytes += len;
		skb_put(skb, len);

	} else if (sp->rxd_mode == RXD_MODE_3B) {
		/*
		 * 3-buffer mode: buffer 0 (header bytes) lives in a separate
		 * block-address area and is copied into the skb headroom;
		 * buffer 2 is assumed to have been DMA'd into the skb data
		 * area already, so only the tail pointer is advanced.
		 */
		int get_block = ring_data->rx_curr_get_info.block_index;
		int get_off = ring_data->rx_curr_get_info.offset;
		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
		unsigned char *buff = skb_push(skb, buf0_len);

		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
		ring_data->rx_bytes += buf0_len + buf2_len;
		memcpy(buff, ba->ba_0, buf0_len);
		skb_put(skb, buf2_len);
	}

	/* Use the NIC's checksum verdict only for non-fragmented TCP/UDP
	 * frames (or any TCP/UDP frame when LRO is off) with rx_csum set. */
	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!ring_data->lro) ||
	    (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
	    (sp->rx_csum)) {
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (ring_data->lro) {
				u32 tcp_len;
				u8 *tcp;
				int ret = 0;

				/* Try to aggregate this segment into an
				 * existing LRO session; the return code
				 * selects the disposition below. */
				ret = s2io_club_tcp_session(ring_data,
					skb->data, &tcp, &tcp_len, &lro,
					rxdp, sp);
				switch (ret) {
					case 3: /* Begin anew */
						lro->parent = skb;
						goto aggregate;
					case 1: /* Aggregate */
					{
						lro_append_pkt(sp, lro,
							skb, tcp_len);
						goto aggregate;
					}
					case 4: /* Flush session */
					{
						lro_append_pkt(sp, lro,
							skb, tcp_len);
						queue_rx_frame(lro->parent,
							lro->vlan_tag);
						clear_lro_session(lro);
						sp->mac_control.stats_info->
						    sw_stat.flush_max_pkts++;
						goto aggregate;
					}
					case 2: /* Flush both */
						lro->parent->data_len =
							lro->frags_len;
						sp->mac_control.stats_info->
						     sw_stat.sending_both++;
						queue_rx_frame(lro->parent,
							lro->vlan_tag);
						clear_lro_session(lro);
						goto send_up;
					case 0: /* sessions exceeded */
					case -1: /* non-TCP or not
						  * L2 aggregatable
						  */
					case 5: /*
						 * First pkt in session not
						 * L3/L4 aggregatable
						 */
						break;
					default:
						DBG_PRINT(ERR_DBG,
							"%s: Samadhana!!\n",
							 __FUNCTION__);
						BUG();
				}
			}
		} else {
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else
		skb->ip_summed = CHECKSUM_NONE;

	sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
/* Hand the frame (with any VLAN tag) up to the network stack. */
send_up:
	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
	dev->last_rx = jiffies;
/* Frame retained by an LRO session (or already queued); just release slot. */
aggregate:
	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
	return SUCCESS;
}
7556
7557 /**
7558  *  s2io_link - stops/starts the Tx queue.
7559  *  @sp : private member of the device structure, which is a pointer to the
7560  *  s2io_nic structure.
7561  *  @link : inidicates whether link is UP/DOWN.
7562  *  Description:
7563  *  This function stops/starts the Tx queue depending on whether the link
7564  *  status of the NIC is is down or up. This is called by the Alarm
7565  *  interrupt handler whenever a link change interrupt comes up.
7566  *  Return value:
7567  *  void.
7568  */
7569
7570 static void s2io_link(struct s2io_nic * sp, int link)
7571 {
7572         struct net_device *dev = (struct net_device *) sp->dev;
7573
7574         if (link != sp->last_link_state) {
7575                 init_tti(sp, link);
7576                 if (link == LINK_DOWN) {
7577                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7578                         s2io_stop_all_tx_queue(sp);
7579                         netif_carrier_off(dev);
7580                         if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7581                         sp->mac_control.stats_info->sw_stat.link_up_time =
7582                                 jiffies - sp->start_time;
7583                         sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7584                 } else {
7585                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7586                         if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7587                         sp->mac_control.stats_info->sw_stat.link_down_time =
7588                                 jiffies - sp->start_time;
7589                         sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7590                         netif_carrier_on(dev);
7591                         s2io_wake_all_tx_queue(sp);
7592                 }
7593         }
7594         sp->last_link_state = link;
7595         sp->start_time = jiffies;
7596 }
7597
7598 /**
7599  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7600  *  @sp : private member of the device structure, which is a pointer to the
7601  *  s2io_nic structure.
7602  *  Description:
7603  *  This function initializes a few of the PCI and PCI-X configuration registers
7604  *  with recommended values.
7605  *  Return value:
7606  *  void
7607  */
7608
7609 static void s2io_init_pci(struct s2io_nic * sp)
7610 {
7611         u16 pci_cmd = 0, pcix_cmd = 0;
7612
7613         /* Enable Data Parity Error Recovery in PCI-X command register. */
7614         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7615                              &(pcix_cmd));
7616         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7617                               (pcix_cmd | 1));
7618         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7619                              &(pcix_cmd));
7620
7621         /* Set the PErr Response bit in PCI command register. */
7622         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7623         pci_write_config_word(sp->pdev, PCI_COMMAND,
7624                               (pci_cmd | PCI_COMMAND_PARITY));
7625         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7626 }
7627
7628 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7629         u8 *dev_multiq)
7630 {
7631         if ((tx_fifo_num > MAX_TX_FIFOS) ||
7632                 (tx_fifo_num < 1)) {
7633                 DBG_PRINT(ERR_DBG, "s2io: Requested number of tx fifos "
7634                         "(%d) not supported\n", tx_fifo_num);
7635
7636                 if (tx_fifo_num < 1)
7637                         tx_fifo_num = 1;
7638                 else
7639                         tx_fifo_num = MAX_TX_FIFOS;
7640
7641                 DBG_PRINT(ERR_DBG, "s2io: Default to %d ", tx_fifo_num);
7642                 DBG_PRINT(ERR_DBG, "tx fifos\n");
7643         }
7644
7645 #ifndef CONFIG_NETDEVICES_MULTIQUEUE
7646         if (multiq) {
7647                 DBG_PRINT(ERR_DBG, "s2io: Multiqueue support not enabled\n");
7648                 multiq = 0;
7649         }
7650 #endif
7651         if (multiq)
7652                 *dev_multiq = multiq;
7653
7654         if (tx_steering_type && (1 == tx_fifo_num)) {
7655                 if (tx_steering_type != TX_DEFAULT_STEERING)
7656                         DBG_PRINT(ERR_DBG,
7657                                 "s2io: Tx steering is not supported with "
7658                                 "one fifo. Disabling Tx steering.\n");
7659                 tx_steering_type = NO_STEERING;
7660         }
7661
7662         if ((tx_steering_type < NO_STEERING) ||
7663                 (tx_steering_type > TX_DEFAULT_STEERING)) {
7664                 DBG_PRINT(ERR_DBG, "s2io: Requested transmit steering not "
7665                          "supported\n");
7666                 DBG_PRINT(ERR_DBG, "s2io: Disabling transmit steering\n");
7667                 tx_steering_type = NO_STEERING;
7668         }
7669
7670         if (rx_ring_num > MAX_RX_RINGS) {
7671                 DBG_PRINT(ERR_DBG, "s2io: Requested number of rx rings not "
7672                          "supported\n");
7673                 DBG_PRINT(ERR_DBG, "s2io: Default to %d rx rings\n",
7674                         MAX_RX_RINGS);
7675                 rx_ring_num = MAX_RX_RINGS;
7676         }
7677
7678         if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7679                 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7680                           "Defaulting to INTA\n");
7681                 *dev_intr_type = INTA;
7682         }
7683
7684         if ((*dev_intr_type == MSI_X) &&
7685                         ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7686                         (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7687                 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7688                                         "Defaulting to INTA\n");
7689                 *dev_intr_type = INTA;
7690         }
7691
7692         if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7693                 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7694                 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7695                 rx_ring_mode = 1;
7696         }
7697         return SUCCESS;
7698 }
7699
7700 /**
7701  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7702  * or Traffic class respectively.
7703  * @nic: device private variable
7704  * Description: The function configures the receive steering to
7705  * desired receive ring.
7706  * Return Value:  SUCCESS on success and
7707  * '-1' on failure (endian settings incorrect).
7708  */
7709 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7710 {
7711         struct XENA_dev_config __iomem *bar0 = nic->bar0;
7712         register u64 val64 = 0;
7713
7714         if (ds_codepoint > 63)
7715                 return FAILURE;
7716
7717         val64 = RTS_DS_MEM_DATA(ring);
7718         writeq(val64, &bar0->rts_ds_mem_data);
7719
7720         val64 = RTS_DS_MEM_CTRL_WE |
7721                 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7722                 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7723
7724         writeq(val64, &bar0->rts_ds_mem_ctrl);
7725
7726         return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7727                                 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7728                                 S2IO_BIT_RESET);
7729 }
7730
7731 /**
7732  *  s2io_init_nic - Initialization of the adapter .
7733  *  @pdev : structure containing the PCI related information of the device.
7734  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7735  *  Description:
7736  *  The function initializes an adapter identified by the pci_dec structure.
7737  *  All OS related initialization including memory and device structure and
7738  *  initlaization of the device private variable is done. Also the swapper
7739  *  control register is initialized to enable read and write into the I/O
7740  *  registers of the device.
7741  *  Return value:
7742  *  returns 0 on success and negative on failure.
7743  */
7744
7745 static int __devinit
7746 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7747 {
7748         struct s2io_nic *sp;
7749         struct net_device *dev;
7750         int i, j, ret;
7751         int dma_flag = FALSE;
7752         u32 mac_up, mac_down;
7753         u64 val64 = 0, tmp64 = 0;
7754         struct XENA_dev_config __iomem *bar0 = NULL;
7755         u16 subid;
7756         struct mac_info *mac_control;
7757         struct config_param *config;
7758         int mode;
7759         u8 dev_intr_type = intr_type;
7760         u8 dev_multiq = 0;
7761         DECLARE_MAC_BUF(mac);
7762
7763         ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7764         if (ret)
7765                 return ret;
7766
7767         if ((ret = pci_enable_device(pdev))) {
7768                 DBG_PRINT(ERR_DBG,
7769                           "s2io_init_nic: pci_enable_device failed\n");
7770                 return ret;
7771         }
7772
7773         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
7774                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7775                 dma_flag = TRUE;
7776                 if (pci_set_consistent_dma_mask
7777                     (pdev, DMA_64BIT_MASK)) {
7778                         DBG_PRINT(ERR_DBG,
7779                                   "Unable to obtain 64bit DMA for \
7780                                         consistent allocations\n");
7781                         pci_disable_device(pdev);
7782                         return -ENOMEM;
7783                 }
7784         } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
7785                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7786         } else {
7787                 pci_disable_device(pdev);
7788                 return -ENOMEM;
7789         }
7790         if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
7791                 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
7792                 pci_disable_device(pdev);
7793                 return -ENODEV;
7794         }
7795 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
7796         if (dev_multiq)
7797                 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7798         else
7799 #endif
7800         dev = alloc_etherdev(sizeof(struct s2io_nic));
7801         if (dev == NULL) {
7802                 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7803                 pci_disable_device(pdev);
7804                 pci_release_regions(pdev);
7805                 return -ENODEV;
7806         }
7807
7808         pci_set_master(pdev);
7809         pci_set_drvdata(pdev, dev);
7810         SET_NETDEV_DEV(dev, &pdev->dev);
7811
7812         /*  Private member variable initialized to s2io NIC structure */
7813         sp = dev->priv;
7814         memset(sp, 0, sizeof(struct s2io_nic));
7815         sp->dev = dev;
7816         sp->pdev = pdev;
7817         sp->high_dma_flag = dma_flag;
7818         sp->device_enabled_once = FALSE;
7819         if (rx_ring_mode == 1)
7820                 sp->rxd_mode = RXD_MODE_1;
7821         if (rx_ring_mode == 2)
7822                 sp->rxd_mode = RXD_MODE_3B;
7823
7824         sp->config.intr_type = dev_intr_type;
7825
7826         if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7827                 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7828                 sp->device_type = XFRAME_II_DEVICE;
7829         else
7830                 sp->device_type = XFRAME_I_DEVICE;
7831
7832         sp->lro = lro_enable;
7833
7834         /* Initialize some PCI/PCI-X fields of the NIC. */
7835         s2io_init_pci(sp);
7836
7837         /*
7838          * Setting the device configuration parameters.
7839          * Most of these parameters can be specified by the user during
7840          * module insertion as they are module loadable parameters. If
7841          * these parameters are not not specified during load time, they
7842          * are initialized with default values.
7843          */
7844         mac_control = &sp->mac_control;
7845         config = &sp->config;
7846
7847         config->napi = napi;
7848         config->tx_steering_type = tx_steering_type;
7849
7850         /* Tx side parameters. */
7851         if (config->tx_steering_type == TX_PRIORITY_STEERING)
7852                 config->tx_fifo_num = MAX_TX_FIFOS;
7853         else
7854                 config->tx_fifo_num = tx_fifo_num;
7855
7856         /* Initialize the fifos used for tx steering */
7857         if (config->tx_fifo_num < 5) {
7858                         if (config->tx_fifo_num  == 1)
7859                                 sp->total_tcp_fifos = 1;
7860                         else
7861                                 sp->total_tcp_fifos = config->tx_fifo_num - 1;
7862                         sp->udp_fifo_idx = config->tx_fifo_num - 1;
7863                         sp->total_udp_fifos = 1;
7864                         sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7865         } else {
7866                 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7867                                                 FIFO_OTHER_MAX_NUM);
7868                 sp->udp_fifo_idx = sp->total_tcp_fifos;
7869                 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7870                 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7871         }
7872
7873         config->multiq = dev_multiq;
7874         for (i = 0; i < config->tx_fifo_num; i++) {
7875                 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7876                 config->tx_cfg[i].fifo_priority = i;
7877         }
7878
7879         /* mapping the QoS priority to the configured fifos */
7880         for (i = 0; i < MAX_TX_FIFOS; i++)
7881                 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7882
7883         /* map the hashing selector table to the configured fifos */
7884         for (i = 0; i < config->tx_fifo_num; i++)
7885                 sp->fifo_selector[i] = fifo_selector[i];
7886
7887
7888         config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7889         for (i = 0; i < config->tx_fifo_num; i++) {
7890                 config->tx_cfg[i].f_no_snoop =
7891                     (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7892                 if (config->tx_cfg[i].fifo_len < 65) {
7893                         config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7894                         break;
7895                 }
7896         }
7897         /* + 2 because one Txd for skb->data and one Txd for UFO */
7898         config->max_txds = MAX_SKB_FRAGS + 2;
7899
7900         /* Rx side parameters. */
7901         config->rx_ring_num = rx_ring_num;
7902         for (i = 0; i < config->rx_ring_num; i++) {
7903                 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7904                     (rxd_count[sp->rxd_mode] + 1);
7905                 config->rx_cfg[i].ring_priority = i;
7906                 mac_control->rings[i].rx_bufs_left = 0;
7907                 mac_control->rings[i].rxd_mode = sp->rxd_mode;
7908                 mac_control->rings[i].rxd_count = rxd_count[sp->rxd_mode];
7909                 mac_control->rings[i].pdev = sp->pdev;
7910                 mac_control->rings[i].dev = sp->dev;
7911         }
7912
7913         for (i = 0; i < rx_ring_num; i++) {
7914                 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7915                 config->rx_cfg[i].f_no_snoop =
7916                     (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7917         }
7918
7919         /*  Setting Mac Control parameters */
7920         mac_control->rmac_pause_time = rmac_pause_time;
7921         mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7922         mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7923
7924
7925         /*  initialize the shared memory used by the NIC and the host */
7926         if (init_shared_mem(sp)) {
7927                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7928                           dev->name);
7929                 ret = -ENOMEM;
7930                 goto mem_alloc_failed;
7931         }
7932
7933         sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7934                                      pci_resource_len(pdev, 0));
7935         if (!sp->bar0) {
7936                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7937                           dev->name);
7938                 ret = -ENOMEM;
7939                 goto bar0_remap_failed;
7940         }
7941
7942         sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7943                                      pci_resource_len(pdev, 2));
7944         if (!sp->bar1) {
7945                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7946                           dev->name);
7947                 ret = -ENOMEM;
7948                 goto bar1_remap_failed;
7949         }
7950
7951         dev->irq = pdev->irq;
7952         dev->base_addr = (unsigned long) sp->bar0;
7953
7954         /* Initializing the BAR1 address as the start of the FIFO pointer. */
7955         for (j = 0; j < MAX_TX_FIFOS; j++) {
7956                 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7957                     (sp->bar1 + (j * 0x00020000));
7958         }
7959
7960         /*  Driver entry points */
7961         dev->open = &s2io_open;
7962         dev->stop = &s2io_close;
7963         dev->hard_start_xmit = &s2io_xmit;
7964         dev->get_stats = &s2io_get_stats;
7965         dev->set_multicast_list = &s2io_set_multicast;
7966         dev->do_ioctl = &s2io_ioctl;
7967         dev->set_mac_address = &s2io_set_mac_addr;
7968         dev->change_mtu = &s2io_change_mtu;
7969         SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7970         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7971         dev->vlan_rx_register = s2io_vlan_rx_register;
7972         dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
7973
7974         /*
7975          * will use eth_mac_addr() for  dev->set_mac_address
7976          * mac address will be set every time dev->open() is called
7977          */
7978 #ifdef CONFIG_NET_POLL_CONTROLLER
7979         dev->poll_controller = s2io_netpoll;
7980 #endif
7981
7982         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7983         if (sp->high_dma_flag == TRUE)
7984                 dev->features |= NETIF_F_HIGHDMA;
7985         dev->features |= NETIF_F_TSO;
7986         dev->features |= NETIF_F_TSO6;
7987         if ((sp->device_type & XFRAME_II_DEVICE) && (ufo))  {
7988                 dev->features |= NETIF_F_UFO;
7989                 dev->features |= NETIF_F_HW_CSUM;
7990         }
7991 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
7992         if (config->multiq)
7993                 dev->features |= NETIF_F_MULTI_QUEUE;
7994 #endif
7995         dev->tx_timeout = &s2io_tx_watchdog;
7996         dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7997         INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7998         INIT_WORK(&sp->set_link_task, s2io_set_link);
7999
8000         pci_save_state(sp->pdev);
8001
8002         /* Setting swapper control on the NIC, for proper reset operation */
8003         if (s2io_set_swapper(sp)) {
8004                 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
8005                           dev->name);
8006                 ret = -EAGAIN;
8007                 goto set_swap_failed;
8008         }
8009
8010         /* Verify if the Herc works on the slot its placed into */
8011         if (sp->device_type & XFRAME_II_DEVICE) {
8012                 mode = s2io_verify_pci_mode(sp);
8013                 if (mode < 0) {
8014                         DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
8015                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
8016                         ret = -EBADSLT;
8017                         goto set_swap_failed;
8018                 }
8019         }
8020
8021         if (sp->config.intr_type == MSI_X) {
8022                 sp->num_entries = config->rx_ring_num + 1;
8023                 ret = s2io_enable_msi_x(sp);
8024
8025                 if (!ret) {
8026                         ret = s2io_test_msi(sp);
8027                         /* rollback MSI-X, will re-enable during add_isr() */
8028                         remove_msix_isr(sp);
8029                 }
8030                 if (ret) {
8031
8032                         DBG_PRINT(ERR_DBG,
8033                           "%s: MSI-X requested but failed to enable\n",
8034                           dev->name);
8035                         sp->config.intr_type = INTA;
8036                 }
8037         }
8038
8039         if (config->intr_type ==  MSI_X) {
8040                 for (i = 0; i < config->rx_ring_num ; i++)
8041                         netif_napi_add(dev, &mac_control->rings[i].napi,
8042                                 s2io_poll_msix, 64);
8043         } else {
8044                 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
8045         }
8046
8047         /* Not needed for Herc */
8048         if (sp->device_type & XFRAME_I_DEVICE) {
8049                 /*
8050                  * Fix for all "FFs" MAC address problems observed on
8051                  * Alpha platforms
8052                  */
8053                 fix_mac_address(sp);
8054                 s2io_reset(sp);
8055         }
8056
8057         /*
8058          * MAC address initialization.
8059          * For now only one mac address will be read and used.
8060          */
8061         bar0 = sp->bar0;
8062         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
8063             RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
8064         writeq(val64, &bar0->rmac_addr_cmd_mem);
8065         wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
8066                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
8067         tmp64 = readq(&bar0->rmac_addr_data0_mem);
8068         mac_down = (u32) tmp64;
8069         mac_up = (u32) (tmp64 >> 32);
8070
8071         sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
8072         sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
8073         sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
8074         sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
8075         sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
8076         sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
8077
8078         /*  Set the factory defined MAC address initially   */
8079         dev->addr_len = ETH_ALEN;
8080         memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
8081         memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
8082
8083         /* initialize number of multicast & unicast MAC entries variables */
8084         if (sp->device_type == XFRAME_I_DEVICE) {
8085                 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
8086                 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
8087                 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
8088         } else if (sp->device_type == XFRAME_II_DEVICE) {
8089                 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
8090                 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
8091                 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
8092         }
8093
8094         /* store mac addresses from CAM to s2io_nic structure */
8095         do_s2io_store_unicast_mc(sp);
8096
8097         /* Configure MSIX vector for number of rings configured plus one */
8098         if ((sp->device_type == XFRAME_II_DEVICE) &&
8099                 (config->intr_type == MSI_X))
8100                 sp->num_entries = config->rx_ring_num + 1;
8101
8102          /* Store the values of the MSIX table in the s2io_nic structure */
8103         store_xmsi_data(sp);
8104         /* reset Nic and bring it to known state */
8105         s2io_reset(sp);
8106
8107         /*
8108          * Initialize link state flags
8109          * and the card state parameter
8110          */
8111         sp->state = 0;
8112
8113         /* Initialize spinlocks */
8114         for (i = 0; i < sp->config.tx_fifo_num; i++)
8115                 spin_lock_init(&mac_control->fifos[i].tx_lock);
8116
8117         /*
8118          * SXE-002: Configure link and activity LED to init state
8119          * on driver load.
8120          */
8121         subid = sp->pdev->subsystem_device;
8122         if ((subid & 0xFF) >= 0x07) {
8123                 val64 = readq(&bar0->gpio_control);
8124                 val64 |= 0x0000800000000000ULL;
8125                 writeq(val64, &bar0->gpio_control);
8126                 val64 = 0x0411040400000000ULL;
8127                 writeq(val64, (void __iomem *) bar0 + 0x2700);
8128                 val64 = readq(&bar0->gpio_control);
8129         }
8130
8131         sp->rx_csum = 1;        /* Rx chksum verify enabled by default */
8132
8133         if (register_netdev(dev)) {
8134                 DBG_PRINT(ERR_DBG, "Device registration failed\n");
8135                 ret = -ENODEV;
8136                 goto register_failed;
8137         }
8138         s2io_vpd_read(sp);
8139         DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
8140         DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
8141                   sp->product_name, pdev->revision);
8142         DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8143                   s2io_driver_version);
8144         DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n",
8145                   dev->name, print_mac(mac, dev->dev_addr));
8146         DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
8147         if (sp->device_type & XFRAME_II_DEVICE) {
8148                 mode = s2io_print_pci_mode(sp);
8149                 if (mode < 0) {
8150                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
8151                         ret = -EBADSLT;
8152                         unregister_netdev(dev);
8153                         goto set_swap_failed;
8154                 }
8155         }
8156         switch(sp->rxd_mode) {
8157                 case RXD_MODE_1:
8158                     DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8159                                                 dev->name);
8160                     break;
8161                 case RXD_MODE_3B:
8162                     DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8163                                                 dev->name);
8164                     break;
8165         }
8166
8167         switch (sp->config.napi) {
8168         case 0:
8169                 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8170                 break;
8171         case 1:
8172                 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8173                 break;
8174         }
8175
8176         DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8177                 sp->config.tx_fifo_num);
8178
8179         DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8180                   sp->config.rx_ring_num);
8181
8182         switch(sp->config.intr_type) {
8183                 case INTA:
8184                     DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8185                     break;
8186                 case MSI_X:
8187                     DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8188                     break;
8189         }
8190         if (sp->config.multiq) {
8191         for (i = 0; i < sp->config.tx_fifo_num; i++)
8192                 mac_control->fifos[i].multiq = config->multiq;
8193                 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8194                         dev->name);
8195         } else
8196                 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8197                         dev->name);
8198
8199         switch (sp->config.tx_steering_type) {
8200         case NO_STEERING:
8201                 DBG_PRINT(ERR_DBG, "%s: No steering enabled for"
8202                         " transmit\n", dev->name);
8203                         break;
8204         case TX_PRIORITY_STEERING:
8205                 DBG_PRINT(ERR_DBG, "%s: Priority steering enabled for"
8206                         " transmit\n", dev->name);
8207                 break;
8208         case TX_DEFAULT_STEERING:
8209                 DBG_PRINT(ERR_DBG, "%s: Default steering enabled for"
8210                         " transmit\n", dev->name);
8211         }
8212
8213         if (sp->lro)
8214                 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8215                           dev->name);
8216         if (ufo)
8217                 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
8218                                         " enabled\n", dev->name);
8219         /* Initialize device name */
8220         sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
8221
8222         /*
8223          * Make Link state as off at this point, when the Link change
8224          * interrupt comes the state will be automatically changed to
8225          * the right state.
8226          */
8227         netif_carrier_off(dev);
8228
8229         return 0;
8230
8231       register_failed:
8232       set_swap_failed:
8233         iounmap(sp->bar1);
8234       bar1_remap_failed:
8235         iounmap(sp->bar0);
8236       bar0_remap_failed:
8237       mem_alloc_failed:
8238         free_shared_mem(sp);
8239         pci_disable_device(pdev);
8240         pci_release_regions(pdev);
8241         pci_set_drvdata(pdev, NULL);
8242         free_netdev(dev);
8243
8244         return ret;
8245 }
8246
8247 /**
8248  * s2io_rem_nic - Free the PCI device
8249  * @pdev: structure containing the PCI related information of the device.
8250  * Description: This function is called by the Pci subsystem to release a
8251  * PCI device and free up all resource held up by the device. This could
8252  * be in response to a Hot plug event or when the driver is to be removed
8253  * from memory.
8254  */
8255
8256 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
8257 {
8258         struct net_device *dev =
8259             (struct net_device *) pci_get_drvdata(pdev);
8260         struct s2io_nic *sp;
8261
8262         if (dev == NULL) {
8263                 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8264                 return;
8265         }
8266
8267         flush_scheduled_work();
8268
8269         sp = dev->priv;
8270         unregister_netdev(dev);
8271
8272         free_shared_mem(sp);
8273         iounmap(sp->bar0);
8274         iounmap(sp->bar1);
8275         pci_release_regions(pdev);
8276         pci_set_drvdata(pdev, NULL);
8277         free_netdev(dev);
8278         pci_disable_device(pdev);
8279 }
8280
8281 /**
8282  * s2io_starter - Entry point for the driver
8283  * Description: This function is the entry point for the driver. It verifies
8284  * the module loadable parameters and initializes PCI configuration space.
8285  */
8286
8287 static int __init s2io_starter(void)
8288 {
8289         return pci_register_driver(&s2io_driver);
8290 }
8291
8292 /**
8293  * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
8295  */
8296
8297 static __exit void s2io_closer(void)
8298 {
8299         pci_unregister_driver(&s2io_driver);
8300         DBG_PRINT(INIT_DBG, "cleanup done\n");
8301 }
8302
/* Module entry/exit hooks: register and unregister the PCI driver. */
module_init(s2io_starter);
module_exit(s2io_closer);
8305
8306 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8307                 struct tcphdr **tcp, struct RxD_t *rxdp,
8308                 struct s2io_nic *sp)
8309 {
8310         int ip_off;
8311         u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8312
8313         if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8314                 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
8315                           __FUNCTION__);
8316                 return -1;
8317         }
8318
8319         /* Checking for DIX type or DIX type with VLAN */
8320         if ((l2_type == 0)
8321                 || (l2_type == 4)) {
8322                 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8323                 /*
8324                  * If vlan stripping is disabled and the frame is VLAN tagged,
8325                  * shift the offset by the VLAN header size bytes.
8326                  */
8327                 if ((!vlan_strip_flag) &&
8328                         (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8329                         ip_off += HEADER_VLAN_SIZE;
8330         } else {
8331                 /* LLC, SNAP etc are considered non-mergeable */
8332                 return -1;
8333         }
8334
8335         *ip = (struct iphdr *)((u8 *)buffer + ip_off);
8336         ip_len = (u8)((*ip)->ihl);
8337         ip_len <<= 2;
8338         *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8339
8340         return 0;
8341 }
8342
8343 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8344                                   struct tcphdr *tcp)
8345 {
8346         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8347         if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
8348            (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
8349                 return -1;
8350         return 0;
8351 }
8352
8353 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8354 {
8355         return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
8356 }
8357
8358 static void initiate_new_session(struct lro *lro, u8 *l2h,
8359         struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag)
8360 {
8361         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8362         lro->l2h = l2h;
8363         lro->iph = ip;
8364         lro->tcph = tcp;
8365         lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8366         lro->tcp_ack = tcp->ack_seq;
8367         lro->sg_num = 1;
8368         lro->total_len = ntohs(ip->tot_len);
8369         lro->frags_len = 0;
8370         lro->vlan_tag = vlan_tag;
8371         /*
8372          * check if we saw TCP timestamp. Other consistency checks have
8373          * already been done.
8374          */
8375         if (tcp->doff == 8) {
8376                 __be32 *ptr;
8377                 ptr = (__be32 *)(tcp+1);
8378                 lro->saw_ts = 1;
8379                 lro->cur_tsval = ntohl(*(ptr+1));
8380                 lro->cur_tsecr = *(ptr+2);
8381         }
8382         lro->in_use = 1;
8383 }
8384
8385 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8386 {
8387         struct iphdr *ip = lro->iph;
8388         struct tcphdr *tcp = lro->tcph;
8389         __sum16 nchk;
8390         struct stat_block *statinfo = sp->mac_control.stats_info;
8391         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8392
8393         /* Update L3 header */
8394         ip->tot_len = htons(lro->total_len);
8395         ip->check = 0;
8396         nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
8397         ip->check = nchk;
8398
8399         /* Update L4 header */
8400         tcp->ack_seq = lro->tcp_ack;
8401         tcp->window = lro->window;
8402
8403         /* Update tsecr field if this session has timestamps enabled */
8404         if (lro->saw_ts) {
8405                 __be32 *ptr = (__be32 *)(tcp + 1);
8406                 *(ptr+2) = lro->cur_tsecr;
8407         }
8408
8409         /* Update counters required for calculation of
8410          * average no. of packets aggregated.
8411          */
8412         statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
8413         statinfo->sw_stat.num_aggregations++;
8414 }
8415
8416 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8417                 struct tcphdr *tcp, u32 l4_pyld)
8418 {
8419         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8420         lro->total_len += l4_pyld;
8421         lro->frags_len += l4_pyld;
8422         lro->tcp_next_seq += l4_pyld;
8423         lro->sg_num++;
8424
8425         /* Update ack seq no. and window ad(from this pkt) in LRO object */
8426         lro->tcp_ack = tcp->ack_seq;
8427         lro->window = tcp->window;
8428
8429         if (lro->saw_ts) {
8430                 __be32 *ptr;
8431                 /* Update tsecr and tsval from this packet */
8432                 ptr = (__be32 *)(tcp+1);
8433                 lro->cur_tsval = ntohl(*(ptr+1));
8434                 lro->cur_tsecr = *(ptr + 2);
8435         }
8436 }
8437
8438 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8439                                     struct tcphdr *tcp, u32 tcp_pyld_len)
8440 {
8441         u8 *ptr;
8442
8443         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8444
8445         if (!tcp_pyld_len) {
8446                 /* Runt frame or a pure ack */
8447                 return -1;
8448         }
8449
8450         if (ip->ihl != 5) /* IP has options */
8451                 return -1;
8452
8453         /* If we see CE codepoint in IP header, packet is not mergeable */
8454         if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8455                 return -1;
8456
8457         /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
8458         if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
8459                                     tcp->ece || tcp->cwr || !tcp->ack) {
8460                 /*
8461                  * Currently recognize only the ack control word and
8462                  * any other control field being set would result in
8463                  * flushing the LRO session
8464                  */
8465                 return -1;
8466         }
8467
8468         /*
8469          * Allow only one TCP timestamp option. Don't aggregate if
8470          * any other options are detected.
8471          */
8472         if (tcp->doff != 5 && tcp->doff != 8)
8473                 return -1;
8474
8475         if (tcp->doff == 8) {
8476                 ptr = (u8 *)(tcp + 1);
8477                 while (*ptr == TCPOPT_NOP)
8478                         ptr++;
8479                 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8480                         return -1;
8481
8482                 /* Ensure timestamp value increases monotonically */
8483                 if (l_lro)
8484                         if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
8485                                 return -1;
8486
8487                 /* timestamp echo reply should be non-zero */
8488                 if (*((__be32 *)(ptr+6)) == 0)
8489                         return -1;
8490         }
8491
8492         return 0;
8493 }
8494
/*
 * s2io_club_tcp_session - classify a received TCP frame for LRO.
 * @ring_data: ring owning the per-ring LRO session table (lro0_n[]).
 * @buffer: start of the received frame (L2 header).
 * @tcp: out - points at the frame's TCP header on success.
 * @tcp_len: out - TCP payload length of this frame.
 * @lro: out - the LRO session chosen for this frame (NULL if none).
 * @rxdp: receive descriptor (carries frame-type / VLAN info).
 * @sp: device private structure (for stats).
 *
 * Return value tells the caller what to do with the frame:
 *   <0 - frame not LRO-capable at L2, send up unchanged
 *    0 - all sessions busy, send up unchanged (*lro is NULL)
 *    1 - aggregated into an existing session
 *    2 - flush: out-of-order or not mergeable with its session
 *    3 - new session begun with this frame
 *    4 - aggregated and session reached max size, flush it
 *    5 - not L3/L4 aggregatable, send up unchanged
 */
static int
s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
	u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp,
	struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;
	u16 vlan_tag = 0;

	/* Locate IP/TCP headers; bail out if the L2 framing is not
	 * LRO-capable (non-TCP or LLC/SNAP). */
	if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
					 rxdp, sp))) {
		DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
			  ip->saddr, ip->daddr);
	} else
		return ret;

	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* First pass: look for an in-use session matching this 4-tuple. */
	for (i=0; i<MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &ring_data->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			/* Out-of-order segment: flush the session. */
			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
					  "0x%x, actual 0x%x\n", __FUNCTION__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				sp->mac_control.stats_info->
				   sw_stat.outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
			return 5;
		}

		/* Second pass: claim the first free session slot. */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &ring_data->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
			  __FUNCTION__);
		*lro = NULL;
		return ret;
	}

	/* Act on the classification decided above. */
	switch (ret) {
		case 3:
			initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
								vlan_tag);
			break;
		case 2:
			update_L3L4_header(sp, *lro);
			break;
		case 1:
			aggregate_new_rx(*lro, ip, tcph, *tcp_len);
			if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
				update_L3L4_header(sp, *lro);
				ret = 4; /* Flush the LRO */
			}
			break;
		default:
			DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
				__FUNCTION__);
			break;
	}

	return ret;
}
8593
8594 static void clear_lro_session(struct lro *lro)
8595 {
8596         static u16 lro_struct_size = sizeof(struct lro);
8597
8598         memset(lro, 0, lro_struct_size);
8599 }
8600
8601 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8602 {
8603         struct net_device *dev = skb->dev;
8604         struct s2io_nic *sp = dev->priv;
8605
8606         skb->protocol = eth_type_trans(skb, dev);
8607         if (sp->vlgrp && vlan_tag
8608                 && (vlan_strip_flag)) {
8609                 /* Queueing the vlan frame to the upper layer */
8610                 if (sp->config.napi)
8611                         vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
8612                 else
8613                         vlan_hwaccel_rx(skb, sp->vlgrp, vlan_tag);
8614         } else {
8615                 if (sp->config.napi)
8616                         netif_receive_skb(skb);
8617                 else
8618                         netif_rx(skb);
8619         }
8620 }
8621
8622 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8623                            struct sk_buff *skb,
8624                            u32 tcp_len)
8625 {
8626         struct sk_buff *first = lro->parent;
8627
8628         first->len += tcp_len;
8629         first->data_len = lro->frags_len;
8630         skb_pull(skb, (skb->len - tcp_len));
8631         if (skb_shinfo(first)->frag_list)
8632                 lro->last_frag->next = skb;
8633         else
8634                 skb_shinfo(first)->frag_list = skb;
8635         first->truesize += skb->truesize;
8636         lro->last_frag = skb;
8637         sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8638         return;
8639 }
8640
8641 /**
8642  * s2io_io_error_detected - called when PCI error is detected
8643  * @pdev: Pointer to PCI device
8644  * @state: The current pci connection state
8645  *
8646  * This function is called after a PCI bus error affecting
8647  * this device has been detected.
8648  */
8649 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8650                                                pci_channel_state_t state)
8651 {
8652         struct net_device *netdev = pci_get_drvdata(pdev);
8653         struct s2io_nic *sp = netdev->priv;
8654
8655         netif_device_detach(netdev);
8656
8657         if (netif_running(netdev)) {
8658                 /* Bring down the card, while avoiding PCI I/O */
8659                 do_s2io_card_down(sp, 0);
8660         }
8661         pci_disable_device(pdev);
8662
8663         return PCI_ERS_RESULT_NEED_RESET;
8664 }
8665
8666 /**
8667  * s2io_io_slot_reset - called after the pci bus has been reset.
8668  * @pdev: Pointer to PCI device
8669  *
8670  * Restart the card from scratch, as if from a cold-boot.
8671  * At this point, the card has exprienced a hard reset,
8672  * followed by fixups by BIOS, and has its config space
8673  * set up identically to what it was at cold boot.
8674  */
8675 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8676 {
8677         struct net_device *netdev = pci_get_drvdata(pdev);
8678         struct s2io_nic *sp = netdev->priv;
8679
8680         if (pci_enable_device(pdev)) {
8681                 printk(KERN_ERR "s2io: "
8682                        "Cannot re-enable PCI device after reset.\n");
8683                 return PCI_ERS_RESULT_DISCONNECT;
8684         }
8685
8686         pci_set_master(pdev);
8687         s2io_reset(sp);
8688
8689         return PCI_ERS_RESULT_RECOVERED;
8690 }
8691
8692 /**
8693  * s2io_io_resume - called when traffic can start flowing again.
8694  * @pdev: Pointer to PCI device
8695  *
8696  * This callback is called when the error recovery driver tells
8697  * us that its OK to resume normal operation.
8698  */
8699 static void s2io_io_resume(struct pci_dev *pdev)
8700 {
8701         struct net_device *netdev = pci_get_drvdata(pdev);
8702         struct s2io_nic *sp = netdev->priv;
8703
8704         if (netif_running(netdev)) {
8705                 if (s2io_card_up(sp)) {
8706                         printk(KERN_ERR "s2io: "
8707                                "Can't bring device back up after reset.\n");
8708                         return;
8709                 }
8710
8711                 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8712                         s2io_card_down(sp);
8713                         printk(KERN_ERR "s2io: "
8714                                "Can't resetore mac addr after reset.\n");
8715                         return;
8716                 }
8717         }
8718
8719         netif_device_attach(netdev);
8720         netif_wake_queue(netdev);
8721 }