Merge branch 'pxa-devel' into pxa
[linux-2.6] / drivers / net / s2io.c
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2007 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *              values are 1, 2.
36  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
41  * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
42  *     Possible values '1' for enable '0' for disable. Default is '0'
43  * lro_max_pkts: This parameter defines maximum number of packets can be
44  *     aggregated as a single large packet
45  * napi: This parameter used to enable/disable NAPI (polling Rx)
46  *     Possible values '1' for enable and '0' for disable. Default is '1'
47  * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48  *      Possible values '1' for enable and '0' for disable. Default is '0'
49  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50  *                 Possible values '1' for enable , '0' for disable.
51  *                 Default is '2' - which means disable in promisc mode
52  *                 and enable in non-promiscuous mode.
53  * multiq: This parameter used to enable/disable MULTIQUEUE support.
54  *      Possible values '1' for enable and '0' for disable. Default is '0'
55  ************************************************************************/
56
57 #include <linux/module.h>
58 #include <linux/types.h>
59 #include <linux/errno.h>
60 #include <linux/ioport.h>
61 #include <linux/pci.h>
62 #include <linux/dma-mapping.h>
63 #include <linux/kernel.h>
64 #include <linux/netdevice.h>
65 #include <linux/etherdevice.h>
66 #include <linux/skbuff.h>
67 #include <linux/init.h>
68 #include <linux/delay.h>
69 #include <linux/stddef.h>
70 #include <linux/ioctl.h>
71 #include <linux/timex.h>
72 #include <linux/ethtool.h>
73 #include <linux/workqueue.h>
74 #include <linux/if_vlan.h>
75 #include <linux/ip.h>
76 #include <linux/tcp.h>
77 #include <net/tcp.h>
78
79 #include <asm/system.h>
80 #include <asm/uaccess.h>
81 #include <asm/io.h>
82 #include <asm/div64.h>
83 #include <asm/irq.h>
84
85 /* local include */
86 #include "s2io.h"
87 #include "s2io-regs.h"
88
89 #define DRV_VERSION "2.0.26.24"
90
91 /* S2io Driver name & version. */
92 static char s2io_driver_name[] = "Neterion";
93 static char s2io_driver_version[] = DRV_VERSION;
94
95 static int rxd_size[2] = {32,48};
96 static int rxd_count[2] = {127,85};
97
98 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
99 {
100         int ret;
101
102         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
103                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
104
105         return ret;
106 }
107
108 /*
109  * Cards with following subsystem_id have a link state indication
110  * problem, 600B, 600C, 600D, 640B, 640C and 640D.
111  * macro below identifies these cards given the subsystem_id.
112  */
113 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
114         (dev_type == XFRAME_I_DEVICE) ?                 \
115                 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
116                  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
117
118 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
119                                       ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
120
121 static inline int is_s2io_card_up(const struct s2io_nic * sp)
122 {
123         return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
124 }
125
126 /* Ethtool related variables and Macros. */
127 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
128         "Register test\t(offline)",
129         "Eeprom test\t(offline)",
130         "Link test\t(online)",
131         "RLDRAM test\t(offline)",
132         "BIST Test\t(offline)"
133 };
134
135 static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
136         {"tmac_frms"},
137         {"tmac_data_octets"},
138         {"tmac_drop_frms"},
139         {"tmac_mcst_frms"},
140         {"tmac_bcst_frms"},
141         {"tmac_pause_ctrl_frms"},
142         {"tmac_ttl_octets"},
143         {"tmac_ucst_frms"},
144         {"tmac_nucst_frms"},
145         {"tmac_any_err_frms"},
146         {"tmac_ttl_less_fb_octets"},
147         {"tmac_vld_ip_octets"},
148         {"tmac_vld_ip"},
149         {"tmac_drop_ip"},
150         {"tmac_icmp"},
151         {"tmac_rst_tcp"},
152         {"tmac_tcp"},
153         {"tmac_udp"},
154         {"rmac_vld_frms"},
155         {"rmac_data_octets"},
156         {"rmac_fcs_err_frms"},
157         {"rmac_drop_frms"},
158         {"rmac_vld_mcst_frms"},
159         {"rmac_vld_bcst_frms"},
160         {"rmac_in_rng_len_err_frms"},
161         {"rmac_out_rng_len_err_frms"},
162         {"rmac_long_frms"},
163         {"rmac_pause_ctrl_frms"},
164         {"rmac_unsup_ctrl_frms"},
165         {"rmac_ttl_octets"},
166         {"rmac_accepted_ucst_frms"},
167         {"rmac_accepted_nucst_frms"},
168         {"rmac_discarded_frms"},
169         {"rmac_drop_events"},
170         {"rmac_ttl_less_fb_octets"},
171         {"rmac_ttl_frms"},
172         {"rmac_usized_frms"},
173         {"rmac_osized_frms"},
174         {"rmac_frag_frms"},
175         {"rmac_jabber_frms"},
176         {"rmac_ttl_64_frms"},
177         {"rmac_ttl_65_127_frms"},
178         {"rmac_ttl_128_255_frms"},
179         {"rmac_ttl_256_511_frms"},
180         {"rmac_ttl_512_1023_frms"},
181         {"rmac_ttl_1024_1518_frms"},
182         {"rmac_ip"},
183         {"rmac_ip_octets"},
184         {"rmac_hdr_err_ip"},
185         {"rmac_drop_ip"},
186         {"rmac_icmp"},
187         {"rmac_tcp"},
188         {"rmac_udp"},
189         {"rmac_err_drp_udp"},
190         {"rmac_xgmii_err_sym"},
191         {"rmac_frms_q0"},
192         {"rmac_frms_q1"},
193         {"rmac_frms_q2"},
194         {"rmac_frms_q3"},
195         {"rmac_frms_q4"},
196         {"rmac_frms_q5"},
197         {"rmac_frms_q6"},
198         {"rmac_frms_q7"},
199         {"rmac_full_q0"},
200         {"rmac_full_q1"},
201         {"rmac_full_q2"},
202         {"rmac_full_q3"},
203         {"rmac_full_q4"},
204         {"rmac_full_q5"},
205         {"rmac_full_q6"},
206         {"rmac_full_q7"},
207         {"rmac_pause_cnt"},
208         {"rmac_xgmii_data_err_cnt"},
209         {"rmac_xgmii_ctrl_err_cnt"},
210         {"rmac_accepted_ip"},
211         {"rmac_err_tcp"},
212         {"rd_req_cnt"},
213         {"new_rd_req_cnt"},
214         {"new_rd_req_rtry_cnt"},
215         {"rd_rtry_cnt"},
216         {"wr_rtry_rd_ack_cnt"},
217         {"wr_req_cnt"},
218         {"new_wr_req_cnt"},
219         {"new_wr_req_rtry_cnt"},
220         {"wr_rtry_cnt"},
221         {"wr_disc_cnt"},
222         {"rd_rtry_wr_ack_cnt"},
223         {"txp_wr_cnt"},
224         {"txd_rd_cnt"},
225         {"txd_wr_cnt"},
226         {"rxd_rd_cnt"},
227         {"rxd_wr_cnt"},
228         {"txf_rd_cnt"},
229         {"rxf_wr_cnt"}
230 };
231
232 static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
233         {"rmac_ttl_1519_4095_frms"},
234         {"rmac_ttl_4096_8191_frms"},
235         {"rmac_ttl_8192_max_frms"},
236         {"rmac_ttl_gt_max_frms"},
237         {"rmac_osized_alt_frms"},
238         {"rmac_jabber_alt_frms"},
239         {"rmac_gt_max_alt_frms"},
240         {"rmac_vlan_frms"},
241         {"rmac_len_discard"},
242         {"rmac_fcs_discard"},
243         {"rmac_pf_discard"},
244         {"rmac_da_discard"},
245         {"rmac_red_discard"},
246         {"rmac_rts_discard"},
247         {"rmac_ingm_full_discard"},
248         {"link_fault_cnt"}
249 };
250
251 static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
252         {"\n DRIVER STATISTICS"},
253         {"single_bit_ecc_errs"},
254         {"double_bit_ecc_errs"},
255         {"parity_err_cnt"},
256         {"serious_err_cnt"},
257         {"soft_reset_cnt"},
258         {"fifo_full_cnt"},
259         {"ring_0_full_cnt"},
260         {"ring_1_full_cnt"},
261         {"ring_2_full_cnt"},
262         {"ring_3_full_cnt"},
263         {"ring_4_full_cnt"},
264         {"ring_5_full_cnt"},
265         {"ring_6_full_cnt"},
266         {"ring_7_full_cnt"},
267         {"alarm_transceiver_temp_high"},
268         {"alarm_transceiver_temp_low"},
269         {"alarm_laser_bias_current_high"},
270         {"alarm_laser_bias_current_low"},
271         {"alarm_laser_output_power_high"},
272         {"alarm_laser_output_power_low"},
273         {"warn_transceiver_temp_high"},
274         {"warn_transceiver_temp_low"},
275         {"warn_laser_bias_current_high"},
276         {"warn_laser_bias_current_low"},
277         {"warn_laser_output_power_high"},
278         {"warn_laser_output_power_low"},
279         {"lro_aggregated_pkts"},
280         {"lro_flush_both_count"},
281         {"lro_out_of_sequence_pkts"},
282         {"lro_flush_due_to_max_pkts"},
283         {"lro_avg_aggr_pkts"},
284         {"mem_alloc_fail_cnt"},
285         {"pci_map_fail_cnt"},
286         {"watchdog_timer_cnt"},
287         {"mem_allocated"},
288         {"mem_freed"},
289         {"link_up_cnt"},
290         {"link_down_cnt"},
291         {"link_up_time"},
292         {"link_down_time"},
293         {"tx_tcode_buf_abort_cnt"},
294         {"tx_tcode_desc_abort_cnt"},
295         {"tx_tcode_parity_err_cnt"},
296         {"tx_tcode_link_loss_cnt"},
297         {"tx_tcode_list_proc_err_cnt"},
298         {"rx_tcode_parity_err_cnt"},
299         {"rx_tcode_abort_cnt"},
300         {"rx_tcode_parity_abort_cnt"},
301         {"rx_tcode_rda_fail_cnt"},
302         {"rx_tcode_unkn_prot_cnt"},
303         {"rx_tcode_fcs_err_cnt"},
304         {"rx_tcode_buf_size_err_cnt"},
305         {"rx_tcode_rxd_corrupt_cnt"},
306         {"rx_tcode_unkn_err_cnt"},
307         {"tda_err_cnt"},
308         {"pfc_err_cnt"},
309         {"pcc_err_cnt"},
310         {"tti_err_cnt"},
311         {"tpa_err_cnt"},
312         {"sm_err_cnt"},
313         {"lso_err_cnt"},
314         {"mac_tmac_err_cnt"},
315         {"mac_rmac_err_cnt"},
316         {"xgxs_txgxs_err_cnt"},
317         {"xgxs_rxgxs_err_cnt"},
318         {"rc_err_cnt"},
319         {"prc_pcix_err_cnt"},
320         {"rpa_err_cnt"},
321         {"rda_err_cnt"},
322         {"rti_err_cnt"},
323         {"mc_err_cnt"}
324 };
325
326 #define S2IO_XENA_STAT_LEN      ARRAY_SIZE(ethtool_xena_stats_keys)
327 #define S2IO_ENHANCED_STAT_LEN  ARRAY_SIZE(ethtool_enhanced_stats_keys)
328 #define S2IO_DRIVER_STAT_LEN    ARRAY_SIZE(ethtool_driver_stats_keys)
329
330 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
331 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )
332
333 #define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
334 #define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )
335
336 #define S2IO_TEST_LEN   ARRAY_SIZE(s2io_gstrings)
337 #define S2IO_STRINGS_LEN        S2IO_TEST_LEN * ETH_GSTRING_LEN
338
339 #define S2IO_TIMER_CONF(timer, handle, arg, exp)                \
340                         init_timer(&timer);                     \
341                         timer.function = handle;                \
342                         timer.data = (unsigned long) arg;       \
343                         mod_timer(&timer, (jiffies + exp))      \
344
345 /* copy mac addr to def_mac_addr array */
346 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
347 {
348         sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
349         sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
350         sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
351         sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
352         sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
353         sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
354 }
355 /* Add the vlan */
356 static void s2io_vlan_rx_register(struct net_device *dev,
357                                         struct vlan_group *grp)
358 {
359         int i;
360         struct s2io_nic *nic = dev->priv;
361         unsigned long flags[MAX_TX_FIFOS];
362         struct mac_info *mac_control = &nic->mac_control;
363         struct config_param *config = &nic->config;
364
365         for (i = 0; i < config->tx_fifo_num; i++)
366                 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
367
368         nic->vlgrp = grp;
369         for (i = config->tx_fifo_num - 1; i >= 0; i--)
370                 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
371                                 flags[i]);
372 }
373
374 /* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
375 static int vlan_strip_flag;
376
377 /* Unregister the vlan */
378 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
379 {
380         int i;
381         struct s2io_nic *nic = dev->priv;
382         unsigned long flags[MAX_TX_FIFOS];
383         struct mac_info *mac_control = &nic->mac_control;
384         struct config_param *config = &nic->config;
385
386         for (i = 0; i < config->tx_fifo_num; i++)
387                 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
388
389         if (nic->vlgrp)
390                 vlan_group_set_device(nic->vlgrp, vid, NULL);
391
392         for (i = config->tx_fifo_num - 1; i >= 0; i--)
393                 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
394                         flags[i]);
395 }
396
397 /*
398  * Constants to be programmed into the Xena's registers, to configure
399  * the XAUI.
400  */
401
402 #define END_SIGN        0x0
403 static const u64 herc_act_dtx_cfg[] = {
404         /* Set address */
405         0x8000051536750000ULL, 0x80000515367500E0ULL,
406         /* Write data */
407         0x8000051536750004ULL, 0x80000515367500E4ULL,
408         /* Set address */
409         0x80010515003F0000ULL, 0x80010515003F00E0ULL,
410         /* Write data */
411         0x80010515003F0004ULL, 0x80010515003F00E4ULL,
412         /* Set address */
413         0x801205150D440000ULL, 0x801205150D4400E0ULL,
414         /* Write data */
415         0x801205150D440004ULL, 0x801205150D4400E4ULL,
416         /* Set address */
417         0x80020515F2100000ULL, 0x80020515F21000E0ULL,
418         /* Write data */
419         0x80020515F2100004ULL, 0x80020515F21000E4ULL,
420         /* Done */
421         END_SIGN
422 };
423
424 static const u64 xena_dtx_cfg[] = {
425         /* Set address */
426         0x8000051500000000ULL, 0x80000515000000E0ULL,
427         /* Write data */
428         0x80000515D9350004ULL, 0x80000515D93500E4ULL,
429         /* Set address */
430         0x8001051500000000ULL, 0x80010515000000E0ULL,
431         /* Write data */
432         0x80010515001E0004ULL, 0x80010515001E00E4ULL,
433         /* Set address */
434         0x8002051500000000ULL, 0x80020515000000E0ULL,
435         /* Write data */
436         0x80020515F2100004ULL, 0x80020515F21000E4ULL,
437         END_SIGN
438 };
439
440 /*
441  * Constants for Fixing the MacAddress problem seen mostly on
442  * Alpha machines.
443  */
444 static const u64 fix_mac[] = {
445         0x0060000000000000ULL, 0x0060600000000000ULL,
446         0x0040600000000000ULL, 0x0000600000000000ULL,
447         0x0020600000000000ULL, 0x0060600000000000ULL,
448         0x0020600000000000ULL, 0x0060600000000000ULL,
449         0x0020600000000000ULL, 0x0060600000000000ULL,
450         0x0020600000000000ULL, 0x0060600000000000ULL,
451         0x0020600000000000ULL, 0x0060600000000000ULL,
452         0x0020600000000000ULL, 0x0060600000000000ULL,
453         0x0020600000000000ULL, 0x0060600000000000ULL,
454         0x0020600000000000ULL, 0x0060600000000000ULL,
455         0x0020600000000000ULL, 0x0060600000000000ULL,
456         0x0020600000000000ULL, 0x0060600000000000ULL,
457         0x0020600000000000ULL, 0x0000600000000000ULL,
458         0x0040600000000000ULL, 0x0060600000000000ULL,
459         END_SIGN
460 };
461
462 MODULE_LICENSE("GPL");
463 MODULE_VERSION(DRV_VERSION);
464
465
466 /* Module Loadable parameters. */
467 S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
468 S2IO_PARM_INT(rx_ring_num, 1);
469 S2IO_PARM_INT(multiq, 0);
470 S2IO_PARM_INT(rx_ring_mode, 1);
471 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
472 S2IO_PARM_INT(rmac_pause_time, 0x100);
473 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
474 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
475 S2IO_PARM_INT(shared_splits, 0);
476 S2IO_PARM_INT(tmac_util_period, 5);
477 S2IO_PARM_INT(rmac_util_period, 5);
478 S2IO_PARM_INT(l3l4hdr_size, 128);
479 /* 0 is no steering, 1 is Priority steering, 2 is Default steering */
480 S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
481 /* Frequency of Rx desc syncs expressed as power of 2 */
482 S2IO_PARM_INT(rxsync_frequency, 3);
483 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
484 S2IO_PARM_INT(intr_type, 2);
485 /* Large receive offload feature */
486 static unsigned int lro_enable;
487 module_param_named(lro, lro_enable, uint, 0);
488
489 /* Max pkts to be aggregated by LRO at one time. If not specified,
490  * aggregation happens until we hit max IP pkt size(64K)
491  */
492 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
493 S2IO_PARM_INT(indicate_max_pkts, 0);
494
495 S2IO_PARM_INT(napi, 1);
496 S2IO_PARM_INT(ufo, 0);
497 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
498
499 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
500     {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
501 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
502     {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
503 static unsigned int rts_frm_len[MAX_RX_RINGS] =
504     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
505
506 module_param_array(tx_fifo_len, uint, NULL, 0);
507 module_param_array(rx_ring_sz, uint, NULL, 0);
508 module_param_array(rts_frm_len, uint, NULL, 0);
509
510 /*
511  * S2IO device table.
512  * This table lists all the devices that this driver supports.
513  */
514 static struct pci_device_id s2io_tbl[] __devinitdata = {
515         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
516          PCI_ANY_ID, PCI_ANY_ID},
517         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
518          PCI_ANY_ID, PCI_ANY_ID},
519         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
520          PCI_ANY_ID, PCI_ANY_ID},
521         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
522          PCI_ANY_ID, PCI_ANY_ID},
523         {0,}
524 };
525
526 MODULE_DEVICE_TABLE(pci, s2io_tbl);
527
528 static struct pci_error_handlers s2io_err_handler = {
529         .error_detected = s2io_io_error_detected,
530         .slot_reset = s2io_io_slot_reset,
531         .resume = s2io_io_resume,
532 };
533
534 static struct pci_driver s2io_driver = {
535       .name = "S2IO",
536       .id_table = s2io_tbl,
537       .probe = s2io_init_nic,
538       .remove = __devexit_p(s2io_rem_nic),
539       .err_handler = &s2io_err_handler,
540 };
541
542 /* A simplifier macro used both by init and free shared_mem Fns(). */
543 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
544
545 /* netqueue manipulation helper functions */
546 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
547 {
548         int i;
549 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
550         if (sp->config.multiq) {
551                 for (i = 0; i < sp->config.tx_fifo_num; i++)
552                         netif_stop_subqueue(sp->dev, i);
553         } else
554 #endif
555         {
556                 for (i = 0; i < sp->config.tx_fifo_num; i++)
557                         sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
558                 netif_stop_queue(sp->dev);
559         }
560 }
561
562 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
563 {
564 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
565         if (sp->config.multiq)
566                 netif_stop_subqueue(sp->dev, fifo_no);
567         else
568 #endif
569         {
570                 sp->mac_control.fifos[fifo_no].queue_state =
571                         FIFO_QUEUE_STOP;
572                 netif_stop_queue(sp->dev);
573         }
574 }
575
576 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
577 {
578         int i;
579 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
580         if (sp->config.multiq) {
581                 for (i = 0; i < sp->config.tx_fifo_num; i++)
582                         netif_start_subqueue(sp->dev, i);
583         } else
584 #endif
585         {
586                 for (i = 0; i < sp->config.tx_fifo_num; i++)
587                         sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
588                 netif_start_queue(sp->dev);
589         }
590 }
591
592 static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
593 {
594 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
595         if (sp->config.multiq)
596                 netif_start_subqueue(sp->dev, fifo_no);
597         else
598 #endif
599         {
600                 sp->mac_control.fifos[fifo_no].queue_state =
601                         FIFO_QUEUE_START;
602                 netif_start_queue(sp->dev);
603         }
604 }
605
606 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
607 {
608         int i;
609 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
610         if (sp->config.multiq) {
611                 for (i = 0; i < sp->config.tx_fifo_num; i++)
612                         netif_wake_subqueue(sp->dev, i);
613         } else
614 #endif
615         {
616                 for (i = 0; i < sp->config.tx_fifo_num; i++)
617                         sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
618                 netif_wake_queue(sp->dev);
619         }
620 }
621
622 static inline void s2io_wake_tx_queue(
623         struct fifo_info *fifo, int cnt, u8 multiq)
624 {
625
626 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
627         if (multiq) {
628                 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
629                         netif_wake_subqueue(fifo->dev, fifo->fifo_no);
630         } else
631 #endif
632         if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
633                 if (netif_queue_stopped(fifo->dev)) {
634                         fifo->queue_state = FIFO_QUEUE_START;
635                         netif_wake_queue(fifo->dev);
636                 }
637         }
638 }
639
640 /**
641  * init_shared_mem - Allocation and Initialization of Memory
642  * @nic: Device private variable.
643  * Description: The function allocates all the memory areas shared
644  * between the NIC and the driver. This includes Tx descriptors,
645  * Rx descriptors and the statistics block.
646  */
647
648 static int init_shared_mem(struct s2io_nic *nic)
649 {
650         u32 size;
651         void *tmp_v_addr, *tmp_v_addr_next;
652         dma_addr_t tmp_p_addr, tmp_p_addr_next;
653         struct RxD_block *pre_rxd_blk = NULL;
654         int i, j, blk_cnt;
655         int lst_size, lst_per_page;
656         struct net_device *dev = nic->dev;
657         unsigned long tmp;
658         struct buffAdd *ba;
659
660         struct mac_info *mac_control;
661         struct config_param *config;
662         unsigned long long mem_allocated = 0;
663
664         mac_control = &nic->mac_control;
665         config = &nic->config;
666
667
668         /* Allocation and initialization of TXDLs in FIOFs */
669         size = 0;
670         for (i = 0; i < config->tx_fifo_num; i++) {
671                 size += config->tx_cfg[i].fifo_len;
672         }
673         if (size > MAX_AVAILABLE_TXDS) {
674                 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
675                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
676                 return -EINVAL;
677         }
678
679         size = 0;
680         for (i = 0; i < config->tx_fifo_num; i++) {
681                 size = config->tx_cfg[i].fifo_len;
682                 /*
683                  * Legal values are from 2 to 8192
684                  */
685                 if (size < 2) {
686                         DBG_PRINT(ERR_DBG, "s2io: Invalid fifo len (%d)", size);
687                         DBG_PRINT(ERR_DBG, "for fifo %d\n", i);
688                         DBG_PRINT(ERR_DBG, "s2io: Legal values for fifo len"
689                                 "are 2 to 8192\n");
690                         return -EINVAL;
691                 }
692         }
693
694         lst_size = (sizeof(struct TxD) * config->max_txds);
695         lst_per_page = PAGE_SIZE / lst_size;
696
697         for (i = 0; i < config->tx_fifo_num; i++) {
698                 int fifo_len = config->tx_cfg[i].fifo_len;
699                 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
700                 mac_control->fifos[i].list_info = kzalloc(list_holder_size,
701                                                           GFP_KERNEL);
702                 if (!mac_control->fifos[i].list_info) {
703                         DBG_PRINT(INFO_DBG,
704                                   "Malloc failed for list_info\n");
705                         return -ENOMEM;
706                 }
707                 mem_allocated += list_holder_size;
708         }
709         for (i = 0; i < config->tx_fifo_num; i++) {
710                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
711                                                 lst_per_page);
712                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
713                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
714                     config->tx_cfg[i].fifo_len - 1;
715                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
716                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
717                     config->tx_cfg[i].fifo_len - 1;
718                 mac_control->fifos[i].fifo_no = i;
719                 mac_control->fifos[i].nic = nic;
720                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
721                 mac_control->fifos[i].dev = dev;
722
723                 for (j = 0; j < page_num; j++) {
724                         int k = 0;
725                         dma_addr_t tmp_p;
726                         void *tmp_v;
727                         tmp_v = pci_alloc_consistent(nic->pdev,
728                                                      PAGE_SIZE, &tmp_p);
729                         if (!tmp_v) {
730                                 DBG_PRINT(INFO_DBG,
731                                           "pci_alloc_consistent ");
732                                 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
733                                 return -ENOMEM;
734                         }
735                         /* If we got a zero DMA address(can happen on
736                          * certain platforms like PPC), reallocate.
737                          * Store virtual address of page we don't want,
738                          * to be freed later.
739                          */
740                         if (!tmp_p) {
741                                 mac_control->zerodma_virt_addr = tmp_v;
742                                 DBG_PRINT(INIT_DBG,
743                                 "%s: Zero DMA address for TxDL. ", dev->name);
744                                 DBG_PRINT(INIT_DBG,
745                                 "Virtual address %p\n", tmp_v);
746                                 tmp_v = pci_alloc_consistent(nic->pdev,
747                                                      PAGE_SIZE, &tmp_p);
748                                 if (!tmp_v) {
749                                         DBG_PRINT(INFO_DBG,
750                                           "pci_alloc_consistent ");
751                                         DBG_PRINT(INFO_DBG, "failed for TxDL\n");
752                                         return -ENOMEM;
753                                 }
754                                 mem_allocated += PAGE_SIZE;
755                         }
756                         while (k < lst_per_page) {
757                                 int l = (j * lst_per_page) + k;
758                                 if (l == config->tx_cfg[i].fifo_len)
759                                         break;
760                                 mac_control->fifos[i].list_info[l].list_virt_addr =
761                                     tmp_v + (k * lst_size);
762                                 mac_control->fifos[i].list_info[l].list_phy_addr =
763                                     tmp_p + (k * lst_size);
764                                 k++;
765                         }
766                 }
767         }
768
769         for (i = 0; i < config->tx_fifo_num; i++) {
770                 size = config->tx_cfg[i].fifo_len;
771                 mac_control->fifos[i].ufo_in_band_v
772                         = kcalloc(size, sizeof(u64), GFP_KERNEL);
773                 if (!mac_control->fifos[i].ufo_in_band_v)
774                         return -ENOMEM;
775                 mem_allocated += (size * sizeof(u64));
776         }
777
778         /* Allocation and initialization of RXDs in Rings */
779         size = 0;
780         for (i = 0; i < config->rx_ring_num; i++) {
781                 if (config->rx_cfg[i].num_rxd %
782                     (rxd_count[nic->rxd_mode] + 1)) {
783                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
784                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
785                                   i);
786                         DBG_PRINT(ERR_DBG, "RxDs per Block");
787                         return FAILURE;
788                 }
789                 size += config->rx_cfg[i].num_rxd;
790                 mac_control->rings[i].block_count =
791                         config->rx_cfg[i].num_rxd /
792                         (rxd_count[nic->rxd_mode] + 1 );
793                 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
794                         mac_control->rings[i].block_count;
795         }
796         if (nic->rxd_mode == RXD_MODE_1)
797                 size = (size * (sizeof(struct RxD1)));
798         else
799                 size = (size * (sizeof(struct RxD3)));
800
801         for (i = 0; i < config->rx_ring_num; i++) {
802                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
803                 mac_control->rings[i].rx_curr_get_info.offset = 0;
804                 mac_control->rings[i].rx_curr_get_info.ring_len =
805                     config->rx_cfg[i].num_rxd - 1;
806                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
807                 mac_control->rings[i].rx_curr_put_info.offset = 0;
808                 mac_control->rings[i].rx_curr_put_info.ring_len =
809                     config->rx_cfg[i].num_rxd - 1;
810                 mac_control->rings[i].nic = nic;
811                 mac_control->rings[i].ring_no = i;
812                 mac_control->rings[i].lro = lro_enable;
813
814                 blk_cnt = config->rx_cfg[i].num_rxd /
815                                 (rxd_count[nic->rxd_mode] + 1);
816                 /*  Allocating all the Rx blocks */
817                 for (j = 0; j < blk_cnt; j++) {
818                         struct rx_block_info *rx_blocks;
819                         int l;
820
821                         rx_blocks = &mac_control->rings[i].rx_blocks[j];
822                         size = SIZE_OF_BLOCK; //size is always page size
823                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
824                                                           &tmp_p_addr);
825                         if (tmp_v_addr == NULL) {
826                                 /*
827                                  * In case of failure, free_shared_mem()
828                                  * is called, which should free any
829                                  * memory that was alloced till the
830                                  * failure happened.
831                                  */
832                                 rx_blocks->block_virt_addr = tmp_v_addr;
833                                 return -ENOMEM;
834                         }
835                         mem_allocated += size;
836                         memset(tmp_v_addr, 0, size);
837                         rx_blocks->block_virt_addr = tmp_v_addr;
838                         rx_blocks->block_dma_addr = tmp_p_addr;
839                         rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
840                                                   rxd_count[nic->rxd_mode],
841                                                   GFP_KERNEL);
842                         if (!rx_blocks->rxds)
843                                 return -ENOMEM;
844                         mem_allocated +=
845                         (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
846                         for (l=0; l<rxd_count[nic->rxd_mode];l++) {
847                                 rx_blocks->rxds[l].virt_addr =
848                                         rx_blocks->block_virt_addr +
849                                         (rxd_size[nic->rxd_mode] * l);
850                                 rx_blocks->rxds[l].dma_addr =
851                                         rx_blocks->block_dma_addr +
852                                         (rxd_size[nic->rxd_mode] * l);
853                         }
854                 }
855                 /* Interlinking all Rx Blocks */
856                 for (j = 0; j < blk_cnt; j++) {
857                         tmp_v_addr =
858                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
859                         tmp_v_addr_next =
860                                 mac_control->rings[i].rx_blocks[(j + 1) %
861                                               blk_cnt].block_virt_addr;
862                         tmp_p_addr =
863                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
864                         tmp_p_addr_next =
865                                 mac_control->rings[i].rx_blocks[(j + 1) %
866                                               blk_cnt].block_dma_addr;
867
868                         pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
869                         pre_rxd_blk->reserved_2_pNext_RxD_block =
870                             (unsigned long) tmp_v_addr_next;
871                         pre_rxd_blk->pNext_RxD_Blk_physical =
872                             (u64) tmp_p_addr_next;
873                 }
874         }
875         if (nic->rxd_mode == RXD_MODE_3B) {
876                 /*
877                  * Allocation of Storages for buffer addresses in 2BUFF mode
878                  * and the buffers as well.
879                  */
880                 for (i = 0; i < config->rx_ring_num; i++) {
881                         blk_cnt = config->rx_cfg[i].num_rxd /
882                            (rxd_count[nic->rxd_mode]+ 1);
883                         mac_control->rings[i].ba =
884                                 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
885                                      GFP_KERNEL);
886                         if (!mac_control->rings[i].ba)
887                                 return -ENOMEM;
888                         mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
889                         for (j = 0; j < blk_cnt; j++) {
890                                 int k = 0;
891                                 mac_control->rings[i].ba[j] =
892                                         kmalloc((sizeof(struct buffAdd) *
893                                                 (rxd_count[nic->rxd_mode] + 1)),
894                                                 GFP_KERNEL);
895                                 if (!mac_control->rings[i].ba[j])
896                                         return -ENOMEM;
897                                 mem_allocated += (sizeof(struct buffAdd) *  \
898                                         (rxd_count[nic->rxd_mode] + 1));
899                                 while (k != rxd_count[nic->rxd_mode]) {
900                                         ba = &mac_control->rings[i].ba[j][k];
901
902                                         ba->ba_0_org = (void *) kmalloc
903                                             (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
904                                         if (!ba->ba_0_org)
905                                                 return -ENOMEM;
906                                         mem_allocated +=
907                                                 (BUF0_LEN + ALIGN_SIZE);
908                                         tmp = (unsigned long)ba->ba_0_org;
909                                         tmp += ALIGN_SIZE;
910                                         tmp &= ~((unsigned long) ALIGN_SIZE);
911                                         ba->ba_0 = (void *) tmp;
912
913                                         ba->ba_1_org = (void *) kmalloc
914                                             (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
915                                         if (!ba->ba_1_org)
916                                                 return -ENOMEM;
917                                         mem_allocated
918                                                 += (BUF1_LEN + ALIGN_SIZE);
919                                         tmp = (unsigned long) ba->ba_1_org;
920                                         tmp += ALIGN_SIZE;
921                                         tmp &= ~((unsigned long) ALIGN_SIZE);
922                                         ba->ba_1 = (void *) tmp;
923                                         k++;
924                                 }
925                         }
926                 }
927         }
928
929         /* Allocation and initialization of Statistics block */
930         size = sizeof(struct stat_block);
931         mac_control->stats_mem = pci_alloc_consistent
932             (nic->pdev, size, &mac_control->stats_mem_phy);
933
934         if (!mac_control->stats_mem) {
935                 /*
936                  * In case of failure, free_shared_mem() is called, which
937                  * should free any memory that was alloced till the
938                  * failure happened.
939                  */
940                 return -ENOMEM;
941         }
942         mem_allocated += size;
943         mac_control->stats_mem_sz = size;
944
945         tmp_v_addr = mac_control->stats_mem;
946         mac_control->stats_info = (struct stat_block *) tmp_v_addr;
947         memset(tmp_v_addr, 0, size);
948         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
949                   (unsigned long long) tmp_p_addr);
950         mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
951         return SUCCESS;
952 }
953
954 /**
955  * free_shared_mem - Free the allocated Memory
956  * @nic:  Device private variable.
957  * Description: This function is to free all memory locations allocated by
958  * the init_shared_mem() function and return it to the kernel.
959  */
960
static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	struct mac_info *mac_control;
	struct config_param *config;
	int lst_size, lst_per_page;
	struct net_device *dev;
	int page_num = 0;

	/* Nothing to do if the device never got a private structure. */
	if (!nic)
		return;

	dev = nic->dev;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Size of one TxD list and how many such lists fit in a page;
	 * must mirror the layout computed in init_shared_mem().
	 */
	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	/* Free the per-fifo TxD list pages. A NULL list_virt_addr marks
	 * the first page that was never allocated (partial-alloc case),
	 * so we stop early on it.
	 */
	for (i = 0; i < config->tx_fifo_num; i++) {
		page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
							lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			/* list_info itself may not have been allocated yet. */
			if (!mac_control->fifos[i].list_info)
				return;
			if (!mac_control->fifos[i].list_info[mem_blks].
				 list_virt_addr)
				break;
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_virt_addr,
					    mac_control->fifos[i].
					    list_info[mem_blks].
					    list_phy_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed
						+= PAGE_SIZE;
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		/* NOTE(review): zerodma_virt_addr is not cleared after the
		 * free, so with tx_fifo_num > 1 this block re-frees the same
		 * page on every fifo iteration — confirm whether the zero-DMA
		 * workaround is only ever hit with a single fifo.
		 */
		if (mac_control->zerodma_virt_addr) {
			pci_free_consistent(nic->pdev, PAGE_SIZE,
					    mac_control->zerodma_virt_addr,
					    (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				"%s: Freeing TxDL with zero DMA addr. ",
				dev->name);
			DBG_PRINT(INIT_DBG, "Virtual address %p\n",
				mac_control->zerodma_virt_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed
						+= PAGE_SIZE;
		}
		/* kfree(NULL) is a no-op, so this is safe even when
		 * list_info was never allocated.
		 */
		kfree(mac_control->fifos[i].list_info);
		nic->mac_control.stats_info->sw_stat.mem_freed +=
		(nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
	}

	/* Free the Rx block DMA memory and the per-block rxd_info arrays.
	 * A NULL block_virt_addr marks the first unallocated block.
	 */
	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		blk_cnt = mac_control->rings[i].block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
				block_virt_addr;
			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
				block_dma_addr;
			if (tmp_v_addr == NULL)
				break;
			pci_free_consistent(nic->pdev, size,
					    tmp_v_addr, tmp_p_addr);
			nic->mac_control.stats_info->sw_stat.mem_freed += size;
			kfree(mac_control->rings[i].rx_blocks[j].rxds);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
			( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
		}
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		/* NOTE(review): rings[i].ba is dereferenced without a NULL
		 * check; if init_shared_mem() failed before allocating it,
		 * ba[j] below reads through a NULL pointer — verify the
		 * failure paths that reach here.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
			    (rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!mac_control->rings[i].ba[j])
					continue;
				/* Free both aligned buffers of every RxD
				 * in this block (original, unaligned
				 * pointers were stashed in ba_*_org).
				 */
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba =
						&mac_control->rings[i].ba[j][k];
					kfree(ba->ba_0_org);
					nic->mac_control.stats_info->sw_stat.\
					mem_freed += (BUF0_LEN + ALIGN_SIZE);
					kfree(ba->ba_1_org);
					nic->mac_control.stats_info->sw_stat.\
					mem_freed += (BUF1_LEN + ALIGN_SIZE);
					k++;
				}
				kfree(mac_control->rings[i].ba[j]);
				nic->mac_control.stats_info->sw_stat.mem_freed +=
					(sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1));
			}
			kfree(mac_control->rings[i].ba);
			nic->mac_control.stats_info->sw_stat.mem_freed +=
			(sizeof(struct buffAdd *) * blk_cnt);
		}
	}

	/* Free the per-fifo UFO in-band header scratch arrays. */
	for (i = 0; i < nic->config.tx_fifo_num; i++) {
		if (mac_control->fifos[i].ufo_in_band_v) {
			nic->mac_control.stats_info->sw_stat.mem_freed
				+= (config->tx_cfg[i].fifo_len * sizeof(u64));
			kfree(mac_control->fifos[i].ufo_in_band_v);
		}
	}

	/* The statistics block is freed last: every accounting update above
	 * writes through stats_info, which lives inside stats_mem.
	 */
	if (mac_control->stats_mem) {
		nic->mac_control.stats_info->sw_stat.mem_freed +=
			mac_control->stats_mem_sz;
		pci_free_consistent(nic->pdev,
				    mac_control->stats_mem_sz,
				    mac_control->stats_mem,
				    mac_control->stats_mem_phy);
	}
}
1090
/**
 * s2io_verify_pci_mode - Check that the adapter reports a known PCI/PCI-X mode
 * @nic: device private variable
 * Return: the decoded PCI mode on success, -1 if the mode is unknown.
 */
1094
1095 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1096 {
1097         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1098         register u64 val64 = 0;
1099         int     mode;
1100
1101         val64 = readq(&bar0->pci_mode);
1102         mode = (u8)GET_PCI_MODE(val64);
1103
1104         if ( val64 & PCI_MODE_UNKNOWN_MODE)
1105                 return -1;      /* Unknown PCI mode */
1106         return mode;
1107 }
1108
1109 #define NEC_VENID   0x1033
1110 #define NEC_DEVID   0x0125
1111 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1112 {
1113         struct pci_dev *tdev = NULL;
1114         while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1115                 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1116                         if (tdev->bus == s2io_pdev->bus->parent) {
1117                                 pci_dev_put(tdev);
1118                                 return 1;
1119                         }
1120                 }
1121         }
1122         return 0;
1123 }
1124
1125 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
/**
 * s2io_print_pci_mode - Log the bus width and speed the adapter is running at
 * @nic: device private variable
 * Return: the decoded PCI mode on success, -1 on an unknown or unsupported mode.
 */
1129 static int s2io_print_pci_mode(struct s2io_nic *nic)
1130 {
1131         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1132         register u64 val64 = 0;
1133         int     mode;
1134         struct config_param *config = &nic->config;
1135
1136         val64 = readq(&bar0->pci_mode);
1137         mode = (u8)GET_PCI_MODE(val64);
1138
1139         if ( val64 & PCI_MODE_UNKNOWN_MODE)
1140                 return -1;      /* Unknown PCI mode */
1141
1142         config->bus_speed = bus_speed[mode];
1143
1144         if (s2io_on_nec_bridge(nic->pdev)) {
1145                 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1146                                                         nic->dev->name);
1147                 return mode;
1148         }
1149
1150         if (val64 & PCI_MODE_32_BITS) {
1151                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1152         } else {
1153                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1154         }
1155
1156         switch(mode) {
1157                 case PCI_MODE_PCI_33:
1158                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
1159                         break;
1160                 case PCI_MODE_PCI_66:
1161                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
1162                         break;
1163                 case PCI_MODE_PCIX_M1_66:
1164                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
1165                         break;
1166                 case PCI_MODE_PCIX_M1_100:
1167                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
1168                         break;
1169                 case PCI_MODE_PCIX_M1_133:
1170                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1171                         break;
1172                 case PCI_MODE_PCIX_M2_66:
1173                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1174                         break;
1175                 case PCI_MODE_PCIX_M2_100:
1176                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1177                         break;
1178                 case PCI_MODE_PCIX_M2_133:
1179                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1180                         break;
1181                 default:
1182                         return -1;      /* Unsupported bus speed */
1183         }
1184
1185         return mode;
1186 }
1187
/**
 *  init_tti - Initialization of the transmit traffic interrupt scheme
 *  @nic: device private variable
 *  @link: link status (UP/DOWN) used to enable/disable continuous
 *  transmit interrupts
 *  Description: The function configures transmit traffic interrupts
 *  Return Value:  SUCCESS on success and
 *  '-1' on failure
 */
1197
static int init_tti(struct s2io_nic *nic, int link)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int i;
	struct config_param *config;

	config = &nic->config;

	/* Program one TTI entry per Tx fifo through the indirect
	 * command-memory interface: write data1/data2, then issue the
	 * command with the fifo's offset and wait for the strobe to clear.
	 */
	for (i = 0; i < config->tx_fifo_num; i++) {
		/*
		 * TTI Initialization. Default Tx timer gets us about
		 * 250 interrupts per sec. Continuous interrupts are enabled
		 * by default.
		 */
		if (nic->device_type == XFRAME_II_DEVICE) {
			/* Scale the timer with the measured bus speed on
			 * Xframe-II; Xframe-I uses a fixed value.
			 */
			int count = (nic->config.bus_speed * 125)/2;
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
		} else
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);

		/* Utilization-range thresholds A/B/C plus automatic timer
		 * restart (AC_EN).
		 */
		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
				TTI_DATA1_MEM_TX_URNG_B(0x10) |
				TTI_DATA1_MEM_TX_URNG_C(0x30) |
				TTI_DATA1_MEM_TX_TIMER_AC_EN;
		/* Continuous interrupts only on fifo 0, only while the
		 * link is up and the module parameter allows it.
		 */
		if (i == 0)
			if (use_continuous_tx_intrs && (link == LINK_UP))
				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
		writeq(val64, &bar0->tti_data1_mem);

		if (nic->config.intr_type == MSI_X) {
			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
				TTI_DATA2_MEM_TX_UFC_B(0x100) |
				TTI_DATA2_MEM_TX_UFC_C(0x200) |
				TTI_DATA2_MEM_TX_UFC_D(0x300);
		} else {
			/* With default Tx steering and multiple fifos, the
			 * UDP fifo range gets its own (lower) frame-count
			 * thresholds.
			 */
			if ((nic->config.tx_steering_type ==
				TX_DEFAULT_STEERING) &&
				(config->tx_fifo_num > 1) &&
				(i >= nic->udp_fifo_idx) &&
				(i < (nic->udp_fifo_idx +
				nic->total_udp_fifos)))
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
					TTI_DATA2_MEM_TX_UFC_B(0x80) |
					TTI_DATA2_MEM_TX_UFC_C(0x100) |
					TTI_DATA2_MEM_TX_UFC_D(0x120);
			else
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
					TTI_DATA2_MEM_TX_UFC_B(0x20) |
					TTI_DATA2_MEM_TX_UFC_C(0x40) |
					TTI_DATA2_MEM_TX_UFC_D(0x80);
		}

		writeq(val64, &bar0->tti_data2_mem);

		/* Commit the entry for fifo i and wait for the hardware to
		 * acknowledge by clearing the strobe bit.
		 */
		val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD |
				TTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->tti_command_mem);

		if (wait_for_cmd_complete(&bar0->tti_command_mem,
			TTI_CMD_MEM_STROBE_NEW_CMD, S2IO_BIT_RESET) != SUCCESS)
			return FAILURE;
	}

	return SUCCESS;
}
1264
1265 /**
1266  *  init_nic - Initialization of hardware
1267  *  @nic: device private variable
1268  *  Description: The function sequentially configures every block
1269  *  of the H/W from their reset values.
1270  *  Return Value:  SUCCESS on success and
1271  *  '-1' on failure (endian settings incorrect).
1272  */
1273
1274 static int init_nic(struct s2io_nic *nic)
1275 {
1276         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1277         struct net_device *dev = nic->dev;
1278         register u64 val64 = 0;
1279         void __iomem *add;
1280         u32 time;
1281         int i, j;
1282         struct mac_info *mac_control;
1283         struct config_param *config;
1284         int dtx_cnt = 0;
1285         unsigned long long mem_share;
1286         int mem_size;
1287
1288         mac_control = &nic->mac_control;
1289         config = &nic->config;
1290
1291         /* to set the swapper controle on the card */
1292         if(s2io_set_swapper(nic)) {
1293                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1294                 return -EIO;
1295         }
1296
1297         /*
1298          * Herc requires EOI to be removed from reset before XGXS, so..
1299          */
1300         if (nic->device_type & XFRAME_II_DEVICE) {
1301                 val64 = 0xA500000000ULL;
1302                 writeq(val64, &bar0->sw_reset);
1303                 msleep(500);
1304                 val64 = readq(&bar0->sw_reset);
1305         }
1306
1307         /* Remove XGXS from reset state */
1308         val64 = 0;
1309         writeq(val64, &bar0->sw_reset);
1310         msleep(500);
1311         val64 = readq(&bar0->sw_reset);
1312
1313         /* Ensure that it's safe to access registers by checking
1314          * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1315          */
1316         if (nic->device_type == XFRAME_II_DEVICE) {
1317                 for (i = 0; i < 50; i++) {
1318                         val64 = readq(&bar0->adapter_status);
1319                         if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1320                                 break;
1321                         msleep(10);
1322                 }
1323                 if (i == 50)
1324                         return -ENODEV;
1325         }
1326
1327         /*  Enable Receiving broadcasts */
1328         add = &bar0->mac_cfg;
1329         val64 = readq(&bar0->mac_cfg);
1330         val64 |= MAC_RMAC_BCAST_ENABLE;
1331         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1332         writel((u32) val64, add);
1333         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1334         writel((u32) (val64 >> 32), (add + 4));
1335
1336         /* Read registers in all blocks */
1337         val64 = readq(&bar0->mac_int_mask);
1338         val64 = readq(&bar0->mc_int_mask);
1339         val64 = readq(&bar0->xgxs_int_mask);
1340
1341         /*  Set MTU */
1342         val64 = dev->mtu;
1343         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1344
1345         if (nic->device_type & XFRAME_II_DEVICE) {
1346                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1347                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1348                                           &bar0->dtx_control, UF);
1349                         if (dtx_cnt & 0x1)
1350                                 msleep(1); /* Necessary!! */
1351                         dtx_cnt++;
1352                 }
1353         } else {
1354                 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1355                         SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1356                                           &bar0->dtx_control, UF);
1357                         val64 = readq(&bar0->dtx_control);
1358                         dtx_cnt++;
1359                 }
1360         }
1361
1362         /*  Tx DMA Initialization */
1363         val64 = 0;
1364         writeq(val64, &bar0->tx_fifo_partition_0);
1365         writeq(val64, &bar0->tx_fifo_partition_1);
1366         writeq(val64, &bar0->tx_fifo_partition_2);
1367         writeq(val64, &bar0->tx_fifo_partition_3);
1368
1369
1370         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1371                 val64 |=
1372                     vBIT(config->tx_cfg[i].fifo_len - 1, ((j * 32) + 19),
1373                          13) | vBIT(config->tx_cfg[i].fifo_priority,
1374                                     ((j * 32) + 5), 3);
1375
1376                 if (i == (config->tx_fifo_num - 1)) {
1377                         if (i % 2 == 0)
1378                                 i++;
1379                 }
1380
1381                 switch (i) {
1382                 case 1:
1383                         writeq(val64, &bar0->tx_fifo_partition_0);
1384                         val64 = 0;
1385                         j = 0;
1386                         break;
1387                 case 3:
1388                         writeq(val64, &bar0->tx_fifo_partition_1);
1389                         val64 = 0;
1390                         j = 0;
1391                         break;
1392                 case 5:
1393                         writeq(val64, &bar0->tx_fifo_partition_2);
1394                         val64 = 0;
1395                         j = 0;
1396                         break;
1397                 case 7:
1398                         writeq(val64, &bar0->tx_fifo_partition_3);
1399                         val64 = 0;
1400                         j = 0;
1401                         break;
1402                 default:
1403                         j++;
1404                         break;
1405                 }
1406         }
1407
1408         /*
1409          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1410          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1411          */
1412         if ((nic->device_type == XFRAME_I_DEVICE) &&
1413                 (nic->pdev->revision < 4))
1414                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1415
1416         val64 = readq(&bar0->tx_fifo_partition_0);
1417         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1418                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1419
1420         /*
1421          * Initialization of Tx_PA_CONFIG register to ignore packet
1422          * integrity checking.
1423          */
1424         val64 = readq(&bar0->tx_pa_cfg);
1425         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1426             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1427         writeq(val64, &bar0->tx_pa_cfg);
1428
1429         /* Rx DMA intialization. */
1430         val64 = 0;
1431         for (i = 0; i < config->rx_ring_num; i++) {
1432                 val64 |=
1433                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1434                          3);
1435         }
1436         writeq(val64, &bar0->rx_queue_priority);
1437
1438         /*
1439          * Allocating equal share of memory to all the
1440          * configured Rings.
1441          */
1442         val64 = 0;
1443         if (nic->device_type & XFRAME_II_DEVICE)
1444                 mem_size = 32;
1445         else
1446                 mem_size = 64;
1447
1448         for (i = 0; i < config->rx_ring_num; i++) {
1449                 switch (i) {
1450                 case 0:
1451                         mem_share = (mem_size / config->rx_ring_num +
1452                                      mem_size % config->rx_ring_num);
1453                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1454                         continue;
1455                 case 1:
1456                         mem_share = (mem_size / config->rx_ring_num);
1457                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1458                         continue;
1459                 case 2:
1460                         mem_share = (mem_size / config->rx_ring_num);
1461                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1462                         continue;
1463                 case 3:
1464                         mem_share = (mem_size / config->rx_ring_num);
1465                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1466                         continue;
1467                 case 4:
1468                         mem_share = (mem_size / config->rx_ring_num);
1469                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1470                         continue;
1471                 case 5:
1472                         mem_share = (mem_size / config->rx_ring_num);
1473                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1474                         continue;
1475                 case 6:
1476                         mem_share = (mem_size / config->rx_ring_num);
1477                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1478                         continue;
1479                 case 7:
1480                         mem_share = (mem_size / config->rx_ring_num);
1481                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1482                         continue;
1483                 }
1484         }
1485         writeq(val64, &bar0->rx_queue_cfg);
1486
1487         /*
1488          * Filling Tx round robin registers
1489          * as per the number of FIFOs for equal scheduling priority
1490          */
1491         switch (config->tx_fifo_num) {
1492         case 1:
1493                 val64 = 0x0;
1494                 writeq(val64, &bar0->tx_w_round_robin_0);
1495                 writeq(val64, &bar0->tx_w_round_robin_1);
1496                 writeq(val64, &bar0->tx_w_round_robin_2);
1497                 writeq(val64, &bar0->tx_w_round_robin_3);
1498                 writeq(val64, &bar0->tx_w_round_robin_4);
1499                 break;
1500         case 2:
1501                 val64 = 0x0001000100010001ULL;
1502                 writeq(val64, &bar0->tx_w_round_robin_0);
1503                 writeq(val64, &bar0->tx_w_round_robin_1);
1504                 writeq(val64, &bar0->tx_w_round_robin_2);
1505                 writeq(val64, &bar0->tx_w_round_robin_3);
1506                 val64 = 0x0001000100000000ULL;
1507                 writeq(val64, &bar0->tx_w_round_robin_4);
1508                 break;
1509         case 3:
1510                 val64 = 0x0001020001020001ULL;
1511                 writeq(val64, &bar0->tx_w_round_robin_0);
1512                 val64 = 0x0200010200010200ULL;
1513                 writeq(val64, &bar0->tx_w_round_robin_1);
1514                 val64 = 0x0102000102000102ULL;
1515                 writeq(val64, &bar0->tx_w_round_robin_2);
1516                 val64 = 0x0001020001020001ULL;
1517                 writeq(val64, &bar0->tx_w_round_robin_3);
1518                 val64 = 0x0200010200000000ULL;
1519                 writeq(val64, &bar0->tx_w_round_robin_4);
1520                 break;
1521         case 4:
1522                 val64 = 0x0001020300010203ULL;
1523                 writeq(val64, &bar0->tx_w_round_robin_0);
1524                 writeq(val64, &bar0->tx_w_round_robin_1);
1525                 writeq(val64, &bar0->tx_w_round_robin_2);
1526                 writeq(val64, &bar0->tx_w_round_robin_3);
1527                 val64 = 0x0001020300000000ULL;
1528                 writeq(val64, &bar0->tx_w_round_robin_4);
1529                 break;
1530         case 5:
1531                 val64 = 0x0001020304000102ULL;
1532                 writeq(val64, &bar0->tx_w_round_robin_0);
1533                 val64 = 0x0304000102030400ULL;
1534                 writeq(val64, &bar0->tx_w_round_robin_1);
1535                 val64 = 0x0102030400010203ULL;
1536                 writeq(val64, &bar0->tx_w_round_robin_2);
1537                 val64 = 0x0400010203040001ULL;
1538                 writeq(val64, &bar0->tx_w_round_robin_3);
1539                 val64 = 0x0203040000000000ULL;
1540                 writeq(val64, &bar0->tx_w_round_robin_4);
1541                 break;
1542         case 6:
1543                 val64 = 0x0001020304050001ULL;
1544                 writeq(val64, &bar0->tx_w_round_robin_0);
1545                 val64 = 0x0203040500010203ULL;
1546                 writeq(val64, &bar0->tx_w_round_robin_1);
1547                 val64 = 0x0405000102030405ULL;
1548                 writeq(val64, &bar0->tx_w_round_robin_2);
1549                 val64 = 0x0001020304050001ULL;
1550                 writeq(val64, &bar0->tx_w_round_robin_3);
1551                 val64 = 0x0203040500000000ULL;
1552                 writeq(val64, &bar0->tx_w_round_robin_4);
1553                 break;
1554         case 7:
1555                 val64 = 0x0001020304050600ULL;
1556                 writeq(val64, &bar0->tx_w_round_robin_0);
1557                 val64 = 0x0102030405060001ULL;
1558                 writeq(val64, &bar0->tx_w_round_robin_1);
1559                 val64 = 0x0203040506000102ULL;
1560                 writeq(val64, &bar0->tx_w_round_robin_2);
1561                 val64 = 0x0304050600010203ULL;
1562                 writeq(val64, &bar0->tx_w_round_robin_3);
1563                 val64 = 0x0405060000000000ULL;
1564                 writeq(val64, &bar0->tx_w_round_robin_4);
1565                 break;
1566         case 8:
1567                 val64 = 0x0001020304050607ULL;
1568                 writeq(val64, &bar0->tx_w_round_robin_0);
1569                 writeq(val64, &bar0->tx_w_round_robin_1);
1570                 writeq(val64, &bar0->tx_w_round_robin_2);
1571                 writeq(val64, &bar0->tx_w_round_robin_3);
1572                 val64 = 0x0001020300000000ULL;
1573                 writeq(val64, &bar0->tx_w_round_robin_4);
1574                 break;
1575         }
1576
1577         /* Enable all configured Tx FIFO partitions */
1578         val64 = readq(&bar0->tx_fifo_partition_0);
1579         val64 |= (TX_FIFO_PARTITION_EN);
1580         writeq(val64, &bar0->tx_fifo_partition_0);
1581
1582         /* Filling the Rx round robin registers as per the
1583          * number of Rings and steering based on QoS with
1584          * equal priority.
1585          */
1586         switch (config->rx_ring_num) {
1587         case 1:
1588                 val64 = 0x0;
1589                 writeq(val64, &bar0->rx_w_round_robin_0);
1590                 writeq(val64, &bar0->rx_w_round_robin_1);
1591                 writeq(val64, &bar0->rx_w_round_robin_2);
1592                 writeq(val64, &bar0->rx_w_round_robin_3);
1593                 writeq(val64, &bar0->rx_w_round_robin_4);
1594
1595                 val64 = 0x8080808080808080ULL;
1596                 writeq(val64, &bar0->rts_qos_steering);
1597                 break;
1598         case 2:
1599                 val64 = 0x0001000100010001ULL;
1600                 writeq(val64, &bar0->rx_w_round_robin_0);
1601                 writeq(val64, &bar0->rx_w_round_robin_1);
1602                 writeq(val64, &bar0->rx_w_round_robin_2);
1603                 writeq(val64, &bar0->rx_w_round_robin_3);
1604                 val64 = 0x0001000100000000ULL;
1605                 writeq(val64, &bar0->rx_w_round_robin_4);
1606
1607                 val64 = 0x8080808040404040ULL;
1608                 writeq(val64, &bar0->rts_qos_steering);
1609                 break;
1610         case 3:
1611                 val64 = 0x0001020001020001ULL;
1612                 writeq(val64, &bar0->rx_w_round_robin_0);
1613                 val64 = 0x0200010200010200ULL;
1614                 writeq(val64, &bar0->rx_w_round_robin_1);
1615                 val64 = 0x0102000102000102ULL;
1616                 writeq(val64, &bar0->rx_w_round_robin_2);
1617                 val64 = 0x0001020001020001ULL;
1618                 writeq(val64, &bar0->rx_w_round_robin_3);
1619                 val64 = 0x0200010200000000ULL;
1620                 writeq(val64, &bar0->rx_w_round_robin_4);
1621
1622                 val64 = 0x8080804040402020ULL;
1623                 writeq(val64, &bar0->rts_qos_steering);
1624                 break;
1625         case 4:
1626                 val64 = 0x0001020300010203ULL;
1627                 writeq(val64, &bar0->rx_w_round_robin_0);
1628                 writeq(val64, &bar0->rx_w_round_robin_1);
1629                 writeq(val64, &bar0->rx_w_round_robin_2);
1630                 writeq(val64, &bar0->rx_w_round_robin_3);
1631                 val64 = 0x0001020300000000ULL;
1632                 writeq(val64, &bar0->rx_w_round_robin_4);
1633
1634                 val64 = 0x8080404020201010ULL;
1635                 writeq(val64, &bar0->rts_qos_steering);
1636                 break;
1637         case 5:
1638                 val64 = 0x0001020304000102ULL;
1639                 writeq(val64, &bar0->rx_w_round_robin_0);
1640                 val64 = 0x0304000102030400ULL;
1641                 writeq(val64, &bar0->rx_w_round_robin_1);
1642                 val64 = 0x0102030400010203ULL;
1643                 writeq(val64, &bar0->rx_w_round_robin_2);
1644                 val64 = 0x0400010203040001ULL;
1645                 writeq(val64, &bar0->rx_w_round_robin_3);
1646                 val64 = 0x0203040000000000ULL;
1647                 writeq(val64, &bar0->rx_w_round_robin_4);
1648
1649                 val64 = 0x8080404020201008ULL;
1650                 writeq(val64, &bar0->rts_qos_steering);
1651                 break;
1652         case 6:
1653                 val64 = 0x0001020304050001ULL;
1654                 writeq(val64, &bar0->rx_w_round_robin_0);
1655                 val64 = 0x0203040500010203ULL;
1656                 writeq(val64, &bar0->rx_w_round_robin_1);
1657                 val64 = 0x0405000102030405ULL;
1658                 writeq(val64, &bar0->rx_w_round_robin_2);
1659                 val64 = 0x0001020304050001ULL;
1660                 writeq(val64, &bar0->rx_w_round_robin_3);
1661                 val64 = 0x0203040500000000ULL;
1662                 writeq(val64, &bar0->rx_w_round_robin_4);
1663
1664                 val64 = 0x8080404020100804ULL;
1665                 writeq(val64, &bar0->rts_qos_steering);
1666                 break;
1667         case 7:
1668                 val64 = 0x0001020304050600ULL;
1669                 writeq(val64, &bar0->rx_w_round_robin_0);
1670                 val64 = 0x0102030405060001ULL;
1671                 writeq(val64, &bar0->rx_w_round_robin_1);
1672                 val64 = 0x0203040506000102ULL;
1673                 writeq(val64, &bar0->rx_w_round_robin_2);
1674                 val64 = 0x0304050600010203ULL;
1675                 writeq(val64, &bar0->rx_w_round_robin_3);
1676                 val64 = 0x0405060000000000ULL;
1677                 writeq(val64, &bar0->rx_w_round_robin_4);
1678
1679                 val64 = 0x8080402010080402ULL;
1680                 writeq(val64, &bar0->rts_qos_steering);
1681                 break;
1682         case 8:
1683                 val64 = 0x0001020304050607ULL;
1684                 writeq(val64, &bar0->rx_w_round_robin_0);
1685                 writeq(val64, &bar0->rx_w_round_robin_1);
1686                 writeq(val64, &bar0->rx_w_round_robin_2);
1687                 writeq(val64, &bar0->rx_w_round_robin_3);
1688                 val64 = 0x0001020300000000ULL;
1689                 writeq(val64, &bar0->rx_w_round_robin_4);
1690
1691                 val64 = 0x8040201008040201ULL;
1692                 writeq(val64, &bar0->rts_qos_steering);
1693                 break;
1694         }
1695
1696         /* UDP Fix */
1697         val64 = 0;
1698         for (i = 0; i < 8; i++)
1699                 writeq(val64, &bar0->rts_frm_len_n[i]);
1700
1701         /* Set the default rts frame length for the rings configured */
1702         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1703         for (i = 0 ; i < config->rx_ring_num ; i++)
1704                 writeq(val64, &bar0->rts_frm_len_n[i]);
1705
1706         /* Set the frame length for the configured rings
1707          * desired by the user
1708          */
1709         for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 then it is assumed that the user has
		 * not specified frame length steering.
		 * If the user provides the frame length then program
		 * the rts_frm_len register for those values or else
		 * leave it as it is.
		 */
1716                 if (rts_frm_len[i] != 0) {
1717                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1718                                 &bar0->rts_frm_len_n[i]);
1719                 }
1720         }
1721
1722         /* Disable differentiated services steering logic */
1723         for (i = 0; i < 64; i++) {
1724                 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1725                         DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1726                                 dev->name);
1727                         DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1728                         return -ENODEV;
1729                 }
1730         }
1731
1732         /* Program statistics memory */
1733         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1734
1735         if (nic->device_type == XFRAME_II_DEVICE) {
1736                 val64 = STAT_BC(0x320);
1737                 writeq(val64, &bar0->stat_byte_cnt);
1738         }
1739
1740         /*
1741          * Initializing the sampling rate for the device to calculate the
1742          * bandwidth utilization.
1743          */
1744         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1745             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1746         writeq(val64, &bar0->mac_link_util);
1747
1748         /*
1749          * Initializing the Transmit and Receive Traffic Interrupt
1750          * Scheme.
1751          */
1752
1753         /* Initialize TTI */
1754         if (SUCCESS != init_tti(nic, nic->last_link_state))
1755                 return -ENODEV;
1756
1757         /* RTI Initialization */
1758         if (nic->device_type == XFRAME_II_DEVICE) {
1759                 /*
1760                  * Programmed to generate Apprx 500 Intrs per
1761                  * second
1762                  */
1763                 int count = (nic->config.bus_speed * 125)/4;
1764                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1765         } else
1766                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1767         val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1768                  RTI_DATA1_MEM_RX_URNG_B(0x10) |
1769                  RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1770
1771         writeq(val64, &bar0->rti_data1_mem);
1772
1773         val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1774                 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1775         if (nic->config.intr_type == MSI_X)
1776             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1777                         RTI_DATA2_MEM_RX_UFC_D(0x40));
1778         else
1779             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1780                         RTI_DATA2_MEM_RX_UFC_D(0x80));
1781         writeq(val64, &bar0->rti_data2_mem);
1782
1783         for (i = 0; i < config->rx_ring_num; i++) {
1784                 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1785                                 | RTI_CMD_MEM_OFFSET(i);
1786                 writeq(val64, &bar0->rti_command_mem);
1787
1788                 /*
1789                  * Once the operation completes, the Strobe bit of the
1790                  * command register will be reset. We poll for this
1791                  * particular condition. We wait for a maximum of 500ms
1792                  * for the operation to complete, if it's not complete
1793                  * by then we return error.
1794                  */
1795                 time = 0;
1796                 while (TRUE) {
1797                         val64 = readq(&bar0->rti_command_mem);
1798                         if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1799                                 break;
1800
1801                         if (time > 10) {
1802                                 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1803                                           dev->name);
1804                                 return -ENODEV;
1805                         }
1806                         time++;
1807                         msleep(50);
1808                 }
1809         }
1810
1811         /*
1812          * Initializing proper values as Pause threshold into all
1813          * the 8 Queues on Rx side.
1814          */
1815         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1816         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1817
1818         /* Disable RMAC PAD STRIPPING */
1819         add = &bar0->mac_cfg;
1820         val64 = readq(&bar0->mac_cfg);
1821         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1822         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1823         writel((u32) (val64), add);
1824         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1825         writel((u32) (val64 >> 32), (add + 4));
1826         val64 = readq(&bar0->mac_cfg);
1827
1828         /* Enable FCS stripping by adapter */
1829         add = &bar0->mac_cfg;
1830         val64 = readq(&bar0->mac_cfg);
1831         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1832         if (nic->device_type == XFRAME_II_DEVICE)
1833                 writeq(val64, &bar0->mac_cfg);
1834         else {
1835                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1836                 writel((u32) (val64), add);
1837                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1838                 writel((u32) (val64 >> 32), (add + 4));
1839         }
1840
1841         /*
1842          * Set the time value to be inserted in the pause frame
1843          * generated by xena.
1844          */
1845         val64 = readq(&bar0->rmac_pause_cfg);
1846         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1847         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1848         writeq(val64, &bar0->rmac_pause_cfg);
1849
1850         /*
1851          * Set the Threshold Limit for Generating the pause frame
1852          * If the amount of data in any Queue exceeds ratio of
1853          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1854          * pause frame is generated
1855          */
1856         val64 = 0;
1857         for (i = 0; i < 4; i++) {
1858                 val64 |=
1859                     (((u64) 0xFF00 | nic->mac_control.
1860                       mc_pause_threshold_q0q3)
1861                      << (i * 2 * 8));
1862         }
1863         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1864
1865         val64 = 0;
1866         for (i = 0; i < 4; i++) {
1867                 val64 |=
1868                     (((u64) 0xFF00 | nic->mac_control.
1869                       mc_pause_threshold_q4q7)
1870                      << (i * 2 * 8));
1871         }
1872         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1873
1874         /*
1875          * TxDMA will stop Read request if the number of read split has
1876          * exceeded the limit pointed by shared_splits
1877          */
1878         val64 = readq(&bar0->pic_control);
1879         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1880         writeq(val64, &bar0->pic_control);
1881
1882         if (nic->config.bus_speed == 266) {
1883                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1884                 writeq(0x0, &bar0->read_retry_delay);
1885                 writeq(0x0, &bar0->write_retry_delay);
1886         }
1887
1888         /*
1889          * Programming the Herc to split every write transaction
1890          * that does not start on an ADB to reduce disconnects.
1891          */
1892         if (nic->device_type == XFRAME_II_DEVICE) {
1893                 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1894                         MISC_LINK_STABILITY_PRD(3);
1895                 writeq(val64, &bar0->misc_control);
1896                 val64 = readq(&bar0->pic_control2);
1897                 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1898                 writeq(val64, &bar0->pic_control2);
1899         }
1900         if (strstr(nic->product_name, "CX4")) {
1901                 val64 = TMAC_AVG_IPG(0x17);
1902                 writeq(val64, &bar0->tmac_avg_ipg);
1903         }
1904
1905         return SUCCESS;
1906 }
1907 #define LINK_UP_DOWN_INTERRUPT          1
1908 #define MAC_RMAC_ERR_TIMER              2
1909
1910 static int s2io_link_fault_indication(struct s2io_nic *nic)
1911 {
1912         if (nic->config.intr_type != INTA)
1913                 return MAC_RMAC_ERR_TIMER;
1914         if (nic->device_type == XFRAME_II_DEVICE)
1915                 return LINK_UP_DOWN_INTERRUPT;
1916         else
1917                 return MAC_RMAC_ERR_TIMER;
1918 }
1919
1920 /**
1921  *  do_s2io_write_bits -  update alarm bits in alarm register
1922  *  @value: alarm bits
1923  *  @flag: interrupt status
1924  *  @addr: address value
1925  *  Description: update alarm bits in alarm register
1926  *  Return Value:
1927  *  NONE.
1928  */
1929 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1930 {
1931         u64 temp64;
1932
1933         temp64 = readq(addr);
1934
1935         if(flag == ENABLE_INTRS)
1936                 temp64 &= ~((u64) value);
1937         else
1938                 temp64 |= ((u64) value);
1939         writeq(temp64, addr);
1940 }
1941
/**
 *  en_dis_err_alarms - mask or unmask error/alarm interrupt sources
 *  @nic: device private variable
 *  @mask: bitmap of interrupt blocks to act on (TX_DMA_INTR, TX_MAC_INTR,
 *         TX_XGXS_INTR, RX_DMA_INTR, RX_MAC_INTR, RX_XGXS_INTR, MC_INTR)
 *  @flag: ENABLE_INTRS or DISABLE_INTRS, passed through to
 *         do_s2io_write_bits() for every per-block mask register
 *  Description: For each interrupt block selected in @mask, accumulate its
 *  top-level bit into a local general-interrupt mask and program the
 *  block-specific alarm mask registers in BAR0. The accumulated value is
 *  stored in nic->general_int_mask but is then overwritten with 0 below
 *  until alarm interrupts are enabled in this driver.
 *  Return Value: NONE.
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;

	/* Tx DMA block: TDA/PFC/PCC/TTI/LSO/TPA/SM alarm sources */
	if (mask & TX_DMA_INTR) {

		gen_int_mask |= TXDMA_INT_M;

		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				TXDMA_PCC_INT | TXDMA_TTI_INT |
				TXDMA_LSO_INT | TXDMA_TPA_INT |
				TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				&bar0->pfc_err_mask);

		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				PCC_N_SERR | PCC_6_COF_OV_ERR |
				PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);

		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				flag, &bar0->lso_err_mask);

		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);

	}

	/* Tx MAC (TMAC) block alarms */
	if (mask & TX_MAC_INTR) {
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				&bar0->mac_int_mask);
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				flag, &bar0->mac_tmac_err_mask);
	}

	/* Tx XGXS (serdes) block alarms */
	if (mask & TX_XGXS_INTR) {
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				&bar0->xgxs_int_mask);
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				flag, &bar0->xgxs_txgxs_err_mask);
	}

	/* Rx DMA block: RC/PRC/RPA/RDA/RTI alarm sources */
	if (mask & RX_DMA_INTR) {
		gen_int_mask |= RXDMA_INT_M;
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				flag, &bar0->rxdma_int_mask);
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				&bar0->prc_pcix_err_mask);
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				&bar0->rpa_err_mask);
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
				flag, &bar0->rda_err_mask);
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				flag, &bar0->rti_err_mask);
	}

	/* Rx MAC (RMAC) block alarms, including link state change */
	if (mask & RX_MAC_INTR) {
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				&bar0->mac_int_mask);
		do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				RMAC_DOUBLE_ECC_ERR |
				RMAC_LINK_STATE_CHANGE_INT,
				flag, &bar0->mac_rmac_err_mask);
	}

	/* Rx XGXS (serdes) block alarms */
	if (mask & RX_XGXS_INTR)
	{
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				&bar0->xgxs_int_mask);
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				&bar0->xgxs_rxgxs_err_mask);
	}

	/* Memory controller (MC) block alarms */
	if (mask & MC_INTR) {
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				&bar0->mc_err_mask);
	}
	nic->general_int_mask = gen_int_mask;

	/* Remove this line when alarm interrupts are enabled */
	nic->general_int_mask = 0;
}
2064 /**
2065  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
2066  *  @nic: device private variable,
2067  *  @mask: A mask indicating which Intr block must be modified and,
2068  *  @flag: A flag indicating whether to enable or disable the Intrs.
2069  *  Description: This function will either disable or enable the interrupts
2070  *  depending on the flag argument. The mask argument can be used to
2071  *  enable/disable any Intr block.
2072  *  Return Value: NONE.
2073  */
2074
2075 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
2076 {
2077         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2078         register u64 temp64 = 0, intr_mask = 0;
2079
2080         intr_mask = nic->general_int_mask;
2081
2082         /*  Top level interrupt classification */
2083         /*  PIC Interrupts */
2084         if (mask & TX_PIC_INTR) {
2085                 /*  Enable PIC Intrs in the general intr mask register */
2086                 intr_mask |= TXPIC_INT_M;
2087                 if (flag == ENABLE_INTRS) {
2088                         /*
2089                          * If Hercules adapter enable GPIO otherwise
2090                          * disable all PCIX, Flash, MDIO, IIC and GPIO
2091                          * interrupts for now.
2092                          * TODO
2093                          */
2094                         if (s2io_link_fault_indication(nic) ==
2095                                         LINK_UP_DOWN_INTERRUPT ) {
2096                                 do_s2io_write_bits(PIC_INT_GPIO, flag,
2097                                                 &bar0->pic_int_mask);
2098                                 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2099                                                 &bar0->gpio_int_mask);
2100                         } else
2101                                 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2102                 } else if (flag == DISABLE_INTRS) {
2103                         /*
2104                          * Disable PIC Intrs in the general
2105                          * intr mask register
2106                          */
2107                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2108                 }
2109         }
2110
2111         /*  Tx traffic interrupts */
2112         if (mask & TX_TRAFFIC_INTR) {
2113                 intr_mask |= TXTRAFFIC_INT_M;
2114                 if (flag == ENABLE_INTRS) {
2115                         /*
2116                          * Enable all the Tx side interrupts
2117                          * writing 0 Enables all 64 TX interrupt levels
2118                          */
2119                         writeq(0x0, &bar0->tx_traffic_mask);
2120                 } else if (flag == DISABLE_INTRS) {
2121                         /*
2122                          * Disable Tx Traffic Intrs in the general intr mask
2123                          * register.
2124                          */
2125                         writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2126                 }
2127         }
2128
2129         /*  Rx traffic interrupts */
2130         if (mask & RX_TRAFFIC_INTR) {
2131                 intr_mask |= RXTRAFFIC_INT_M;
2132                 if (flag == ENABLE_INTRS) {
2133                         /* writing 0 Enables all 8 RX interrupt levels */
2134                         writeq(0x0, &bar0->rx_traffic_mask);
2135                 } else if (flag == DISABLE_INTRS) {
2136                         /*
2137                          * Disable Rx Traffic Intrs in the general intr mask
2138                          * register.
2139                          */
2140                         writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2141                 }
2142         }
2143
2144         temp64 = readq(&bar0->general_int_mask);
2145         if (flag == ENABLE_INTRS)
2146                 temp64 &= ~((u64) intr_mask);
2147         else
2148                 temp64 = DISABLE_ALL_INTRS;
2149         writeq(temp64, &bar0->general_int_mask);
2150
2151         nic->general_int_mask = readq(&bar0->general_int_mask);
2152 }
2153
2154 /**
2155  *  verify_pcc_quiescent- Checks for PCC quiescent state
2156  *  Return: 1 If PCC is quiescence
2157  *          0 If PCC is not quiescence
2158  */
2159 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2160 {
2161         int ret = 0, herc;
2162         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2163         u64 val64 = readq(&bar0->adapter_status);
2164
2165         herc = (sp->device_type == XFRAME_II_DEVICE);
2166
2167         if (flag == FALSE) {
2168                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2169                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2170                                 ret = 1;
2171                 } else {
2172                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2173                                 ret = 1;
2174                 }
2175         } else {
2176                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2177                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2178                              ADAPTER_STATUS_RMAC_PCC_IDLE))
2179                                 ret = 1;
2180                 } else {
2181                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2182                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2183                                 ret = 1;
2184                 }
2185         }
2186
2187         return ret;
2188 }
2189 /**
2190  *  verify_xena_quiescence - Checks whether the H/W is ready
2191  *  Description: Returns whether the H/W is ready to go or not. Depending
2192  *  on whether adapter enable bit was written or not the comparison
2193  *  differs and the calling function passes the input argument flag to
2194  *  indicate this.
2195  *  Return: 1 If xena is quiescence
2196  *          0 If Xena is not quiescence
2197  */
2198
2199 static int verify_xena_quiescence(struct s2io_nic *sp)
2200 {
2201         int  mode;
2202         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2203         u64 val64 = readq(&bar0->adapter_status);
2204         mode = s2io_verify_pci_mode(sp);
2205
2206         if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2207                 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2208                 return 0;
2209         }
2210         if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2211         DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2212                 return 0;
2213         }
2214         if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2215                 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2216                 return 0;
2217         }
2218         if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2219                 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2220                 return 0;
2221         }
2222         if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2223                 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2224                 return 0;
2225         }
2226         if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2227                 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2228                 return 0;
2229         }
2230         if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2231                 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2232                 return 0;
2233         }
2234         if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2235                 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2236                 return 0;
2237         }
2238
2239         /*
2240          * In PCI 33 mode, the P_PLL is not used, and therefore,
2241          * the the P_PLL_LOCK bit in the adapter_status register will
2242          * not be asserted.
2243          */
2244         if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2245                 sp->device_type == XFRAME_II_DEVICE && mode !=
2246                 PCI_MODE_PCI_33) {
2247                 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2248                 return 0;
2249         }
2250         if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2251                         ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2252                 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2253                 return 0;
2254         }
2255         return 1;
2256 }
2257
2258 /**
2259  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2260  * @sp: Pointer to device specifc structure
2261  * Description :
2262  * New procedure to clear mac address reading  problems on Alpha platforms
2263  *
2264  */
2265
2266 static void fix_mac_address(struct s2io_nic * sp)
2267 {
2268         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2269         u64 val64;
2270         int i = 0;
2271
2272         while (fix_mac[i] != END_SIGN) {
2273                 writeq(fix_mac[i++], &bar0->gpio_control);
2274                 udelay(10);
2275                 val64 = readq(&bar0->gpio_control);
2276         }
2277 }
2278
/**
 *  start_nic - Turns the device on
 *  @nic : device private variable.
 *  Description:
 *  This function actually turns the device on. Before this function is
 *  called, all Registers are configured from their reset states
 *  and shared memory is allocated but the NIC is still quiescent. On
 *  calling this function, the device interrupts are cleared and the NIC is
 *  literally switched on by writing into the adapter control register.
 *  Return Value:
 *  SUCCESS on success and FAILURE (-1) when the adapter does not reach
 *  the quiescent state required for enabling.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*  PRC Initialization and configuration */
	/* Point each PRC (Packet Receive Controller) at the first RxD
	 * block of its ring and program the ring mode + RxD backoff. */
	for (i = 0; i < config->rx_ring_num; i++) {
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Module parameter: leave VLAN tags in place when stripping is off. */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	/* NOTE(review): the comment says "Enabling" but the code clears
	 * ADAPTER_ECC_EN -- presumably the bit is a disable / active-low
	 * control; confirm against the Xframe register specification. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initally on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		/* Magic LED programming value at register offset 0x2700;
		 * taken from the SXE-002 errata workaround. */
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
2397 /**
2398  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2399  */
2400 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2401                                         TxD *txdlp, int get_off)
2402 {
2403         struct s2io_nic *nic = fifo_data->nic;
2404         struct sk_buff *skb;
2405         struct TxD *txds;
2406         u16 j, frg_cnt;
2407
2408         txds = txdlp;
2409         if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2410                 pci_unmap_single(nic->pdev, (dma_addr_t)
2411                         txds->Buffer_Pointer, sizeof(u64),
2412                         PCI_DMA_TODEVICE);
2413                 txds++;
2414         }
2415
2416         skb = (struct sk_buff *) ((unsigned long)
2417                         txds->Host_Control);
2418         if (!skb) {
2419                 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2420                 return NULL;
2421         }
2422         pci_unmap_single(nic->pdev, (dma_addr_t)
2423                          txds->Buffer_Pointer,
2424                          skb->len - skb->data_len,
2425                          PCI_DMA_TODEVICE);
2426         frg_cnt = skb_shinfo(skb)->nr_frags;
2427         if (frg_cnt) {
2428                 txds++;
2429                 for (j = 0; j < frg_cnt; j++, txds++) {
2430                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2431                         if (!txds->Buffer_Pointer)
2432                                 break;
2433                         pci_unmap_page(nic->pdev, (dma_addr_t)
2434                                         txds->Buffer_Pointer,
2435                                        frag->size, PCI_DMA_TODEVICE);
2436                 }
2437         }
2438         memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2439         return(skb);
2440 }
2441
2442 /**
2443  *  free_tx_buffers - Free all queued Tx buffers
2444  *  @nic : device private variable.
2445  *  Description:
2446  *  Free all queued Tx buffers.
2447  *  Return Value: void
2448 */
2449
2450 static void free_tx_buffers(struct s2io_nic *nic)
2451 {
2452         struct net_device *dev = nic->dev;
2453         struct sk_buff *skb;
2454         struct TxD *txdp;
2455         int i, j;
2456         struct mac_info *mac_control;
2457         struct config_param *config;
2458         int cnt = 0;
2459
2460         mac_control = &nic->mac_control;
2461         config = &nic->config;
2462
2463         for (i = 0; i < config->tx_fifo_num; i++) {
2464                 unsigned long flags;
2465                 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags);
2466                 for (j = 0; j < config->tx_cfg[i].fifo_len; j++) {
2467                         txdp = (struct TxD *) \
2468                         mac_control->fifos[i].list_info[j].list_virt_addr;
2469                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2470                         if (skb) {
2471                                 nic->mac_control.stats_info->sw_stat.mem_freed
2472                                         += skb->truesize;
2473                                 dev_kfree_skb(skb);
2474                                 cnt++;
2475                         }
2476                 }
2477                 DBG_PRINT(INTR_DBG,
2478                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2479                           dev->name, cnt, i);
2480                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2481                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2482                 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock, flags);
2483         }
2484 }
2485
2486 /**
2487  *   stop_nic -  To stop the nic
2488  *   @nic ; device private variable.
2489  *   Description:
2490  *   This function does exactly the opposite of what the start_nic()
2491  *   function does. This function is called to stop the device.
2492  *   Return Value:
2493  *   void.
2494  */
2495
2496 static void stop_nic(struct s2io_nic *nic)
2497 {
2498         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2499         register u64 val64 = 0;
2500         u16 interruptible;
2501         struct mac_info *mac_control;
2502         struct config_param *config;
2503
2504         mac_control = &nic->mac_control;
2505         config = &nic->config;
2506
2507         /*  Disable all interrupts */
2508         en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2509         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2510         interruptible |= TX_PIC_INTR;
2511         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2512
2513         /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2514         val64 = readq(&bar0->adapter_control);
2515         val64 &= ~(ADAPTER_CNTL_EN);
2516         writeq(val64, &bar0->adapter_control);
2517 }
2518
/**
 *  fill_rx_buffers - Allocates the Rx side skbs
 *  @ring: per ring structure
 *  Description:
 *  The function allocates Rx side skbs and puts the physical
 *  address of these buffers into the RxD buffer pointers, so that the NIC
 *  can DMA the received frame into these locations.
 *  The NIC supports 3 receive modes, viz
 *  1. single buffer,
 *  2. three buffer and
 *  3. Five buffer modes.
 *  Each mode defines how many fragments the received frame will be split
 *  up into by the NIC. The frame is split into L3 header, L4 Header,
 *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 *  is split into 3 fragments. This function handles the single buffer
 *  (RXD_MODE_1) and two buffer (RXD_MODE_3B) modes.
 *   Return Value:
 *  SUCCESS on success or an appropriate -ve value on failure.
 */

static int fill_rx_buffers(struct ring_info *ring)
{
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	u64 tmp;
	struct buffAdd *ba;
	/* First descriptor of the current sync batch; its OWN bit is the
	 * last to be handed to the adapter (after a wmb). */
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	/* NOTE(review): rxd_index is computed below but never read in this
	 * function -- appears to be dead; confirm before removing. */
	int rxd_index = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *stats = &ring->nic->mac_control.stats_info->sw_stat;

	/* Number of buffers needed to top the ring back up to pkt_cnt. */
	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;

	block_no1 = ring->rx_curr_get_info.block_index;
	while (alloc_tab < alloc_cnt) {
		block_no = ring->rx_curr_put_info.block_index;

		off = ring->rx_curr_put_info.offset;

		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;

		rxd_index = off + 1;
		if (block_no)
			rxd_index += (block_no * ring->rxd_count);

		/* Ring full: the put pointer has caught up with the get
		 * pointer while the RxD still owns an skb. */
		if ((block_no == block_no1) &&
			(off == ring->rx_curr_get_info.offset) &&
			(rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put",
				ring->dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		/* Wrap to the next block once past the last usable RxD.
		 * NOTE(review): this test uses rxd_count while the
		 * post-increment wrap below uses rxd_count + 1 -- presumably
		 * the extra slot is the block's link descriptor; confirm
		 * against the rx_block layout. */
		if (off && (off == ring->rxd_count)) {
			ring->rx_curr_put_info.block_index++;
			if (ring->rx_curr_put_info.block_index ==
							ring->block_count)
				ring->rx_curr_put_info.block_index = 0;
			block_no = ring->rx_curr_put_info.block_index;
			off = 0;
			ring->rx_curr_put_info.offset = off;
			rxdp = ring->rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  ring->dev->name, rxdp);

		}

		/* Descriptor still owned by the adapter: stop refilling. */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
			((ring->rxd_mode == RXD_MODE_3B) &&
				(rxdp->Control_2 & s2BIT(0)))) {
			ring->rx_curr_put_info.offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = ring->mtu + HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (ring->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if(!skb) {
			DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name);
			DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
			/* Hand the adapter whatever was filled so far
			 * before bailing out. */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			stats->mem_alloc_fail_cnt++;

			return -ENOMEM ;
		}
		stats->mem_allocated += skb->truesize;

		if (ring->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1*)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr = pci_map_single
			    (ring->pdev, skb->data, size - NET_IP_ALIGN,
				PCI_DMA_FROMDEVICE);
			if( (rxdp1->Buffer0_ptr == 0) ||
				(rxdp1->Buffer0_ptr ==
				DMA_ERROR_CODE))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
			rxdp->Host_Control = (unsigned long) (skb);
		} else if (ring->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3*)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &ring->ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Round skb->data up to an alignment boundary
			 * (assumes ALIGN_SIZE is a 2^n - 1 mask -- TODO
			 * confirm). */
			tmp = (u64)(unsigned long) skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			/* Buffer0 (header buffer) is mapped once and then
			 * only synced on reuse. */
			if (!(rxdp3->Buffer0_ptr))
				rxdp3->Buffer0_ptr =
				   pci_map_single(ring->pdev, ba->ba_0,
					BUF0_LEN, PCI_DMA_FROMDEVICE);
			else
				pci_dma_sync_single_for_device(ring->pdev,
				(dma_addr_t) rxdp3->Buffer0_ptr,
				    BUF0_LEN, PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer0_ptr == 0) ||
				(rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
				goto pci_map_failed;

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (ring->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = pci_map_single
				(ring->pdev, skb->data, ring->mtu + 4,
						PCI_DMA_FROMDEVICE);

				if( (rxdp3->Buffer2_ptr == 0) ||
					(rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
					goto pci_map_failed;

				if (!rxdp3->Buffer1_ptr)
					rxdp3->Buffer1_ptr =
						pci_map_single(ring->pdev,
						ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);

				if( (rxdp3->Buffer1_ptr == 0) ||
					(rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
					/* Undo the Buffer2 mapping before
					 * taking the common failure path. */
					pci_unmap_single
						(ring->pdev,
						(dma_addr_t)(unsigned long)
						skb->data,
						ring->mtu + 4,
						PCI_DMA_FROMDEVICE);
					goto pci_map_failed;
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
								(ring->mtu + 4);
			}
			rxdp->Control_2 |= s2BIT(0);
			rxdp->Host_Control = (unsigned long) (skb);
		}
		/* Hand ownership to the adapter immediately, except for the
		 * first descriptor of each 2^rxsync_frequency batch, which
		 * is deferred (see first_rxdp handling below). */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (ring->rxd_count + 1))
			off = 0;
		ring->rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			/* Batch boundary: release the previous batch's first
			 * descriptor, then start a new batch at this one. */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		ring->rx_bufs_left += 1;
		alloc_tab++;
	}

      end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;
pci_map_failed:
	/* skb is always valid here: this path is only reached after a
	 * successful dev_alloc_skb() in the current iteration. */
	stats->pci_map_fail_cnt++;
	stats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2747
2748 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2749 {
2750         struct net_device *dev = sp->dev;
2751         int j;
2752         struct sk_buff *skb;
2753         struct RxD_t *rxdp;
2754         struct mac_info *mac_control;
2755         struct buffAdd *ba;
2756         struct RxD1 *rxdp1;
2757         struct RxD3 *rxdp3;
2758
2759         mac_control = &sp->mac_control;
2760         for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2761                 rxdp = mac_control->rings[ring_no].
2762                                 rx_blocks[blk].rxds[j].virt_addr;
2763                 skb = (struct sk_buff *)
2764                         ((unsigned long) rxdp->Host_Control);
2765                 if (!skb) {
2766                         continue;
2767                 }
2768                 if (sp->rxd_mode == RXD_MODE_1) {
2769                         rxdp1 = (struct RxD1*)rxdp;
2770                         pci_unmap_single(sp->pdev, (dma_addr_t)
2771                                 rxdp1->Buffer0_ptr,
2772                                 dev->mtu +
2773                                 HEADER_ETHERNET_II_802_3_SIZE
2774                                 + HEADER_802_2_SIZE +
2775                                 HEADER_SNAP_SIZE,
2776                                 PCI_DMA_FROMDEVICE);
2777                         memset(rxdp, 0, sizeof(struct RxD1));
2778                 } else if(sp->rxd_mode == RXD_MODE_3B) {
2779                         rxdp3 = (struct RxD3*)rxdp;
2780                         ba = &mac_control->rings[ring_no].
2781                                 ba[blk][j];
2782                         pci_unmap_single(sp->pdev, (dma_addr_t)
2783                                 rxdp3->Buffer0_ptr,
2784                                 BUF0_LEN,
2785                                 PCI_DMA_FROMDEVICE);
2786                         pci_unmap_single(sp->pdev, (dma_addr_t)
2787                                 rxdp3->Buffer1_ptr,
2788                                 BUF1_LEN,
2789                                 PCI_DMA_FROMDEVICE);
2790                         pci_unmap_single(sp->pdev, (dma_addr_t)
2791                                 rxdp3->Buffer2_ptr,
2792                                 dev->mtu + 4,
2793                                 PCI_DMA_FROMDEVICE);
2794                         memset(rxdp, 0, sizeof(struct RxD3));
2795                 }
2796                 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2797                 dev_kfree_skb(skb);
2798                 mac_control->rings[ring_no].rx_bufs_left -= 1;
2799         }
2800 }
2801
2802 /**
2803  *  free_rx_buffers - Frees all Rx buffers
2804  *  @sp: device private variable.
2805  *  Description:
2806  *  This function will free all Rx buffers allocated by host.
2807  *  Return Value:
2808  *  NONE.
2809  */
2810
2811 static void free_rx_buffers(struct s2io_nic *sp)
2812 {
2813         struct net_device *dev = sp->dev;
2814         int i, blk = 0, buf_cnt = 0;
2815         struct mac_info *mac_control;
2816         struct config_param *config;
2817
2818         mac_control = &sp->mac_control;
2819         config = &sp->config;
2820
2821         for (i = 0; i < config->rx_ring_num; i++) {
2822                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2823                         free_rxd_blk(sp,i,blk);
2824
2825                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2826                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2827                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2828                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2829                 mac_control->rings[i].rx_bufs_left = 0;
2830                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2831                           dev->name, buf_cnt, i);
2832         }
2833 }
2834
2835 static int s2io_chk_rx_buffers(struct ring_info *ring)
2836 {
2837         if (fill_rx_buffers(ring) == -ENOMEM) {
2838                 DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
2839                 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
2840         }
2841         return 0;
2842 }
2843
2844 /**
2845  * s2io_poll - Rx interrupt handler for NAPI support
2846  * @napi : pointer to the napi structure.
2847  * @budget : The number of packets that were budgeted to be processed
2848  * during  one pass through the 'Poll" function.
2849  * Description:
2850  * Comes into picture only if NAPI support has been incorporated. It does
2851  * the same thing that rx_intr_handler does, but not in a interrupt context
2852  * also It will process only a given number of packets.
2853  * Return value:
2854  * 0 on success and 1 if there are No Rx packets to be processed.
2855  */
2856
2857 static int s2io_poll_msix(struct napi_struct *napi, int budget)
2858 {
2859         struct ring_info *ring = container_of(napi, struct ring_info, napi);
2860         struct net_device *dev = ring->dev;
2861         struct config_param *config;
2862         struct mac_info *mac_control;
2863         int pkts_processed = 0;
2864         u8 __iomem *addr = NULL;
2865         u8 val8 = 0;
2866         struct s2io_nic *nic = dev->priv;
2867         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2868         int budget_org = budget;
2869
2870         config = &nic->config;
2871         mac_control = &nic->mac_control;
2872
2873         if (unlikely(!is_s2io_card_up(nic)))
2874                 return 0;
2875
2876         pkts_processed = rx_intr_handler(ring, budget);
2877         s2io_chk_rx_buffers(ring);
2878
2879         if (pkts_processed < budget_org) {
2880                 netif_rx_complete(dev, napi);
2881                 /*Re Enable MSI-Rx Vector*/
2882                 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2883                 addr += 7 - ring->ring_no;
2884                 val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2885                 writeb(val8, addr);
2886                 val8 = readb(addr);
2887         }
2888         return pkts_processed;
2889 }
2890 static int s2io_poll_inta(struct napi_struct *napi, int budget)
2891 {
2892         struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2893         struct ring_info *ring;
2894         struct net_device *dev = nic->dev;
2895         struct config_param *config;
2896         struct mac_info *mac_control;
2897         int pkts_processed = 0;
2898         int ring_pkts_processed, i;
2899         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2900         int budget_org = budget;
2901
2902         config = &nic->config;
2903         mac_control = &nic->mac_control;
2904
2905         if (unlikely(!is_s2io_card_up(nic)))
2906                 return 0;
2907
2908         for (i = 0; i < config->rx_ring_num; i++) {
2909                 ring = &mac_control->rings[i];
2910                 ring_pkts_processed = rx_intr_handler(ring, budget);
2911                 s2io_chk_rx_buffers(ring);
2912                 pkts_processed += ring_pkts_processed;
2913                 budget -= ring_pkts_processed;
2914                 if (budget <= 0)
2915                         break;
2916         }
2917         if (pkts_processed < budget_org) {
2918                 netif_rx_complete(dev, napi);
2919                 /* Re enable the Rx interrupts for the ring */
2920                 writeq(0, &bar0->rx_traffic_mask);
2921                 readl(&bar0->rx_traffic_mask);
2922         }
2923         return pkts_processed;
2924 }
2925
2926 #ifdef CONFIG_NET_POLL_CONTROLLER
2927 /**
2928  * s2io_netpoll - netpoll event handler entry point
2929  * @dev : pointer to the device structure.
2930  * Description:
2931  *      This function will be called by upper layer to check for events on the
2932  * interface in situations where interrupts are disabled. It is used for
2933  * specific in-kernel networking tasks, such as remote consoles and kernel
2934  * debugging over the network (example netdump in RedHat).
2935  */
2936 static void s2io_netpoll(struct net_device *dev)
2937 {
2938         struct s2io_nic *nic = dev->priv;
2939         struct mac_info *mac_control;
2940         struct config_param *config;
2941         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2942         u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2943         int i;
2944
2945         if (pci_channel_offline(nic->pdev))
2946                 return;
2947
2948         disable_irq(dev->irq);
2949
2950         mac_control = &nic->mac_control;
2951         config = &nic->config;
2952
2953         writeq(val64, &bar0->rx_traffic_int);
2954         writeq(val64, &bar0->tx_traffic_int);
2955
2956         /* we need to free up the transmitted skbufs or else netpoll will
2957          * run out of skbs and will fail and eventually netpoll application such
2958          * as netdump will fail.
2959          */
2960         for (i = 0; i < config->tx_fifo_num; i++)
2961                 tx_intr_handler(&mac_control->fifos[i]);
2962
2963         /* check for received packet and indicate up to network */
2964         for (i = 0; i < config->rx_ring_num; i++)
2965                 rx_intr_handler(&mac_control->rings[i], 0);
2966
2967         for (i = 0; i < config->rx_ring_num; i++) {
2968                 if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
2969                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2970                         DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2971                         break;
2972                 }
2973         }
2974         enable_irq(dev->irq);
2975         return;
2976 }
2977 #endif
2978
2979 /**
2980  *  rx_intr_handler - Rx interrupt handler
 *  @ring_data: per ring structure.
2982  *  @budget: budget for napi processing.
2983  *  Description:
2984  *  If the interrupt is because of a received frame or if the
2985  *  receive ring contains fresh as yet un-processed frames,this function is
2986  *  called. It picks out the RxD at which place the last Rx processing had
2987  *  stopped and sends the skb to the OSM's Rx handler and then increments
2988  *  the offset.
2989  *  Return Value:
2990  *  No. of napi packets processed.
2991  */
static int rx_intr_handler(struct ring_info *ring_data, int budget)
{
	int get_block, put_block;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0, napi_pkts = 0;
	int i;
	struct RxD1* rxdp1;
	struct RxD3* rxdp3;

	/* Snapshot the get/put positions: 'get' is where the previous Rx
	 * processing stopped, 'put' is where fresh buffers were posted. */
	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;

	/* Walk descriptors the NIC has handed back to the host */
	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If you are next to put index then it's
		 * FIFO full condition
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
				ring_data->dev->name);
			break;
		}
		/* Host_Control holds the skb pointer stashed at fill time */
		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: The skb is ",
				  ring_data->dev->name);
			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
			return 0;
		}
		/* Undo the DMA mapping done at fill time, per rxd mode */
		if (ring_data->rxd_mode == RXD_MODE_1) {
			rxdp1 = (struct RxD1*)rxdp;
			pci_unmap_single(ring_data->pdev, (dma_addr_t)
				rxdp1->Buffer0_ptr,
				ring_data->mtu +
				HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE +
				HEADER_SNAP_SIZE,
				PCI_DMA_FROMDEVICE);
		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
			/* 3-buffer mode: sync the small header buffer for CPU
			 * access, unmap the payload buffer (mtu + 4 bytes) */
			rxdp3 = (struct RxD3*)rxdp;
			pci_dma_sync_single_for_cpu(ring_data->pdev, (dma_addr_t)
				rxdp3->Buffer0_ptr,
				BUF0_LEN, PCI_DMA_FROMDEVICE);
			pci_unmap_single(ring_data->pdev, (dma_addr_t)
				rxdp3->Buffer2_ptr,
				ring_data->mtu + 4,
				PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		rx_osm_handler(ring_data, rxdp);
		/* Advance the get pointer, wrapping offset within the block
		 * and the block index within the ring */
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
				rxds[get_info.offset].virt_addr;
		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		/* In NAPI mode only 'budget' packets may be consumed */
		if (ring_data->nic->config.napi) {
			budget--;
			napi_pkts++;
			if (!budget)
				break;
		}
		pkt_cnt++;
		/* indicate_max_pkts == 0 means no per-pass cap */
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (ring_data->lro) {
		/* Clear all LRO sessions before exiting */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &ring_data->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(ring_data->nic, lro);
				queue_rx_frame(lro->parent, lro->vlan_tag);
				clear_lro_session(lro);
			}
		}
	}
	return(napi_pkts);
}
3085
3086 /**
 *  tx_intr_handler - Transmit interrupt handler
 *  @fifo_data : per-FIFO private structure
3089  *  Description:
3090  *  If an interrupt was raised to indicate DMA complete of the
3091  *  Tx packet, this function is called. It identifies the last TxD
3092  *  whose buffer was freed and frees all skbs whose data have already
3093  *  DMA'ed into the NICs internal memory.
3094  *  Return Value:
3095  *  NONE
3096  */
3097
static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb = NULL;
	struct TxD *txdlp;
	int pkt_cnt = 0;
	unsigned long flags = 0;
	u8 err_mask;

	/* If the lock is already held, skip this pass; completions will
	 * be reaped on a later invocation */
	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
			return;

	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
	    list_virt_addr;
	/* Reap descriptors the NIC no longer owns, up to the put index */
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1) {
				nic->mac_control.stats_info->sw_stat.
						parity_err_cnt++;
			}

			/* update t_code statistics; t_code sits in bits 48+
			 * of Control_1 */
			err_mask = err >> 48;
			switch(err_mask) {
				case 2:
					nic->mac_control.stats_info->sw_stat.
							tx_buf_abort_cnt++;
				break;

				case 3:
					nic->mac_control.stats_info->sw_stat.
							tx_desc_abort_cnt++;
				break;

				case 7:
					nic->mac_control.stats_info->sw_stat.
							tx_parity_err_cnt++;
				break;

				case 10:
					nic->mac_control.stats_info->sw_stat.
							tx_link_loss_cnt++;
				break;

				case 15:
					nic->mac_control.stats_info->sw_stat.
							tx_list_proc_err_cnt++;
				break;
			}
		}

		/* Recover the skb and unmap its buffers */
		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
			DBG_PRINT(ERR_DBG, "%s: Null skb ",
			__FUNCTION__);
			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
			return;
		}
		pkt_cnt++;

		/* Updating the statistics block */
		nic->stats.tx_bytes += skb->len;
		nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
		dev_kfree_skb_irq(skb);

		/* Advance the get index, wrapping at fifo_len + 1 entries */
		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = (struct TxD *) fifo_data->list_info
		    [get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset =
		    get_info.offset;
	}

	/* Possibly restart the Tx queue now that descriptors were freed */
	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);

	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
}
3185
3186 /**
3187  *  s2io_mdio_write - Function to write in to MDIO registers
3188  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3189  *  @addr     : address value
3190  *  @value    : data value
3191  *  @dev      : pointer to net_device structure
3192  *  Description:
3193  *  This function is used to write values to the MDIO registers
3194  *  NONE
3195  */
3196 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3197 {
3198         u64 val64 = 0x0;
3199         struct s2io_nic *sp = dev->priv;
3200         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3201
3202         //address transaction
3203         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3204                         | MDIO_MMD_DEV_ADDR(mmd_type)
3205                         | MDIO_MMS_PRT_ADDR(0x0);
3206         writeq(val64, &bar0->mdio_control);
3207         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3208         writeq(val64, &bar0->mdio_control);
3209         udelay(100);
3210
3211         //Data transaction
3212         val64 = 0x0;
3213         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3214                         | MDIO_MMD_DEV_ADDR(mmd_type)
3215                         | MDIO_MMS_PRT_ADDR(0x0)
3216                         | MDIO_MDIO_DATA(value)
3217                         | MDIO_OP(MDIO_OP_WRITE_TRANS);
3218         writeq(val64, &bar0->mdio_control);
3219         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3220         writeq(val64, &bar0->mdio_control);
3221         udelay(100);
3222
3223         val64 = 0x0;
3224         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3225         | MDIO_MMD_DEV_ADDR(mmd_type)
3226         | MDIO_MMS_PRT_ADDR(0x0)
3227         | MDIO_OP(MDIO_OP_READ_TRANS);
3228         writeq(val64, &bar0->mdio_control);
3229         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3230         writeq(val64, &bar0->mdio_control);
3231         udelay(100);
3232
3233 }
3234
/**
 *  s2io_mdio_read - Function to read from the MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to read values from the MDIO registers
 *  Return value:
 *  The 16-bit value read from the register
 */
3244 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3245 {
3246         u64 val64 = 0x0;
3247         u64 rval64 = 0x0;
3248         struct s2io_nic *sp = dev->priv;
3249         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3250
3251         /* address transaction */
3252         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3253                         | MDIO_MMD_DEV_ADDR(mmd_type)
3254                         | MDIO_MMS_PRT_ADDR(0x0);
3255         writeq(val64, &bar0->mdio_control);
3256         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3257         writeq(val64, &bar0->mdio_control);
3258         udelay(100);
3259
3260         /* Data transaction */
3261         val64 = 0x0;
3262         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3263                         | MDIO_MMD_DEV_ADDR(mmd_type)
3264                         | MDIO_MMS_PRT_ADDR(0x0)
3265                         | MDIO_OP(MDIO_OP_READ_TRANS);
3266         writeq(val64, &bar0->mdio_control);
3267         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3268         writeq(val64, &bar0->mdio_control);
3269         udelay(100);
3270
3271         /* Read the value from regs */
3272         rval64 = readq(&bar0->mdio_control);
3273         rval64 = rval64 & 0xFFFF0000;
3274         rval64 = rval64 >> 16;
3275         return rval64;
3276 }
3277 /**
3278  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
 *  @counter      : counter value to be updated
3280  *  @flag         : flag to indicate the status
3281  *  @type         : counter type
3282  *  Description:
3283  *  This function is to check the status of the xpak counters value
3284  *  NONE
3285  */
3286
3287 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3288 {
3289         u64 mask = 0x3;
3290         u64 val64;
3291         int i;
3292         for(i = 0; i <index; i++)
3293                 mask = mask << 0x2;
3294
3295         if(flag > 0)
3296         {
3297                 *counter = *counter + 1;
3298                 val64 = *regs_stat & mask;
3299                 val64 = val64 >> (index * 0x2);
3300                 val64 = val64 + 1;
3301                 if(val64 == 3)
3302                 {
3303                         switch(type)
3304                         {
3305                         case 1:
3306                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3307                                           "service. Excessive temperatures may "
3308                                           "result in premature transceiver "
3309                                           "failure \n");
3310                         break;
3311                         case 2:
3312                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3313                                           "service Excessive bias currents may "
3314                                           "indicate imminent laser diode "
3315                                           "failure \n");
3316                         break;
3317                         case 3:
3318                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3319                                           "service Excessive laser output "
3320                                           "power may saturate far-end "
3321                                           "receiver\n");
3322                         break;
3323                         default:
3324                                 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3325                                           "type \n");
3326                         }
3327                         val64 = 0x0;
3328                 }
3329                 val64 = val64 << (index * 0x2);
3330                 *regs_stat = (*regs_stat & (~mask)) | (val64);
3331
3332         } else {
3333                 *regs_stat = *regs_stat & (~mask);
3334         }
3335 }
3336
3337 /**
3338  *  s2io_updt_xpak_counter - Function to update the xpak counters
3339  *  @dev         : pointer to net_device struct
3340  *  Description:
 *  This function is to update the status of the xpak counters value
3342  *  NONE
3343  */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag  = 0x0;
	u16 type  = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr  = 0x0;

	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Check the communication with the MDIO slave */
	addr = 0x0000;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
	if((val64 == 0xFFFF) || (val64 == 0x0000))
	{
		DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
			  "Returned %llx\n", (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of 2040 at PMA address 0x0000 */
	if(val64 != 0x2040)
	{
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
		DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
			  (unsigned long long)val64);
		return;
	}

	/* Loading the DOM register to MDIO register */
	/* NOTE(review): writes val16 (0) to 0xA100 and reads it back;
	 * presumably this latches the XPAK DOM snapshot - confirm */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* "high" alarms go through the 2-bit persistence tracker in
	 * s2io_chk_xpak_counter(); "low" alarms are plain counters */
	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
				&stat_info->xpak_stat.xpak_regs_stat,
				0x0, flag, type);

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.alarm_transceiver_temp_low++;

	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
				&stat_info->xpak_stat.xpak_regs_stat,
				0x2, flag, type);

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.alarm_laser_bias_current_low++;

	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
				&stat_info->xpak_stat.xpak_regs_stat,
				0x4, flag, type);

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.alarm_laser_output_power_low++;

	/* Reading the Warning flags */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	if(CHECKBIT(val64, 0x7))
		stat_info->xpak_stat.warn_transceiver_temp_high++;

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.warn_transceiver_temp_low++;

	if(CHECKBIT(val64, 0x3))
		stat_info->xpak_stat.warn_laser_bias_current_high++;

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.warn_laser_bias_current_low++;

	if(CHECKBIT(val64, 0x1))
		stat_info->xpak_stat.warn_laser_output_power_high++;

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.warn_laser_output_power_low++;
}
3435
/**
 *  wait_for_cmd_complete - waits for a command to complete.
 *  @addr : address of the register to poll.
 *  @busy_bit : bit mask identifying the busy/ready bit in that register.
 *  @bit_state : S2IO_BIT_RESET or S2IO_BIT_SET, the state to wait for.
 *  Description: Function that waits for a command to Write into RMAC
 *  ADDR DATA registers to be completed and returns either success or
 *  error depending on whether the command was complete or not.
 *  Return value:
 *   SUCCESS on success and FAILURE on failure.
 */
3446
3447 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3448                                 int bit_state)
3449 {
3450         int ret = FAILURE, cnt = 0, delay = 1;
3451         u64 val64;
3452
3453         if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3454                 return FAILURE;
3455
3456         do {
3457                 val64 = readq(addr);
3458                 if (bit_state == S2IO_BIT_RESET) {
3459                         if (!(val64 & busy_bit)) {
3460                                 ret = SUCCESS;
3461                                 break;
3462                         }
3463                 } else {
3464                         if (!(val64 & busy_bit)) {
3465                                 ret = SUCCESS;
3466                                 break;
3467                         }
3468                 }
3469
3470                 if(in_interrupt())
3471                         mdelay(delay);
3472                 else
3473                         msleep(delay);
3474
3475                 if (++cnt >= 10)
3476                         delay = 50;
3477         } while (cnt < 20);
3478         return ret;
3479 }
3480 /*
3481  * check_pci_device_id - Checks if the device id is supported
3482  * @id : device id
3483  * Description: Function to check if the pci device id is supported by driver.
3484  * Return value: Actual device id if supported else PCI_ANY_ID
3485  */
3486 static u16 check_pci_device_id(u16 id)
3487 {
3488         switch (id) {
3489         case PCI_DEVICE_ID_HERC_WIN:
3490         case PCI_DEVICE_ID_HERC_UNI:
3491                 return XFRAME_II_DEVICE;
3492         case PCI_DEVICE_ID_S2IO_UNI:
3493         case PCI_DEVICE_ID_S2IO_WIN:
3494                 return XFRAME_I_DEVICE;
3495         default:
3496                 return PCI_ANY_ID;
3497         }
3498 }
3499
3500 /**
3501  *  s2io_reset - Resets the card.
3502  *  @sp : private member of the device structure.
3503  *  Description: Function to Reset the card. This function then also
3504  *  restores the previously saved PCI configuration space registers as
3505  *  the card reset also resets the configuration space.
3506  *  Return value:
3507  *  void.
3508  */
3509
static void s2io_reset(struct s2io_nic * sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;

	DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
			__FUNCTION__, sp->dev->name);

	/* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	/* Issue the software reset and give the card time to settle;
	 * CX4 boards get extra settle time */
	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
	if (strstr(sp->product_name, "CX4")) {
		msleep(750);
	}
	msleep(250);
	/* Poll config space until a supported device id reads back,
	 * i.e. the card has come out of reset.  Note: val16 relies on the
	 * loop running at least once (S2IO_MAX_PCI_CONFIG_SPACE_REINIT > 0,
	 * presumably - confirm). */
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
		DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
	}

	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* restore mac_addr entries */
	do_s2io_restore_unicast_mc(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(s2BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof (struct net_device_stats));

	up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
	down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
	up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
	down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
	reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
	mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
	mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
	watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
	/* save link up/down time/cnt, reset/memory/watchdog cnt */
	memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
	sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
	sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
	sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
	sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
	sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
	sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
	sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
	sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occured on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = FALSE;
}
3616
3617 /**
 *  s2io_set_swapper - to set the swapper control on the card
3619  *  @sp : private member of the device structure,
3620  *  pointer to the s2io_nic structure.
3621  *  Description: Function to set the swapper control on the card
3622  *  correctly depending on the 'endianness' of the system.
3623  *  Return value:
3624  *  SUCCESS on success and FAILURE on failure.
3625  */
3626
static int s2io_set_swapper(struct s2io_nic * sp)
{
        struct net_device *dev = sp->dev;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64, valt, valr;

        /*
         * Set proper endian settings and verify the same by reading
         * the PIF Feed-back register.
         */

        /*
         * The PIF feedback register returns the fixed pattern
         * 0x0123456789ABCDEF when the read-path swapper is configured
         * correctly for this host's byte order.
         */
        val64 = readq(&bar0->pif_rd_swapper_fb);
        if (val64 != 0x0123456789ABCDEFULL) {
                int i = 0;
                /*
                 * Candidate read-side swapper settings, tried in order:
                 * each combination of Flip-Enable (FE) and Swap-Enable
                 * (SE) bits until the feedback pattern reads correctly.
                 */
                u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
                                0x8100008181000081ULL,  /* FE=1, SE=0 */
                                0x4200004242000042ULL,  /* FE=0, SE=1 */
                                0};                     /* FE=0, SE=0 */

                while(i<4) {
                        writeq(value[i], &bar0->swapper_ctrl);
                        val64 = readq(&bar0->pif_rd_swapper_fb);
                        if (val64 == 0x0123456789ABCDEFULL)
                                break;
                        i++;
                }
                /* i == 4 means none of the four candidates worked. */
                if (i == 4) {
                        DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
                                dev->name);
                        DBG_PRINT(ERR_DBG, "feedback read %llx\n",
                                (unsigned long long) val64);
                        return FAILURE;
                }
                /* Remember the working read-side setting for later ORing. */
                valr = value[i];
        } else {
                /* Reads already work; keep the current swapper setting. */
                valr = readq(&bar0->swapper_ctrl);
        }

        /*
         * Now verify the write path: write the known pattern to the
         * XMSI address register and read it back.
         */
        valt = 0x0123456789ABCDEFULL;
        writeq(valt, &bar0->xmsi_address);
        val64 = readq(&bar0->xmsi_address);

        if(val64 != valt) {
                int i = 0;
                /* Write-side FE/SE candidates (different bit positions
                 * than the read-side table above). */
                u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
                                0x0081810000818100ULL,  /* FE=1, SE=0 */
                                0x0042420000424200ULL,  /* FE=0, SE=1 */
                                0};                     /* FE=0, SE=0 */

                while(i<4) {
                        /* Keep the proven read-side bits (valr) while
                         * probing write-side settings. */
                        writeq((value[i] | valr), &bar0->swapper_ctrl);
                        writeq(valt, &bar0->xmsi_address);
                        val64 = readq(&bar0->xmsi_address);
                        if(val64 == valt)
                                break;
                        i++;
                }
                if(i == 4) {
                        unsigned long long x = val64;
                        DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
                        DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
                        return FAILURE;
                }
        }
        /* Keep only the PIF-related high bits; the rest is rebuilt below. */
        val64 = readq(&bar0->swapper_ctrl);
        val64 &= 0xFFFF000000000000ULL;

#ifdef  __BIG_ENDIAN
        /*
         * The device by default set to a big endian format, so a
         * big endian driver need not set anything.
         */
        val64 |= (SWAPPER_CTRL_TXP_FE |
                 SWAPPER_CTRL_TXP_SE |
                 SWAPPER_CTRL_TXD_R_FE |
                 SWAPPER_CTRL_TXD_W_FE |
                 SWAPPER_CTRL_TXF_R_FE |
                 SWAPPER_CTRL_RXD_R_FE |
                 SWAPPER_CTRL_RXD_W_FE |
                 SWAPPER_CTRL_RXF_W_FE |
                 SWAPPER_CTRL_XMSI_FE |
                 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
        /* XMSI byte-swap is needed only when MSI/MSI-X is not used. */
        if (sp->config.intr_type == INTA)
                val64 |= SWAPPER_CTRL_XMSI_SE;
        writeq(val64, &bar0->swapper_ctrl);
#else
        /*
         * Initially we enable all bits to make it accessible by the
         * driver, then we selectively enable only those bits that
         * we want to set.
         */
        /* Little-endian hosts additionally need the SE (swap) bits. */
        val64 |= (SWAPPER_CTRL_TXP_FE |
                 SWAPPER_CTRL_TXP_SE |
                 SWAPPER_CTRL_TXD_R_FE |
                 SWAPPER_CTRL_TXD_R_SE |
                 SWAPPER_CTRL_TXD_W_FE |
                 SWAPPER_CTRL_TXD_W_SE |
                 SWAPPER_CTRL_TXF_R_FE |
                 SWAPPER_CTRL_RXD_R_FE |
                 SWAPPER_CTRL_RXD_R_SE |
                 SWAPPER_CTRL_RXD_W_FE |
                 SWAPPER_CTRL_RXD_W_SE |
                 SWAPPER_CTRL_RXF_W_FE |
                 SWAPPER_CTRL_XMSI_FE |
                 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
        if (sp->config.intr_type == INTA)
                val64 |= SWAPPER_CTRL_XMSI_SE;
        writeq(val64, &bar0->swapper_ctrl);
#endif
        val64 = readq(&bar0->swapper_ctrl);

        /*
         * Verifying if endian settings are accurate by reading a
         * feedback register.
         */
        val64 = readq(&bar0->pif_rd_swapper_fb);
        if (val64 != 0x0123456789ABCDEFULL) {
                /* Endian settings are incorrect, calls for another dekko. */
                DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
                          dev->name);
                DBG_PRINT(ERR_DBG, "feedback read %llx\n",
                          (unsigned long long) val64);
                return FAILURE;
        }

        return SUCCESS;
}
3754
3755 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3756 {
3757         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3758         u64 val64;
3759         int ret = 0, cnt = 0;
3760
3761         do {
3762                 val64 = readq(&bar0->xmsi_access);
3763                 if (!(val64 & s2BIT(15)))
3764                         break;
3765                 mdelay(1);
3766                 cnt++;
3767         } while(cnt < 5);
3768         if (cnt == 5) {
3769                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3770                 ret = 1;
3771         }
3772
3773         return ret;
3774 }
3775
3776 static void restore_xmsi_data(struct s2io_nic *nic)
3777 {
3778         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3779         u64 val64;
3780         int i, msix_index;
3781
3782
3783         if (nic->device_type == XFRAME_I_DEVICE)
3784                 return;
3785
3786         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3787                 msix_index = (i) ? ((i-1) * 8 + 1): 0;
3788                 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3789                 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3790                 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3791                 writeq(val64, &bar0->xmsi_access);
3792                 if (wait_for_msix_trans(nic, msix_index)) {
3793                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3794                         continue;
3795                 }
3796         }
3797 }
3798
3799 static void store_xmsi_data(struct s2io_nic *nic)
3800 {
3801         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3802         u64 val64, addr, data;
3803         int i, msix_index;
3804
3805         if (nic->device_type == XFRAME_I_DEVICE)
3806                 return;
3807
3808         /* Store and display */
3809         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3810                 msix_index = (i) ? ((i-1) * 8 + 1): 0;
3811                 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3812                 writeq(val64, &bar0->xmsi_access);
3813                 if (wait_for_msix_trans(nic, msix_index)) {
3814                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3815                         continue;
3816                 }
3817                 addr = readq(&bar0->xmsi_address);
3818                 data = readq(&bar0->xmsi_data);
3819                 if (addr && data) {
3820                         nic->msix_info[i].addr = addr;
3821                         nic->msix_info[i].data = data;
3822                 }
3823         }
3824 }
3825
3826 static int s2io_enable_msi_x(struct s2io_nic *nic)
3827 {
3828         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3829         u64 rx_mat;
3830         u16 msi_control; /* Temp variable */
3831         int ret, i, j, msix_indx = 1;
3832
3833         nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry),
3834                                GFP_KERNEL);
3835         if (!nic->entries) {
3836                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3837                         __FUNCTION__);
3838                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3839                 return -ENOMEM;
3840         }
3841         nic->mac_control.stats_info->sw_stat.mem_allocated
3842                 += (nic->num_entries * sizeof(struct msix_entry));
3843
3844         memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry));
3845
3846         nic->s2io_entries =
3847                 kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry),
3848                                    GFP_KERNEL);
3849         if (!nic->s2io_entries) {
3850                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3851                         __FUNCTION__);
3852                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3853                 kfree(nic->entries);
3854                 nic->mac_control.stats_info->sw_stat.mem_freed
3855                         += (nic->num_entries * sizeof(struct msix_entry));
3856                 return -ENOMEM;
3857         }
3858          nic->mac_control.stats_info->sw_stat.mem_allocated
3859                 += (nic->num_entries * sizeof(struct s2io_msix_entry));
3860         memset(nic->s2io_entries, 0,
3861                 nic->num_entries * sizeof(struct s2io_msix_entry));
3862
3863         nic->entries[0].entry = 0;
3864         nic->s2io_entries[0].entry = 0;
3865         nic->s2io_entries[0].in_use = MSIX_FLG;
3866         nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3867         nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3868
3869         for (i = 1; i < nic->num_entries; i++) {
3870                 nic->entries[i].entry = ((i - 1) * 8) + 1;
3871                 nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3872                 nic->s2io_entries[i].arg = NULL;
3873                 nic->s2io_entries[i].in_use = 0;
3874         }
3875
3876         rx_mat = readq(&bar0->rx_mat);
3877         for (j = 0; j < nic->config.rx_ring_num; j++) {
3878                 rx_mat |= RX_MAT_SET(j, msix_indx);
3879                 nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3880                 nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3881                 nic->s2io_entries[j+1].in_use = MSIX_FLG;
3882                 msix_indx += 8;
3883         }
3884         writeq(rx_mat, &bar0->rx_mat);
3885         readq(&bar0->rx_mat);
3886
3887         ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
3888         /* We fail init if error or we get less vectors than min required */
3889         if (ret) {
3890                 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3891                 kfree(nic->entries);
3892                 nic->mac_control.stats_info->sw_stat.mem_freed
3893                         += (nic->num_entries * sizeof(struct msix_entry));
3894                 kfree(nic->s2io_entries);
3895                 nic->mac_control.stats_info->sw_stat.mem_freed
3896                         += (nic->num_entries * sizeof(struct s2io_msix_entry));
3897                 nic->entries = NULL;
3898                 nic->s2io_entries = NULL;
3899                 return -ENOMEM;
3900         }
3901
3902         /*
3903          * To enable MSI-X, MSI also needs to be enabled, due to a bug
3904          * in the herc NIC. (Temp change, needs to be removed later)
3905          */
3906         pci_read_config_word(nic->pdev, 0x42, &msi_control);
3907         msi_control |= 0x1; /* Enable MSI */
3908         pci_write_config_word(nic->pdev, 0x42, msi_control);
3909
3910         return 0;
3911 }
3912
3913 /* Handle software interrupt used during MSI(X) test */
3914 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3915 {
3916         struct s2io_nic *sp = dev_id;
3917
3918         sp->msi_detected = 1;
3919         wake_up(&sp->msi_wait);
3920
3921         return IRQ_HANDLED;
3922 }
3923
/* Test interrupt path by forcing a software IRQ */
3925 static int s2io_test_msi(struct s2io_nic *sp)
3926 {
3927         struct pci_dev *pdev = sp->pdev;
3928         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3929         int err;
3930         u64 val64, saved64;
3931
3932         err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3933                         sp->name, sp);
3934         if (err) {
3935                 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3936                        sp->dev->name, pci_name(pdev), pdev->irq);
3937                 return err;
3938         }
3939
3940         init_waitqueue_head (&sp->msi_wait);
3941         sp->msi_detected = 0;
3942
3943         saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3944         val64 |= SCHED_INT_CTRL_ONE_SHOT;
3945         val64 |= SCHED_INT_CTRL_TIMER_EN;
3946         val64 |= SCHED_INT_CTRL_INT2MSI(1);
3947         writeq(val64, &bar0->scheduled_int_ctrl);
3948
3949         wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3950
3951         if (!sp->msi_detected) {
3952                 /* MSI(X) test failed, go back to INTx mode */
3953                 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3954                         "using MSI(X) during test\n", sp->dev->name,
3955                         pci_name(pdev));
3956
3957                 err = -EOPNOTSUPP;
3958         }
3959
3960         free_irq(sp->entries[1].vector, sp);
3961
3962         writeq(saved64, &bar0->scheduled_int_ctrl);
3963
3964         return err;
3965 }
3966
3967 static void remove_msix_isr(struct s2io_nic *sp)
3968 {
3969         int i;
3970         u16 msi_control;
3971
3972         for (i = 0; i < sp->num_entries; i++) {
3973                 if (sp->s2io_entries[i].in_use ==
3974                         MSIX_REGISTERED_SUCCESS) {
3975                         int vector = sp->entries[i].vector;
3976                         void *arg = sp->s2io_entries[i].arg;
3977                         free_irq(vector, arg);
3978                 }
3979         }
3980
3981         kfree(sp->entries);
3982         kfree(sp->s2io_entries);
3983         sp->entries = NULL;
3984         sp->s2io_entries = NULL;
3985
3986         pci_read_config_word(sp->pdev, 0x42, &msi_control);
3987         msi_control &= 0xFFFE; /* Disable MSI */
3988         pci_write_config_word(sp->pdev, 0x42, msi_control);
3989
3990         pci_disable_msix(sp->pdev);
3991 }
3992
3993 static void remove_inta_isr(struct s2io_nic *sp)
3994 {
3995         struct net_device *dev = sp->dev;
3996
3997         free_irq(sp->pdev->irq, dev);
3998 }
3999
4000 /* ********************************************************* *
4001  * Functions defined below concern the OS part of the driver *
4002  * ********************************************************* */
4003
4004 /**
4005  *  s2io_open - open entry point of the driver
4006  *  @dev : pointer to the device structure.
4007  *  Description:
4008  *  This function is the open entry point of the driver. It mainly calls a
4009  *  function to allocate Rx buffers and inserts them into the buffer
4010  *  descriptors and then enables the Rx part of the NIC.
4011  *  Return value:
4012  *  0 on success and an appropriate (-)ve integer as defined in errno.h
4013  *   file on failure.
4014  */
4015
4016 static int s2io_open(struct net_device *dev)
4017 {
4018         struct s2io_nic *sp = dev->priv;
4019         int err = 0;
4020
4021         /*
4022          * Make sure you have link off by default every time
4023          * Nic is initialized
4024          */
4025         netif_carrier_off(dev);
4026         sp->last_link_state = 0;
4027
4028         /* Initialize H/W and enable interrupts */
4029         err = s2io_card_up(sp);
4030         if (err) {
4031                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4032                           dev->name);
4033                 goto hw_init_failed;
4034         }
4035
4036         if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
4037                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
4038                 s2io_card_down(sp);
4039                 err = -ENODEV;
4040                 goto hw_init_failed;
4041         }
4042         s2io_start_all_tx_queue(sp);
4043         return 0;
4044
4045 hw_init_failed:
4046         if (sp->config.intr_type == MSI_X) {
4047                 if (sp->entries) {
4048                         kfree(sp->entries);
4049                         sp->mac_control.stats_info->sw_stat.mem_freed
4050                         += (sp->num_entries * sizeof(struct msix_entry));
4051                 }
4052                 if (sp->s2io_entries) {
4053                         kfree(sp->s2io_entries);
4054                         sp->mac_control.stats_info->sw_stat.mem_freed
4055                         += (sp->num_entries * sizeof(struct s2io_msix_entry));
4056                 }
4057         }
4058         return err;
4059 }
4060
4061 /**
4062  *  s2io_close -close entry point of the driver
4063  *  @dev : device pointer.
4064  *  Description:
4065  *  This is the stop entry point of the driver. It needs to undo exactly
4066  *  whatever was done by the open entry point,thus it's usually referred to
4067  *  as the close function.Among other things this function mainly stops the
4068  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
4069  *  Return value:
4070  *  0 on success and an appropriate (-)ve integer as defined in errno.h
4071  *  file on failure.
4072  */
4073
4074 static int s2io_close(struct net_device *dev)
4075 {
4076         struct s2io_nic *sp = dev->priv;
4077         struct config_param *config = &sp->config;
4078         u64 tmp64;
4079         int offset;
4080
4081         /* Return if the device is already closed               *
4082         *  Can happen when s2io_card_up failed in change_mtu    *
4083         */
4084         if (!is_s2io_card_up(sp))
4085                 return 0;
4086
4087         s2io_stop_all_tx_queue(sp);
4088         /* delete all populated mac entries */
4089         for (offset = 1; offset < config->max_mc_addr; offset++) {
4090                 tmp64 = do_s2io_read_unicast_mc(sp, offset);
4091                 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
4092                         do_s2io_delete_unicast_mc(sp, tmp64);
4093         }
4094
4095         s2io_card_down(sp);
4096
4097         return 0;
4098 }
4099
4100 /**
 *  s2io_xmit - Tx entry point of the driver
4102  *  @skb : the socket buffer containing the Tx data.
4103  *  @dev : device pointer.
4104  *  Description :
4105  *  This function is the Tx entry point of the driver. S2IO NIC supports
4106  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
 *  NOTE: when the device can't queue the pkt, just the trans_start variable
 *  will not be updated.
4109  *  Return value:
4110  *  0 on success & 1 on failure.
4111  */
4112
static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct s2io_nic *sp = dev->priv;
        u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
        register u64 val64;
        struct TxD *txdp;
        struct TxFIFO_element __iomem *tx_fifo;
        unsigned long flags = 0;
        u16 vlan_tag = 0;
        struct fifo_info *fifo = NULL;
        struct mac_info *mac_control;
        struct config_param *config;
        int do_spin_lock = 1;
        int offload_type;
        int enable_per_list_interrupt = 0;
        struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

        mac_control = &sp->mac_control;
        config = &sp->config;

        DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

        /* Drop empty skbs outright. */
        if (unlikely(skb->len <= 0)) {
                DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
                dev_kfree_skb_any(skb);
                return 0;
        }

        /* Card is resetting or down: silently drop the packet. */
        if (!is_s2io_card_up(sp)) {
                DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
                          dev->name);
                dev_kfree_skb(skb);
                return 0;
        }

        queue = 0;
        if (sp->vlgrp && vlan_tx_tag_present(skb))
                vlan_tag = vlan_tx_tag_get(skb);
        /*
         * FIFO (queue) selection. Default steering hashes unfragmented
         * TCP/UDP flows over the TCP/UDP FIFO groups by port sum;
         * priority steering maps skb->priority through a config table.
         */
        if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
                if (skb->protocol == htons(ETH_P_IP)) {
                        struct iphdr *ip;
                        struct tcphdr *th;
                        ip = ip_hdr(skb);

                        /* Only steer by ports on unfragmented packets. */
                        if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
                                th = (struct tcphdr *)(((unsigned char *)ip) +
                                                ip->ihl*4);

                                if (ip->protocol == IPPROTO_TCP) {
                                        queue_len = sp->total_tcp_fifos;
                                        queue = (ntohs(th->source) +
                                                        ntohs(th->dest)) &
                                            sp->fifo_selector[queue_len - 1];
                                        if (queue >= queue_len)
                                                queue = queue_len - 1;
                                } else if (ip->protocol == IPPROTO_UDP) {
                                        queue_len = sp->total_udp_fifos;
                                        queue = (ntohs(th->source) +
                                                        ntohs(th->dest)) &
                                            sp->fifo_selector[queue_len - 1];
                                        if (queue >= queue_len)
                                                queue = queue_len - 1;
                                        queue += sp->udp_fifo_idx;
                                        /* Large UDP: request a completion
                                         * interrupt per list and use the
                                         * non-blocking trylock below. */
                                        if (skb->len > 1024)
                                                enable_per_list_interrupt = 1;
                                        do_spin_lock = 0;
                                }
                        }
                }
        } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
                /* get fifo number based on skb->priority value */
                queue = config->fifo_mapping
                                        [skb->priority & (MAX_TX_FIFOS - 1)];
        fifo = &mac_control->fifos[queue];

        /* UDP path uses trylock to avoid stalling on a busy FIFO. */
        if (do_spin_lock)
                spin_lock_irqsave(&fifo->tx_lock, flags);
        else {
                if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
                        return NETDEV_TX_LOCKED;
        }

#ifdef CONFIG_NETDEVICES_MULTIQUEUE
        if (sp->config.multiq) {
                if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
                        spin_unlock_irqrestore(&fifo->tx_lock, flags);
                        return NETDEV_TX_BUSY;
                }
        } else
#endif
        if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
                if (netif_queue_stopped(dev)) {
                        spin_unlock_irqrestore(&fifo->tx_lock, flags);
                        return NETDEV_TX_BUSY;
                }
        }

        put_off = (u16) fifo->tx_curr_put_info.offset;
        get_off = (u16) fifo->tx_curr_get_info.offset;
        txdp = (struct TxD *) fifo->list_info[put_off].list_virt_addr;

        queue_len = fifo->tx_curr_put_info.fifo_len + 1;
        /* Avoid "put" pointer going beyond "get" pointer */
        if (txdp->Host_Control ||
                   ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
                DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
                s2io_stop_tx_queue(sp, fifo->fifo_no);
                dev_kfree_skb(skb);
                spin_unlock_irqrestore(&fifo->tx_lock, flags);
                return 0;
        }

        /* Fill in offload (LSO/checksum/VLAN) bits of the first TxD. */
        offload_type = s2io_offload_type(skb);
        if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
                txdp->Control_1 |= TXD_TCP_LSO_EN;
                txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
        }
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                txdp->Control_2 |=
                    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
                     TXD_TX_CKO_UDP_EN);
        }
        txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
        txdp->Control_1 |= TXD_LIST_OWN_XENA;
        txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
        if (enable_per_list_interrupt)
                if (put_off & (queue_len >> 5))
                        txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
        if (vlan_tag) {
                txdp->Control_2 |= TXD_VLAN_ENABLE;
                txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
        }

        frg_len = skb->len - skb->data_len;
        /* UFO: descriptor 0 carries the 8-byte in-band fragment id. */
        if (offload_type == SKB_GSO_UDP) {
                int ufo_size;

                ufo_size = s2io_udp_mss(skb);
                ufo_size &= ~7;
                txdp->Control_1 |= TXD_UFO_EN;
                txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
                txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
                /* both variants do cpu_to_be64(be32_to_cpu(...)) */
                fifo->ufo_in_band_v[put_off] =
                                (__force u64)skb_shinfo(skb)->ip6_frag_id;
#else
                fifo->ufo_in_band_v[put_off] =
                                (__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
                txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
                txdp->Buffer_Pointer = pci_map_single(sp->pdev,
                                        fifo->ufo_in_band_v,
                                        sizeof(u64), PCI_DMA_TODEVICE);
                if((txdp->Buffer_Pointer == 0) ||
                        (txdp->Buffer_Pointer == DMA_ERROR_CODE))
                        goto pci_map_failed;
                txdp++;
        }

        /* Map and attach the linear part of the skb. */
        txdp->Buffer_Pointer = pci_map_single
            (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
        if((txdp->Buffer_Pointer == 0) ||
                (txdp->Buffer_Pointer == DMA_ERROR_CODE))
                goto pci_map_failed;

        txdp->Host_Control = (unsigned long) skb;
        txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
        if (offload_type == SKB_GSO_UDP)
                txdp->Control_1 |= TXD_UFO_EN;

        frg_cnt = skb_shinfo(skb)->nr_frags;
        /* For fragmented SKB. */
        for (i = 0; i < frg_cnt; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                /* A '0' length fragment will be ignored */
                if (!frag->size)
                        continue;
                txdp++;
                txdp->Buffer_Pointer = (u64) pci_map_page
                    (sp->pdev, frag->page, frag->page_offset,
                     frag->size, PCI_DMA_TODEVICE);
                txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
                if (offload_type == SKB_GSO_UDP)
                        txdp->Control_1 |= TXD_UFO_EN;
        }
        txdp->Control_1 |= TXD_GATHER_CODE_LAST;

        if (offload_type == SKB_GSO_UDP)
                frg_cnt++; /* as Txd0 was used for inband header */

        /* Hand the TxD list to the device via the FIFO doorbell. */
        tx_fifo = mac_control->tx_FIFO_start[queue];
        val64 = fifo->list_info[put_off].list_phy_addr;
        writeq(val64, &tx_fifo->TxDL_Pointer);

        val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
                 TX_FIFO_LAST_LIST);
        if (offload_type)
                val64 |= TX_FIFO_SPECIAL_FUNC;

        writeq(val64, &tx_fifo->List_Control);

        /* Ensure the doorbell writes are not reordered. */
        mmiowb();

        put_off++;
        if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
                put_off = 0;
        fifo->tx_curr_put_info.offset = put_off;

        /* Avoid "put" pointer going beyond "get" pointer */
        if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
                sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
                DBG_PRINT(TX_DBG,
                          "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
                          put_off, get_off);
                s2io_stop_tx_queue(sp, fifo->fifo_no);
        }
        mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
        dev->trans_start = jiffies;
        spin_unlock_irqrestore(&fifo->tx_lock, flags);

        /* Under MSI-X, reap completed descriptors synchronously. */
        if (sp->config.intr_type == MSI_X)
                tx_intr_handler(fifo);

        return 0;
pci_map_failed:
        /* DMA mapping failed: count it, stop the queue, drop the skb. */
        stats->pci_map_fail_cnt++;
        s2io_stop_tx_queue(sp, fifo->fifo_no);
        stats->mem_freed += skb->truesize;
        dev_kfree_skb(skb);
        spin_unlock_irqrestore(&fifo->tx_lock, flags);
        return 0;
}
4346
4347 static void
4348 s2io_alarm_handle(unsigned long data)
4349 {
4350         struct s2io_nic *sp = (struct s2io_nic *)data;
4351         struct net_device *dev = sp->dev;
4352
4353         s2io_handle_errors(dev);
4354         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4355 }
4356
4357 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4358 {
4359         struct ring_info *ring = (struct ring_info *)dev_id;
4360         struct s2io_nic *sp = ring->nic;
4361         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4362         struct net_device *dev = sp->dev;
4363
4364         if (unlikely(!is_s2io_card_up(sp)))
4365                 return IRQ_HANDLED;
4366
4367         if (sp->config.napi) {
4368                 u8 __iomem *addr = NULL;
4369                 u8 val8 = 0;
4370
4371                 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4372                 addr += (7 - ring->ring_no);
4373                 val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4374                 writeb(val8, addr);
4375                 val8 = readb(addr);
4376                 netif_rx_schedule(dev, &ring->napi);
4377         } else {
4378                 rx_intr_handler(ring, 0);
4379                 s2io_chk_rx_buffers(ring);
4380         }
4381
4382         return IRQ_HANDLED;
4383 }
4384
/*
 * s2io_msix_fifo_handle - MSI-X interrupt handler for Tx traffic.
 * @irq: interrupt number (unused here)
 * @dev_id: pointer to the first element of the fifo_info array
 *
 * Masks all device interrupts, acknowledges the Tx traffic cause,
 * runs Tx completion for every fifo, then restores the saved
 * interrupt mask.
 */
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
        int i;
        struct fifo_info *fifos = (struct fifo_info *)dev_id;
        struct s2io_nic *sp = fifos->nic;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        struct config_param *config  = &sp->config;
        u64 reason;

        if (unlikely(!is_s2io_card_up(sp)))
                return IRQ_NONE;

        reason = readq(&bar0->general_int_status);
        if (unlikely(reason == S2IO_MINUS_ONE))
                /* Nothing much can be done. Get out */
                return IRQ_HANDLED;

        /* Mask all interrupts while we service this one */
        writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

        /* Writing all 1's to the R1 register clears the cause bits */
        if (reason & GEN_INTR_TXTRAFFIC)
                writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

        for (i = 0; i < config->tx_fifo_num; i++)
                tx_intr_handler(&fifos[i]);

        /* Restore the saved mask; the readl flushes the posted write */
        writeq(sp->general_int_mask, &bar0->general_int_mask);
        readl(&bar0->general_int_status);

        return IRQ_HANDLED;
}
4415
/*
 * s2io_txpic_intr_handle - handle PIC (GPIO) interrupts, i.e. link events.
 * @sp: device private structure
 *
 * Reads the GPIO interrupt cause and reacts to link-up / link-down
 * transitions: enables the adapter and LED on link-up, turns the LED
 * off on link-down, and flips the GPIO interrupt masks so that only
 * the opposite transition can interrupt next.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64;

        val64 = readq(&bar0->pic_int_status);
        if (val64 & PIC_INT_GPIO) {
                val64 = readq(&bar0->gpio_int_reg);
                if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
                    (val64 & GPIO_INT_REG_LINK_UP)) {
                        /*
                         * Both up and down are latched - unstable state.
                         * Clear both interrupt bits and unmask both so the
                         * adapter re-evaluates the link state.
                         */
                        val64 |=  GPIO_INT_REG_LINK_DOWN;
                        val64 |= GPIO_INT_REG_LINK_UP;
                        writeq(val64, &bar0->gpio_int_reg);
                        val64 = readq(&bar0->gpio_int_mask);
                        val64 &= ~(GPIO_INT_MASK_LINK_UP |
                                   GPIO_INT_MASK_LINK_DOWN);
                        writeq(val64, &bar0->gpio_int_mask);
                }
                else if (val64 & GPIO_INT_REG_LINK_UP) {
                        /*
                         * NOTE(review): the adapter_status result is
                         * discarded; the read itself may be needed as a
                         * side effect - confirm before removing.
                         */
                        val64 = readq(&bar0->adapter_status);
                                /* Enable Adapter */
                        val64 = readq(&bar0->adapter_control);
                        val64 |= ADAPTER_CNTL_EN;
                        writeq(val64, &bar0->adapter_control);
                        val64 |= ADAPTER_LED_ON;
                        writeq(val64, &bar0->adapter_control);
                        if (!sp->device_enabled_once)
                                sp->device_enabled_once = 1;

                        s2io_link(sp, LINK_UP);
                        /*
                         * unmask link down interrupt and mask link-up
                         * intr
                         */
                        val64 = readq(&bar0->gpio_int_mask);
                        val64 &= ~GPIO_INT_MASK_LINK_DOWN;
                        val64 |= GPIO_INT_MASK_LINK_UP;
                        writeq(val64, &bar0->gpio_int_mask);

                }else if (val64 & GPIO_INT_REG_LINK_DOWN) {
                        val64 = readq(&bar0->adapter_status);
                        s2io_link(sp, LINK_DOWN);
                        /* Link is down so unmask link up interrupt */
                        val64 = readq(&bar0->gpio_int_mask);
                        val64 &= ~GPIO_INT_MASK_LINK_UP;
                        val64 |= GPIO_INT_MASK_LINK_DOWN;
                        writeq(val64, &bar0->gpio_int_mask);

                        /* turn off LED */
                        val64 = readq(&bar0->adapter_control);
                        val64 = val64 &(~ADAPTER_LED_ON);
                        writeq(val64, &bar0->adapter_control);
                }
        }
        /* Final read acts as a flush of the posted mask writes above */
        val64 = readq(&bar0->gpio_int_mask);
}
4476
4477 /**
4478  *  do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4479  *  @value: alarm bits
4480  *  @addr: address value
4481  *  @cnt: counter variable
4482  *  Description: Check for alarm and increment the counter
4483  *  Return Value:
4484  *  1 - if alarm bit set
4485  *  0 - if alarm bit is not set
4486  */
4487 static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4488                           unsigned long long *cnt)
4489 {
4490         u64 val64;
4491         val64 = readq(addr);
4492         if ( val64 & value ) {
4493                 writeq(val64, addr);
4494                 (*cnt)++;
4495                 return 1;
4496         }
4497         return 0;
4498
4499 }
4500
4501 /**
4502  *  s2io_handle_errors - Xframe error indication handler
4503  *  @nic: device private variable
4504  *  Description: Handle alarms such as loss of link, single or
4505  *  double ECC errors, critical and serious errors.
4506  *  Return Value:
4507  *  NONE
4508  */
4509 static void s2io_handle_errors(void * dev_id)
4510 {
4511         struct net_device *dev = (struct net_device *) dev_id;
4512         struct s2io_nic *sp = dev->priv;
4513         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4514         u64 temp64 = 0,val64=0;
4515         int i = 0;
4516
4517         struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4518         struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4519
4520         if (!is_s2io_card_up(sp))
4521                 return;
4522
4523         if (pci_channel_offline(sp->pdev))
4524                 return;
4525
4526         memset(&sw_stat->ring_full_cnt, 0,
4527                 sizeof(sw_stat->ring_full_cnt));
4528
4529         /* Handling the XPAK counters update */
4530         if(stats->xpak_timer_count < 72000) {
4531                 /* waiting for an hour */
4532                 stats->xpak_timer_count++;
4533         } else {
4534                 s2io_updt_xpak_counter(dev);
4535                 /* reset the count to zero */
4536                 stats->xpak_timer_count = 0;
4537         }
4538
4539         /* Handling link status change error Intr */
4540         if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4541                 val64 = readq(&bar0->mac_rmac_err_reg);
4542                 writeq(val64, &bar0->mac_rmac_err_reg);
4543                 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4544                         schedule_work(&sp->set_link_task);
4545         }
4546
4547         /* In case of a serious error, the device will be Reset. */
4548         if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4549                                 &sw_stat->serious_err_cnt))
4550                 goto reset;
4551
4552         /* Check for data parity error */
4553         if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4554                                 &sw_stat->parity_err_cnt))
4555                 goto reset;
4556
4557         /* Check for ring full counter */
4558         if (sp->device_type == XFRAME_II_DEVICE) {
4559                 val64 = readq(&bar0->ring_bump_counter1);
4560                 for (i=0; i<4; i++) {
4561                         temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4562                         temp64 >>= 64 - ((i+1)*16);
4563                         sw_stat->ring_full_cnt[i] += temp64;
4564                 }
4565
4566                 val64 = readq(&bar0->ring_bump_counter2);
4567                 for (i=0; i<4; i++) {
4568                         temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4569                         temp64 >>= 64 - ((i+1)*16);
4570                          sw_stat->ring_full_cnt[i+4] += temp64;
4571                 }
4572         }
4573
4574         val64 = readq(&bar0->txdma_int_status);
4575         /*check for pfc_err*/
4576         if (val64 & TXDMA_PFC_INT) {
4577                 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
4578                                 PFC_MISC_0_ERR | PFC_MISC_1_ERR|
4579                                 PFC_PCIX_ERR, &bar0->pfc_err_reg,
4580                                 &sw_stat->pfc_err_cnt))
4581                         goto reset;
4582                 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
4583                                 &sw_stat->pfc_err_cnt);
4584         }
4585
4586         /*check for tda_err*/
4587         if (val64 & TXDMA_TDA_INT) {
4588                 if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
4589                                 TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
4590                                 &sw_stat->tda_err_cnt))
4591                         goto reset;
4592                 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4593                                 &bar0->tda_err_reg, &sw_stat->tda_err_cnt);
4594         }
4595         /*check for pcc_err*/
4596         if (val64 & TXDMA_PCC_INT) {
4597                 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
4598                                 | PCC_N_SERR | PCC_6_COF_OV_ERR
4599                                 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
4600                                 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
4601                                 | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
4602                                 &sw_stat->pcc_err_cnt))
4603                         goto reset;
4604                 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4605                                 &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
4606         }
4607
4608         /*check for tti_err*/
4609         if (val64 & TXDMA_TTI_INT) {
4610                 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
4611                                 &sw_stat->tti_err_cnt))
4612                         goto reset;
4613                 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4614                                 &bar0->tti_err_reg, &sw_stat->tti_err_cnt);
4615         }
4616
4617         /*check for lso_err*/
4618         if (val64 & TXDMA_LSO_INT) {
4619                 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
4620                                 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4621                                 &bar0->lso_err_reg, &sw_stat->lso_err_cnt))
4622                         goto reset;
4623                 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4624                                 &bar0->lso_err_reg, &sw_stat->lso_err_cnt);
4625         }
4626
4627         /*check for tpa_err*/
4628         if (val64 & TXDMA_TPA_INT) {
4629                 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
4630                         &sw_stat->tpa_err_cnt))
4631                         goto reset;
4632                 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
4633                         &sw_stat->tpa_err_cnt);
4634         }
4635
4636         /*check for sm_err*/
4637         if (val64 & TXDMA_SM_INT) {
4638                 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
4639                         &sw_stat->sm_err_cnt))
4640                         goto reset;
4641         }
4642
4643         val64 = readq(&bar0->mac_int_status);
4644         if (val64 & MAC_INT_STATUS_TMAC_INT) {
4645                 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4646                                 &bar0->mac_tmac_err_reg,
4647                                 &sw_stat->mac_tmac_err_cnt))
4648                         goto reset;
4649                 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
4650                                 | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
4651                                 &bar0->mac_tmac_err_reg,
4652                                 &sw_stat->mac_tmac_err_cnt);
4653         }
4654
4655         val64 = readq(&bar0->xgxs_int_status);
4656         if (val64 & XGXS_INT_STATUS_TXGXS) {
4657                 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4658                                 &bar0->xgxs_txgxs_err_reg,
4659                                 &sw_stat->xgxs_txgxs_err_cnt))
4660                         goto reset;
4661                 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4662                                 &bar0->xgxs_txgxs_err_reg,
4663                                 &sw_stat->xgxs_txgxs_err_cnt);
4664         }
4665
4666         val64 = readq(&bar0->rxdma_int_status);
4667         if (val64 & RXDMA_INT_RC_INT_M) {
4668                 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
4669                                 | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
4670                                 &bar0->rc_err_reg, &sw_stat->rc_err_cnt))
4671                         goto reset;
4672                 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
4673                                 | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4674                                 &sw_stat->rc_err_cnt);
4675                 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
4676                                 | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
4677                                 &sw_stat->prc_pcix_err_cnt))
4678                         goto reset;
4679                 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
4680                                 | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
4681                                 &sw_stat->prc_pcix_err_cnt);
4682         }
4683
4684         if (val64 & RXDMA_INT_RPA_INT_M) {
4685                 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4686                                 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
4687                         goto reset;
4688                 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4689                                 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
4690         }
4691
4692         if (val64 & RXDMA_INT_RDA_INT_M) {
4693                 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
4694                                 | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
4695                                 | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
4696                                 &bar0->rda_err_reg, &sw_stat->rda_err_cnt))
4697                         goto reset;
4698                 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
4699                                 | RDA_MISC_ERR | RDA_PCIX_ERR,
4700                                 &bar0->rda_err_reg, &sw_stat->rda_err_cnt);
4701         }
4702
4703         if (val64 & RXDMA_INT_RTI_INT_M) {
4704                 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
4705                                 &sw_stat->rti_err_cnt))
4706                         goto reset;
4707                 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4708                                 &bar0->rti_err_reg, &sw_stat->rti_err_cnt);
4709         }
4710
4711         val64 = readq(&bar0->mac_int_status);
4712         if (val64 & MAC_INT_STATUS_RMAC_INT) {
4713                 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4714                                 &bar0->mac_rmac_err_reg,
4715                                 &sw_stat->mac_rmac_err_cnt))
4716                         goto reset;
4717                 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
4718                                 RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
4719                                 &sw_stat->mac_rmac_err_cnt);
4720         }
4721
4722         val64 = readq(&bar0->xgxs_int_status);
4723         if (val64 & XGXS_INT_STATUS_RXGXS) {
4724                 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4725                                 &bar0->xgxs_rxgxs_err_reg,
4726                                 &sw_stat->xgxs_rxgxs_err_cnt))
4727                         goto reset;
4728         }
4729
4730         val64 = readq(&bar0->mc_int_status);
4731         if(val64 & MC_INT_STATUS_MC_INT) {
4732                 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
4733                                 &sw_stat->mc_err_cnt))
4734                         goto reset;
4735
4736                 /* Handling Ecc errors */
4737                 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4738                         writeq(val64, &bar0->mc_err_reg);
4739                         if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4740                                 sw_stat->double_ecc_errs++;
4741                                 if (sp->device_type != XFRAME_II_DEVICE) {
4742                                         /*
4743                                          * Reset XframeI only if critical error
4744                                          */
4745                                         if (val64 &
4746                                                 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4747                                                 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4748                                                                 goto reset;
4749                                         }
4750                         } else
4751                                 sw_stat->single_ecc_errs++;
4752                 }
4753         }
4754         return;
4755
4756 reset:
4757         s2io_stop_all_tx_queue(sp);
4758         schedule_work(&sp->rst_timer_task);
4759         sw_stat->soft_reset_cnt++;
4760         return;
4761 }
4762
4763 /**
4764  *  s2io_isr - ISR handler of the device .
4765  *  @irq: the irq of the device.
4766  *  @dev_id: a void pointer to the dev structure of the NIC.
4767  *  Description:  This function is the ISR handler of the device. It
4768  *  identifies the reason for the interrupt and calls the relevant
4769  *  service routines. As a contongency measure, this ISR allocates the
4770  *  recv buffers, if their numbers are below the panic value which is
4771  *  presently set to 25% of the original number of rcv buffers allocated.
4772  *  Return value:
4773  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4774  *   IRQ_NONE: will be returned if interrupt is not from our device
4775  */
4776 static irqreturn_t s2io_isr(int irq, void *dev_id)
4777 {
4778         struct net_device *dev = (struct net_device *) dev_id;
4779         struct s2io_nic *sp = dev->priv;
4780         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4781         int i;
4782         u64 reason = 0;
4783         struct mac_info *mac_control;
4784         struct config_param *config;
4785
4786         /* Pretend we handled any irq's from a disconnected card */
4787         if (pci_channel_offline(sp->pdev))
4788                 return IRQ_NONE;
4789
4790         if (!is_s2io_card_up(sp))
4791                 return IRQ_NONE;
4792
4793         mac_control = &sp->mac_control;
4794         config = &sp->config;
4795
4796         /*
4797          * Identify the cause for interrupt and call the appropriate
4798          * interrupt handler. Causes for the interrupt could be;
4799          * 1. Rx of packet.
4800          * 2. Tx complete.
4801          * 3. Link down.
4802          */
4803         reason = readq(&bar0->general_int_status);
4804
4805         if (unlikely(reason == S2IO_MINUS_ONE) ) {
4806                 /* Nothing much can be done. Get out */
4807                 return IRQ_HANDLED;
4808         }
4809
4810         if (reason & (GEN_INTR_RXTRAFFIC |
4811                 GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
4812         {
4813                 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4814
4815                 if (config->napi) {
4816                         if (reason & GEN_INTR_RXTRAFFIC) {
4817                                 netif_rx_schedule(dev, &sp->napi);
4818                                 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4819                                 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4820                                 readl(&bar0->rx_traffic_int);
4821                         }
4822                 } else {
4823                         /*
4824                          * rx_traffic_int reg is an R1 register, writing all 1's
4825                          * will ensure that the actual interrupt causing bit
4826                          * get's cleared and hence a read can be avoided.
4827                          */
4828                         if (reason & GEN_INTR_RXTRAFFIC)
4829                                 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4830
4831                         for (i = 0; i < config->rx_ring_num; i++)
4832                                 rx_intr_handler(&mac_control->rings[i], 0);
4833                 }
4834
4835                 /*
4836                  * tx_traffic_int reg is an R1 register, writing all 1's
4837                  * will ensure that the actual interrupt causing bit get's
4838                  * cleared and hence a read can be avoided.
4839                  */
4840                 if (reason & GEN_INTR_TXTRAFFIC)
4841                         writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4842
4843                 for (i = 0; i < config->tx_fifo_num; i++)
4844                         tx_intr_handler(&mac_control->fifos[i]);
4845
4846                 if (reason & GEN_INTR_TXPIC)
4847                         s2io_txpic_intr_handle(sp);
4848
4849                 /*
4850                  * Reallocate the buffers from the interrupt handler itself.
4851                  */
4852                 if (!config->napi) {
4853                         for (i = 0; i < config->rx_ring_num; i++)
4854                                 s2io_chk_rx_buffers(&mac_control->rings[i]);
4855                 }
4856                 writeq(sp->general_int_mask, &bar0->general_int_mask);
4857                 readl(&bar0->general_int_status);
4858
4859                 return IRQ_HANDLED;
4860
4861         }
4862         else if (!reason) {
4863                 /* The interrupt was not raised by us */
4864                 return IRQ_NONE;
4865         }
4866
4867         return IRQ_HANDLED;
4868 }
4869
4870 /**
4871  * s2io_updt_stats -
4872  */
4873 static void s2io_updt_stats(struct s2io_nic *sp)
4874 {
4875         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4876         u64 val64;
4877         int cnt = 0;
4878
4879         if (is_s2io_card_up(sp)) {
4880                 /* Apprx 30us on a 133 MHz bus */
4881                 val64 = SET_UPDT_CLICKS(10) |
4882                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4883                 writeq(val64, &bar0->stat_cfg);
4884                 do {
4885                         udelay(100);
4886                         val64 = readq(&bar0->stat_cfg);
4887                         if (!(val64 & s2BIT(0)))
4888                                 break;
4889                         cnt++;
4890                         if (cnt == 5)
4891                                 break; /* Updt failed */
4892                 } while(1);
4893         }
4894 }
4895
4896 /**
4897  *  s2io_get_stats - Updates the device statistics structure.
4898  *  @dev : pointer to the device structure.
4899  *  Description:
4900  *  This function updates the device statistics structure in the s2io_nic
4901  *  structure and returns a pointer to the same.
4902  *  Return value:
4903  *  pointer to the updated net_device_stats structure.
4904  */
4905
4906 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4907 {
4908         struct s2io_nic *sp = dev->priv;
4909         struct mac_info *mac_control;
4910         struct config_param *config;
4911         int i;
4912
4913
4914         mac_control = &sp->mac_control;
4915         config = &sp->config;
4916
4917         /* Configure Stats for immediate updt */
4918         s2io_updt_stats(sp);
4919
4920         sp->stats.tx_packets =
4921                 le32_to_cpu(mac_control->stats_info->tmac_frms);
4922         sp->stats.tx_errors =
4923                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4924         sp->stats.rx_errors =
4925                 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4926         sp->stats.multicast =
4927                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4928         sp->stats.rx_length_errors =
4929                 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4930
4931         /* collect per-ring rx_packets and rx_bytes */
4932         sp->stats.rx_packets = sp->stats.rx_bytes = 0;
4933         for (i = 0; i < config->rx_ring_num; i++) {
4934                 sp->stats.rx_packets += mac_control->rings[i].rx_packets;
4935                 sp->stats.rx_bytes += mac_control->rings[i].rx_bytes;
4936         }
4937
4938         return (&sp->stats);
4939 }
4940
4941 /**
4942  *  s2io_set_multicast - entry point for multicast address enable/disable.
4943  *  @dev : pointer to the device structure
4944  *  Description:
4945  *  This function is a driver entry point which gets called by the kernel
4946  *  whenever multicast addresses must be enabled/disabled. This also gets
4947  *  called to set/reset promiscuous mode. Depending on the deivce flag, we
4948  *  determine, if multicast address must be enabled or if promiscuous mode
4949  *  is to be disabled etc.
4950  *  Return value:
4951  *  void.
4952  */
4953
4954 static void s2io_set_multicast(struct net_device *dev)
4955 {
4956         int i, j, prev_cnt;
4957         struct dev_mc_list *mclist;
4958         struct s2io_nic *sp = dev->priv;
4959         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4960         u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4961             0xfeffffffffffULL;
4962         u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4963         void __iomem *add;
4964         struct config_param *config = &sp->config;
4965
4966         if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4967                 /*  Enable all Multicast addresses */
4968                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4969                        &bar0->rmac_addr_data0_mem);
4970                 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4971                        &bar0->rmac_addr_data1_mem);
4972                 val64 = RMAC_ADDR_CMD_MEM_WE |
4973                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4974                     RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4975                 writeq(val64, &bar0->rmac_addr_cmd_mem);
4976                 /* Wait till command completes */
4977                 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4978                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4979                                         S2IO_BIT_RESET);
4980
4981                 sp->m_cast_flg = 1;
4982                 sp->all_multi_pos = config->max_mc_addr - 1;
4983         } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4984                 /*  Disable all Multicast addresses */
4985                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4986                        &bar0->rmac_addr_data0_mem);
4987                 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4988                        &bar0->rmac_addr_data1_mem);
4989                 val64 = RMAC_ADDR_CMD_MEM_WE |
4990                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4991                     RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4992                 writeq(val64, &bar0->rmac_addr_cmd_mem);
4993                 /* Wait till command completes */
4994                 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4995                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4996                                         S2IO_BIT_RESET);
4997
4998                 sp->m_cast_flg = 0;
4999                 sp->all_multi_pos = 0;
5000         }
5001
5002         if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
5003                 /*  Put the NIC into promiscuous mode */
5004                 add = &bar0->mac_cfg;
5005                 val64 = readq(&bar0->mac_cfg);
5006                 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
5007
5008                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5009                 writel((u32) val64, add);
5010                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5011                 writel((u32) (val64 >> 32), (add + 4));
5012
5013                 if (vlan_tag_strip != 1) {
5014                         val64 = readq(&bar0->rx_pa_cfg);
5015                         val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
5016                         writeq(val64, &bar0->rx_pa_cfg);
5017                         vlan_strip_flag = 0;
5018                 }
5019
5020                 val64 = readq(&bar0->mac_cfg);
5021                 sp->promisc_flg = 1;
5022                 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
5023                           dev->name);
5024         } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
5025                 /*  Remove the NIC from promiscuous mode */
5026                 add = &bar0->mac_cfg;
5027                 val64 = readq(&bar0->mac_cfg);
5028                 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
5029
5030                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5031                 writel((u32) val64, add);
5032                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5033                 writel((u32) (val64 >> 32), (add + 4));
5034
5035                 if (vlan_tag_strip != 0) {
5036                         val64 = readq(&bar0->rx_pa_cfg);
5037                         val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5038                         writeq(val64, &bar0->rx_pa_cfg);
5039                         vlan_strip_flag = 1;
5040                 }
5041
5042                 val64 = readq(&bar0->mac_cfg);
5043                 sp->promisc_flg = 0;
5044                 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
5045                           dev->name);
5046         }
5047
5048         /*  Update individual M_CAST address list */
5049         if ((!sp->m_cast_flg) && dev->mc_count) {
5050                 if (dev->mc_count >
5051                     (config->max_mc_addr - config->max_mac_addr)) {
5052                         DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
5053                                   dev->name);
5054                         DBG_PRINT(ERR_DBG, "can be added, please enable ");
5055                         DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
5056                         return;
5057                 }
5058
5059                 prev_cnt = sp->mc_addr_count;
5060                 sp->mc_addr_count = dev->mc_count;
5061
5062                 /* Clear out the previous list of Mc in the H/W. */
5063                 for (i = 0; i < prev_cnt; i++) {
5064                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5065                                &bar0->rmac_addr_data0_mem);
5066                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5067                                 &bar0->rmac_addr_data1_mem);
5068                         val64 = RMAC_ADDR_CMD_MEM_WE |
5069                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5070                             RMAC_ADDR_CMD_MEM_OFFSET
5071                             (config->mc_start_offset + i);
5072                         writeq(val64, &bar0->rmac_addr_cmd_mem);
5073
5074                         /* Wait for command completes */
5075                         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5076                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5077                                         S2IO_BIT_RESET)) {
5078                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
5079                                           dev->name);
5080                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5081                                 return;
5082                         }
5083                 }
5084
5085                 /* Create the new Rx filter list and update the same in H/W. */
5086                 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
5087                      i++, mclist = mclist->next) {
5088                         memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
5089                                ETH_ALEN);
5090                         mac_addr = 0;
5091                         for (j = 0; j < ETH_ALEN; j++) {
5092                                 mac_addr |= mclist->dmi_addr[j];
5093                                 mac_addr <<= 8;
5094                         }
5095                         mac_addr >>= 8;
5096                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5097                                &bar0->rmac_addr_data0_mem);
5098                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5099                                 &bar0->rmac_addr_data1_mem);
5100                         val64 = RMAC_ADDR_CMD_MEM_WE |
5101                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5102                             RMAC_ADDR_CMD_MEM_OFFSET
5103                             (i + config->mc_start_offset);
5104                         writeq(val64, &bar0->rmac_addr_cmd_mem);
5105
5106                         /* Wait for command completes */
5107                         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5108                                         RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5109                                         S2IO_BIT_RESET)) {
5110                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
5111                                           dev->name);
5112                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5113                                 return;
5114                         }
5115                 }
5116         }
5117 }
5118
5119 /* read from CAM unicast & multicast addresses and store it in
5120  * def_mac_addr structure
5121  */
5122 void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5123 {
5124         int offset;
5125         u64 mac_addr = 0x0;
5126         struct config_param *config = &sp->config;
5127
5128         /* store unicast & multicast mac addresses */
5129         for (offset = 0; offset < config->max_mc_addr; offset++) {
5130                 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5131                 /* if read fails disable the entry */
5132                 if (mac_addr == FAILURE)
5133                         mac_addr = S2IO_DISABLE_MAC_ENTRY;
5134                 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5135         }
5136 }
5137
5138 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5139 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5140 {
5141         int offset;
5142         struct config_param *config = &sp->config;
5143         /* restore unicast mac address */
5144         for (offset = 0; offset < config->max_mac_addr; offset++)
5145                 do_s2io_prog_unicast(sp->dev,
5146                         sp->def_mac_addr[offset].mac_addr);
5147
5148         /* restore multicast mac address */
5149         for (offset = config->mc_start_offset;
5150                 offset < config->max_mc_addr; offset++)
5151                 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5152 }
5153
5154 /* add a multicast MAC address to CAM */
5155 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5156 {
5157         int i;
5158         u64 mac_addr = 0;
5159         struct config_param *config = &sp->config;
5160
5161         for (i = 0; i < ETH_ALEN; i++) {
5162                 mac_addr <<= 8;
5163                 mac_addr |= addr[i];
5164         }
5165         if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5166                 return SUCCESS;
5167
5168         /* check if the multicast mac already preset in CAM */
5169         for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5170                 u64 tmp64;
5171                 tmp64 = do_s2io_read_unicast_mc(sp, i);
5172                 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5173                         break;
5174
5175                 if (tmp64 == mac_addr)
5176                         return SUCCESS;
5177         }
5178         if (i == config->max_mc_addr) {
5179                 DBG_PRINT(ERR_DBG,
5180                         "CAM full no space left for multicast MAC\n");
5181                 return FAILURE;
5182         }
5183         /* Update the internal structure with this new mac address */
5184         do_s2io_copy_mac_addr(sp, i, mac_addr);
5185
5186         return (do_s2io_add_mac(sp, mac_addr, i));
5187 }
5188
5189 /* add MAC address to CAM */
5190 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5191 {
5192         u64 val64;
5193         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5194
5195         writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5196                 &bar0->rmac_addr_data0_mem);
5197
5198         val64 =
5199                 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5200                 RMAC_ADDR_CMD_MEM_OFFSET(off);
5201         writeq(val64, &bar0->rmac_addr_cmd_mem);
5202
5203         /* Wait till command completes */
5204         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5205                 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5206                 S2IO_BIT_RESET)) {
5207                 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5208                 return FAILURE;
5209         }
5210         return SUCCESS;
5211 }
5212 /* deletes a specified unicast/multicast mac entry from CAM */
5213 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5214 {
5215         int offset;
5216         u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5217         struct config_param *config = &sp->config;
5218
5219         for (offset = 1;
5220                 offset < config->max_mc_addr; offset++) {
5221                 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5222                 if (tmp64 == addr) {
5223                         /* disable the entry by writing  0xffffffffffffULL */
5224                         if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5225                                 return FAILURE;
5226                         /* store the new mac list from CAM */
5227                         do_s2io_store_unicast_mc(sp);
5228                         return SUCCESS;
5229                 }
5230         }
5231         DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5232                         (unsigned long long)addr);
5233         return FAILURE;
5234 }
5235
5236 /* read mac entries from CAM */
5237 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5238 {
5239         u64 tmp64 = 0xffffffffffff0000ULL, val64;
5240         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5241
5242         /* read mac addr */
5243         val64 =
5244                 RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5245                 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5246         writeq(val64, &bar0->rmac_addr_cmd_mem);
5247
5248         /* Wait till command completes */
5249         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5250                 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5251                 S2IO_BIT_RESET)) {
5252                 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5253                 return FAILURE;
5254         }
5255         tmp64 = readq(&bar0->rmac_addr_data0_mem);
5256         return (tmp64 >> 16);
5257 }
5258
5259 /**
5260  * s2io_set_mac_addr driver entry point
5261  */
5262
5263 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5264 {
5265         struct sockaddr *addr = p;
5266
5267         if (!is_valid_ether_addr(addr->sa_data))
5268                 return -EINVAL;
5269
5270         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5271
5272         /* store the MAC address in CAM */
5273         return (do_s2io_prog_unicast(dev, dev->dev_addr));
5274 }
5275 /**
5276  *  do_s2io_prog_unicast - Programs the Xframe mac address
5277  *  @dev : pointer to the device structure.
5278  *  @addr: a uchar pointer to the new mac address which is to be set.
5279  *  Description : This procedure will program the Xframe to receive
5280  *  frames with new Mac Address
5281  *  Return value: SUCCESS on success and an appropriate (-)ve integer
5282  *  as defined in errno.h file on failure.
5283  */
5284
5285 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5286 {
5287         struct s2io_nic *sp = dev->priv;
5288         register u64 mac_addr = 0, perm_addr = 0;
5289         int i;
5290         u64 tmp64;
5291         struct config_param *config = &sp->config;
5292
5293         /*
5294         * Set the new MAC address as the new unicast filter and reflect this
5295         * change on the device address registered with the OS. It will be
5296         * at offset 0.
5297         */
5298         for (i = 0; i < ETH_ALEN; i++) {
5299                 mac_addr <<= 8;
5300                 mac_addr |= addr[i];
5301                 perm_addr <<= 8;
5302                 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5303         }
5304
5305         /* check if the dev_addr is different than perm_addr */
5306         if (mac_addr == perm_addr)
5307                 return SUCCESS;
5308
5309         /* check if the mac already preset in CAM */
5310         for (i = 1; i < config->max_mac_addr; i++) {
5311                 tmp64 = do_s2io_read_unicast_mc(sp, i);
5312                 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5313                         break;
5314
5315                 if (tmp64 == mac_addr) {
5316                         DBG_PRINT(INFO_DBG,
5317                         "MAC addr:0x%llx already present in CAM\n",
5318                         (unsigned long long)mac_addr);
5319                         return SUCCESS;
5320                 }
5321         }
5322         if (i == config->max_mac_addr) {
5323                 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5324                 return FAILURE;
5325         }
5326         /* Update the internal structure with this new mac address */
5327         do_s2io_copy_mac_addr(sp, i, mac_addr);
5328         return (do_s2io_add_mac(sp, mac_addr, i));
5329 }
5330
5331 /**
5332  * s2io_ethtool_sset - Sets different link parameters.
 * @sp : private member of the device structure, which is a pointer to the
 *      s2io_nic structure.
5334  * @info: pointer to the structure with parameters given by ethtool to set
5335  * link information.
5336  * Description:
5337  * The function sets different link parameters provided by the user onto
5338  * the NIC.
5339  * Return value:
5340  * 0 on success.
5341 */
5342
5343 static int s2io_ethtool_sset(struct net_device *dev,
5344                              struct ethtool_cmd *info)
5345 {
5346         struct s2io_nic *sp = dev->priv;
5347         if ((info->autoneg == AUTONEG_ENABLE) ||
5348             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
5349                 return -EINVAL;
5350         else {
5351                 s2io_close(sp->dev);
5352                 s2io_open(sp->dev);
5353         }
5354
5355         return 0;
5356 }
5357
5358 /**
 * s2io_ethtool_gset - Return link specific information.
5360  * @sp : private member of the device structure, pointer to the
5361  *      s2io_nic structure.
5362  * @info : pointer to the structure with parameters given by ethtool
5363  * to return link information.
5364  * Description:
5365  * Returns link specific information like speed, duplex etc.. to ethtool.
5366  * Return value :
5367  * return 0 on success.
5368  */
5369
5370 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5371 {
5372         struct s2io_nic *sp = dev->priv;
5373         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5374         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5375         info->port = PORT_FIBRE;
5376
5377         /* info->transceiver */
5378         info->transceiver = XCVR_EXTERNAL;
5379
5380         if (netif_carrier_ok(sp->dev)) {
5381                 info->speed = 10000;
5382                 info->duplex = DUPLEX_FULL;
5383         } else {
5384                 info->speed = -1;
5385                 info->duplex = -1;
5386         }
5387
5388         info->autoneg = AUTONEG_DISABLE;
5389         return 0;
5390 }
5391
5392 /**
5393  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5394  * @sp : private member of the device structure, which is a pointer to the
5395  * s2io_nic structure.
5396  * @info : pointer to the structure with parameters given by ethtool to
5397  * return driver information.
5398  * Description:
 * Returns driver specific information like name, version etc.. to ethtool.
5400  * Return value:
5401  *  void
5402  */
5403
5404 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5405                                   struct ethtool_drvinfo *info)
5406 {
5407         struct s2io_nic *sp = dev->priv;
5408
5409         strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5410         strncpy(info->version, s2io_driver_version, sizeof(info->version));
5411         strncpy(info->fw_version, "", sizeof(info->fw_version));
5412         strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5413         info->regdump_len = XENA_REG_SPACE;
5414         info->eedump_len = XENA_EEPROM_SPACE;
5415 }
5416
5417 /**
 *  s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
5419  *  @sp: private member of the device structure, which is a pointer to the
5420  *  s2io_nic structure.
5421  *  @regs : pointer to the structure with parameters given by ethtool for
5422  *  dumping the registers.
 *  @reg_space: The input argument into which all the registers are dumped.
5424  *  Description:
5425  *  Dumps the entire register space of xFrame NIC into the user given
5426  *  buffer area.
5427  * Return value :
5428  * void .
5429 */
5430
5431 static void s2io_ethtool_gregs(struct net_device *dev,
5432                                struct ethtool_regs *regs, void *space)
5433 {
5434         int i;
5435         u64 reg;
5436         u8 *reg_space = (u8 *) space;
5437         struct s2io_nic *sp = dev->priv;
5438
5439         regs->len = XENA_REG_SPACE;
5440         regs->version = sp->pdev->subsystem_device;
5441
5442         for (i = 0; i < regs->len; i += 8) {
5443                 reg = readq(sp->bar0 + i);
5444                 memcpy((reg_space + i), &reg, 8);
5445         }
5446 }
5447
5448 /**
5449  *  s2io_phy_id  - timer function that alternates adapter LED.
5450  *  @data : address of the private member of the device structure, which
5451  *  is a pointer to the s2io_nic structure, provided as an u32.
5452  * Description: This is actually the timer function that alternates the
5453  * adapter LED bit of the adapter control bit to set/reset every time on
 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
5455  *  once every second.
5456 */
5457 static void s2io_phy_id(unsigned long data)
5458 {
5459         struct s2io_nic *sp = (struct s2io_nic *) data;
5460         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5461         u64 val64 = 0;
5462         u16 subid;
5463
5464         subid = sp->pdev->subsystem_device;
5465         if ((sp->device_type == XFRAME_II_DEVICE) ||
5466                    ((subid & 0xFF) >= 0x07)) {
5467                 val64 = readq(&bar0->gpio_control);
5468                 val64 ^= GPIO_CTRL_GPIO_0;
5469                 writeq(val64, &bar0->gpio_control);
5470         } else {
5471                 val64 = readq(&bar0->adapter_control);
5472                 val64 ^= ADAPTER_LED_ON;
5473                 writeq(val64, &bar0->adapter_control);
5474         }
5475
5476         mod_timer(&sp->id_timer, jiffies + HZ / 2);
5477 }
5478
5479 /**
5480  * s2io_ethtool_idnic - To physically identify the nic on the system.
5481  * @sp : private member of the device structure, which is a pointer to the
5482  * s2io_nic structure.
5483  * @id : pointer to the structure with identification parameters given by
5484  * ethtool.
5485  * Description: Used to physically identify the NIC on the system.
5486  * The Link LED will blink for a time specified by the user for
5487  * identification.
5488  * NOTE: The Link has to be Up to be able to blink the LED. Hence
5489  * identification is possible only if it's link is up.
5490  * Return value:
5491  * int , returns 0 on success
5492  */
5493
5494 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
5495 {
5496         u64 val64 = 0, last_gpio_ctrl_val;
5497         struct s2io_nic *sp = dev->priv;
5498         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5499         u16 subid;
5500
5501         subid = sp->pdev->subsystem_device;
5502         last_gpio_ctrl_val = readq(&bar0->gpio_control);
5503         if ((sp->device_type == XFRAME_I_DEVICE) &&
5504                 ((subid & 0xFF) < 0x07)) {
5505                 val64 = readq(&bar0->adapter_control);
5506                 if (!(val64 & ADAPTER_CNTL_EN)) {
5507                         printk(KERN_ERR
5508                                "Adapter Link down, cannot blink LED\n");
5509                         return -EFAULT;
5510                 }
5511         }
5512         if (sp->id_timer.function == NULL) {
5513                 init_timer(&sp->id_timer);
5514                 sp->id_timer.function = s2io_phy_id;
5515                 sp->id_timer.data = (unsigned long) sp;
5516         }
5517         mod_timer(&sp->id_timer, jiffies);
5518         if (data)
5519                 msleep_interruptible(data * HZ);
5520         else
5521                 msleep_interruptible(MAX_FLICKER_TIME);
5522         del_timer_sync(&sp->id_timer);
5523
5524         if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
5525                 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
5526                 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5527         }
5528
5529         return 0;
5530 }
5531
5532 static void s2io_ethtool_gringparam(struct net_device *dev,
5533                                     struct ethtool_ringparam *ering)
5534 {
5535         struct s2io_nic *sp = dev->priv;
5536         int i,tx_desc_count=0,rx_desc_count=0;
5537
5538         if (sp->rxd_mode == RXD_MODE_1)
5539                 ering->rx_max_pending = MAX_RX_DESC_1;
5540         else if (sp->rxd_mode == RXD_MODE_3B)
5541                 ering->rx_max_pending = MAX_RX_DESC_2;
5542
5543         ering->tx_max_pending = MAX_TX_DESC;
5544         for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5545                 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5546
5547         DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
5548         ering->tx_pending = tx_desc_count;
5549         rx_desc_count = 0;
5550         for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5551                 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5552
5553         ering->rx_pending = rx_desc_count;
5554
5555         ering->rx_mini_max_pending = 0;
5556         ering->rx_mini_pending = 0;
5557         if(sp->rxd_mode == RXD_MODE_1)
5558                 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5559         else if (sp->rxd_mode == RXD_MODE_3B)
5560                 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5561         ering->rx_jumbo_pending = rx_desc_count;
5562 }
5563
5564 /**
 * s2io_ethtool_getpause_data - Pause frame generation and reception.
5566  * @sp : private member of the device structure, which is a pointer to the
5567  *      s2io_nic structure.
5568  * @ep : pointer to the structure with pause parameters given by ethtool.
5569  * Description:
5570  * Returns the Pause frame generation and reception capability of the NIC.
5571  * Return value:
5572  *  void
5573  */
5574 static void s2io_ethtool_getpause_data(struct net_device *dev,
5575                                        struct ethtool_pauseparam *ep)
5576 {
5577         u64 val64;
5578         struct s2io_nic *sp = dev->priv;
5579         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5580
5581         val64 = readq(&bar0->rmac_pause_cfg);
5582         if (val64 & RMAC_PAUSE_GEN_ENABLE)
5583                 ep->tx_pause = TRUE;
5584         if (val64 & RMAC_PAUSE_RX_ENABLE)
5585                 ep->rx_pause = TRUE;
5586         ep->autoneg = FALSE;
5587 }
5588
5589 /**
5590  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5591  * @sp : private member of the device structure, which is a pointer to the
5592  *      s2io_nic structure.
5593  * @ep : pointer to the structure with pause parameters given by ethtool.
5594  * Description:
5595  * It can be used to set or reset Pause frame generation or reception
5596  * support of the NIC.
5597  * Return value:
5598  * int, returns 0 on Success
5599  */
5600
5601 static int s2io_ethtool_setpause_data(struct net_device *dev,
5602                                struct ethtool_pauseparam *ep)
5603 {
5604         u64 val64;
5605         struct s2io_nic *sp = dev->priv;
5606         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5607
5608         val64 = readq(&bar0->rmac_pause_cfg);
5609         if (ep->tx_pause)
5610                 val64 |= RMAC_PAUSE_GEN_ENABLE;
5611         else
5612                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5613         if (ep->rx_pause)
5614                 val64 |= RMAC_PAUSE_RX_ENABLE;
5615         else
5616                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5617         writeq(val64, &bar0->rmac_pause_cfg);
5618         return 0;
5619 }
5620
5621 /**
5622  * read_eeprom - reads 4 bytes of data from user given offset.
5623  * @sp : private member of the device structure, which is a pointer to the
5624  *      s2io_nic structure.
5625  * @off : offset at which the data must be written
5626  * @data : Its an output parameter where the data read at the given
5627  *      offset is stored.
5628  * Description:
5629  * Will read 4 bytes of data from the user given offset and return the
5630  * read data.
5631  * NOTE: Will allow to read only part of the EEPROM visible through the
5632  *   I2C bus.
5633  * Return value:
5634  *  -1 on failure and 0 on success.
5635  */
5636
#define S2IO_DEV_ID             5
static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
{
	int ret = -1;		/* pessimistic default: failure */
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I reaches the EEPROM through the I2C controller. */
	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Kick off a read of 4 bytes (BYTE_CNT 0x3 + 1) at 'off'
		 * from I2C device S2IO_DEV_ID. */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll up to 5 times, 50 ms apart, for completion. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II reaches the EEPROM through the SPI controller. */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Program the command first, then set REQ in a second
		 * write to start it. */
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				/* device rejected the request */
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				*data = readq(&bar0->spi_data);
				/* only the low 3 bytes carry EEPROM data */
				*data &= 0xffffff;
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5687
5688 /**
5689  *  write_eeprom - actually writes the relevant part of the data value.
5690  *  @sp : private member of the device structure, which is a pointer to the
5691  *       s2io_nic structure.
5692  *  @off : offset at which the data must be written
5693  *  @data : The data that is to be written
5694  *  @cnt : Number of bytes of the data that are actually to be written into
5695  *  the Eeprom. (max of 3)
5696  * Description:
5697  *  Actually writes the relevant part of the data value into the Eeprom
5698  *  through the I2C bus.
5699  * Return value:
5700  *  0 on success, -1 on failure.
5701  */
5702
static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;	/* pessimistic default: failure */
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I writes the EEPROM through the I2C controller. */
	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Kick off a write of 'cnt' bytes of 'data' at 'off'. */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll up to 5 times, 50 ms apart, for completion; a NACK
		 * from the device leaves ret at -1. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II writes the EEPROM through the SPI controller. */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* A byte count of 8 is encoded as 0 in the SPI command. */
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);

		/* Program the command first, then set REQ in a second
		 * write to start it. */
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				/* device rejected the request */
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5752 static void s2io_vpd_read(struct s2io_nic *nic)
5753 {
5754         u8 *vpd_data;
5755         u8 data;
5756         int i=0, cnt, fail = 0;
5757         int vpd_addr = 0x80;
5758
5759         if (nic->device_type == XFRAME_II_DEVICE) {
5760                 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5761                 vpd_addr = 0x80;
5762         }
5763         else {
5764                 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5765                 vpd_addr = 0x50;
5766         }
5767         strcpy(nic->serial_num, "NOT AVAILABLE");
5768
5769         vpd_data = kmalloc(256, GFP_KERNEL);
5770         if (!vpd_data) {
5771                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5772                 return;
5773         }
5774         nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
5775
5776         for (i = 0; i < 256; i +=4 ) {
5777                 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5778                 pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
5779                 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5780                 for (cnt = 0; cnt <5; cnt++) {
5781                         msleep(2);
5782                         pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5783                         if (data == 0x80)
5784                                 break;
5785                 }
5786                 if (cnt >= 5) {
5787                         DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5788                         fail = 1;
5789                         break;
5790                 }
5791                 pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5792                                       (u32 *)&vpd_data[i]);
5793         }
5794
5795         if(!fail) {
5796                 /* read serial number of adapter */
5797                 for (cnt = 0; cnt < 256; cnt++) {
5798                 if ((vpd_data[cnt] == 'S') &&
5799                         (vpd_data[cnt+1] == 'N') &&
5800                         (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5801                                 memset(nic->serial_num, 0, VPD_STRING_LEN);
5802                                 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5803                                         vpd_data[cnt+2]);
5804                                 break;
5805                         }
5806                 }
5807         }
5808
5809         if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5810                 memset(nic->product_name, 0, vpd_data[1]);
5811                 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5812         }
5813         kfree(vpd_data);
5814         nic->mac_control.stats_info->sw_stat.mem_freed += 256;
5815 }
5816
5817 /**
5818  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
 *  @sp : private member of the device structure, which is a pointer to the
 *       s2io_nic structure.
5820  *  @eeprom : pointer to the user level structure provided by ethtool,
5821  *  containing all relevant information.
5822  *  @data_buf : user defined value to be written into Eeprom.
5823  *  Description: Reads the values stored in the Eeprom at given offset
5824  *  for a given length. Stores these values int the input argument data
5825  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5826  *  Return value:
5827  *  int  0 on success
5828  */
5829
5830 static int s2io_ethtool_geeprom(struct net_device *dev,
5831                          struct ethtool_eeprom *eeprom, u8 * data_buf)
5832 {
5833         u32 i, valid;
5834         u64 data;
5835         struct s2io_nic *sp = dev->priv;
5836
5837         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5838
5839         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5840                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5841
5842         for (i = 0; i < eeprom->len; i += 4) {
5843                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5844                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5845                         return -EFAULT;
5846                 }
5847                 valid = INV(data);
5848                 memcpy((data_buf + i), &valid, 4);
5849         }
5850         return 0;
5851 }
5852
/**
 *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
 *  @sp : private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
 *  @eeprom : pointer to the user level structure provided by ethtool,
 *  containing all relevant information.
 *  @data_buf : user defined value to be written into Eeprom.
 *  Description:
 *  Tries to write the user provided value in the Eeprom, at the offset
 *  given by the user.
 *  Return value:
 *  0 on success, -EFAULT on failure.
 */
5866
5867 static int s2io_ethtool_seeprom(struct net_device *dev,
5868                                 struct ethtool_eeprom *eeprom,
5869                                 u8 * data_buf)
5870 {
5871         int len = eeprom->len, cnt = 0;
5872         u64 valid = 0, data;
5873         struct s2io_nic *sp = dev->priv;
5874
5875         if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5876                 DBG_PRINT(ERR_DBG,
5877                           "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5878                 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5879                           eeprom->magic);
5880                 return -EFAULT;
5881         }
5882
5883         while (len) {
5884                 data = (u32) data_buf[cnt] & 0x000000FF;
5885                 if (data) {
5886                         valid = (u32) (data << 24);
5887                 } else
5888                         valid = data;
5889
5890                 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5891                         DBG_PRINT(ERR_DBG,
5892                                   "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5893                         DBG_PRINT(ERR_DBG,
5894                                   "write into the specified offset\n");
5895                         return -EFAULT;
5896                 }
5897                 cnt++;
5898                 len--;
5899         }
5900
5901         return 0;
5902 }
5903
/**
 * s2io_register_test - reads and writes into all clock domains.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of each of the tests conducted
 * by the driver.
 * Description:
 * Read and write into all clock domains. The NIC has 3 clock domains;
 * verify that registers in all three regions are accessible.
 * Return value:
 * 0 on success.
 */
5916
5917 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5918 {
5919         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5920         u64 val64 = 0, exp_val;
5921         int fail = 0;
5922
5923         val64 = readq(&bar0->pif_rd_swapper_fb);
5924         if (val64 != 0x123456789abcdefULL) {
5925                 fail = 1;
5926                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5927         }
5928
5929         val64 = readq(&bar0->rmac_pause_cfg);
5930         if (val64 != 0xc000ffff00000000ULL) {
5931                 fail = 1;
5932                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5933         }
5934
5935         val64 = readq(&bar0->rx_queue_cfg);
5936         if (sp->device_type == XFRAME_II_DEVICE)
5937                 exp_val = 0x0404040404040404ULL;
5938         else
5939                 exp_val = 0x0808080808080808ULL;
5940         if (val64 != exp_val) {
5941                 fail = 1;
5942                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5943         }
5944
5945         val64 = readq(&bar0->xgxs_efifo_cfg);
5946         if (val64 != 0x000000001923141EULL) {
5947                 fail = 1;
5948                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5949         }
5950
5951         val64 = 0x5A5A5A5A5A5A5A5AULL;
5952         writeq(val64, &bar0->xmsi_data);
5953         val64 = readq(&bar0->xmsi_data);
5954         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5955                 fail = 1;
5956                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5957         }
5958
5959         val64 = 0xA5A5A5A5A5A5A5A5ULL;
5960         writeq(val64, &bar0->xmsi_data);
5961         val64 = readq(&bar0->xmsi_data);
5962         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5963                 fail = 1;
5964                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5965         }
5966
5967         *data = fail;
5968         return fail;
5969 }
5970
/**
 * s2io_eeprom_test - to verify that the EEPROM in the xena can be programmed.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data:variable that returns the result of each of the test conducted by
 * the driver.
 * Description:
 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
 * register.
 * Return value:
 * 0 on success.
 */
5983
5984 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5985 {
5986         int fail = 0;
5987         u64 ret_data, org_4F0, org_7F0;
5988         u8 saved_4F0 = 0, saved_7F0 = 0;
5989         struct net_device *dev = sp->dev;
5990
5991         /* Test Write Error at offset 0 */
5992         /* Note that SPI interface allows write access to all areas
5993          * of EEPROM. Hence doing all negative testing only for Xframe I.
5994          */
5995         if (sp->device_type == XFRAME_I_DEVICE)
5996                 if (!write_eeprom(sp, 0, 0, 3))
5997                         fail = 1;
5998
5999         /* Save current values at offsets 0x4F0 and 0x7F0 */
6000         if (!read_eeprom(sp, 0x4F0, &org_4F0))
6001                 saved_4F0 = 1;
6002         if (!read_eeprom(sp, 0x7F0, &org_7F0))
6003                 saved_7F0 = 1;
6004
6005         /* Test Write at offset 4f0 */
6006         if (write_eeprom(sp, 0x4F0, 0x012345, 3))
6007                 fail = 1;
6008         if (read_eeprom(sp, 0x4F0, &ret_data))
6009                 fail = 1;
6010
6011         if (ret_data != 0x012345) {
6012                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
6013                         "Data written %llx Data read %llx\n",
6014                         dev->name, (unsigned long long)0x12345,
6015                         (unsigned long long)ret_data);
6016                 fail = 1;
6017         }
6018
6019         /* Reset the EEPROM data go FFFF */
6020         write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
6021
6022         /* Test Write Request Error at offset 0x7c */
6023         if (sp->device_type == XFRAME_I_DEVICE)
6024                 if (!write_eeprom(sp, 0x07C, 0, 3))
6025                         fail = 1;
6026
6027         /* Test Write Request at offset 0x7f0 */
6028         if (write_eeprom(sp, 0x7F0, 0x012345, 3))
6029                 fail = 1;
6030         if (read_eeprom(sp, 0x7F0, &ret_data))
6031                 fail = 1;
6032
6033         if (ret_data != 0x012345) {
6034                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
6035                         "Data written %llx Data read %llx\n",
6036                         dev->name, (unsigned long long)0x12345,
6037                         (unsigned long long)ret_data);
6038                 fail = 1;
6039         }
6040
6041         /* Reset the EEPROM data go FFFF */
6042         write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
6043
6044         if (sp->device_type == XFRAME_I_DEVICE) {
6045                 /* Test Write Error at offset 0x80 */
6046                 if (!write_eeprom(sp, 0x080, 0, 3))
6047                         fail = 1;
6048
6049                 /* Test Write Error at offset 0xfc */
6050                 if (!write_eeprom(sp, 0x0FC, 0, 3))
6051                         fail = 1;
6052
6053                 /* Test Write Error at offset 0x100 */
6054                 if (!write_eeprom(sp, 0x100, 0, 3))
6055                         fail = 1;
6056
6057                 /* Test Write Error at offset 4ec */
6058                 if (!write_eeprom(sp, 0x4EC, 0, 3))
6059                         fail = 1;
6060         }
6061
6062         /* Restore values at offsets 0x4F0 and 0x7F0 */
6063         if (saved_4F0)
6064                 write_eeprom(sp, 0x4F0, org_4F0, 3);
6065         if (saved_7F0)
6066                 write_eeprom(sp, 0x7F0, org_7F0, 3);
6067
6068         *data = fail;
6069         return fail;
6070 }
6071
/**
 * s2io_bist_test - invokes the MemBist test of the card.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data:variable that returns the result of each of the test conducted by
 * the driver.
 * Description:
 * This invokes the MemBist test of the card. We give around
 * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
 * Return value:
 * 0 on success and -1 on failure.
 */
6085
6086 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
6087 {
6088         u8 bist = 0;
6089         int cnt = 0, ret = -1;
6090
6091         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6092         bist |= PCI_BIST_START;
6093         pci_write_config_word(sp->pdev, PCI_BIST, bist);
6094
6095         while (cnt < 20) {
6096                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6097                 if (!(bist & PCI_BIST_START)) {
6098                         *data = (bist & PCI_BIST_CODE_MASK);
6099                         ret = 0;
6100                         break;
6101                 }
6102                 msleep(100);
6103                 cnt++;
6104         }
6105
6106         return ret;
6107 }
6108
/**
 * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data: variable that returns the result of each of the test conducted by
 * the driver.
 * Description:
 * The function verifies the link state of the NIC and updates the input
 * argument 'data' appropriately.
 * Return value:
 * 0 on success.
 */
6121
6122 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
6123 {
6124         struct XENA_dev_config __iomem *bar0 = sp->bar0;
6125         u64 val64;
6126
6127         val64 = readq(&bar0->adapter_status);
6128         if(!(LINK_IS_UP(val64)))
6129                 *data = 1;
6130         else
6131                 *data = 0;
6132
6133         return *data;
6134 }
6135
/**
 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of each of the test
 * conducted by the driver.
 * Description:
 *  This is one of the offline tests that checks read and write
 *  access to the RldRam chip on the NIC.
 * Return value:
 *  0 on success.
 */
6148
static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* Disable ECC while exercising the RLDRAM so correction does not
	 * mask raw read/write behavior during the test.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/* Put the memory controller into RLDRAM test mode.
	 * NOTE(review): the LF/UF flavors of SPECIAL_REG_WRITE and the
	 * exact write ordering below appear to be hardware-mandated —
	 * do not reorder.
	 */
	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Two passes: pass 0 writes the base patterns, pass 1 writes the
	 * same patterns with the upper 48 bits inverted.
	 */
	while (iteration < 2) {
		/* Load the three 64-bit test data words. */
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d2);

		/* Target address for the test transaction. */
		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Kick off the write phase and poll (up to 5 x 200ms)
		 * for completion.
		 */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		/* Timed out waiting for the write — abandon the test. */
		if (cnt == 5)
			break;

		/* Kick off the read-back phase and poll (up to 5 x 500ms)
		 * for completion.
		 */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		/* Timed out waiting for the read — abandon the test. */
		if (cnt == 5)
			break;

		/* Hardware compares the read-back data; PASS bit clear
		 * means a mismatch.
		 */
		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
6233
/**
 *  s2io_ethtool_test - conducts 6 tests to determine the health of card.
 *  @sp : private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
 *  @ethtest : pointer to a ethtool command specific structure that will be
 *  returned to the user.
 *  @data : variable that returns the result of each of the test
 * conducted by the driver.
 * Description:
 *  This function conducts 6 tests (4 offline and 2 online) to determine
 *  the health of the card.
 * Return value:
 *  void
 */
6248
6249 static void s2io_ethtool_test(struct net_device *dev,
6250                               struct ethtool_test *ethtest,
6251                               uint64_t * data)
6252 {
6253         struct s2io_nic *sp = dev->priv;
6254         int orig_state = netif_running(sp->dev);
6255
6256         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6257                 /* Offline Tests. */
6258                 if (orig_state)
6259                         s2io_close(sp->dev);
6260
6261                 if (s2io_register_test(sp, &data[0]))
6262                         ethtest->flags |= ETH_TEST_FL_FAILED;
6263
6264                 s2io_reset(sp);
6265
6266                 if (s2io_rldram_test(sp, &data[3]))
6267                         ethtest->flags |= ETH_TEST_FL_FAILED;
6268
6269                 s2io_reset(sp);
6270
6271                 if (s2io_eeprom_test(sp, &data[1]))
6272                         ethtest->flags |= ETH_TEST_FL_FAILED;
6273
6274                 if (s2io_bist_test(sp, &data[4]))
6275                         ethtest->flags |= ETH_TEST_FL_FAILED;
6276
6277                 if (orig_state)
6278                         s2io_open(sp->dev);
6279
6280                 data[2] = 0;
6281         } else {
6282                 /* Online Tests. */
6283                 if (!orig_state) {
6284                         DBG_PRINT(ERR_DBG,
6285                                   "%s: is not up, cannot run test\n",
6286                                   dev->name);
6287                         data[0] = -1;
6288                         data[1] = -1;
6289                         data[2] = -1;
6290                         data[3] = -1;
6291                         data[4] = -1;
6292                 }
6293
6294                 if (s2io_link_test(sp, &data[2]))
6295                         ethtest->flags |= ETH_TEST_FL_FAILED;
6296
6297                 data[0] = 0;
6298                 data[1] = 0;
6299                 data[3] = 0;
6300                 data[4] = 0;
6301         }
6302 }
6303
6304 static void s2io_get_ethtool_stats(struct net_device *dev,
6305                                    struct ethtool_stats *estats,
6306                                    u64 * tmp_stats)
6307 {
6308         int i = 0, k;
6309         struct s2io_nic *sp = dev->priv;
6310         struct stat_block *stat_info = sp->mac_control.stats_info;
6311
6312         s2io_updt_stats(sp);
6313         tmp_stats[i++] =
6314                 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
6315                 le32_to_cpu(stat_info->tmac_frms);
6316         tmp_stats[i++] =
6317                 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
6318                 le32_to_cpu(stat_info->tmac_data_octets);
6319         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
6320         tmp_stats[i++] =
6321                 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
6322                 le32_to_cpu(stat_info->tmac_mcst_frms);
6323         tmp_stats[i++] =
6324                 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
6325                 le32_to_cpu(stat_info->tmac_bcst_frms);
6326         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
6327         tmp_stats[i++] =
6328                 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
6329                 le32_to_cpu(stat_info->tmac_ttl_octets);
6330         tmp_stats[i++] =
6331                 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
6332                 le32_to_cpu(stat_info->tmac_ucst_frms);
6333         tmp_stats[i++] =
6334                 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
6335                 le32_to_cpu(stat_info->tmac_nucst_frms);
6336         tmp_stats[i++] =
6337                 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
6338                 le32_to_cpu(stat_info->tmac_any_err_frms);
6339         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
6340         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
6341         tmp_stats[i++] =
6342                 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
6343                 le32_to_cpu(stat_info->tmac_vld_ip);
6344         tmp_stats[i++] =
6345                 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
6346                 le32_to_cpu(stat_info->tmac_drop_ip);
6347         tmp_stats[i++] =
6348                 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
6349                 le32_to_cpu(stat_info->tmac_icmp);
6350         tmp_stats[i++] =
6351                 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
6352                 le32_to_cpu(stat_info->tmac_rst_tcp);
6353         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
6354         tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
6355                 le32_to_cpu(stat_info->tmac_udp);
6356         tmp_stats[i++] =
6357                 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
6358                 le32_to_cpu(stat_info->rmac_vld_frms);
6359         tmp_stats[i++] =
6360                 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
6361                 le32_to_cpu(stat_info->rmac_data_octets);
6362         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
6363         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
6364         tmp_stats[i++] =
6365                 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
6366                 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
6367         tmp_stats[i++] =
6368                 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
6369                 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
6370         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
6371         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
6372         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
6373         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
6374         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
6375         tmp_stats[i++] =
6376                 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
6377                 le32_to_cpu(stat_info->rmac_ttl_octets);
6378         tmp_stats[i++] =
6379                 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
6380                 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
6381         tmp_stats[i++] =
6382                 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
6383                  << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
6384         tmp_stats[i++] =
6385                 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
6386                 le32_to_cpu(stat_info->rmac_discarded_frms);
6387         tmp_stats[i++] =
6388                 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
6389                  << 32 | le32_to_cpu(stat_info->rmac_drop_events);
6390         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
6391         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
6392         tmp_stats[i++] =
6393                 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
6394                 le32_to_cpu(stat_info->rmac_usized_frms);
6395         tmp_stats[i++] =
6396                 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
6397                 le32_to_cpu(stat_info->rmac_osized_frms);
6398         tmp_stats[i++] =
6399                 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
6400                 le32_to_cpu(stat_info->rmac_frag_frms);
6401         tmp_stats[i++] =
6402                 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
6403                 le32_to_cpu(stat_info->rmac_jabber_frms);
6404         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
6405         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
6406         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
6407         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
6408         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
6409         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
6410         tmp_stats[i++] =
6411                 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
6412                 le32_to_cpu(stat_info->rmac_ip);
6413         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
6414         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
6415         tmp_stats[i++] =
6416                 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
6417                 le32_to_cpu(stat_info->rmac_drop_ip);
6418         tmp_stats[i++] =
6419                 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
6420                 le32_to_cpu(stat_info->rmac_icmp);
6421         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
6422         tmp_stats[i++] =
6423                 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
6424                 le32_to_cpu(stat_info->rmac_udp);
6425         tmp_stats[i++] =
6426                 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
6427                 le32_to_cpu(stat_info->rmac_err_drp_udp);
6428         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
6429         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
6430         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
6431         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
6432         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
6433         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
6434         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
6435         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
6436         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
6437         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
6438         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
6439         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
6440         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
6441         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
6442         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
6443         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
6444         tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
6445         tmp_stats[i++] =
6446                 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
6447                 le32_to_cpu(stat_info->rmac_pause_cnt);
6448         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
6449         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
6450         tmp_stats[i++] =
6451                 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
6452                 le32_to_cpu(stat_info->rmac_accepted_ip);
6453         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
6454         tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
6455         tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
6456         tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
6457         tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
6458         tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
6459         tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
6460         tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
6461         tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
6462         tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
6463         tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
6464         tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
6465         tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
6466         tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
6467         tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
6468         tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
6469         tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
6470         tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
6471         tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
6472
6473         /* Enhanced statistics exist only for Hercules */
6474         if(sp->device_type == XFRAME_II_DEVICE) {
6475                 tmp_stats[i++] =
6476                                 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
6477                 tmp_stats[i++] =
6478                                 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
6479                 tmp_stats[i++] =
6480                                 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
6481                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
6482                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
6483                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
6484                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
6485                 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
6486                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
6487                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
6488                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
6489                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
6490                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
6491                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
6492                 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
6493                 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
6494         }
6495
6496         tmp_stats[i++] = 0;
6497         tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
6498         tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
6499         tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
6500         tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
6501         tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
6502         tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
6503         for (k = 0; k < MAX_RX_RINGS; k++)
6504                 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
6505         tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
6506         tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
6507         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
6508         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
6509         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
6510         tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
6511         tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
6512         tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
6513         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
6514         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
6515         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
6516         tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
6517         tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
6518         tmp_stats[i++] = stat_info->sw_stat.sending_both;
6519         tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
6520         tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
6521         if (stat_info->sw_stat.num_aggregations) {
6522                 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
6523                 int count = 0;
6524                 /*
6525                  * Since 64-bit divide does not work on all platforms,
6526                  * do repeated subtraction.
6527                  */
6528                 while (tmp >= stat_info->sw_stat.num_aggregations) {
6529                         tmp -= stat_info->sw_stat.num_aggregations;
6530                         count++;
6531                 }
6532                 tmp_stats[i++] = count;
6533         }
6534         else
6535                 tmp_stats[i++] = 0;
6536         tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
6537         tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
6538         tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
6539         tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
6540         tmp_stats[i++] = stat_info->sw_stat.mem_freed;
6541         tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
6542         tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
6543         tmp_stats[i++] = stat_info->sw_stat.link_up_time;
6544         tmp_stats[i++] = stat_info->sw_stat.link_down_time;
6545
6546         tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
6547         tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
6548         tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
6549         tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
6550         tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
6551
6552         tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
6553         tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
6554         tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
6555         tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
6556         tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
6557         tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
6558         tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
6559         tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
6560         tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
6561         tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
6562         tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
6563         tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
6564         tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
6565         tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
6566         tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
6567         tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
6568         tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
6569         tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
6570         tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
6571         tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
6572         tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
6573         tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
6574         tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
6575         tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
6576         tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
6577         tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
6578 }
6579
6580 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6581 {
6582         return (XENA_REG_SPACE);
6583 }
6584
6585
6586 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6587 {
6588         struct s2io_nic *sp = dev->priv;
6589
6590         return (sp->rx_csum);
6591 }
6592
6593 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6594 {
6595         struct s2io_nic *sp = dev->priv;
6596
6597         if (data)
6598                 sp->rx_csum = 1;
6599         else
6600                 sp->rx_csum = 0;
6601
6602         return 0;
6603 }
6604
6605 static int s2io_get_eeprom_len(struct net_device *dev)
6606 {
6607         return (XENA_EEPROM_SPACE);
6608 }
6609
6610 static int s2io_get_sset_count(struct net_device *dev, int sset)
6611 {
6612         struct s2io_nic *sp = dev->priv;
6613
6614         switch (sset) {
6615         case ETH_SS_TEST:
6616                 return S2IO_TEST_LEN;
6617         case ETH_SS_STATS:
6618                 switch(sp->device_type) {
6619                 case XFRAME_I_DEVICE:
6620                         return XFRAME_I_STAT_LEN;
6621                 case XFRAME_II_DEVICE:
6622                         return XFRAME_II_STAT_LEN;
6623                 default:
6624                         return 0;
6625                 }
6626         default:
6627                 return -EOPNOTSUPP;
6628         }
6629 }
6630
6631 static void s2io_ethtool_get_strings(struct net_device *dev,
6632                                      u32 stringset, u8 * data)
6633 {
6634         int stat_size = 0;
6635         struct s2io_nic *sp = dev->priv;
6636
6637         switch (stringset) {
6638         case ETH_SS_TEST:
6639                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6640                 break;
6641         case ETH_SS_STATS:
6642                 stat_size = sizeof(ethtool_xena_stats_keys);
6643                 memcpy(data, &ethtool_xena_stats_keys,stat_size);
6644                 if(sp->device_type == XFRAME_II_DEVICE) {
6645                         memcpy(data + stat_size,
6646                                 &ethtool_enhanced_stats_keys,
6647                                 sizeof(ethtool_enhanced_stats_keys));
6648                         stat_size += sizeof(ethtool_enhanced_stats_keys);
6649                 }
6650
6651                 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6652                         sizeof(ethtool_driver_stats_keys));
6653         }
6654 }
6655
6656 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6657 {
6658         if (data)
6659                 dev->features |= NETIF_F_IP_CSUM;
6660         else
6661                 dev->features &= ~NETIF_F_IP_CSUM;
6662
6663         return 0;
6664 }
6665
6666 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6667 {
6668         return (dev->features & NETIF_F_TSO) != 0;
6669 }
6670 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6671 {
6672         if (data)
6673                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6674         else
6675                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6676
6677         return 0;
6678 }
6679
/* ethtool operations table registered for every s2io net device. */
static const struct ethtool_ops netdev_ethtool_ops = {
        .get_settings = s2io_ethtool_gset,
        .set_settings = s2io_ethtool_sset,
        .get_drvinfo = s2io_ethtool_gdrvinfo,
        .get_regs_len = s2io_ethtool_get_regs_len,
        .get_regs = s2io_ethtool_gregs,
        .get_link = ethtool_op_get_link,
        .get_eeprom_len = s2io_get_eeprom_len,
        .get_eeprom = s2io_ethtool_geeprom,
        .set_eeprom = s2io_ethtool_seeprom,
        .get_ringparam = s2io_ethtool_gringparam,
        .get_pauseparam = s2io_ethtool_getpause_data,
        .set_pauseparam = s2io_ethtool_setpause_data,
        .get_rx_csum = s2io_ethtool_get_rx_csum,
        .set_rx_csum = s2io_ethtool_set_rx_csum,
        .set_tx_csum = s2io_ethtool_op_set_tx_csum,
        .set_sg = ethtool_op_set_sg,
        .get_tso = s2io_ethtool_op_get_tso,
        .set_tso = s2io_ethtool_op_set_tso,
        .set_ufo = ethtool_op_set_ufo,
        .self_test = s2io_ethtool_test,
        .get_strings = s2io_ethtool_get_strings,
        .phys_id = s2io_ethtool_idnic,
        .get_ethtool_stats = s2io_get_ethtool_stats,
        .get_sset_count = s2io_get_sset_count,
};
6706
6707 /**
6708  *  s2io_ioctl - Entry point for the Ioctl
6709  *  @dev :  Device pointer.
6710  *  @ifr :  An IOCTL specefic structure, that can contain a pointer to
6711  *  a proprietary structure used to pass information to the driver.
6712  *  @cmd :  This is used to distinguish between the different commands that
6713  *  can be passed to the IOCTL functions.
6714  *  Description:
6715  *  Currently there are no special functionality supported in IOCTL, hence
6716  *  function always return EOPNOTSUPPORTED
6717  */
6718
/* No private ioctls are implemented; every command is rejected. */
static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        return -EOPNOTSUPP;
}
6723
6724 /**
6725  *  s2io_change_mtu - entry point to change MTU size for the device.
6726  *   @dev : device pointer.
6727  *   @new_mtu : the new MTU size for the device.
6728  *   Description: A driver entry point to change MTU size for the device.
6729  *   Before changing the MTU the device must be stopped.
6730  *  Return value:
6731  *   0 on success and an appropriate (-)ve integer as defined in errno.h
6732  *   file on failure.
6733  */
6734
static int s2io_change_mtu(struct net_device *dev, int new_mtu)
{
        struct s2io_nic *sp = dev->priv;
        int ret = 0;

        /* Reject MTUs outside the range the hardware supports. */
        if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
                DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
                          dev->name);
                return -EPERM;
        }

        dev->mtu = new_mtu;
        if (netif_running(dev)) {
                /* Device is up: full down/up cycle so the new MTU is
                 * programmed into the NIC and Rx buffers are re-sized. */
                s2io_stop_all_tx_queue(sp);
                s2io_card_down(sp);
                ret = s2io_card_up(sp);
                if (ret) {
                        /* NOTE(review): dev->mtu has already been changed
                         * at this point even though bring-up failed -
                         * confirm callers tolerate that. */
                        DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
                                  __FUNCTION__);
                        return ret;
                }
                s2io_wake_all_tx_queue(sp);
        } else { /* Device is down */
                /* Just program the max payload length register directly. */
                struct XENA_dev_config __iomem *bar0 = sp->bar0;
                u64 val64 = new_mtu;

                writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
        }

        return ret;
}
6766
6767 /**
6768  * s2io_set_link - Set the LInk status
6769  * @data: long pointer to device private structue
6770  * Description: Sets the link status for the adapter
6771  */
6772
static void s2io_set_link(struct work_struct *work)
{
        struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
        struct net_device *dev = nic->dev;
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        register u64 val64;
        u16 subid;

        rtnl_lock();

        /* Nothing to do if the interface went down before the work ran. */
        if (!netif_running(dev))
                goto out_unlock;

        if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
                /* The card is being reset, no point doing anything */
                goto out_unlock;
        }

        subid = nic->pdev->subsystem_device;
        if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
                /*
                 * Allow a small delay for the NICs self initiated
                 * cleanup to complete.
                 */
                msleep(100);
        }

        val64 = readq(&bar0->adapter_status);
        if (LINK_IS_UP(val64)) {
                /* Enable the adapter (and its LED/GPIO indicator) the
                 * first time the link comes up, but only once the device
                 * reports quiescence. */
                if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
                        if (verify_xena_quiescence(nic)) {
                                val64 = readq(&bar0->adapter_control);
                                val64 |= ADAPTER_CNTL_EN;
                                writeq(val64, &bar0->adapter_control);
                                if (CARDS_WITH_FAULTY_LINK_INDICATORS(
                                        nic->device_type, subid)) {
                                        /* Drive the link LED via GPIO on
                                         * cards with broken indicators;
                                         * read back to flush the write. */
                                        val64 = readq(&bar0->gpio_control);
                                        val64 |= GPIO_CTRL_GPIO_0;
                                        writeq(val64, &bar0->gpio_control);
                                        val64 = readq(&bar0->gpio_control);
                                } else {
                                        val64 |= ADAPTER_LED_ON;
                                        writeq(val64, &bar0->adapter_control);
                                }
                                nic->device_enabled_once = TRUE;
                        } else {
                                DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
                                DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
                                s2io_stop_all_tx_queue(nic);
                        }
                }
                val64 = readq(&bar0->adapter_control);
                val64 |= ADAPTER_LED_ON;
                writeq(val64, &bar0->adapter_control);
                s2io_link(nic, LINK_UP);
        } else {
                if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
                                                      subid)) {
                        val64 = readq(&bar0->gpio_control);
                        val64 &= ~GPIO_CTRL_GPIO_0;
                        writeq(val64, &bar0->gpio_control);
                        val64 = readq(&bar0->gpio_control);
                }
                /* turn off LED */
                val64 = readq(&bar0->adapter_control);
                val64 = val64 &(~ADAPTER_LED_ON);
                writeq(val64, &bar0->adapter_control);
                s2io_link(nic, LINK_DOWN);
        }
        clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));

out_unlock:
        rtnl_unlock();
}
6847
6848 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6849                                 struct buffAdd *ba,
6850                                 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6851                                 u64 *temp2, int size)
6852 {
6853         struct net_device *dev = sp->dev;
6854         struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
6855
6856         if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6857                 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6858                 /* allocate skb */
6859                 if (*skb) {
6860                         DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6861                         /*
6862                          * As Rx frame are not going to be processed,
6863                          * using same mapped address for the Rxd
6864                          * buffer pointer
6865                          */
6866                         rxdp1->Buffer0_ptr = *temp0;
6867                 } else {
6868                         *skb = dev_alloc_skb(size);
6869                         if (!(*skb)) {
6870                                 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6871                                 DBG_PRINT(INFO_DBG, "memory to allocate ");
6872                                 DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
6873                                 sp->mac_control.stats_info->sw_stat. \
6874                                         mem_alloc_fail_cnt++;
6875                                 return -ENOMEM ;
6876                         }
6877                         sp->mac_control.stats_info->sw_stat.mem_allocated
6878                                 += (*skb)->truesize;
6879                         /* storing the mapped addr in a temp variable
6880                          * such it will be used for next rxd whose
6881                          * Host Control is NULL
6882                          */
6883                         rxdp1->Buffer0_ptr = *temp0 =
6884                                 pci_map_single( sp->pdev, (*skb)->data,
6885                                         size - NET_IP_ALIGN,
6886                                         PCI_DMA_FROMDEVICE);
6887                         if( (rxdp1->Buffer0_ptr == 0) ||
6888                                 (rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
6889                                 goto memalloc_failed;
6890                         }
6891                         rxdp->Host_Control = (unsigned long) (*skb);
6892                 }
6893         } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6894                 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6895                 /* Two buffer Mode */
6896                 if (*skb) {
6897                         rxdp3->Buffer2_ptr = *temp2;
6898                         rxdp3->Buffer0_ptr = *temp0;
6899                         rxdp3->Buffer1_ptr = *temp1;
6900                 } else {
6901                         *skb = dev_alloc_skb(size);
6902                         if (!(*skb)) {
6903                                 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6904                                 DBG_PRINT(INFO_DBG, "memory to allocate ");
6905                                 DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
6906                                 sp->mac_control.stats_info->sw_stat. \
6907                                         mem_alloc_fail_cnt++;
6908                                 return -ENOMEM;
6909                         }
6910                         sp->mac_control.stats_info->sw_stat.mem_allocated
6911                                 += (*skb)->truesize;
6912                         rxdp3->Buffer2_ptr = *temp2 =
6913                                 pci_map_single(sp->pdev, (*skb)->data,
6914                                                dev->mtu + 4,
6915                                                PCI_DMA_FROMDEVICE);
6916                         if( (rxdp3->Buffer2_ptr == 0) ||
6917                                 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
6918                                 goto memalloc_failed;
6919                         }
6920                         rxdp3->Buffer0_ptr = *temp0 =
6921                                 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6922                                                 PCI_DMA_FROMDEVICE);
6923                         if( (rxdp3->Buffer0_ptr == 0) ||
6924                                 (rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
6925                                 pci_unmap_single (sp->pdev,
6926                                         (dma_addr_t)rxdp3->Buffer2_ptr,
6927                                         dev->mtu + 4, PCI_DMA_FROMDEVICE);
6928                                 goto memalloc_failed;
6929                         }
6930                         rxdp->Host_Control = (unsigned long) (*skb);
6931
6932                         /* Buffer-1 will be dummy buffer not used */
6933                         rxdp3->Buffer1_ptr = *temp1 =
6934                                 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6935                                                 PCI_DMA_FROMDEVICE);
6936                         if( (rxdp3->Buffer1_ptr == 0) ||
6937                                 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
6938                                 pci_unmap_single (sp->pdev,
6939                                         (dma_addr_t)rxdp3->Buffer0_ptr,
6940                                         BUF0_LEN, PCI_DMA_FROMDEVICE);
6941                                 pci_unmap_single (sp->pdev,
6942                                         (dma_addr_t)rxdp3->Buffer2_ptr,
6943                                         dev->mtu + 4, PCI_DMA_FROMDEVICE);
6944                                 goto memalloc_failed;
6945                         }
6946                 }
6947         }
6948         return 0;
6949         memalloc_failed:
6950                 stats->pci_map_fail_cnt++;
6951                 stats->mem_freed += (*skb)->truesize;
6952                 dev_kfree_skb(*skb);
6953                 return -ENOMEM;
6954 }
6955
6956 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6957                                 int size)
6958 {
6959         struct net_device *dev = sp->dev;
6960         if (sp->rxd_mode == RXD_MODE_1) {
6961                 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6962         } else if (sp->rxd_mode == RXD_MODE_3B) {
6963                 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6964                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6965                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6966         }
6967 }
6968
6969 static  int rxd_owner_bit_reset(struct s2io_nic *sp)
6970 {
6971         int i, j, k, blk_cnt = 0, size;
6972         struct mac_info * mac_control = &sp->mac_control;
6973         struct config_param *config = &sp->config;
6974         struct net_device *dev = sp->dev;
6975         struct RxD_t *rxdp = NULL;
6976         struct sk_buff *skb = NULL;
6977         struct buffAdd *ba = NULL;
6978         u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6979
6980         /* Calculate the size based on ring mode */
6981         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6982                 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6983         if (sp->rxd_mode == RXD_MODE_1)
6984                 size += NET_IP_ALIGN;
6985         else if (sp->rxd_mode == RXD_MODE_3B)
6986                 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6987
6988         for (i = 0; i < config->rx_ring_num; i++) {
6989                 blk_cnt = config->rx_cfg[i].num_rxd /
6990                         (rxd_count[sp->rxd_mode] +1);
6991
6992                 for (j = 0; j < blk_cnt; j++) {
6993                         for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6994                                 rxdp = mac_control->rings[i].
6995                                         rx_blocks[j].rxds[k].virt_addr;
6996                                 if(sp->rxd_mode == RXD_MODE_3B)
6997                                         ba = &mac_control->rings[i].ba[j][k];
6998                                 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6999                                                        &skb,(u64 *)&temp0_64,
7000                                                        (u64 *)&temp1_64,
7001                                                        (u64 *)&temp2_64,
7002                                                         size) == ENOMEM) {
7003                                         return 0;
7004                                 }
7005
7006                                 set_rxd_buffer_size(sp, rxdp, size);
7007                                 wmb();
7008                                 /* flip the Ownership bit to Hardware */
7009                                 rxdp->Control_1 |= RXD_OWN_XENA;
7010                         }
7011                 }
7012         }
7013         return 0;
7014
7015 }
7016
/*
 *  s2io_add_isr - register interrupt handler(s) for the adapter.
 *  @sp: device private structure.
 *  Description:
 *  Tries to enable MSI-X when configured, registering one handler per
 *  vector (ring vectors -> s2io_msix_ring_handle, alarm vectors ->
 *  s2io_msix_fifo_handle).  On any MSI-X failure the driver falls back
 *  to legacy INTA with the shared s2io_isr handler.
 *  Return value:
 *  0 on success, -1 if the INTA handler could not be registered.
 */
static int s2io_add_isr(struct s2io_nic * sp)
{
        int ret = 0;
        struct net_device *dev = sp->dev;
        int err = 0;

        if (sp->config.intr_type == MSI_X)
                ret = s2io_enable_msi_x(sp);
        if (ret) {
                /* MSI-X enable failed: silently fall back to INTA. */
                DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
                sp->config.intr_type = INTA;
        }

        /* Store the values of the MSIX table in the struct s2io_nic structure */
        store_xmsi_data(sp);

        /* After proper initialization of H/W, register ISR */
        if (sp->config.intr_type == MSI_X) {
                int i, msix_rx_cnt = 0;

                for (i = 0; i < sp->num_entries; i++) {
                        if (sp->s2io_entries[i].in_use == MSIX_FLG) {
                                if (sp->s2io_entries[i].type ==
                                        MSIX_RING_TYPE) {
                                        sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
                                                dev->name, i);
                                        err = request_irq(sp->entries[i].vector,
                                                s2io_msix_ring_handle, 0,
                                                sp->desc[i],
                                                sp->s2io_entries[i].arg);
                                } else if (sp->s2io_entries[i].type ==
                                        MSIX_ALARM_TYPE) {
                                        sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
                                        dev->name, i);
                                        err = request_irq(sp->entries[i].vector,
                                                s2io_msix_fifo_handle, 0,
                                                sp->desc[i],
                                                sp->s2io_entries[i].arg);

                                }
                                /* if either data or addr is zero print it. */
                                if (!(sp->msix_info[i].addr &&
                                        sp->msix_info[i].data)) {
                                        DBG_PRINT(ERR_DBG,
                                                "%s @Addr:0x%llx Data:0x%llx\n",
                                                sp->desc[i],
                                                (unsigned long long)
                                                sp->msix_info[i].addr,
                                                (unsigned long long)
                                                ntohl(sp->msix_info[i].data));
                                } else
                                        msix_rx_cnt++;
                                if (err) {
                                        /* Undo every MSI-X registration made
                                         * so far and fall back to INTA. */
                                        remove_msix_isr(sp);

                                        DBG_PRINT(ERR_DBG,
                                                "%s:MSI-X-%d registration "
                                                "failed\n", dev->name, i);

                                        DBG_PRINT(ERR_DBG,
                                                "%s: Defaulting to INTA\n",
                                                dev->name);
                                        sp->config.intr_type = INTA;
                                        break;
                                }
                                sp->s2io_entries[i].in_use =
                                        MSIX_REGISTERED_SUCCESS;
                        }
                }
                if (!err) {
                        printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
                                --msix_rx_cnt);
                        DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled"
                                                " through alarm vector\n");
                }
        }
        if (sp->config.intr_type == INTA) {
                err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
                                sp->name, dev);
                if (err) {
                        DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
                                  dev->name);
                        return -1;
                }
        }
        return 0;
}
7104 static void s2io_rem_isr(struct s2io_nic * sp)
7105 {
7106         if (sp->config.intr_type == MSI_X)
7107                 remove_msix_isr(sp);
7108         else
7109                 remove_inta_isr(sp);
7110 }
7111
/*
 *  do_s2io_card_down - bring the adapter down.
 *  @sp: device private structure.
 *  @do_io: non-zero to also touch the hardware (stop NIC, wait for
 *          quiescence, reset); zero to tear down software state only.
 *  Description:
 *  Stops the alarm timer, waits for any in-flight link task, disables
 *  napi, unregisters the ISR(s) and frees all Tx/Rx buffers.
 */
static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
{
        int cnt = 0;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        register u64 val64 = 0;
        struct config_param *config;
        config = &sp->config;

        /* Nothing to do if the card is already down. */
        if (!is_s2io_card_up(sp))
                return;

        del_timer_sync(&sp->alarm_timer);
        /* If s2io_set_link task is executing, wait till it completes. */
        while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
                msleep(50);
        }
        clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

        /* Disable napi */
        if (sp->config.napi) {
                int off = 0;
                if (config->intr_type ==  MSI_X) {
                        /* One napi context per Rx ring under MSI-X. */
                        for (; off < sp->config.rx_ring_num; off++)
                                napi_disable(&sp->mac_control.rings[off].napi);
                        }
                else
                        napi_disable(&sp->napi);
        }

        /* disable Tx and Rx traffic on the NIC */
        if (do_io)
                stop_nic(sp);

        s2io_rem_isr(sp);

        /* Check if the device is Quiescent and then Reset the NIC */
        while(do_io) {
                /* As per the HW requirement we need to replenish the
                 * receive buffer to avoid the ring bump. Since there is
                 * no intention of processing the Rx frame at this pointwe are
                 * just settting the ownership bit of rxd in Each Rx
                 * ring to HW and set the appropriate buffer size
                 * based on the ring mode
                 */
                rxd_owner_bit_reset(sp);

                val64 = readq(&bar0->adapter_status);
                if (verify_xena_quiescence(sp)) {
                        if(verify_pcc_quiescent(sp, sp->device_enabled_once))
                        break;
                }

                /* Retry up to 10 times (~500ms) before giving up. */
                msleep(50);
                cnt++;
                if (cnt == 10) {
                        DBG_PRINT(ERR_DBG,
                                  "s2io_close:Device not Quiescent ");
                        DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
                                  (unsigned long long) val64);
                        break;
                }
        }
        if (do_io)
                s2io_reset(sp);

        /* Free all Tx buffers */
        free_tx_buffers(sp);

        /* Free all Rx buffers */
        free_rx_buffers(sp);

        clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
7185
/* Bring the adapter down, including hardware stop and reset (do_io = 1). */
static void s2io_card_down(struct s2io_nic * sp)
{
        do_s2io_card_down(sp, 1);
}
7190
/*
 *  s2io_card_up - bring the adapter up.
 *  @sp: device private structure.
 *  Description:
 *  Initializes the hardware, fills the Rx rings, enables napi, restores
 *  the receive mode, starts the NIC, registers the ISR(s), arms the
 *  alarm timer and enables interrupts.
 *  Return value:
 *  0 on success; -EIO/-ENOMEM/-ENODEV (or init_nic()'s error) on failure.
 */
static int s2io_card_up(struct s2io_nic * sp)
{
        int i, ret = 0;
        struct mac_info *mac_control;
        struct config_param *config;
        struct net_device *dev = (struct net_device *) sp->dev;
        u16 interruptible;

        /* Initialize the H/W I/O registers */
        ret = init_nic(sp);
        if (ret != 0) {
                DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
                          dev->name);
                /* -EIO means the hardware is inaccessible; skip the reset. */
                if (ret != -EIO)
                        s2io_reset(sp);
                return ret;
        }

        /*
         * Initializing the Rx buffers. For now we are considering only 1
         * Rx ring and initializing buffers into 30 Rx blocks
         */
        mac_control = &sp->mac_control;
        config = &sp->config;

        for (i = 0; i < config->rx_ring_num; i++) {
                mac_control->rings[i].mtu = dev->mtu;
                ret = fill_rx_buffers(&mac_control->rings[i]);
                if (ret) {
                        DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
                                  dev->name);
                        s2io_reset(sp);
                        free_rx_buffers(sp);
                        return -ENOMEM;
                }
                DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
                          mac_control->rings[i].rx_bufs_left);
        }

        /* Initialise napi */
        if (config->napi) {
                int i;
                if (config->intr_type ==  MSI_X) {
                        /* One napi context per Rx ring under MSI-X. */
                        for (i = 0; i < sp->config.rx_ring_num; i++)
                                napi_enable(&sp->mac_control.rings[i].napi);
                } else {
                        napi_enable(&sp->napi);
                }
        }

        /* Maintain the state prior to the open */
        if (sp->promisc_flg)
                sp->promisc_flg = 0;
        if (sp->m_cast_flg) {
                sp->m_cast_flg = 0;
                sp->all_multi_pos= 0;
        }

        /* Setting its receive mode */
        s2io_set_multicast(dev);

        if (sp->lro) {
                /* Initialize max aggregatable pkts per session based on MTU */
                sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
                /* Check if we can use(if specified) user provided value */
                if (lro_max_pkts < sp->lro_max_aggr_per_sess)
                        sp->lro_max_aggr_per_sess = lro_max_pkts;
        }

        /* Enable Rx Traffic and interrupts on the NIC */
        if (start_nic(sp)) {
                DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
                s2io_reset(sp);
                free_rx_buffers(sp);
                return -ENODEV;
        }

        /* Add interrupt service routine */
        if (s2io_add_isr(sp) != 0) {
                if (sp->config.intr_type == MSI_X)
                        s2io_rem_isr(sp);
                s2io_reset(sp);
                free_rx_buffers(sp);
                return -ENODEV;
        }

        /* Arm the alarm timer to fire every half second. */
        S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

        /*  Enable select interrupts */
        en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
        if (sp->config.intr_type != INTA)
                en_dis_able_nic_intrs(sp, TX_TRAFFIC_INTR, ENABLE_INTRS);
        else {
                interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
                interruptible |= TX_PIC_INTR;
                en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
        }

        set_bit(__S2IO_STATE_CARD_UP, &sp->state);
        return 0;
}
7292
7293 /**
7294  * s2io_restart_nic - Resets the NIC.
7295  * @data : long pointer to the device private structure
7296  * Description:
7297  * This function is scheduled to be run by the s2io_tx_watchdog
7298  * function after 0.5 secs to reset the NIC. The idea is to reduce
7299  * the run time of the watch dog routine which is run holding a
7300  * spin lock.
7301  */
7302
7303 static void s2io_restart_nic(struct work_struct *work)
7304 {
7305         struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7306         struct net_device *dev = sp->dev;
7307
7308         rtnl_lock();
7309
7310         if (!netif_running(dev))
7311                 goto out_unlock;
7312
7313         s2io_card_down(sp);
7314         if (s2io_card_up(sp)) {
7315                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
7316                           dev->name);
7317         }
7318         s2io_wake_all_tx_queue(sp);
7319         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
7320                   dev->name);
7321 out_unlock:
7322         rtnl_unlock();
7323 }
7324
7325 /**
7326  *  s2io_tx_watchdog - Watchdog for transmit side.
7327  *  @dev : Pointer to net device structure
7328  *  Description:
7329  *  This function is triggered if the Tx Queue is stopped
7330  *  for a pre-defined amount of time when the Interface is still up.
7331  *  If the Interface is jammed in such a situation, the hardware is
7332  *  reset (by s2io_close) and restarted again (by s2io_open) to
7333  *  overcome any problem that might have been caused in the hardware.
7334  *  Return value:
7335  *  void
7336  */
7337
7338 static void s2io_tx_watchdog(struct net_device *dev)
7339 {
7340         struct s2io_nic *sp = dev->priv;
7341
7342         if (netif_carrier_ok(dev)) {
7343                 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
7344                 schedule_work(&sp->rst_timer_task);
7345                 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
7346         }
7347 }
7348
7349 /**
7350  *   rx_osm_handler - To perform some OS related operations on SKB.
7351  *   @sp: private member of the device structure,pointer to s2io_nic structure.
7352  *   @skb : the socket buffer pointer.
7353  *   @len : length of the packet
7354  *   @cksum : FCS checksum of the frame.
7355  *   @ring_no : the ring from which this RxD was extracted.
7356  *   Description:
7357  *   This function is called by the Rx interrupt serivce routine to perform
7358  *   some OS related operations on the SKB before passing it to the upper
7359  *   layers. It mainly checks if the checksum is OK, if so adds it to the
7360  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
7361  *   to the upper layer. If the checksum is wrong, it increments the Rx
7362  *   packet error count, frees the SKB and returns error.
7363  *   Return value:
7364  *   SUCCESS on success and -1 on failure.
7365  */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
        struct s2io_nic *sp = ring_data->nic;
        struct net_device *dev = (struct net_device *) ring_data->dev;
        /* The SKB pointer was stashed in the RxD's Host_Control field when
         * the buffer was posted to the adapter. */
        struct sk_buff *skb = (struct sk_buff *)
                ((unsigned long) rxdp->Host_Control);
        int ring_no = ring_data->ring_no;
        u16 l3_csum, l4_csum;
        unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
        struct lro *lro;
        u8 err_mask;

        skb->dev = dev;

        if (err) {
                /* Check for parity error */
                if (err & 0x1) {
                        sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
                }
                /* The transfer code sits in the upper bits of Control_1;
                 * map each code to its dedicated error counter. */
                err_mask = err >> 48;
                switch(err_mask) {
                        case 1:
                                sp->mac_control.stats_info->sw_stat.
                                rx_parity_err_cnt++;
                        break;

                        case 2:
                                sp->mac_control.stats_info->sw_stat.
                                rx_abort_cnt++;
                        break;

                        case 3:
                                sp->mac_control.stats_info->sw_stat.
                                rx_parity_abort_cnt++;
                        break;

                        case 4:
                                sp->mac_control.stats_info->sw_stat.
                                rx_rda_fail_cnt++;
                        break;

                        case 5:
                                sp->mac_control.stats_info->sw_stat.
                                rx_unkn_prot_cnt++;
                        break;

                        case 6:
                                sp->mac_control.stats_info->sw_stat.
                                rx_fcs_err_cnt++;
                        break;

                        case 7:
                                sp->mac_control.stats_info->sw_stat.
                                rx_buf_size_err_cnt++;
                        break;

                        case 8:
                                sp->mac_control.stats_info->sw_stat.
                                rx_rxd_corrupt_cnt++;
                        break;

                        case 15:
                                sp->mac_control.stats_info->sw_stat.
                                rx_unkn_err_cnt++;
                        break;
                }
                /*
                 * Drop the packet if bad transfer code. Exception being
                 * 0x5, which could be due to unsupported IPv6 extension header.
                 * In this case, we let stack handle the packet.
                 * Note that in this case, since checksum will be incorrect,
                 * stack will validate the same.
                 */
                if (err_mask != 0x5) {
                        DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
                                dev->name, err_mask);
                        sp->stats.rx_crc_errors++;
                        sp->mac_control.stats_info->sw_stat.mem_freed
                                += skb->truesize;
                        /* Free the SKB and detach it from the descriptor
                         * before bailing out. */
                        dev_kfree_skb(skb);
                        ring_data->rx_bufs_left -= 1;
                        rxdp->Host_Control = 0;
                        return 0;
                }
        }

        /* Updating statistics */
        ring_data->rx_packets++;
        rxdp->Host_Control = 0;
        /* Fix up the SKB length according to the descriptor (buffer) mode. */
        if (sp->rxd_mode == RXD_MODE_1) {
                int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

                ring_data->rx_bytes += len;
                skb_put(skb, len);

        } else if (sp->rxd_mode == RXD_MODE_3B) {
                int get_block = ring_data->rx_curr_get_info.block_index;
                int get_off = ring_data->rx_curr_get_info.offset;
                int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
                int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
                /* Buffer 0 is copied into the SKB headroom in front of the
                 * buffer-2 payload that the SKB already points at. */
                unsigned char *buff = skb_push(skb, buf0_len);

                struct buffAdd *ba = &ring_data->ba[get_block][get_off];
                ring_data->rx_bytes += buf0_len + buf2_len;
                memcpy(buff, ba->ba_0, buf0_len);
                skb_put(skb, buf2_len);
        }

        /* Trust the hardware L3/L4 checksum result only for TCP/UDP frames
         * that are not IP fragments (when LRO is on) and only if Rx checksum
         * offload is enabled. */
        if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!ring_data->lro) ||
            (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
            (sp->rx_csum)) {
                l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
                l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
                if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
                        /*
                         * NIC verifies if the Checksum of the received
                         * frame is Ok or not and accordingly returns
                         * a flag in the RxD.
                         */
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        if (ring_data->lro) {
                                u32 tcp_len;
                                u8 *tcp;
                                int ret = 0;

                                /* Classify this segment against the LRO
                                 * session table; the return code dictates
                                 * whether we aggregate, flush or pass up. */
                                ret = s2io_club_tcp_session(ring_data,
                                        skb->data, &tcp, &tcp_len, &lro,
                                        rxdp, sp);
                                switch (ret) {
                                        case 3: /* Begin anew */
                                                lro->parent = skb;
                                                goto aggregate;
                                        case 1: /* Aggregate */
                                        {
                                                lro_append_pkt(sp, lro,
                                                        skb, tcp_len);
                                                goto aggregate;
                                        }
                                        case 4: /* Flush session */
                                        {
                                                lro_append_pkt(sp, lro,
                                                        skb, tcp_len);
                                                queue_rx_frame(lro->parent,
                                                        lro->vlan_tag);
                                                clear_lro_session(lro);
                                                sp->mac_control.stats_info->
                                                    sw_stat.flush_max_pkts++;
                                                goto aggregate;
                                        }
                                        case 2: /* Flush both */
                                                lro->parent->data_len =
                                                        lro->frags_len;
                                                sp->mac_control.stats_info->
                                                     sw_stat.sending_both++;
                                                queue_rx_frame(lro->parent,
                                                        lro->vlan_tag);
                                                clear_lro_session(lro);
                                                goto send_up;
                                        case 0: /* sessions exceeded */
                                        case -1: /* non-TCP or not
                                                  * L2 aggregatable
                                                  */
                                        case 5: /*
                                                 * First pkt in session not
                                                 * L3/L4 aggregatable
                                                 */
                                                break;
                                        default:
                                                DBG_PRINT(ERR_DBG,
                                                        "%s: Samadhana!!\n",
                                                         __FUNCTION__);
                                                BUG();
                                }
                        }
                } else {
                        /*
                         * Packet with erroneous checksum, let the
                         * upper layers deal with it.
                         */
                        skb->ip_summed = CHECKSUM_NONE;
                }
        } else
                skb->ip_summed = CHECKSUM_NONE;

        sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
        /* Hand the (possibly LRO-merged) frame to the stack. */
send_up:
        queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
        dev->last_rx = jiffies;
        /* One fewer posted Rx buffer outstanding on this ring. */
aggregate:
        sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
        return SUCCESS;
}
7558
7559 /**
7560  *  s2io_link - stops/starts the Tx queue.
7561  *  @sp : private member of the device structure, which is a pointer to the
7562  *  s2io_nic structure.
7563  *  @link : inidicates whether link is UP/DOWN.
7564  *  Description:
7565  *  This function stops/starts the Tx queue depending on whether the link
7566  *  status of the NIC is is down or up. This is called by the Alarm
7567  *  interrupt handler whenever a link change interrupt comes up.
7568  *  Return value:
7569  *  void.
7570  */
7571
7572 static void s2io_link(struct s2io_nic * sp, int link)
7573 {
7574         struct net_device *dev = (struct net_device *) sp->dev;
7575
7576         if (link != sp->last_link_state) {
7577                 init_tti(sp, link);
7578                 if (link == LINK_DOWN) {
7579                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7580                         s2io_stop_all_tx_queue(sp);
7581                         netif_carrier_off(dev);
7582                         if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7583                         sp->mac_control.stats_info->sw_stat.link_up_time =
7584                                 jiffies - sp->start_time;
7585                         sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7586                 } else {
7587                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7588                         if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7589                         sp->mac_control.stats_info->sw_stat.link_down_time =
7590                                 jiffies - sp->start_time;
7591                         sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7592                         netif_carrier_on(dev);
7593                         s2io_wake_all_tx_queue(sp);
7594                 }
7595         }
7596         sp->last_link_state = link;
7597         sp->start_time = jiffies;
7598 }
7599
7600 /**
7601  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7602  *  @sp : private member of the device structure, which is a pointer to the
7603  *  s2io_nic structure.
7604  *  Description:
7605  *  This function initializes a few of the PCI and PCI-X configuration registers
7606  *  with recommended values.
7607  *  Return value:
7608  *  void
7609  */
7610
7611 static void s2io_init_pci(struct s2io_nic * sp)
7612 {
7613         u16 pci_cmd = 0, pcix_cmd = 0;
7614
7615         /* Enable Data Parity Error Recovery in PCI-X command register. */
7616         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7617                              &(pcix_cmd));
7618         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7619                               (pcix_cmd | 1));
7620         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7621                              &(pcix_cmd));
7622
7623         /* Set the PErr Response bit in PCI command register. */
7624         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7625         pci_write_config_word(sp->pdev, PCI_COMMAND,
7626                               (pci_cmd | PCI_COMMAND_PARITY));
7627         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7628 }
7629
7630 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7631         u8 *dev_multiq)
7632 {
7633         if ((tx_fifo_num > MAX_TX_FIFOS) ||
7634                 (tx_fifo_num < 1)) {
7635                 DBG_PRINT(ERR_DBG, "s2io: Requested number of tx fifos "
7636                         "(%d) not supported\n", tx_fifo_num);
7637
7638                 if (tx_fifo_num < 1)
7639                         tx_fifo_num = 1;
7640                 else
7641                         tx_fifo_num = MAX_TX_FIFOS;
7642
7643                 DBG_PRINT(ERR_DBG, "s2io: Default to %d ", tx_fifo_num);
7644                 DBG_PRINT(ERR_DBG, "tx fifos\n");
7645         }
7646
7647 #ifndef CONFIG_NETDEVICES_MULTIQUEUE
7648         if (multiq) {
7649                 DBG_PRINT(ERR_DBG, "s2io: Multiqueue support not enabled\n");
7650                 multiq = 0;
7651         }
7652 #endif
7653         if (multiq)
7654                 *dev_multiq = multiq;
7655
7656         if (tx_steering_type && (1 == tx_fifo_num)) {
7657                 if (tx_steering_type != TX_DEFAULT_STEERING)
7658                         DBG_PRINT(ERR_DBG,
7659                                 "s2io: Tx steering is not supported with "
7660                                 "one fifo. Disabling Tx steering.\n");
7661                 tx_steering_type = NO_STEERING;
7662         }
7663
7664         if ((tx_steering_type < NO_STEERING) ||
7665                 (tx_steering_type > TX_DEFAULT_STEERING)) {
7666                 DBG_PRINT(ERR_DBG, "s2io: Requested transmit steering not "
7667                          "supported\n");
7668                 DBG_PRINT(ERR_DBG, "s2io: Disabling transmit steering\n");
7669                 tx_steering_type = NO_STEERING;
7670         }
7671
7672         if (rx_ring_num > MAX_RX_RINGS) {
7673                 DBG_PRINT(ERR_DBG, "s2io: Requested number of rx rings not "
7674                          "supported\n");
7675                 DBG_PRINT(ERR_DBG, "s2io: Default to %d rx rings\n",
7676                         MAX_RX_RINGS);
7677                 rx_ring_num = MAX_RX_RINGS;
7678         }
7679
7680         if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7681                 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7682                           "Defaulting to INTA\n");
7683                 *dev_intr_type = INTA;
7684         }
7685
7686         if ((*dev_intr_type == MSI_X) &&
7687                         ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7688                         (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7689                 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7690                                         "Defaulting to INTA\n");
7691                 *dev_intr_type = INTA;
7692         }
7693
7694         if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7695                 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7696                 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7697                 rx_ring_mode = 1;
7698         }
7699         return SUCCESS;
7700 }
7701
7702 /**
7703  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7704  * or Traffic class respectively.
7705  * @nic: device private variable
7706  * Description: The function configures the receive steering to
7707  * desired receive ring.
7708  * Return Value:  SUCCESS on success and
7709  * '-1' on failure (endian settings incorrect).
7710  */
7711 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7712 {
7713         struct XENA_dev_config __iomem *bar0 = nic->bar0;
7714         register u64 val64 = 0;
7715
7716         if (ds_codepoint > 63)
7717                 return FAILURE;
7718
7719         val64 = RTS_DS_MEM_DATA(ring);
7720         writeq(val64, &bar0->rts_ds_mem_data);
7721
7722         val64 = RTS_DS_MEM_CTRL_WE |
7723                 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7724                 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7725
7726         writeq(val64, &bar0->rts_ds_mem_ctrl);
7727
7728         return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7729                                 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7730                                 S2IO_BIT_RESET);
7731 }
7732
7733 /**
7734  *  s2io_init_nic - Initialization of the adapter .
7735  *  @pdev : structure containing the PCI related information of the device.
7736  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7737  *  Description:
7738  *  The function initializes an adapter identified by the pci_dec structure.
7739  *  All OS related initialization including memory and device structure and
7740  *  initlaization of the device private variable is done. Also the swapper
7741  *  control register is initialized to enable read and write into the I/O
7742  *  registers of the device.
7743  *  Return value:
7744  *  returns 0 on success and negative on failure.
7745  */
7746
7747 static int __devinit
7748 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7749 {
7750         struct s2io_nic *sp;
7751         struct net_device *dev;
7752         int i, j, ret;
7753         int dma_flag = FALSE;
7754         u32 mac_up, mac_down;
7755         u64 val64 = 0, tmp64 = 0;
7756         struct XENA_dev_config __iomem *bar0 = NULL;
7757         u16 subid;
7758         struct mac_info *mac_control;
7759         struct config_param *config;
7760         int mode;
7761         u8 dev_intr_type = intr_type;
7762         u8 dev_multiq = 0;
7763         DECLARE_MAC_BUF(mac);
7764
7765         ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7766         if (ret)
7767                 return ret;
7768
7769         if ((ret = pci_enable_device(pdev))) {
7770                 DBG_PRINT(ERR_DBG,
7771                           "s2io_init_nic: pci_enable_device failed\n");
7772                 return ret;
7773         }
7774
7775         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
7776                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7777                 dma_flag = TRUE;
7778                 if (pci_set_consistent_dma_mask
7779                     (pdev, DMA_64BIT_MASK)) {
7780                         DBG_PRINT(ERR_DBG,
7781                                   "Unable to obtain 64bit DMA for \
7782                                         consistent allocations\n");
7783                         pci_disable_device(pdev);
7784                         return -ENOMEM;
7785                 }
7786         } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
7787                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7788         } else {
7789                 pci_disable_device(pdev);
7790                 return -ENOMEM;
7791         }
7792         if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
7793                 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
7794                 pci_disable_device(pdev);
7795                 return -ENODEV;
7796         }
7797 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
7798         if (dev_multiq)
7799                 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7800         else
7801 #endif
7802         dev = alloc_etherdev(sizeof(struct s2io_nic));
7803         if (dev == NULL) {
7804                 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7805                 pci_disable_device(pdev);
7806                 pci_release_regions(pdev);
7807                 return -ENODEV;
7808         }
7809
7810         pci_set_master(pdev);
7811         pci_set_drvdata(pdev, dev);
7812         SET_NETDEV_DEV(dev, &pdev->dev);
7813
7814         /*  Private member variable initialized to s2io NIC structure */
7815         sp = dev->priv;
7816         memset(sp, 0, sizeof(struct s2io_nic));
7817         sp->dev = dev;
7818         sp->pdev = pdev;
7819         sp->high_dma_flag = dma_flag;
7820         sp->device_enabled_once = FALSE;
7821         if (rx_ring_mode == 1)
7822                 sp->rxd_mode = RXD_MODE_1;
7823         if (rx_ring_mode == 2)
7824                 sp->rxd_mode = RXD_MODE_3B;
7825
7826         sp->config.intr_type = dev_intr_type;
7827
7828         if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7829                 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7830                 sp->device_type = XFRAME_II_DEVICE;
7831         else
7832                 sp->device_type = XFRAME_I_DEVICE;
7833
7834         sp->lro = lro_enable;
7835
7836         /* Initialize some PCI/PCI-X fields of the NIC. */
7837         s2io_init_pci(sp);
7838
7839         /*
7840          * Setting the device configuration parameters.
7841          * Most of these parameters can be specified by the user during
7842          * module insertion as they are module loadable parameters. If
7843          * these parameters are not not specified during load time, they
7844          * are initialized with default values.
7845          */
7846         mac_control = &sp->mac_control;
7847         config = &sp->config;
7848
7849         config->napi = napi;
7850         config->tx_steering_type = tx_steering_type;
7851
7852         /* Tx side parameters. */
7853         if (config->tx_steering_type == TX_PRIORITY_STEERING)
7854                 config->tx_fifo_num = MAX_TX_FIFOS;
7855         else
7856                 config->tx_fifo_num = tx_fifo_num;
7857
7858         /* Initialize the fifos used for tx steering */
7859         if (config->tx_fifo_num < 5) {
7860                         if (config->tx_fifo_num  == 1)
7861                                 sp->total_tcp_fifos = 1;
7862                         else
7863                                 sp->total_tcp_fifos = config->tx_fifo_num - 1;
7864                         sp->udp_fifo_idx = config->tx_fifo_num - 1;
7865                         sp->total_udp_fifos = 1;
7866                         sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7867         } else {
7868                 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7869                                                 FIFO_OTHER_MAX_NUM);
7870                 sp->udp_fifo_idx = sp->total_tcp_fifos;
7871                 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7872                 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7873         }
7874
7875         config->multiq = dev_multiq;
7876         for (i = 0; i < config->tx_fifo_num; i++) {
7877                 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7878                 config->tx_cfg[i].fifo_priority = i;
7879         }
7880
7881         /* mapping the QoS priority to the configured fifos */
7882         for (i = 0; i < MAX_TX_FIFOS; i++)
7883                 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7884
7885         /* map the hashing selector table to the configured fifos */
7886         for (i = 0; i < config->tx_fifo_num; i++)
7887                 sp->fifo_selector[i] = fifo_selector[i];
7888
7889
7890         config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7891         for (i = 0; i < config->tx_fifo_num; i++) {
7892                 config->tx_cfg[i].f_no_snoop =
7893                     (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7894                 if (config->tx_cfg[i].fifo_len < 65) {
7895                         config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7896                         break;
7897                 }
7898         }
7899         /* + 2 because one Txd for skb->data and one Txd for UFO */
7900         config->max_txds = MAX_SKB_FRAGS + 2;
7901
7902         /* Rx side parameters. */
7903         config->rx_ring_num = rx_ring_num;
7904         for (i = 0; i < config->rx_ring_num; i++) {
7905                 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7906                     (rxd_count[sp->rxd_mode] + 1);
7907                 config->rx_cfg[i].ring_priority = i;
7908                 mac_control->rings[i].rx_bufs_left = 0;
7909                 mac_control->rings[i].rxd_mode = sp->rxd_mode;
7910                 mac_control->rings[i].rxd_count = rxd_count[sp->rxd_mode];
7911                 mac_control->rings[i].pdev = sp->pdev;
7912                 mac_control->rings[i].dev = sp->dev;
7913         }
7914
7915         for (i = 0; i < rx_ring_num; i++) {
7916                 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7917                 config->rx_cfg[i].f_no_snoop =
7918                     (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7919         }
7920
7921         /*  Setting Mac Control parameters */
7922         mac_control->rmac_pause_time = rmac_pause_time;
7923         mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7924         mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7925
7926
7927         /*  initialize the shared memory used by the NIC and the host */
7928         if (init_shared_mem(sp)) {
7929                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7930                           dev->name);
7931                 ret = -ENOMEM;
7932                 goto mem_alloc_failed;
7933         }
7934
7935         sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7936                                      pci_resource_len(pdev, 0));
7937         if (!sp->bar0) {
7938                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7939                           dev->name);
7940                 ret = -ENOMEM;
7941                 goto bar0_remap_failed;
7942         }
7943
7944         sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7945                                      pci_resource_len(pdev, 2));
7946         if (!sp->bar1) {
7947                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7948                           dev->name);
7949                 ret = -ENOMEM;
7950                 goto bar1_remap_failed;
7951         }
7952
7953         dev->irq = pdev->irq;
7954         dev->base_addr = (unsigned long) sp->bar0;
7955
7956         /* Initializing the BAR1 address as the start of the FIFO pointer. */
7957         for (j = 0; j < MAX_TX_FIFOS; j++) {
7958                 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7959                     (sp->bar1 + (j * 0x00020000));
7960         }
7961
7962         /*  Driver entry points */
7963         dev->open = &s2io_open;
7964         dev->stop = &s2io_close;
7965         dev->hard_start_xmit = &s2io_xmit;
7966         dev->get_stats = &s2io_get_stats;
7967         dev->set_multicast_list = &s2io_set_multicast;
7968         dev->do_ioctl = &s2io_ioctl;
7969         dev->set_mac_address = &s2io_set_mac_addr;
7970         dev->change_mtu = &s2io_change_mtu;
7971         SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7972         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7973         dev->vlan_rx_register = s2io_vlan_rx_register;
7974         dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
7975
7976         /*
7977          * will use eth_mac_addr() for  dev->set_mac_address
7978          * mac address will be set every time dev->open() is called
7979          */
7980 #ifdef CONFIG_NET_POLL_CONTROLLER
7981         dev->poll_controller = s2io_netpoll;
7982 #endif
7983
7984         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7985         if (sp->high_dma_flag == TRUE)
7986                 dev->features |= NETIF_F_HIGHDMA;
7987         dev->features |= NETIF_F_TSO;
7988         dev->features |= NETIF_F_TSO6;
7989         if ((sp->device_type & XFRAME_II_DEVICE) && (ufo))  {
7990                 dev->features |= NETIF_F_UFO;
7991                 dev->features |= NETIF_F_HW_CSUM;
7992         }
7993 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
7994         if (config->multiq)
7995                 dev->features |= NETIF_F_MULTI_QUEUE;
7996 #endif
7997         dev->tx_timeout = &s2io_tx_watchdog;
7998         dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7999         INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
8000         INIT_WORK(&sp->set_link_task, s2io_set_link);
8001
8002         pci_save_state(sp->pdev);
8003
8004         /* Setting swapper control on the NIC, for proper reset operation */
8005         if (s2io_set_swapper(sp)) {
8006                 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
8007                           dev->name);
8008                 ret = -EAGAIN;
8009                 goto set_swap_failed;
8010         }
8011
8012         /* Verify if the Herc works on the slot its placed into */
8013         if (sp->device_type & XFRAME_II_DEVICE) {
8014                 mode = s2io_verify_pci_mode(sp);
8015                 if (mode < 0) {
8016                         DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
8017                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
8018                         ret = -EBADSLT;
8019                         goto set_swap_failed;
8020                 }
8021         }
8022
8023         if (sp->config.intr_type == MSI_X) {
8024                 sp->num_entries = config->rx_ring_num + 1;
8025                 ret = s2io_enable_msi_x(sp);
8026
8027                 if (!ret) {
8028                         ret = s2io_test_msi(sp);
8029                         /* rollback MSI-X, will re-enable during add_isr() */
8030                         remove_msix_isr(sp);
8031                 }
8032                 if (ret) {
8033
8034                         DBG_PRINT(ERR_DBG,
8035                           "%s: MSI-X requested but failed to enable\n",
8036                           dev->name);
8037                         sp->config.intr_type = INTA;
8038                 }
8039         }
8040
8041         if (config->intr_type ==  MSI_X) {
8042                 for (i = 0; i < config->rx_ring_num ; i++)
8043                         netif_napi_add(dev, &mac_control->rings[i].napi,
8044                                 s2io_poll_msix, 64);
8045         } else {
8046                 netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
8047         }
8048
8049         /* Not needed for Herc */
8050         if (sp->device_type & XFRAME_I_DEVICE) {
8051                 /*
8052                  * Fix for all "FFs" MAC address problems observed on
8053                  * Alpha platforms
8054                  */
8055                 fix_mac_address(sp);
8056                 s2io_reset(sp);
8057         }
8058
8059         /*
8060          * MAC address initialization.
8061          * For now only one mac address will be read and used.
8062          */
8063         bar0 = sp->bar0;
8064         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
8065             RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
8066         writeq(val64, &bar0->rmac_addr_cmd_mem);
8067         wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
8068                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
8069         tmp64 = readq(&bar0->rmac_addr_data0_mem);
8070         mac_down = (u32) tmp64;
8071         mac_up = (u32) (tmp64 >> 32);
8072
8073         sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
8074         sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
8075         sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
8076         sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
8077         sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
8078         sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
8079
8080         /*  Set the factory defined MAC address initially   */
8081         dev->addr_len = ETH_ALEN;
8082         memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
8083         memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
8084
8085         /* initialize number of multicast & unicast MAC entries variables */
8086         if (sp->device_type == XFRAME_I_DEVICE) {
8087                 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
8088                 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
8089                 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
8090         } else if (sp->device_type == XFRAME_II_DEVICE) {
8091                 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
8092                 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
8093                 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
8094         }
8095
8096         /* store mac addresses from CAM to s2io_nic structure */
8097         do_s2io_store_unicast_mc(sp);
8098
8099         /* Configure MSIX vector for number of rings configured plus one */
8100         if ((sp->device_type == XFRAME_II_DEVICE) &&
8101                 (config->intr_type == MSI_X))
8102                 sp->num_entries = config->rx_ring_num + 1;
8103
8104          /* Store the values of the MSIX table in the s2io_nic structure */
8105         store_xmsi_data(sp);
8106         /* reset Nic and bring it to known state */
8107         s2io_reset(sp);
8108
8109         /*
8110          * Initialize link state flags
8111          * and the card state parameter
8112          */
8113         sp->state = 0;
8114
8115         /* Initialize spinlocks */
8116         for (i = 0; i < sp->config.tx_fifo_num; i++)
8117                 spin_lock_init(&mac_control->fifos[i].tx_lock);
8118
8119         /*
8120          * SXE-002: Configure link and activity LED to init state
8121          * on driver load.
8122          */
8123         subid = sp->pdev->subsystem_device;
8124         if ((subid & 0xFF) >= 0x07) {
8125                 val64 = readq(&bar0->gpio_control);
8126                 val64 |= 0x0000800000000000ULL;
8127                 writeq(val64, &bar0->gpio_control);
8128                 val64 = 0x0411040400000000ULL;
8129                 writeq(val64, (void __iomem *) bar0 + 0x2700);
8130                 val64 = readq(&bar0->gpio_control);
8131         }
8132
8133         sp->rx_csum = 1;        /* Rx chksum verify enabled by default */
8134
8135         if (register_netdev(dev)) {
8136                 DBG_PRINT(ERR_DBG, "Device registration failed\n");
8137                 ret = -ENODEV;
8138                 goto register_failed;
8139         }
8140         s2io_vpd_read(sp);
8141         DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
8142         DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
8143                   sp->product_name, pdev->revision);
8144         DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8145                   s2io_driver_version);
8146         DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n",
8147                   dev->name, print_mac(mac, dev->dev_addr));
8148         DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
8149         if (sp->device_type & XFRAME_II_DEVICE) {
8150                 mode = s2io_print_pci_mode(sp);
8151                 if (mode < 0) {
8152                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
8153                         ret = -EBADSLT;
8154                         unregister_netdev(dev);
8155                         goto set_swap_failed;
8156                 }
8157         }
8158         switch(sp->rxd_mode) {
8159                 case RXD_MODE_1:
8160                     DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8161                                                 dev->name);
8162                     break;
8163                 case RXD_MODE_3B:
8164                     DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8165                                                 dev->name);
8166                     break;
8167         }
8168
8169         switch (sp->config.napi) {
8170         case 0:
8171                 DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8172                 break;
8173         case 1:
8174                 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8175                 break;
8176         }
8177
8178         DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8179                 sp->config.tx_fifo_num);
8180
8181         DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8182                   sp->config.rx_ring_num);
8183
8184         switch(sp->config.intr_type) {
8185                 case INTA:
8186                     DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8187                     break;
8188                 case MSI_X:
8189                     DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8190                     break;
8191         }
8192         if (sp->config.multiq) {
8193         for (i = 0; i < sp->config.tx_fifo_num; i++)
8194                 mac_control->fifos[i].multiq = config->multiq;
8195                 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8196                         dev->name);
8197         } else
8198                 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8199                         dev->name);
8200
8201         switch (sp->config.tx_steering_type) {
8202         case NO_STEERING:
8203                 DBG_PRINT(ERR_DBG, "%s: No steering enabled for"
8204                         " transmit\n", dev->name);
8205                         break;
8206         case TX_PRIORITY_STEERING:
8207                 DBG_PRINT(ERR_DBG, "%s: Priority steering enabled for"
8208                         " transmit\n", dev->name);
8209                 break;
8210         case TX_DEFAULT_STEERING:
8211                 DBG_PRINT(ERR_DBG, "%s: Default steering enabled for"
8212                         " transmit\n", dev->name);
8213         }
8214
8215         if (sp->lro)
8216                 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8217                           dev->name);
8218         if (ufo)
8219                 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
8220                                         " enabled\n", dev->name);
8221         /* Initialize device name */
8222         sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
8223
8224         /*
8225          * Make Link state as off at this point, when the Link change
8226          * interrupt comes the state will be automatically changed to
8227          * the right state.
8228          */
8229         netif_carrier_off(dev);
8230
8231         return 0;
8232
8233       register_failed:
8234       set_swap_failed:
8235         iounmap(sp->bar1);
8236       bar1_remap_failed:
8237         iounmap(sp->bar0);
8238       bar0_remap_failed:
8239       mem_alloc_failed:
8240         free_shared_mem(sp);
8241         pci_disable_device(pdev);
8242         pci_release_regions(pdev);
8243         pci_set_drvdata(pdev, NULL);
8244         free_netdev(dev);
8245
8246         return ret;
8247 }
8248
/**
 * s2io_rem_nic - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the Pci subsystem to release a
 * PCI device and free up all resource held up by the device. This could
 * be in response to a Hot plug event or when the driver is to be removed
 * from memory.
 */

static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
        struct net_device *dev =
            (struct net_device *) pci_get_drvdata(pdev);
        struct s2io_nic *sp;

        /* Nothing to tear down if probe never attached a netdev */
        if (dev == NULL) {
                DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
                return;
        }

        /* Let any queued work items (rst_timer_task, set_link_task) drain */
        flush_scheduled_work();

        sp = dev->priv;
        unregister_netdev(dev);

        /* Release resources in reverse order of acquisition in probe */
        free_shared_mem(sp);
        iounmap(sp->bar0);
        iounmap(sp->bar1);
        pci_release_regions(pdev);
        /* Clear driver data so no stale pointer survives the remove */
        pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
        pci_disable_device(pdev);
}
8282
/**
 * s2io_starter - Entry point for the driver
 * Description: This function is the entry point for the driver. It verifies
 * the module loadable parameters and initializes PCI configuration space.
 */

static int __init s2io_starter(void)
{
        /* Hand control to the PCI core; per-device setup runs at probe time */
        return pci_register_driver(&s2io_driver);
}
8293
/**
 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
 */

static __exit void s2io_closer(void)
{
        pci_unregister_driver(&s2io_driver);
        DBG_PRINT(INIT_DBG, "cleanup done\n");
}

module_init(s2io_starter);
module_exit(s2io_closer);
8307
8308 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8309                 struct tcphdr **tcp, struct RxD_t *rxdp,
8310                 struct s2io_nic *sp)
8311 {
8312         int ip_off;
8313         u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8314
8315         if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8316                 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
8317                           __FUNCTION__);
8318                 return -1;
8319         }
8320
8321         /* Checking for DIX type or DIX type with VLAN */
8322         if ((l2_type == 0)
8323                 || (l2_type == 4)) {
8324                 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8325                 /*
8326                  * If vlan stripping is disabled and the frame is VLAN tagged,
8327                  * shift the offset by the VLAN header size bytes.
8328                  */
8329                 if ((!vlan_strip_flag) &&
8330                         (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8331                         ip_off += HEADER_VLAN_SIZE;
8332         } else {
8333                 /* LLC, SNAP etc are considered non-mergeable */
8334                 return -1;
8335         }
8336
8337         *ip = (struct iphdr *)((u8 *)buffer + ip_off);
8338         ip_len = (u8)((*ip)->ihl);
8339         ip_len <<= 2;
8340         *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8341
8342         return 0;
8343 }
8344
8345 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8346                                   struct tcphdr *tcp)
8347 {
8348         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8349         if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
8350            (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
8351                 return -1;
8352         return 0;
8353 }
8354
8355 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8356 {
8357         return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
8358 }
8359
/* Start a new LRO session from the first frame's L2/L3/L4 headers. */
static void initiate_new_session(struct lro *lro, u8 *l2h,
        struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag)
{
        DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
        lro->l2h = l2h;
        lro->iph = ip;
        lro->tcph = tcp;
        /* Next in-order sequence number expected after this payload */
        lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
        lro->tcp_ack = tcp->ack_seq;
        lro->sg_num = 1;
        lro->total_len = ntohs(ip->tot_len);
        lro->frags_len = 0;
        lro->vlan_tag = vlan_tag;
        /*
         * check if we saw TCP timestamp. Other consistency checks have
         * already been done.
         */
        if (tcp->doff == 8) {
                __be32 *ptr;
                /* Options begin immediately after the fixed TCP header */
                ptr = (__be32 *)(tcp+1);
                lro->saw_ts = 1;
                lro->cur_tsval = ntohl(*(ptr+1));
                lro->cur_tsecr = *(ptr+2);
        }
        lro->in_use = 1;
}
8386
/*
 * Fold the aggregated session state back into the first frame's IP and
 * TCP headers before the merged super-frame is handed to the stack.
 */
static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
{
        struct iphdr *ip = lro->iph;
        struct tcphdr *tcp = lro->tcph;
        __sum16 nchk;
        struct stat_block *statinfo = sp->mac_control.stats_info;
        DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);

        /* Update L3 header: new total length, then recompute checksum */
        ip->tot_len = htons(lro->total_len);
        ip->check = 0;
        nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
        ip->check = nchk;

        /* Update L4 header with the latest ack and window seen */
        tcp->ack_seq = lro->tcp_ack;
        tcp->window = lro->window;

        /* Update tsecr field if this session has timestamps enabled */
        if (lro->saw_ts) {
                __be32 *ptr = (__be32 *)(tcp + 1);
                *(ptr+2) = lro->cur_tsecr;
        }

        /* Update counters required for calculation of
         * average no. of packets aggregated.
         */
        statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
        statinfo->sw_stat.num_aggregations++;
}
8417
/* Account one more in-order TCP segment into an existing LRO session. */
static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
                struct tcphdr *tcp, u32 l4_pyld)
{
        DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
        /* Grow the merged frame by this segment's TCP payload */
        lro->total_len += l4_pyld;
        lro->frags_len += l4_pyld;
        lro->tcp_next_seq += l4_pyld;
        lro->sg_num++;

        /* Update ack seq no. and window ad(from this pkt) in LRO object */
        lro->tcp_ack = tcp->ack_seq;
        lro->window = tcp->window;

        if (lro->saw_ts) {
                __be32 *ptr;
                /* Update tsecr and tsval from this packet */
                ptr = (__be32 *)(tcp+1);
                lro->cur_tsval = ntohl(*(ptr+1));
                lro->cur_tsecr = *(ptr + 2);
        }
}
8439
/*
 * Decide whether a TCP segment may be merged at L3/L4.  Returns 0 if
 * mergeable, -1 if it must be sent up (and any matching session flushed).
 * @l_lro may be NULL when probing a segment before opening a new session.
 */
static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
                                    struct tcphdr *tcp, u32 tcp_pyld_len)
{
        u8 *ptr;

        DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);

        if (!tcp_pyld_len) {
                /* Runt frame or a pure ack */
                return -1;
        }

        if (ip->ihl != 5) /* IP has options */
                return -1;

        /* If we see CE codepoint in IP header, packet is not mergeable */
        if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
                return -1;

        /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
        if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
                                    tcp->ece || tcp->cwr || !tcp->ack) {
                /*
                 * Currently recognize only the ack control word and
                 * any other control field being set would result in
                 * flushing the LRO session
                 */
                return -1;
        }

        /*
         * Allow only one TCP timestamp option. Don't aggregate if
         * any other options are detected.
         */
        if (tcp->doff != 5 && tcp->doff != 8)
                return -1;

        if (tcp->doff == 8) {
                ptr = (u8 *)(tcp + 1);
                /* Skip NOP padding in front of the timestamp option */
                while (*ptr == TCPOPT_NOP)
                        ptr++;
                if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
                        return -1;

                /* Ensure timestamp value increases monotonically */
                if (l_lro)
                        if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
                                return -1;

                /* timestamp echo reply should be non-zero */
                if (*((__be32 *)(ptr+6)) == 0)
                        return -1;
        }

        return 0;
}
8496
/*
 * Classify an incoming TCP frame against the per-ring LRO sessions.
 * Return codes consumed by the rx path:
 *   0 - all sessions in use; frame sent up unmerged (*lro = NULL)
 *   1 - aggregated into an existing session
 *   2 - out-of-order or unmergeable; flush both session and frame
 *   3 - new session initiated for this frame
 *   4 - aggregated and session hit lro_max_aggr_per_sess; flush it
 *   5 - frame not L3/L4 aggregatable; send it up as-is
 */
static int
s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
        u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp,
        struct s2io_nic *sp)
{
        struct iphdr *ip;
        struct tcphdr *tcph;
        int ret = 0, i;
        u16 vlan_tag = 0;

        /* Frames that fail the L2 check are passed up unmodified */
        if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
                                         rxdp, sp))) {
                DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
                          ip->saddr, ip->daddr);
        } else
                return ret;

        vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
        tcph = (struct tcphdr *)*tcp;
        *tcp_len = get_l4_pyld_length(ip, tcph);
        /* First pass: look for an active session matching this 4-tuple */
        for (i=0; i<MAX_LRO_SESSIONS; i++) {
                struct lro *l_lro = &ring_data->lro0_n[i];
                if (l_lro->in_use) {
                        if (check_for_socket_match(l_lro, ip, tcph))
                                continue;
                        /* Sock pair matched */
                        *lro = l_lro;

                        if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
                                DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
                                          "0x%x, actual 0x%x\n", __FUNCTION__,
                                          (*lro)->tcp_next_seq,
                                          ntohl(tcph->seq));

                                sp->mac_control.stats_info->
                                   sw_stat.outof_sequence_pkts++;
                                ret = 2;
                                break;
                        }

                        if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
                                ret = 1; /* Aggregate */
                        else
                                ret = 2; /* Flush both */
                        break;
                }
        }

        if (ret == 0) {
                /* Before searching for available LRO objects,
                 * check if the pkt is L3/L4 aggregatable. If not
                 * don't create new LRO session. Just send this
                 * packet up.
                 */
                if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
                        return 5;
                }

                /* Second pass: claim a free session slot for a new flow */
                for (i=0; i<MAX_LRO_SESSIONS; i++) {
                        struct lro *l_lro = &ring_data->lro0_n[i];
                        if (!(l_lro->in_use)) {
                                *lro = l_lro;
                                ret = 3; /* Begin anew */
                                break;
                        }
                }
        }

        if (ret == 0) { /* sessions exceeded */
                DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
                          __FUNCTION__);
                *lro = NULL;
                return ret;
        }

        /* Act on the classification decided above */
        switch (ret) {
                case 3:
                        initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
                                                                vlan_tag);
                        break;
                case 2:
                        update_L3L4_header(sp, *lro);
                        break;
                case 1:
                        aggregate_new_rx(*lro, ip, tcph, *tcp_len);
                        if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
                                update_L3L4_header(sp, *lro);
                                ret = 4; /* Flush the LRO */
                        }
                        break;
                default:
                        DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
                                __FUNCTION__);
                        break;
        }

        return ret;
}
8595
8596 static void clear_lro_session(struct lro *lro)
8597 {
8598         static u16 lro_struct_size = sizeof(struct lro);
8599
8600         memset(lro, 0, lro_struct_size);
8601 }
8602
8603 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8604 {
8605         struct net_device *dev = skb->dev;
8606         struct s2io_nic *sp = dev->priv;
8607
8608         skb->protocol = eth_type_trans(skb, dev);
8609         if (sp->vlgrp && vlan_tag
8610                 && (vlan_strip_flag)) {
8611                 /* Queueing the vlan frame to the upper layer */
8612                 if (sp->config.napi)
8613                         vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
8614                 else
8615                         vlan_hwaccel_rx(skb, sp->vlgrp, vlan_tag);
8616         } else {
8617                 if (sp->config.napi)
8618                         netif_receive_skb(skb);
8619                 else
8620                         netif_rx(skb);
8621         }
8622 }
8623
/*
 * Chain an in-order segment's TCP payload onto the session's parent skb
 * via the frag_list, accounting the new length on the head skb.
 */
static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
                           struct sk_buff *skb,
                           u32 tcp_len)
{
        struct sk_buff *first = lro->parent;

        /* The head skb carries the totals for the whole merged frame */
        first->len += tcp_len;
        first->data_len = lro->frags_len;
        /* Strip everything but the TCP payload from the new segment */
        skb_pull(skb, (skb->len - tcp_len));
        if (skb_shinfo(first)->frag_list)
                lro->last_frag->next = skb;
        else
                skb_shinfo(first)->frag_list = skb;
        first->truesize += skb->truesize;
        lro->last_frag = skb;
        sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
        return;
}
8642
/**
 * s2io_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
                                               pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct s2io_nic *sp = netdev->priv;

        /* Stop the stack from using the device while recovery runs */
        netif_device_detach(netdev);

        if (netif_running(netdev)) {
                /* Bring down the card, while avoiding PCI I/O */
                do_s2io_card_down(sp, 0);
        }
        pci_disable_device(pdev);

        /* Ask the PCI core to reset the slot before resuming */
        return PCI_ERS_RESULT_NEED_RESET;
}
8667
/**
 * s2io_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct s2io_nic *sp = netdev->priv;

        if (pci_enable_device(pdev)) {
                printk(KERN_ERR "s2io: "
                       "Cannot re-enable PCI device after reset.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }

        /* Restore bus mastering and put the NIC back in a known state */
        pci_set_master(pdev);
        s2io_reset(sp);

        return PCI_ERS_RESULT_RECOVERED;
}
8693
8694 /**
8695  * s2io_io_resume - called when traffic can start flowing again.
8696  * @pdev: Pointer to PCI device
8697  *
8698  * This callback is called when the error recovery driver tells
8699  * us that its OK to resume normal operation.
8700  */
8701 static void s2io_io_resume(struct pci_dev *pdev)
8702 {
8703         struct net_device *netdev = pci_get_drvdata(pdev);
8704         struct s2io_nic *sp = netdev->priv;
8705
8706         if (netif_running(netdev)) {
8707                 if (s2io_card_up(sp)) {
8708                         printk(KERN_ERR "s2io: "
8709                                "Can't bring device back up after reset.\n");
8710                         return;
8711                 }
8712
8713                 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8714                         s2io_card_down(sp);
8715                         printk(KERN_ERR "s2io: "
8716                                "Can't resetore mac addr after reset.\n");
8717                         return;
8718                 }
8719         }
8720
8721         netif_device_attach(netdev);
8722         netif_wake_queue(netdev);
8723 }