Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm
[linux-2.6] / drivers / net / s2io.c
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2007 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *              values are 1, 2 and 3.
36  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     1(MSI), 2(MSI_X). Default value is '0(INTA)'
41  * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42  *     Possible values '1' for enable '0' for disable. Default is '0'
43  * lro_max_pkts: This parameter defines maximum number of packets can be
44  *     aggregated as a single large packet
45  * napi: This parameter used to enable/disable NAPI (polling Rx)
46  *     Possible values '1' for enable and '0' for disable. Default is '1'
47  * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48  *      Possible values '1' for enable and '0' for disable. Default is '0'
49  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50  *                 Possible values '1' for enable , '0' for disable.
51  *                 Default is '2' - which means disable in promisc mode
52  *                 and enable in non-promiscuous mode.
53  ************************************************************************/
54
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include <linux/init.h>
66 #include <linux/delay.h>
67 #include <linux/stddef.h>
68 #include <linux/ioctl.h>
69 #include <linux/timex.h>
70 #include <linux/ethtool.h>
71 #include <linux/workqueue.h>
72 #include <linux/if_vlan.h>
73 #include <linux/ip.h>
74 #include <linux/tcp.h>
75 #include <net/tcp.h>
76
77 #include <asm/system.h>
78 #include <asm/uaccess.h>
79 #include <asm/io.h>
80 #include <asm/div64.h>
81 #include <asm/irq.h>
82
83 /* local include */
84 #include "s2io.h"
85 #include "s2io-regs.h"
86
87 #define DRV_VERSION "2.0.22.1"
88
89 /* S2io Driver name & version. */
90 static char s2io_driver_name[] = "Neterion";
91 static char s2io_driver_version[] = DRV_VERSION;
92
93 static int rxd_size[4] = {32,48,48,64};
94 static int rxd_count[4] = {127,85,85,63};
95
96 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
97 {
98         int ret;
99
100         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
102
103         return ret;
104 }
105
106 /*
107  * Cards with following subsystem_id have a link state indication
108  * problem, 600B, 600C, 600D, 640B, 640C and 640D.
109  * macro below identifies these cards given the subsystem_id.
110  */
111 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
112         (dev_type == XFRAME_I_DEVICE) ?                 \
113                 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
114                  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
115
116 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
117                                       ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
118 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
119 #define PANIC   1
120 #define LOW     2
121 static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
122 {
123         struct mac_info *mac_control;
124
125         mac_control = &sp->mac_control;
126         if (rxb_size <= rxd_count[sp->rxd_mode])
127                 return PANIC;
128         else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
129                 return  LOW;
130         return 0;
131 }
132
133 /* Ethtool related variables and Macros. */
134 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
135         "Register test\t(offline)",
136         "Eeprom test\t(offline)",
137         "Link test\t(online)",
138         "RLDRAM test\t(offline)",
139         "BIST Test\t(offline)"
140 };
141
142 static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
143         {"tmac_frms"},
144         {"tmac_data_octets"},
145         {"tmac_drop_frms"},
146         {"tmac_mcst_frms"},
147         {"tmac_bcst_frms"},
148         {"tmac_pause_ctrl_frms"},
149         {"tmac_ttl_octets"},
150         {"tmac_ucst_frms"},
151         {"tmac_nucst_frms"},
152         {"tmac_any_err_frms"},
153         {"tmac_ttl_less_fb_octets"},
154         {"tmac_vld_ip_octets"},
155         {"tmac_vld_ip"},
156         {"tmac_drop_ip"},
157         {"tmac_icmp"},
158         {"tmac_rst_tcp"},
159         {"tmac_tcp"},
160         {"tmac_udp"},
161         {"rmac_vld_frms"},
162         {"rmac_data_octets"},
163         {"rmac_fcs_err_frms"},
164         {"rmac_drop_frms"},
165         {"rmac_vld_mcst_frms"},
166         {"rmac_vld_bcst_frms"},
167         {"rmac_in_rng_len_err_frms"},
168         {"rmac_out_rng_len_err_frms"},
169         {"rmac_long_frms"},
170         {"rmac_pause_ctrl_frms"},
171         {"rmac_unsup_ctrl_frms"},
172         {"rmac_ttl_octets"},
173         {"rmac_accepted_ucst_frms"},
174         {"rmac_accepted_nucst_frms"},
175         {"rmac_discarded_frms"},
176         {"rmac_drop_events"},
177         {"rmac_ttl_less_fb_octets"},
178         {"rmac_ttl_frms"},
179         {"rmac_usized_frms"},
180         {"rmac_osized_frms"},
181         {"rmac_frag_frms"},
182         {"rmac_jabber_frms"},
183         {"rmac_ttl_64_frms"},
184         {"rmac_ttl_65_127_frms"},
185         {"rmac_ttl_128_255_frms"},
186         {"rmac_ttl_256_511_frms"},
187         {"rmac_ttl_512_1023_frms"},
188         {"rmac_ttl_1024_1518_frms"},
189         {"rmac_ip"},
190         {"rmac_ip_octets"},
191         {"rmac_hdr_err_ip"},
192         {"rmac_drop_ip"},
193         {"rmac_icmp"},
194         {"rmac_tcp"},
195         {"rmac_udp"},
196         {"rmac_err_drp_udp"},
197         {"rmac_xgmii_err_sym"},
198         {"rmac_frms_q0"},
199         {"rmac_frms_q1"},
200         {"rmac_frms_q2"},
201         {"rmac_frms_q3"},
202         {"rmac_frms_q4"},
203         {"rmac_frms_q5"},
204         {"rmac_frms_q6"},
205         {"rmac_frms_q7"},
206         {"rmac_full_q0"},
207         {"rmac_full_q1"},
208         {"rmac_full_q2"},
209         {"rmac_full_q3"},
210         {"rmac_full_q4"},
211         {"rmac_full_q5"},
212         {"rmac_full_q6"},
213         {"rmac_full_q7"},
214         {"rmac_pause_cnt"},
215         {"rmac_xgmii_data_err_cnt"},
216         {"rmac_xgmii_ctrl_err_cnt"},
217         {"rmac_accepted_ip"},
218         {"rmac_err_tcp"},
219         {"rd_req_cnt"},
220         {"new_rd_req_cnt"},
221         {"new_rd_req_rtry_cnt"},
222         {"rd_rtry_cnt"},
223         {"wr_rtry_rd_ack_cnt"},
224         {"wr_req_cnt"},
225         {"new_wr_req_cnt"},
226         {"new_wr_req_rtry_cnt"},
227         {"wr_rtry_cnt"},
228         {"wr_disc_cnt"},
229         {"rd_rtry_wr_ack_cnt"},
230         {"txp_wr_cnt"},
231         {"txd_rd_cnt"},
232         {"txd_wr_cnt"},
233         {"rxd_rd_cnt"},
234         {"rxd_wr_cnt"},
235         {"txf_rd_cnt"},
236         {"rxf_wr_cnt"}
237 };
238
239 static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
240         {"rmac_ttl_1519_4095_frms"},
241         {"rmac_ttl_4096_8191_frms"},
242         {"rmac_ttl_8192_max_frms"},
243         {"rmac_ttl_gt_max_frms"},
244         {"rmac_osized_alt_frms"},
245         {"rmac_jabber_alt_frms"},
246         {"rmac_gt_max_alt_frms"},
247         {"rmac_vlan_frms"},
248         {"rmac_len_discard"},
249         {"rmac_fcs_discard"},
250         {"rmac_pf_discard"},
251         {"rmac_da_discard"},
252         {"rmac_red_discard"},
253         {"rmac_rts_discard"},
254         {"rmac_ingm_full_discard"},
255         {"link_fault_cnt"}
256 };
257
258 static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
259         {"\n DRIVER STATISTICS"},
260         {"single_bit_ecc_errs"},
261         {"double_bit_ecc_errs"},
262         {"parity_err_cnt"},
263         {"serious_err_cnt"},
264         {"soft_reset_cnt"},
265         {"fifo_full_cnt"},
266         {"ring_full_cnt"},
267         ("alarm_transceiver_temp_high"),
268         ("alarm_transceiver_temp_low"),
269         ("alarm_laser_bias_current_high"),
270         ("alarm_laser_bias_current_low"),
271         ("alarm_laser_output_power_high"),
272         ("alarm_laser_output_power_low"),
273         ("warn_transceiver_temp_high"),
274         ("warn_transceiver_temp_low"),
275         ("warn_laser_bias_current_high"),
276         ("warn_laser_bias_current_low"),
277         ("warn_laser_output_power_high"),
278         ("warn_laser_output_power_low"),
279         ("lro_aggregated_pkts"),
280         ("lro_flush_both_count"),
281         ("lro_out_of_sequence_pkts"),
282         ("lro_flush_due_to_max_pkts"),
283         ("lro_avg_aggr_pkts"),
284 };
285
286 #define S2IO_XENA_STAT_LEN sizeof(ethtool_xena_stats_keys)/ ETH_GSTRING_LEN
287 #define S2IO_ENHANCED_STAT_LEN sizeof(ethtool_enhanced_stats_keys)/ \
288                                         ETH_GSTRING_LEN
289 #define S2IO_DRIVER_STAT_LEN sizeof(ethtool_driver_stats_keys)/ ETH_GSTRING_LEN
290
291 #define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
292 #define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )
293
294 #define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
295 #define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )
296
297 #define S2IO_TEST_LEN   sizeof(s2io_gstrings) / ETH_GSTRING_LEN
298 #define S2IO_STRINGS_LEN        S2IO_TEST_LEN * ETH_GSTRING_LEN
299
300 #define S2IO_TIMER_CONF(timer, handle, arg, exp)                \
301                         init_timer(&timer);                     \
302                         timer.function = handle;                \
303                         timer.data = (unsigned long) arg;       \
304                         mod_timer(&timer, (jiffies + exp))      \
305
306 /* Add the vlan */
307 static void s2io_vlan_rx_register(struct net_device *dev,
308                                         struct vlan_group *grp)
309 {
310         struct s2io_nic *nic = dev->priv;
311         unsigned long flags;
312
313         spin_lock_irqsave(&nic->tx_lock, flags);
314         nic->vlgrp = grp;
315         spin_unlock_irqrestore(&nic->tx_lock, flags);
316 }
317
318 /* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
319 static int vlan_strip_flag;
320
321 /* Unregister the vlan */
322 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
323 {
324         struct s2io_nic *nic = dev->priv;
325         unsigned long flags;
326
327         spin_lock_irqsave(&nic->tx_lock, flags);
328         vlan_group_set_device(nic->vlgrp, vid, NULL);
329         spin_unlock_irqrestore(&nic->tx_lock, flags);
330 }
331
332 /*
333  * Constants to be programmed into the Xena's registers, to configure
334  * the XAUI.
335  */
336
337 #define END_SIGN        0x0
338 static const u64 herc_act_dtx_cfg[] = {
339         /* Set address */
340         0x8000051536750000ULL, 0x80000515367500E0ULL,
341         /* Write data */
342         0x8000051536750004ULL, 0x80000515367500E4ULL,
343         /* Set address */
344         0x80010515003F0000ULL, 0x80010515003F00E0ULL,
345         /* Write data */
346         0x80010515003F0004ULL, 0x80010515003F00E4ULL,
347         /* Set address */
348         0x801205150D440000ULL, 0x801205150D4400E0ULL,
349         /* Write data */
350         0x801205150D440004ULL, 0x801205150D4400E4ULL,
351         /* Set address */
352         0x80020515F2100000ULL, 0x80020515F21000E0ULL,
353         /* Write data */
354         0x80020515F2100004ULL, 0x80020515F21000E4ULL,
355         /* Done */
356         END_SIGN
357 };
358
359 static const u64 xena_dtx_cfg[] = {
360         /* Set address */
361         0x8000051500000000ULL, 0x80000515000000E0ULL,
362         /* Write data */
363         0x80000515D9350004ULL, 0x80000515D93500E4ULL,
364         /* Set address */
365         0x8001051500000000ULL, 0x80010515000000E0ULL,
366         /* Write data */
367         0x80010515001E0004ULL, 0x80010515001E00E4ULL,
368         /* Set address */
369         0x8002051500000000ULL, 0x80020515000000E0ULL,
370         /* Write data */
371         0x80020515F2100004ULL, 0x80020515F21000E4ULL,
372         END_SIGN
373 };
374
375 /*
376  * Constants for Fixing the MacAddress problem seen mostly on
377  * Alpha machines.
378  */
379 static const u64 fix_mac[] = {
380         0x0060000000000000ULL, 0x0060600000000000ULL,
381         0x0040600000000000ULL, 0x0000600000000000ULL,
382         0x0020600000000000ULL, 0x0060600000000000ULL,
383         0x0020600000000000ULL, 0x0060600000000000ULL,
384         0x0020600000000000ULL, 0x0060600000000000ULL,
385         0x0020600000000000ULL, 0x0060600000000000ULL,
386         0x0020600000000000ULL, 0x0060600000000000ULL,
387         0x0020600000000000ULL, 0x0060600000000000ULL,
388         0x0020600000000000ULL, 0x0060600000000000ULL,
389         0x0020600000000000ULL, 0x0060600000000000ULL,
390         0x0020600000000000ULL, 0x0060600000000000ULL,
391         0x0020600000000000ULL, 0x0060600000000000ULL,
392         0x0020600000000000ULL, 0x0000600000000000ULL,
393         0x0040600000000000ULL, 0x0060600000000000ULL,
394         END_SIGN
395 };
396
397 MODULE_LICENSE("GPL");
398 MODULE_VERSION(DRV_VERSION);
399
400
401 /* Module Loadable parameters. */
402 S2IO_PARM_INT(tx_fifo_num, 1);
403 S2IO_PARM_INT(rx_ring_num, 1);
404
405
406 S2IO_PARM_INT(rx_ring_mode, 1);
407 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
408 S2IO_PARM_INT(rmac_pause_time, 0x100);
409 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
410 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
411 S2IO_PARM_INT(shared_splits, 0);
412 S2IO_PARM_INT(tmac_util_period, 5);
413 S2IO_PARM_INT(rmac_util_period, 5);
414 S2IO_PARM_INT(bimodal, 0);
415 S2IO_PARM_INT(l3l4hdr_size, 128);
416 /* Frequency of Rx desc syncs expressed as power of 2 */
417 S2IO_PARM_INT(rxsync_frequency, 3);
418 /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
419 S2IO_PARM_INT(intr_type, 0);
420 /* Large receive offload feature */
421 S2IO_PARM_INT(lro, 0);
422 /* Max pkts to be aggregated by LRO at one time. If not specified,
423  * aggregation happens until we hit max IP pkt size(64K)
424  */
425 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
426 S2IO_PARM_INT(indicate_max_pkts, 0);
427
428 S2IO_PARM_INT(napi, 1);
429 S2IO_PARM_INT(ufo, 0);
430 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
431
432 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
433     {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
434 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
435     {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
436 static unsigned int rts_frm_len[MAX_RX_RINGS] =
437     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
438
439 module_param_array(tx_fifo_len, uint, NULL, 0);
440 module_param_array(rx_ring_sz, uint, NULL, 0);
441 module_param_array(rts_frm_len, uint, NULL, 0);
442
443 /*
444  * S2IO device table.
445  * This table lists all the devices that this driver supports.
446  */
447 static struct pci_device_id s2io_tbl[] __devinitdata = {
448         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
449          PCI_ANY_ID, PCI_ANY_ID},
450         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
451          PCI_ANY_ID, PCI_ANY_ID},
452         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
453          PCI_ANY_ID, PCI_ANY_ID},
454         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
455          PCI_ANY_ID, PCI_ANY_ID},
456         {0,}
457 };
458
459 MODULE_DEVICE_TABLE(pci, s2io_tbl);
460
461 static struct pci_driver s2io_driver = {
462       .name = "S2IO",
463       .id_table = s2io_tbl,
464       .probe = s2io_init_nic,
465       .remove = __devexit_p(s2io_rem_nic),
466 };
467
468 /* A simplifier macro used both by init and free shared_mem Fns(). */
469 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
470
471 /**
472  * init_shared_mem - Allocation and Initialization of Memory
473  * @nic: Device private variable.
474  * Description: The function allocates all the memory areas shared
475  * between the NIC and the driver. This includes Tx descriptors,
476  * Rx descriptors and the statistics block.
477  */
478
479 static int init_shared_mem(struct s2io_nic *nic)
480 {
481         u32 size;
482         void *tmp_v_addr, *tmp_v_addr_next;
483         dma_addr_t tmp_p_addr, tmp_p_addr_next;
484         struct RxD_block *pre_rxd_blk = NULL;
485         int i, j, blk_cnt;
486         int lst_size, lst_per_page;
487         struct net_device *dev = nic->dev;
488         unsigned long tmp;
489         struct buffAdd *ba;
490
491         struct mac_info *mac_control;
492         struct config_param *config;
493
494         mac_control = &nic->mac_control;
495         config = &nic->config;
496
497
498         /* Allocation and initialization of TXDLs in FIOFs */
499         size = 0;
500         for (i = 0; i < config->tx_fifo_num; i++) {
501                 size += config->tx_cfg[i].fifo_len;
502         }
503         if (size > MAX_AVAILABLE_TXDS) {
504                 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
505                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
506                 return -EINVAL;
507         }
508
509         lst_size = (sizeof(struct TxD) * config->max_txds);
510         lst_per_page = PAGE_SIZE / lst_size;
511
512         for (i = 0; i < config->tx_fifo_num; i++) {
513                 int fifo_len = config->tx_cfg[i].fifo_len;
514                 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
515                 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
516                                                           GFP_KERNEL);
517                 if (!mac_control->fifos[i].list_info) {
518                         DBG_PRINT(INFO_DBG,
519                                   "Malloc failed for list_info\n");
520                         return -ENOMEM;
521                 }
522                 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
523         }
524         for (i = 0; i < config->tx_fifo_num; i++) {
525                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
526                                                 lst_per_page);
527                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
528                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
529                     config->tx_cfg[i].fifo_len - 1;
530                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
531                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
532                     config->tx_cfg[i].fifo_len - 1;
533                 mac_control->fifos[i].fifo_no = i;
534                 mac_control->fifos[i].nic = nic;
535                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
536
537                 for (j = 0; j < page_num; j++) {
538                         int k = 0;
539                         dma_addr_t tmp_p;
540                         void *tmp_v;
541                         tmp_v = pci_alloc_consistent(nic->pdev,
542                                                      PAGE_SIZE, &tmp_p);
543                         if (!tmp_v) {
544                                 DBG_PRINT(INFO_DBG,
545                                           "pci_alloc_consistent ");
546                                 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
547                                 return -ENOMEM;
548                         }
549                         /* If we got a zero DMA address(can happen on
550                          * certain platforms like PPC), reallocate.
551                          * Store virtual address of page we don't want,
552                          * to be freed later.
553                          */
554                         if (!tmp_p) {
555                                 mac_control->zerodma_virt_addr = tmp_v;
556                                 DBG_PRINT(INIT_DBG,
557                                 "%s: Zero DMA address for TxDL. ", dev->name);
558                                 DBG_PRINT(INIT_DBG,
559                                 "Virtual address %p\n", tmp_v);
560                                 tmp_v = pci_alloc_consistent(nic->pdev,
561                                                      PAGE_SIZE, &tmp_p);
562                                 if (!tmp_v) {
563                                         DBG_PRINT(INFO_DBG,
564                                           "pci_alloc_consistent ");
565                                         DBG_PRINT(INFO_DBG, "failed for TxDL\n");
566                                         return -ENOMEM;
567                                 }
568                         }
569                         while (k < lst_per_page) {
570                                 int l = (j * lst_per_page) + k;
571                                 if (l == config->tx_cfg[i].fifo_len)
572                                         break;
573                                 mac_control->fifos[i].list_info[l].list_virt_addr =
574                                     tmp_v + (k * lst_size);
575                                 mac_control->fifos[i].list_info[l].list_phy_addr =
576                                     tmp_p + (k * lst_size);
577                                 k++;
578                         }
579                 }
580         }
581
582         nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
583         if (!nic->ufo_in_band_v)
584                 return -ENOMEM;
585
586         /* Allocation and initialization of RXDs in Rings */
587         size = 0;
588         for (i = 0; i < config->rx_ring_num; i++) {
589                 if (config->rx_cfg[i].num_rxd %
590                     (rxd_count[nic->rxd_mode] + 1)) {
591                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
592                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
593                                   i);
594                         DBG_PRINT(ERR_DBG, "RxDs per Block");
595                         return FAILURE;
596                 }
597                 size += config->rx_cfg[i].num_rxd;
598                 mac_control->rings[i].block_count =
599                         config->rx_cfg[i].num_rxd /
600                         (rxd_count[nic->rxd_mode] + 1 );
601                 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
602                         mac_control->rings[i].block_count;
603         }
604         if (nic->rxd_mode == RXD_MODE_1)
605                 size = (size * (sizeof(struct RxD1)));
606         else
607                 size = (size * (sizeof(struct RxD3)));
608
609         for (i = 0; i < config->rx_ring_num; i++) {
610                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
611                 mac_control->rings[i].rx_curr_get_info.offset = 0;
612                 mac_control->rings[i].rx_curr_get_info.ring_len =
613                     config->rx_cfg[i].num_rxd - 1;
614                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
615                 mac_control->rings[i].rx_curr_put_info.offset = 0;
616                 mac_control->rings[i].rx_curr_put_info.ring_len =
617                     config->rx_cfg[i].num_rxd - 1;
618                 mac_control->rings[i].nic = nic;
619                 mac_control->rings[i].ring_no = i;
620
621                 blk_cnt = config->rx_cfg[i].num_rxd /
622                                 (rxd_count[nic->rxd_mode] + 1);
623                 /*  Allocating all the Rx blocks */
624                 for (j = 0; j < blk_cnt; j++) {
625                         struct rx_block_info *rx_blocks;
626                         int l;
627
628                         rx_blocks = &mac_control->rings[i].rx_blocks[j];
629                         size = SIZE_OF_BLOCK; //size is always page size
630                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
631                                                           &tmp_p_addr);
632                         if (tmp_v_addr == NULL) {
633                                 /*
634                                  * In case of failure, free_shared_mem()
635                                  * is called, which should free any
636                                  * memory that was alloced till the
637                                  * failure happened.
638                                  */
639                                 rx_blocks->block_virt_addr = tmp_v_addr;
640                                 return -ENOMEM;
641                         }
642                         memset(tmp_v_addr, 0, size);
643                         rx_blocks->block_virt_addr = tmp_v_addr;
644                         rx_blocks->block_dma_addr = tmp_p_addr;
645                         rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
646                                                   rxd_count[nic->rxd_mode],
647                                                   GFP_KERNEL);
648                         if (!rx_blocks->rxds)
649                                 return -ENOMEM;
650                         for (l=0; l<rxd_count[nic->rxd_mode];l++) {
651                                 rx_blocks->rxds[l].virt_addr =
652                                         rx_blocks->block_virt_addr +
653                                         (rxd_size[nic->rxd_mode] * l);
654                                 rx_blocks->rxds[l].dma_addr =
655                                         rx_blocks->block_dma_addr +
656                                         (rxd_size[nic->rxd_mode] * l);
657                         }
658                 }
659                 /* Interlinking all Rx Blocks */
660                 for (j = 0; j < blk_cnt; j++) {
661                         tmp_v_addr =
662                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
663                         tmp_v_addr_next =
664                                 mac_control->rings[i].rx_blocks[(j + 1) %
665                                               blk_cnt].block_virt_addr;
666                         tmp_p_addr =
667                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
668                         tmp_p_addr_next =
669                                 mac_control->rings[i].rx_blocks[(j + 1) %
670                                               blk_cnt].block_dma_addr;
671
672                         pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
673                         pre_rxd_blk->reserved_2_pNext_RxD_block =
674                             (unsigned long) tmp_v_addr_next;
675                         pre_rxd_blk->pNext_RxD_Blk_physical =
676                             (u64) tmp_p_addr_next;
677                 }
678         }
679         if (nic->rxd_mode >= RXD_MODE_3A) {
680                 /*
681                  * Allocation of Storages for buffer addresses in 2BUFF mode
682                  * and the buffers as well.
683                  */
684                 for (i = 0; i < config->rx_ring_num; i++) {
685                         blk_cnt = config->rx_cfg[i].num_rxd /
686                            (rxd_count[nic->rxd_mode]+ 1);
687                         mac_control->rings[i].ba =
688                                 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
689                                      GFP_KERNEL);
690                         if (!mac_control->rings[i].ba)
691                                 return -ENOMEM;
692                         for (j = 0; j < blk_cnt; j++) {
693                                 int k = 0;
694                                 mac_control->rings[i].ba[j] =
695                                         kmalloc((sizeof(struct buffAdd) *
696                                                 (rxd_count[nic->rxd_mode] + 1)),
697                                                 GFP_KERNEL);
698                                 if (!mac_control->rings[i].ba[j])
699                                         return -ENOMEM;
700                                 while (k != rxd_count[nic->rxd_mode]) {
701                                         ba = &mac_control->rings[i].ba[j][k];
702
703                                         ba->ba_0_org = (void *) kmalloc
704                                             (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
705                                         if (!ba->ba_0_org)
706                                                 return -ENOMEM;
707                                         tmp = (unsigned long)ba->ba_0_org;
708                                         tmp += ALIGN_SIZE;
709                                         tmp &= ~((unsigned long) ALIGN_SIZE);
710                                         ba->ba_0 = (void *) tmp;
711
712                                         ba->ba_1_org = (void *) kmalloc
713                                             (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
714                                         if (!ba->ba_1_org)
715                                                 return -ENOMEM;
716                                         tmp = (unsigned long) ba->ba_1_org;
717                                         tmp += ALIGN_SIZE;
718                                         tmp &= ~((unsigned long) ALIGN_SIZE);
719                                         ba->ba_1 = (void *) tmp;
720                                         k++;
721                                 }
722                         }
723                 }
724         }
725
726         /* Allocation and initialization of Statistics block */
727         size = sizeof(struct stat_block);
728         mac_control->stats_mem = pci_alloc_consistent
729             (nic->pdev, size, &mac_control->stats_mem_phy);
730
731         if (!mac_control->stats_mem) {
732                 /*
733                  * In case of failure, free_shared_mem() is called, which
734                  * should free any memory that was alloced till the
735                  * failure happened.
736                  */
737                 return -ENOMEM;
738         }
739         mac_control->stats_mem_sz = size;
740
741         tmp_v_addr = mac_control->stats_mem;
742         mac_control->stats_info = (struct stat_block *) tmp_v_addr;
743         memset(tmp_v_addr, 0, size);
744         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
745                   (unsigned long long) tmp_p_addr);
746
747         return SUCCESS;
748 }
749
750 /**
751  * free_shared_mem - Free the allocated Memory
752  * @nic:  Device private variable.
753  * Description: This function is to free all memory locations allocated by
754  * the init_shared_mem() function and return it to the kernel.
755  */
756
757 static void free_shared_mem(struct s2io_nic *nic)
758 {
759         int i, j, blk_cnt, size;
760         void *tmp_v_addr;
761         dma_addr_t tmp_p_addr;
762         struct mac_info *mac_control;
763         struct config_param *config;
764         int lst_size, lst_per_page;
765         struct net_device *dev = nic->dev;
766
767         if (!nic)
768                 return;
769
770         mac_control = &nic->mac_control;
771         config = &nic->config;
772
773         lst_size = (sizeof(struct TxD) * config->max_txds);
774         lst_per_page = PAGE_SIZE / lst_size;
775
776         for (i = 0; i < config->tx_fifo_num; i++) {
777                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
778                                                 lst_per_page);
779                 for (j = 0; j < page_num; j++) {
780                         int mem_blks = (j * lst_per_page);
781                         if (!mac_control->fifos[i].list_info)
782                                 return;
783                         if (!mac_control->fifos[i].list_info[mem_blks].
784                                  list_virt_addr)
785                                 break;
786                         pci_free_consistent(nic->pdev, PAGE_SIZE,
787                                             mac_control->fifos[i].
788                                             list_info[mem_blks].
789                                             list_virt_addr,
790                                             mac_control->fifos[i].
791                                             list_info[mem_blks].
792                                             list_phy_addr);
793                 }
794                 /* If we got a zero DMA address during allocation,
795                  * free the page now
796                  */
797                 if (mac_control->zerodma_virt_addr) {
798                         pci_free_consistent(nic->pdev, PAGE_SIZE,
799                                             mac_control->zerodma_virt_addr,
800                                             (dma_addr_t)0);
801                         DBG_PRINT(INIT_DBG,
802                                 "%s: Freeing TxDL with zero DMA addr. ",
803                                 dev->name);
804                         DBG_PRINT(INIT_DBG, "Virtual address %p\n",
805                                 mac_control->zerodma_virt_addr);
806                 }
807                 kfree(mac_control->fifos[i].list_info);
808         }
809
810         size = SIZE_OF_BLOCK;
811         for (i = 0; i < config->rx_ring_num; i++) {
812                 blk_cnt = mac_control->rings[i].block_count;
813                 for (j = 0; j < blk_cnt; j++) {
814                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
815                                 block_virt_addr;
816                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
817                                 block_dma_addr;
818                         if (tmp_v_addr == NULL)
819                                 break;
820                         pci_free_consistent(nic->pdev, size,
821                                             tmp_v_addr, tmp_p_addr);
822                         kfree(mac_control->rings[i].rx_blocks[j].rxds);
823                 }
824         }
825
826         if (nic->rxd_mode >= RXD_MODE_3A) {
827                 /* Freeing buffer storage addresses in 2BUFF mode. */
828                 for (i = 0; i < config->rx_ring_num; i++) {
829                         blk_cnt = config->rx_cfg[i].num_rxd /
830                             (rxd_count[nic->rxd_mode] + 1);
831                         for (j = 0; j < blk_cnt; j++) {
832                                 int k = 0;
833                                 if (!mac_control->rings[i].ba[j])
834                                         continue;
835                                 while (k != rxd_count[nic->rxd_mode]) {
836                                         struct buffAdd *ba =
837                                                 &mac_control->rings[i].ba[j][k];
838                                         kfree(ba->ba_0_org);
839                                         kfree(ba->ba_1_org);
840                                         k++;
841                                 }
842                                 kfree(mac_control->rings[i].ba[j]);
843                         }
844                         kfree(mac_control->rings[i].ba);
845                 }
846         }
847
848         if (mac_control->stats_mem) {
849                 pci_free_consistent(nic->pdev,
850                                     mac_control->stats_mem_sz,
851                                     mac_control->stats_mem,
852                                     mac_control->stats_mem_phy);
853         }
854         if (nic->ufo_in_band_v)
855                 kfree(nic->ufo_in_band_v);
856 }
857
858 /**
859  * s2io_verify_pci_mode -
860  */
861
862 static int s2io_verify_pci_mode(struct s2io_nic *nic)
863 {
864         struct XENA_dev_config __iomem *bar0 = nic->bar0;
865         register u64 val64 = 0;
866         int     mode;
867
868         val64 = readq(&bar0->pci_mode);
869         mode = (u8)GET_PCI_MODE(val64);
870
871         if ( val64 & PCI_MODE_UNKNOWN_MODE)
872                 return -1;      /* Unknown PCI mode */
873         return mode;
874 }
875
876 #define NEC_VENID   0x1033
877 #define NEC_DEVID   0x0125
878 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
879 {
880         struct pci_dev *tdev = NULL;
881         while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
882                 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
883                         if (tdev->bus == s2io_pdev->bus->parent)
884                                 pci_dev_put(tdev);
885                                 return 1;
886                 }
887         }
888         return 0;
889 }
890
891 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
892 /**
893  * s2io_print_pci_mode -
894  */
895 static int s2io_print_pci_mode(struct s2io_nic *nic)
896 {
897         struct XENA_dev_config __iomem *bar0 = nic->bar0;
898         register u64 val64 = 0;
899         int     mode;
900         struct config_param *config = &nic->config;
901
902         val64 = readq(&bar0->pci_mode);
903         mode = (u8)GET_PCI_MODE(val64);
904
905         if ( val64 & PCI_MODE_UNKNOWN_MODE)
906                 return -1;      /* Unknown PCI mode */
907
908         config->bus_speed = bus_speed[mode];
909
910         if (s2io_on_nec_bridge(nic->pdev)) {
911                 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
912                                                         nic->dev->name);
913                 return mode;
914         }
915
916         if (val64 & PCI_MODE_32_BITS) {
917                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
918         } else {
919                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
920         }
921
922         switch(mode) {
923                 case PCI_MODE_PCI_33:
924                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
925                         break;
926                 case PCI_MODE_PCI_66:
927                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
928                         break;
929                 case PCI_MODE_PCIX_M1_66:
930                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
931                         break;
932                 case PCI_MODE_PCIX_M1_100:
933                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
934                         break;
935                 case PCI_MODE_PCIX_M1_133:
936                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
937                         break;
938                 case PCI_MODE_PCIX_M2_66:
939                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
940                         break;
941                 case PCI_MODE_PCIX_M2_100:
942                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
943                         break;
944                 case PCI_MODE_PCIX_M2_133:
945                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
946                         break;
947                 default:
948                         return -1;      /* Unsupported bus speed */
949         }
950
951         return mode;
952 }
953
954 /**
955  *  init_nic - Initialization of hardware
 *  @nic: device private variable
957  *  Description: The function sequentially configures every block
958  *  of the H/W from their reset values.
959  *  Return Value:  SUCCESS on success and
960  *  '-1' on failure (endian settings incorrect).
961  */
962
963 static int init_nic(struct s2io_nic *nic)
964 {
965         struct XENA_dev_config __iomem *bar0 = nic->bar0;
966         struct net_device *dev = nic->dev;
967         register u64 val64 = 0;
968         void __iomem *add;
969         u32 time;
970         int i, j;
971         struct mac_info *mac_control;
972         struct config_param *config;
973         int dtx_cnt = 0;
974         unsigned long long mem_share;
975         int mem_size;
976
977         mac_control = &nic->mac_control;
978         config = &nic->config;
979
980         /* to set the swapper controle on the card */
981         if(s2io_set_swapper(nic)) {
982                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
983                 return -1;
984         }
985
986         /*
987          * Herc requires EOI to be removed from reset before XGXS, so..
988          */
989         if (nic->device_type & XFRAME_II_DEVICE) {
990                 val64 = 0xA500000000ULL;
991                 writeq(val64, &bar0->sw_reset);
992                 msleep(500);
993                 val64 = readq(&bar0->sw_reset);
994         }
995
996         /* Remove XGXS from reset state */
997         val64 = 0;
998         writeq(val64, &bar0->sw_reset);
999         msleep(500);
1000         val64 = readq(&bar0->sw_reset);
1001
1002         /*  Enable Receiving broadcasts */
1003         add = &bar0->mac_cfg;
1004         val64 = readq(&bar0->mac_cfg);
1005         val64 |= MAC_RMAC_BCAST_ENABLE;
1006         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1007         writel((u32) val64, add);
1008         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1009         writel((u32) (val64 >> 32), (add + 4));
1010
1011         /* Read registers in all blocks */
1012         val64 = readq(&bar0->mac_int_mask);
1013         val64 = readq(&bar0->mc_int_mask);
1014         val64 = readq(&bar0->xgxs_int_mask);
1015
1016         /*  Set MTU */
1017         val64 = dev->mtu;
1018         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1019
1020         if (nic->device_type & XFRAME_II_DEVICE) {
1021                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1022                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1023                                           &bar0->dtx_control, UF);
1024                         if (dtx_cnt & 0x1)
1025                                 msleep(1); /* Necessary!! */
1026                         dtx_cnt++;
1027                 }
1028         } else {
1029                 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1030                         SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1031                                           &bar0->dtx_control, UF);
1032                         val64 = readq(&bar0->dtx_control);
1033                         dtx_cnt++;
1034                 }
1035         }
1036
1037         /*  Tx DMA Initialization */
1038         val64 = 0;
1039         writeq(val64, &bar0->tx_fifo_partition_0);
1040         writeq(val64, &bar0->tx_fifo_partition_1);
1041         writeq(val64, &bar0->tx_fifo_partition_2);
1042         writeq(val64, &bar0->tx_fifo_partition_3);
1043
1044
1045         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1046                 val64 |=
1047                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1048                          13) | vBIT(config->tx_cfg[i].fifo_priority,
1049                                     ((i * 32) + 5), 3);
1050
1051                 if (i == (config->tx_fifo_num - 1)) {
1052                         if (i % 2 == 0)
1053                                 i++;
1054                 }
1055
1056                 switch (i) {
1057                 case 1:
1058                         writeq(val64, &bar0->tx_fifo_partition_0);
1059                         val64 = 0;
1060                         break;
1061                 case 3:
1062                         writeq(val64, &bar0->tx_fifo_partition_1);
1063                         val64 = 0;
1064                         break;
1065                 case 5:
1066                         writeq(val64, &bar0->tx_fifo_partition_2);
1067                         val64 = 0;
1068                         break;
1069                 case 7:
1070                         writeq(val64, &bar0->tx_fifo_partition_3);
1071                         break;
1072                 }
1073         }
1074
1075         /*
1076          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1077          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1078          */
1079         if ((nic->device_type == XFRAME_I_DEVICE) &&
1080                 (get_xena_rev_id(nic->pdev) < 4))
1081                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1082
1083         val64 = readq(&bar0->tx_fifo_partition_0);
1084         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1085                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1086
1087         /*
1088          * Initialization of Tx_PA_CONFIG register to ignore packet
1089          * integrity checking.
1090          */
1091         val64 = readq(&bar0->tx_pa_cfg);
1092         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1093             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1094         writeq(val64, &bar0->tx_pa_cfg);
1095
1096         /* Rx DMA intialization. */
1097         val64 = 0;
1098         for (i = 0; i < config->rx_ring_num; i++) {
1099                 val64 |=
1100                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1101                          3);
1102         }
1103         writeq(val64, &bar0->rx_queue_priority);
1104
1105         /*
1106          * Allocating equal share of memory to all the
1107          * configured Rings.
1108          */
1109         val64 = 0;
1110         if (nic->device_type & XFRAME_II_DEVICE)
1111                 mem_size = 32;
1112         else
1113                 mem_size = 64;
1114
1115         for (i = 0; i < config->rx_ring_num; i++) {
1116                 switch (i) {
1117                 case 0:
1118                         mem_share = (mem_size / config->rx_ring_num +
1119                                      mem_size % config->rx_ring_num);
1120                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1121                         continue;
1122                 case 1:
1123                         mem_share = (mem_size / config->rx_ring_num);
1124                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1125                         continue;
1126                 case 2:
1127                         mem_share = (mem_size / config->rx_ring_num);
1128                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1129                         continue;
1130                 case 3:
1131                         mem_share = (mem_size / config->rx_ring_num);
1132                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1133                         continue;
1134                 case 4:
1135                         mem_share = (mem_size / config->rx_ring_num);
1136                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1137                         continue;
1138                 case 5:
1139                         mem_share = (mem_size / config->rx_ring_num);
1140                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1141                         continue;
1142                 case 6:
1143                         mem_share = (mem_size / config->rx_ring_num);
1144                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1145                         continue;
1146                 case 7:
1147                         mem_share = (mem_size / config->rx_ring_num);
1148                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1149                         continue;
1150                 }
1151         }
1152         writeq(val64, &bar0->rx_queue_cfg);
1153
1154         /*
1155          * Filling Tx round robin registers
1156          * as per the number of FIFOs
1157          */
1158         switch (config->tx_fifo_num) {
1159         case 1:
1160                 val64 = 0x0000000000000000ULL;
1161                 writeq(val64, &bar0->tx_w_round_robin_0);
1162                 writeq(val64, &bar0->tx_w_round_robin_1);
1163                 writeq(val64, &bar0->tx_w_round_robin_2);
1164                 writeq(val64, &bar0->tx_w_round_robin_3);
1165                 writeq(val64, &bar0->tx_w_round_robin_4);
1166                 break;
1167         case 2:
1168                 val64 = 0x0000010000010000ULL;
1169                 writeq(val64, &bar0->tx_w_round_robin_0);
1170                 val64 = 0x0100000100000100ULL;
1171                 writeq(val64, &bar0->tx_w_round_robin_1);
1172                 val64 = 0x0001000001000001ULL;
1173                 writeq(val64, &bar0->tx_w_round_robin_2);
1174                 val64 = 0x0000010000010000ULL;
1175                 writeq(val64, &bar0->tx_w_round_robin_3);
1176                 val64 = 0x0100000000000000ULL;
1177                 writeq(val64, &bar0->tx_w_round_robin_4);
1178                 break;
1179         case 3:
1180                 val64 = 0x0001000102000001ULL;
1181                 writeq(val64, &bar0->tx_w_round_robin_0);
1182                 val64 = 0x0001020000010001ULL;
1183                 writeq(val64, &bar0->tx_w_round_robin_1);
1184                 val64 = 0x0200000100010200ULL;
1185                 writeq(val64, &bar0->tx_w_round_robin_2);
1186                 val64 = 0x0001000102000001ULL;
1187                 writeq(val64, &bar0->tx_w_round_robin_3);
1188                 val64 = 0x0001020000000000ULL;
1189                 writeq(val64, &bar0->tx_w_round_robin_4);
1190                 break;
1191         case 4:
1192                 val64 = 0x0001020300010200ULL;
1193                 writeq(val64, &bar0->tx_w_round_robin_0);
1194                 val64 = 0x0100000102030001ULL;
1195                 writeq(val64, &bar0->tx_w_round_robin_1);
1196                 val64 = 0x0200010000010203ULL;
1197                 writeq(val64, &bar0->tx_w_round_robin_2);
1198                 val64 = 0x0001020001000001ULL;
1199                 writeq(val64, &bar0->tx_w_round_robin_3);
1200                 val64 = 0x0203000100000000ULL;
1201                 writeq(val64, &bar0->tx_w_round_robin_4);
1202                 break;
1203         case 5:
1204                 val64 = 0x0001000203000102ULL;
1205                 writeq(val64, &bar0->tx_w_round_robin_0);
1206                 val64 = 0x0001020001030004ULL;
1207                 writeq(val64, &bar0->tx_w_round_robin_1);
1208                 val64 = 0x0001000203000102ULL;
1209                 writeq(val64, &bar0->tx_w_round_robin_2);
1210                 val64 = 0x0001020001030004ULL;
1211                 writeq(val64, &bar0->tx_w_round_robin_3);
1212                 val64 = 0x0001000000000000ULL;
1213                 writeq(val64, &bar0->tx_w_round_robin_4);
1214                 break;
1215         case 6:
1216                 val64 = 0x0001020304000102ULL;
1217                 writeq(val64, &bar0->tx_w_round_robin_0);
1218                 val64 = 0x0304050001020001ULL;
1219                 writeq(val64, &bar0->tx_w_round_robin_1);
1220                 val64 = 0x0203000100000102ULL;
1221                 writeq(val64, &bar0->tx_w_round_robin_2);
1222                 val64 = 0x0304000102030405ULL;
1223                 writeq(val64, &bar0->tx_w_round_robin_3);
1224                 val64 = 0x0001000200000000ULL;
1225                 writeq(val64, &bar0->tx_w_round_robin_4);
1226                 break;
1227         case 7:
1228                 val64 = 0x0001020001020300ULL;
1229                 writeq(val64, &bar0->tx_w_round_robin_0);
1230                 val64 = 0x0102030400010203ULL;
1231                 writeq(val64, &bar0->tx_w_round_robin_1);
1232                 val64 = 0x0405060001020001ULL;
1233                 writeq(val64, &bar0->tx_w_round_robin_2);
1234                 val64 = 0x0304050000010200ULL;
1235                 writeq(val64, &bar0->tx_w_round_robin_3);
1236                 val64 = 0x0102030000000000ULL;
1237                 writeq(val64, &bar0->tx_w_round_robin_4);
1238                 break;
1239         case 8:
1240                 val64 = 0x0001020300040105ULL;
1241                 writeq(val64, &bar0->tx_w_round_robin_0);
1242                 val64 = 0x0200030106000204ULL;
1243                 writeq(val64, &bar0->tx_w_round_robin_1);
1244                 val64 = 0x0103000502010007ULL;
1245                 writeq(val64, &bar0->tx_w_round_robin_2);
1246                 val64 = 0x0304010002060500ULL;
1247                 writeq(val64, &bar0->tx_w_round_robin_3);
1248                 val64 = 0x0103020400000000ULL;
1249                 writeq(val64, &bar0->tx_w_round_robin_4);
1250                 break;
1251         }
1252
1253         /* Enable all configured Tx FIFO partitions */
1254         val64 = readq(&bar0->tx_fifo_partition_0);
1255         val64 |= (TX_FIFO_PARTITION_EN);
1256         writeq(val64, &bar0->tx_fifo_partition_0);
1257
1258         /* Filling the Rx round robin registers as per the
1259          * number of Rings and steering based on QoS.
1260          */
1261         switch (config->rx_ring_num) {
1262         case 1:
1263                 val64 = 0x8080808080808080ULL;
1264                 writeq(val64, &bar0->rts_qos_steering);
1265                 break;
1266         case 2:
1267                 val64 = 0x0000010000010000ULL;
1268                 writeq(val64, &bar0->rx_w_round_robin_0);
1269                 val64 = 0x0100000100000100ULL;
1270                 writeq(val64, &bar0->rx_w_round_robin_1);
1271                 val64 = 0x0001000001000001ULL;
1272                 writeq(val64, &bar0->rx_w_round_robin_2);
1273                 val64 = 0x0000010000010000ULL;
1274                 writeq(val64, &bar0->rx_w_round_robin_3);
1275                 val64 = 0x0100000000000000ULL;
1276                 writeq(val64, &bar0->rx_w_round_robin_4);
1277
1278                 val64 = 0x8080808040404040ULL;
1279                 writeq(val64, &bar0->rts_qos_steering);
1280                 break;
1281         case 3:
1282                 val64 = 0x0001000102000001ULL;
1283                 writeq(val64, &bar0->rx_w_round_robin_0);
1284                 val64 = 0x0001020000010001ULL;
1285                 writeq(val64, &bar0->rx_w_round_robin_1);
1286                 val64 = 0x0200000100010200ULL;
1287                 writeq(val64, &bar0->rx_w_round_robin_2);
1288                 val64 = 0x0001000102000001ULL;
1289                 writeq(val64, &bar0->rx_w_round_robin_3);
1290                 val64 = 0x0001020000000000ULL;
1291                 writeq(val64, &bar0->rx_w_round_robin_4);
1292
1293                 val64 = 0x8080804040402020ULL;
1294                 writeq(val64, &bar0->rts_qos_steering);
1295                 break;
1296         case 4:
1297                 val64 = 0x0001020300010200ULL;
1298                 writeq(val64, &bar0->rx_w_round_robin_0);
1299                 val64 = 0x0100000102030001ULL;
1300                 writeq(val64, &bar0->rx_w_round_robin_1);
1301                 val64 = 0x0200010000010203ULL;
1302                 writeq(val64, &bar0->rx_w_round_robin_2);
1303                 val64 = 0x0001020001000001ULL;
1304                 writeq(val64, &bar0->rx_w_round_robin_3);
1305                 val64 = 0x0203000100000000ULL;
1306                 writeq(val64, &bar0->rx_w_round_robin_4);
1307
1308                 val64 = 0x8080404020201010ULL;
1309                 writeq(val64, &bar0->rts_qos_steering);
1310                 break;
1311         case 5:
1312                 val64 = 0x0001000203000102ULL;
1313                 writeq(val64, &bar0->rx_w_round_robin_0);
1314                 val64 = 0x0001020001030004ULL;
1315                 writeq(val64, &bar0->rx_w_round_robin_1);
1316                 val64 = 0x0001000203000102ULL;
1317                 writeq(val64, &bar0->rx_w_round_robin_2);
1318                 val64 = 0x0001020001030004ULL;
1319                 writeq(val64, &bar0->rx_w_round_robin_3);
1320                 val64 = 0x0001000000000000ULL;
1321                 writeq(val64, &bar0->rx_w_round_robin_4);
1322
1323                 val64 = 0x8080404020201008ULL;
1324                 writeq(val64, &bar0->rts_qos_steering);
1325                 break;
1326         case 6:
1327                 val64 = 0x0001020304000102ULL;
1328                 writeq(val64, &bar0->rx_w_round_robin_0);
1329                 val64 = 0x0304050001020001ULL;
1330                 writeq(val64, &bar0->rx_w_round_robin_1);
1331                 val64 = 0x0203000100000102ULL;
1332                 writeq(val64, &bar0->rx_w_round_robin_2);
1333                 val64 = 0x0304000102030405ULL;
1334                 writeq(val64, &bar0->rx_w_round_robin_3);
1335                 val64 = 0x0001000200000000ULL;
1336                 writeq(val64, &bar0->rx_w_round_robin_4);
1337
1338                 val64 = 0x8080404020100804ULL;
1339                 writeq(val64, &bar0->rts_qos_steering);
1340                 break;
1341         case 7:
1342                 val64 = 0x0001020001020300ULL;
1343                 writeq(val64, &bar0->rx_w_round_robin_0);
1344                 val64 = 0x0102030400010203ULL;
1345                 writeq(val64, &bar0->rx_w_round_robin_1);
1346                 val64 = 0x0405060001020001ULL;
1347                 writeq(val64, &bar0->rx_w_round_robin_2);
1348                 val64 = 0x0304050000010200ULL;
1349                 writeq(val64, &bar0->rx_w_round_robin_3);
1350                 val64 = 0x0102030000000000ULL;
1351                 writeq(val64, &bar0->rx_w_round_robin_4);
1352
1353                 val64 = 0x8080402010080402ULL;
1354                 writeq(val64, &bar0->rts_qos_steering);
1355                 break;
1356         case 8:
1357                 val64 = 0x0001020300040105ULL;
1358                 writeq(val64, &bar0->rx_w_round_robin_0);
1359                 val64 = 0x0200030106000204ULL;
1360                 writeq(val64, &bar0->rx_w_round_robin_1);
1361                 val64 = 0x0103000502010007ULL;
1362                 writeq(val64, &bar0->rx_w_round_robin_2);
1363                 val64 = 0x0304010002060500ULL;
1364                 writeq(val64, &bar0->rx_w_round_robin_3);
1365                 val64 = 0x0103020400000000ULL;
1366                 writeq(val64, &bar0->rx_w_round_robin_4);
1367
1368                 val64 = 0x8040201008040201ULL;
1369                 writeq(val64, &bar0->rts_qos_steering);
1370                 break;
1371         }
1372
1373         /* UDP Fix */
1374         val64 = 0;
1375         for (i = 0; i < 8; i++)
1376                 writeq(val64, &bar0->rts_frm_len_n[i]);
1377
1378         /* Set the default rts frame length for the rings configured */
1379         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1380         for (i = 0 ; i < config->rx_ring_num ; i++)
1381                 writeq(val64, &bar0->rts_frm_len_n[i]);
1382
1383         /* Set the frame length for the configured rings
1384          * desired by the user
1385          */
1386         for (i = 0; i < config->rx_ring_num; i++) {
1387                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1388                  * specified frame length steering.
1389                  * If the user provides the frame length then program
1390                  * the rts_frm_len register for those values or else
1391                  * leave it as it is.
1392                  */
1393                 if (rts_frm_len[i] != 0) {
1394                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1395                                 &bar0->rts_frm_len_n[i]);
1396                 }
1397         }
1398         
1399         /* Disable differentiated services steering logic */
1400         for (i = 0; i < 64; i++) {
1401                 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1402                         DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1403                                 dev->name);
1404                         DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1405                         return FAILURE;
1406                 }
1407         }
1408
1409         /* Program statistics memory */
1410         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1411
1412         if (nic->device_type == XFRAME_II_DEVICE) {
1413                 val64 = STAT_BC(0x320);
1414                 writeq(val64, &bar0->stat_byte_cnt);
1415         }
1416
1417         /*
1418          * Initializing the sampling rate for the device to calculate the
1419          * bandwidth utilization.
1420          */
1421         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1422             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1423         writeq(val64, &bar0->mac_link_util);
1424
1425
1426         /*
1427          * Initializing the Transmit and Receive Traffic Interrupt
1428          * Scheme.
1429          */
1430         /*
1431          * TTI Initialization. Default Tx timer gets us about
1432          * 250 interrupts per sec. Continuous interrupts are enabled
1433          * by default.
1434          */
1435         if (nic->device_type == XFRAME_II_DEVICE) {
1436                 int count = (nic->config.bus_speed * 125)/2;
1437                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1438         } else {
1439
1440                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1441         }
1442         val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1443             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1444             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1445                 if (use_continuous_tx_intrs)
1446                         val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1447         writeq(val64, &bar0->tti_data1_mem);
1448
1449         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1450             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1451             TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1452         writeq(val64, &bar0->tti_data2_mem);
1453
1454         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1455         writeq(val64, &bar0->tti_command_mem);
1456
1457         /*
1458          * Once the operation completes, the Strobe bit of the command
1459          * register will be reset. We poll for this particular condition
1460          * We wait for a maximum of 500ms for the operation to complete,
1461          * if it's not complete by then we return error.
1462          */
1463         time = 0;
1464         while (TRUE) {
1465                 val64 = readq(&bar0->tti_command_mem);
1466                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1467                         break;
1468                 }
1469                 if (time > 10) {
1470                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1471                                   dev->name);
1472                         return -1;
1473                 }
1474                 msleep(50);
1475                 time++;
1476         }
1477
1478         if (nic->config.bimodal) {
1479                 int k = 0;
1480                 for (k = 0; k < config->rx_ring_num; k++) {
1481                         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1482                         val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1483                         writeq(val64, &bar0->tti_command_mem);
1484
1485                 /*
1486                  * Once the operation completes, the Strobe bit of the command
1487                  * register will be reset. We poll for this particular condition
1488                  * We wait for a maximum of 500ms for the operation to complete,
1489                  * if it's not complete by then we return error.
1490                 */
1491                         time = 0;
1492                         while (TRUE) {
1493                                 val64 = readq(&bar0->tti_command_mem);
1494                                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1495                                         break;
1496                                 }
1497                                 if (time > 10) {
1498                                         DBG_PRINT(ERR_DBG,
1499                                                 "%s: TTI init Failed\n",
1500                                         dev->name);
1501                                         return -1;
1502                                 }
1503                                 time++;
1504                                 msleep(50);
1505                         }
1506                 }
1507         } else {
1508
1509                 /* RTI Initialization */
1510                 if (nic->device_type == XFRAME_II_DEVICE) {
1511                         /*
1512                          * Programmed to generate Apprx 500 Intrs per
1513                          * second
1514                          */
1515                         int count = (nic->config.bus_speed * 125)/4;
1516                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1517                 } else {
1518                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1519                 }
1520                 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1521                     RTI_DATA1_MEM_RX_URNG_B(0x10) |
1522                     RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1523
1524                 writeq(val64, &bar0->rti_data1_mem);
1525
1526                 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1527                     RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1528                 if (nic->intr_type == MSI_X)
1529                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1530                                 RTI_DATA2_MEM_RX_UFC_D(0x40));
1531                 else
1532                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1533                                 RTI_DATA2_MEM_RX_UFC_D(0x80));
1534                 writeq(val64, &bar0->rti_data2_mem);
1535
1536                 for (i = 0; i < config->rx_ring_num; i++) {
1537                         val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1538                                         | RTI_CMD_MEM_OFFSET(i);
1539                         writeq(val64, &bar0->rti_command_mem);
1540
1541                         /*
1542                          * Once the operation completes, the Strobe bit of the
1543                          * command register will be reset. We poll for this
1544                          * particular condition. We wait for a maximum of 500ms
1545                          * for the operation to complete, if it's not complete
1546                          * by then we return error.
1547                          */
1548                         time = 0;
1549                         while (TRUE) {
1550                                 val64 = readq(&bar0->rti_command_mem);
1551                                 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1552                                         break;
1553                                 }
1554                                 if (time > 10) {
1555                                         DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1556                                                   dev->name);
1557                                         return -1;
1558                                 }
1559                                 time++;
1560                                 msleep(50);
1561                         }
1562                 }
1563         }
1564
1565         /*
1566          * Initializing proper values as Pause threshold into all
1567          * the 8 Queues on Rx side.
1568          */
1569         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1570         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1571
1572         /* Disable RMAC PAD STRIPPING */
1573         add = &bar0->mac_cfg;
1574         val64 = readq(&bar0->mac_cfg);
1575         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1576         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1577         writel((u32) (val64), add);
1578         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1579         writel((u32) (val64 >> 32), (add + 4));
1580         val64 = readq(&bar0->mac_cfg);
1581
1582         /* Enable FCS stripping by adapter */
1583         add = &bar0->mac_cfg;
1584         val64 = readq(&bar0->mac_cfg);
1585         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1586         if (nic->device_type == XFRAME_II_DEVICE)
1587                 writeq(val64, &bar0->mac_cfg);
1588         else {
1589                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1590                 writel((u32) (val64), add);
1591                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1592                 writel((u32) (val64 >> 32), (add + 4));
1593         }
1594
1595         /*
1596          * Set the time value to be inserted in the pause frame
1597          * generated by xena.
1598          */
1599         val64 = readq(&bar0->rmac_pause_cfg);
1600         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1601         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1602         writeq(val64, &bar0->rmac_pause_cfg);
1603
1604         /*
1605          * Set the Threshold Limit for Generating the pause frame
1606          * If the amount of data in any Queue exceeds ratio of
1607          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1608          * pause frame is generated
1609          */
1610         val64 = 0;
1611         for (i = 0; i < 4; i++) {
1612                 val64 |=
1613                     (((u64) 0xFF00 | nic->mac_control.
1614                       mc_pause_threshold_q0q3)
1615                      << (i * 2 * 8));
1616         }
1617         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1618
1619         val64 = 0;
1620         for (i = 0; i < 4; i++) {
1621                 val64 |=
1622                     (((u64) 0xFF00 | nic->mac_control.
1623                       mc_pause_threshold_q4q7)
1624                      << (i * 2 * 8));
1625         }
1626         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1627
1628         /*
1629          * TxDMA will stop Read request if the number of read split has
1630          * exceeded the limit pointed by shared_splits
1631          */
1632         val64 = readq(&bar0->pic_control);
1633         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1634         writeq(val64, &bar0->pic_control);
1635
1636         if (nic->config.bus_speed == 266) {
1637                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1638                 writeq(0x0, &bar0->read_retry_delay);
1639                 writeq(0x0, &bar0->write_retry_delay);
1640         }
1641
1642         /*
1643          * Programming the Herc to split every write transaction
1644          * that does not start on an ADB to reduce disconnects.
1645          */
1646         if (nic->device_type == XFRAME_II_DEVICE) {
1647                 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1648                         MISC_LINK_STABILITY_PRD(3);
1649                 writeq(val64, &bar0->misc_control);
1650                 val64 = readq(&bar0->pic_control2);
1651                 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1652                 writeq(val64, &bar0->pic_control2);
1653         }
1654         if (strstr(nic->product_name, "CX4")) {
1655                 val64 = TMAC_AVG_IPG(0x17);
1656                 writeq(val64, &bar0->tmac_avg_ipg);
1657         }
1658
1659         return SUCCESS;
1660 }
1661 #define LINK_UP_DOWN_INTERRUPT          1
1662 #define MAC_RMAC_ERR_TIMER              2
1663
1664 static int s2io_link_fault_indication(struct s2io_nic *nic)
1665 {
1666         if (nic->intr_type != INTA)
1667                 return MAC_RMAC_ERR_TIMER;
1668         if (nic->device_type == XFRAME_II_DEVICE)
1669                 return LINK_UP_DOWN_INTERRUPT;
1670         else
1671                 return MAC_RMAC_ERR_TIMER;
1672 }
1673
1674 /**
1675  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1676  *  @nic: device private variable,
1677  *  @mask: A mask indicating which Intr block must be modified and,
1678  *  @flag: A flag indicating whether to enable or disable the Intrs.
1679  *  Description: This function will either disable or enable the interrupts
1680  *  depending on the flag argument. The mask argument can be used to
1681  *  enable/disable any Intr block.
1682  *  Return Value: NONE.
1683  */
1684
static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	/*
	 * val64 accumulates this group's bit(s) of general_int_mask.
	 * A set bit in general_int_mask masks (disables) that interrupt
	 * source; clearing the bit unmasks (enables) it.
	 */
	register u64 val64 = 0, temp64 = 0;

	/*  Top level interrupt classification */
	/*  PIC Interrupts */
	if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
		/*  Enable PIC Intrs in the general intr mask register */
		val64 = TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/* Read-modify-write: clear only this group's bit. */
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
					LINK_UP_DOWN_INTERRUPT ) {
				/* Unmask only the GPIO (link up/down) source
				 * inside the PIC block. */
				temp64 = readq(&bar0->pic_int_mask);
				temp64 &= ~((u64) PIC_INT_GPIO);
				writeq(temp64, &bar0->pic_int_mask);
				temp64 = readq(&bar0->gpio_int_mask);
				temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
				writeq(temp64, &bar0->gpio_int_mask);
			} else {
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			}
			/*
			 * No MSI Support is available presently, so TTI and
			 * RTI interrupts are also disabled.
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  MAC Interrupts */
	/*  Enabling/Disabling MAC interrupts */
	if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
		val64 = TXMAC_INT_M | RXMAC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All MAC block error interrupts are disabled for now
			 * TODO
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MAC Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
			writeq(DISABLE_ALL_INTRS,
			       &bar0->mac_rmac_err_mask);

			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		val64 = TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}

	/*  Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		val64 = RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			val64 |= temp64;
			writeq(val64, &bar0->general_int_mask);
		}
	}
}
1803
/**
 *  verify_pcc_quiescent - Checks for PCC quiescent state
 *  Return: 1 if the PCC is quiescent
 *          0 if the PCC is not quiescent
 */
1809 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
1810 {
1811         int ret = 0, herc;
1812         struct XENA_dev_config __iomem *bar0 = sp->bar0;
1813         u64 val64 = readq(&bar0->adapter_status);
1814         
1815         herc = (sp->device_type == XFRAME_II_DEVICE);
1816
1817         if (flag == FALSE) {
1818                 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1819                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
1820                                 ret = 1;
1821                 } else {
1822                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1823                                 ret = 1;
1824                 }
1825         } else {
1826                 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1827                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1828                              ADAPTER_STATUS_RMAC_PCC_IDLE))
1829                                 ret = 1;
1830                 } else {
1831                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1832                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1833                                 ret = 1;
1834                 }
1835         }
1836
1837         return ret;
1838 }
/**
 *  verify_xena_quiescence - Checks whether the H/W is ready
 *  Description: Reads the adapter status register and verifies that every
 *  block of the device (DMA engines, MAC buffers, PIC, memory controller
 *  and PLLs) reports itself ready before the adapter is enabled.
 *  Return: 1 if Xena is quiescent
 *          0 if Xena is not quiescent
 */
1848
1849 static int verify_xena_quiescence(struct s2io_nic *sp)
1850 {
1851         int  mode;
1852         struct XENA_dev_config __iomem *bar0 = sp->bar0;
1853         u64 val64 = readq(&bar0->adapter_status);
1854         mode = s2io_verify_pci_mode(sp);
1855
1856         if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
1857                 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
1858                 return 0;
1859         }
1860         if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
1861         DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
1862                 return 0;
1863         }
1864         if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
1865                 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
1866                 return 0;
1867         }
1868         if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
1869                 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
1870                 return 0;
1871         }
1872         if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
1873                 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
1874                 return 0;
1875         }
1876         if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
1877                 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
1878                 return 0;
1879         }
1880         if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
1881                 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
1882                 return 0;
1883         }
1884         if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
1885                 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
1886                 return 0;
1887         }
1888
1889         /*
1890          * In PCI 33 mode, the P_PLL is not used, and therefore,
1891          * the the P_PLL_LOCK bit in the adapter_status register will
1892          * not be asserted.
1893          */
1894         if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
1895                 sp->device_type == XFRAME_II_DEVICE && mode !=
1896                 PCI_MODE_PCI_33) {
1897                 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
1898                 return 0;
1899         }
1900         if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1901                         ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1902                 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
1903                 return 0;
1904         }
1905         return 1;
1906 }
1907
/**
 * fix_mac_address - Fix for MAC address problem on Alpha platforms
 * @sp: Pointer to device specific structure
 * Description :
 * New procedure to clear MAC address reading problems on Alpha platforms
 *
 */
1915
1916 static void fix_mac_address(struct s2io_nic * sp)
1917 {
1918         struct XENA_dev_config __iomem *bar0 = sp->bar0;
1919         u64 val64;
1920         int i = 0;
1921
1922         while (fix_mac[i] != END_SIGN) {
1923                 writeq(fix_mac[i++], &bar0->gpio_control);
1924                 udelay(10);
1925                 val64 = readq(&bar0->gpio_control);
1926         }
1927 }
1928
/**
 *  start_nic - Turns the device on
 *  @nic : device private variable.
 *  Description:
 *  This function actually turns the device on. Before this function is
 *  called, all Registers are configured from their reset states
 *  and shared memory is allocated but the NIC is still quiescent. On
 *  calling this function, the device interrupts are cleared and the NIC is
 *  literally switched on by writing into the adapter control register.
 *  Return Value:
 *  SUCCESS on success and -1 on failure.
 */
1941
static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*  PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Point each ring's PRC at the first Rx block's DMA addr. */
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->config.bimodal)
			val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		/* Replace the backoff interval field with 0x1000. */
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Honor the module parameter that disables VLAN tag stripping. */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/*
	 * Clear the ADAPTER_ECC_EN bit in adapter_control.
	 * NOTE(review): the original comment here said "Enabling ECC
	 * Protection" but the code masks the bit OFF -- confirm the
	 * intended polarity of ADAPTER_ECC_EN before relying on either.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Clearing any possible Link state change interrupts that
	 * could have popped up just before Enabling the card.
	 */
	val64 = readq(&bar0->mac_rmac_err_reg);
	if (val64)
		writeq(val64, &bar0->mac_rmac_err_reg);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initally on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		/* Magic LED programming sequence at BAR0 offset 0x2700. */
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
2057 /**
2058  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2059  */
2060 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2061                                         TxD *txdlp, int get_off)
2062 {
2063         struct s2io_nic *nic = fifo_data->nic;
2064         struct sk_buff *skb;
2065         struct TxD *txds;
2066         u16 j, frg_cnt;
2067
2068         txds = txdlp;
2069         if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2070                 pci_unmap_single(nic->pdev, (dma_addr_t)
2071                         txds->Buffer_Pointer, sizeof(u64),
2072                         PCI_DMA_TODEVICE);
2073                 txds++;
2074         }
2075
2076         skb = (struct sk_buff *) ((unsigned long)
2077                         txds->Host_Control);
2078         if (!skb) {
2079                 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2080                 return NULL;
2081         }
2082         pci_unmap_single(nic->pdev, (dma_addr_t)
2083                          txds->Buffer_Pointer,
2084                          skb->len - skb->data_len,
2085                          PCI_DMA_TODEVICE);
2086         frg_cnt = skb_shinfo(skb)->nr_frags;
2087         if (frg_cnt) {
2088                 txds++;
2089                 for (j = 0; j < frg_cnt; j++, txds++) {
2090                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2091                         if (!txds->Buffer_Pointer)
2092                                 break;
2093                         pci_unmap_page(nic->pdev, (dma_addr_t)
2094                                         txds->Buffer_Pointer,
2095                                        frag->size, PCI_DMA_TODEVICE);
2096                 }
2097         }
2098         memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2099         return(skb);
2100 }
2101
2102 /**
2103  *  free_tx_buffers - Free all queued Tx buffers
2104  *  @nic : device private variable.
2105  *  Description:
2106  *  Free all queued Tx buffers.
2107  *  Return Value: void
2108 */
2109
2110 static void free_tx_buffers(struct s2io_nic *nic)
2111 {
2112         struct net_device *dev = nic->dev;
2113         struct sk_buff *skb;
2114         struct TxD *txdp;
2115         int i, j;
2116         struct mac_info *mac_control;
2117         struct config_param *config;
2118         int cnt = 0;
2119
2120         mac_control = &nic->mac_control;
2121         config = &nic->config;
2122
2123         for (i = 0; i < config->tx_fifo_num; i++) {
2124                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2125                         txdp = (struct TxD *) mac_control->fifos[i].list_info[j].
2126                             list_virt_addr;
2127                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2128                         if (skb) {
2129                                 dev_kfree_skb(skb);
2130                                 cnt++;
2131                         }
2132                 }
2133                 DBG_PRINT(INTR_DBG,
2134                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2135                           dev->name, cnt, i);
2136                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2137                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2138         }
2139 }
2140
/**
 *   stop_nic - To stop the nic
 *   @nic : device private variable.
 *   Description:
 *   This function does exactly the opposite of what the start_nic()
 *   function does. This function is called to stop the device.
 *   Return Value:
 *   void.
 */
2150
2151 static void stop_nic(struct s2io_nic *nic)
2152 {
2153         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2154         register u64 val64 = 0;
2155         u16 interruptible;
2156         struct mac_info *mac_control;
2157         struct config_param *config;
2158
2159         mac_control = &nic->mac_control;
2160         config = &nic->config;
2161
2162         /*  Disable all interrupts */
2163         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2164         interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2165         interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2166         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2167
2168         /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2169         val64 = readq(&bar0->adapter_control);
2170         val64 &= ~(ADAPTER_CNTL_EN);
2171         writeq(val64, &bar0->adapter_control);
2172 }
2173
/*
 * Populate a 3-buffer-mode RxD: Buffer-1 gets the L3/L4 header area of
 * @skb, Buffer-2 gets a freshly allocated, cache-aligned frag_list skb
 * for the L4 payload. Returns SUCCESS, or -ENOMEM if the payload skb
 * cannot be allocated.
 */
static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \
				sk_buff *skb)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *frag_list;
	void *tmp;

	/* Buffer-1 receives L3/L4 headers */
	/* NOTE(review): the pci_map_single return value is not checked
	 * for DMA mapping errors -- confirm against pci_dma_mapping_error
	 * usage elsewhere in the driver. */
	((struct RxD3*)rxdp)->Buffer1_ptr = pci_map_single
			(nic->pdev, skb->data, l3l4hdr_size + 4,
			PCI_DMA_FROMDEVICE);

	/* skb_shinfo(skb)->frag_list will have L4 data payload */
	/* Over-allocate by ALIGN_SIZE so the data pointer can be bumped
	 * up to the next alignment boundary below. */
	skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
	if (skb_shinfo(skb)->frag_list == NULL) {
		DBG_PRINT(INFO_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
		/* NOTE(review): Buffer1_ptr is left mapped on this error
		 * path -- presumably the caller unmaps it; verify. */
		return -ENOMEM ;
	}
	frag_list = skb_shinfo(skb)->frag_list;
	skb->truesize += frag_list->truesize;
	frag_list->next = NULL;
	/* Round the payload data pointer up to an (ALIGN_SIZE + 1)
	 * boundary, then resync the tail pointer to match. */
	tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
	frag_list->data = tmp;
	skb_reset_tail_pointer(frag_list);

	/* Buffer-2 receives L4 data payload */
	((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
				frag_list->data, dev->mtu,
				PCI_DMA_FROMDEVICE);
	/* Record both buffer sizes in the descriptor's Control_2 field. */
	rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
	rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);

	return SUCCESS;
}
2208
2209 /**
2210  *  fill_rx_buffers - Allocates the Rx side skbs
2211  *  @nic:  device private variable
2212  *  @ring_no: ring number
2213  *  Description:
2214  *  The function allocates Rx side skbs and puts the physical
2215  *  address of these buffers into the RxD buffer pointers, so that the NIC
2216  *  can DMA the received frame into these locations.
2217  *  The NIC supports 3 receive modes, viz
2218  *  1. single buffer,
2219  *  2. three buffer and
2220  *  3. Five buffer modes.
2221  *  Each mode defines how many fragments the received frame will be split
2222  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2223  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2224  *  is split into 3 fragments. As of now only single buffer mode is
2225  *  supported.
2226  *   Return Value:
2227  *  SUCCESS on success or an appropriate -ve value on failure.
2228  */
2229
2230 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2231 {
2232         struct net_device *dev = nic->dev;
2233         struct sk_buff *skb;
2234         struct RxD_t *rxdp;
2235         int off, off1, size, block_no, block_no1;
2236         u32 alloc_tab = 0;
2237         u32 alloc_cnt;
2238         struct mac_info *mac_control;
2239         struct config_param *config;
2240         u64 tmp;
2241         struct buffAdd *ba;
2242         unsigned long flags;
2243         struct RxD_t *first_rxdp = NULL;
2244         u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2245
2246         mac_control = &nic->mac_control;
2247         config = &nic->config;
2248         alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2249             atomic_read(&nic->rx_bufs_left[ring_no]);
2250
2251         block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
2252         off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2253         while (alloc_tab < alloc_cnt) {
2254                 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2255                     block_index;
2256                 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2257
2258                 rxdp = mac_control->rings[ring_no].
2259                                 rx_blocks[block_no].rxds[off].virt_addr;
2260
2261                 if ((block_no == block_no1) && (off == off1) &&
2262                                         (rxdp->Host_Control)) {
2263                         DBG_PRINT(INTR_DBG, "%s: Get and Put",
2264                                   dev->name);
2265                         DBG_PRINT(INTR_DBG, " info equated\n");
2266                         goto end;
2267                 }
2268                 if (off && (off == rxd_count[nic->rxd_mode])) {
2269                         mac_control->rings[ring_no].rx_curr_put_info.
2270                             block_index++;
2271                         if (mac_control->rings[ring_no].rx_curr_put_info.
2272                             block_index == mac_control->rings[ring_no].
2273                                         block_count)
2274                                 mac_control->rings[ring_no].rx_curr_put_info.
2275                                         block_index = 0;
2276                         block_no = mac_control->rings[ring_no].
2277                                         rx_curr_put_info.block_index;
2278                         if (off == rxd_count[nic->rxd_mode])
2279                                 off = 0;
2280                         mac_control->rings[ring_no].rx_curr_put_info.
2281                                 offset = off;
2282                         rxdp = mac_control->rings[ring_no].
2283                                 rx_blocks[block_no].block_virt_addr;
2284                         DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2285                                   dev->name, rxdp);
2286                 }
2287                 if(!napi) {
2288                         spin_lock_irqsave(&nic->put_lock, flags);
2289                         mac_control->rings[ring_no].put_pos =
2290                         (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2291                         spin_unlock_irqrestore(&nic->put_lock, flags);
2292                 } else {
2293                         mac_control->rings[ring_no].put_pos =
2294                         (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2295                 }
2296                 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2297                         ((nic->rxd_mode >= RXD_MODE_3A) &&
2298                                 (rxdp->Control_2 & BIT(0)))) {
2299                         mac_control->rings[ring_no].rx_curr_put_info.
2300                                         offset = off;
2301                         goto end;
2302                 }
2303                 /* calculate size of skb based on ring mode */
2304                 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2305                                 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2306                 if (nic->rxd_mode == RXD_MODE_1)
2307                         size += NET_IP_ALIGN;
2308                 else if (nic->rxd_mode == RXD_MODE_3B)
2309                         size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2310                 else
2311                         size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
2312
2313                 /* allocate skb */
2314                 skb = dev_alloc_skb(size);
2315                 if(!skb) {
2316                         DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
2317                         DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
2318                         if (first_rxdp) {
2319                                 wmb();
2320                                 first_rxdp->Control_1 |= RXD_OWN_XENA;
2321                         }
2322                         return -ENOMEM ;
2323                 }
2324                 if (nic->rxd_mode == RXD_MODE_1) {
2325                         /* 1 buffer mode - normal operation mode */
2326                         memset(rxdp, 0, sizeof(struct RxD1));
2327                         skb_reserve(skb, NET_IP_ALIGN);
2328                         ((struct RxD1*)rxdp)->Buffer0_ptr = pci_map_single
2329                             (nic->pdev, skb->data, size - NET_IP_ALIGN,
2330                                 PCI_DMA_FROMDEVICE);
2331                         rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2332
2333                 } else if (nic->rxd_mode >= RXD_MODE_3A) {
2334                         /*
2335                          * 2 or 3 buffer mode -
2336                          * Both 2 buffer mode and 3 buffer mode provides 128
2337                          * byte aligned receive buffers.
2338                          *
2339                          * 3 buffer mode provides header separation where in
2340                          * skb->data will have L3/L4 headers where as
2341                          * skb_shinfo(skb)->frag_list will have the L4 data
2342                          * payload
2343                          */
2344
2345                         /* save the buffer pointers to avoid frequent dma mapping */
2346                         Buffer0_ptr = ((struct RxD3*)rxdp)->Buffer0_ptr;
2347                         Buffer1_ptr = ((struct RxD3*)rxdp)->Buffer1_ptr;
2348                         memset(rxdp, 0, sizeof(struct RxD3));
2349                         /* restore the buffer pointers for dma sync*/
2350                         ((struct RxD3*)rxdp)->Buffer0_ptr = Buffer0_ptr;
2351                         ((struct RxD3*)rxdp)->Buffer1_ptr = Buffer1_ptr;
2352
2353                         ba = &mac_control->rings[ring_no].ba[block_no][off];
2354                         skb_reserve(skb, BUF0_LEN);
2355                         tmp = (u64)(unsigned long) skb->data;
2356                         tmp += ALIGN_SIZE;
2357                         tmp &= ~ALIGN_SIZE;
2358                         skb->data = (void *) (unsigned long)tmp;
2359                         skb_reset_tail_pointer(skb);
2360
2361                         if (!(((struct RxD3*)rxdp)->Buffer0_ptr))
2362                                 ((struct RxD3*)rxdp)->Buffer0_ptr =
2363                                    pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2364                                            PCI_DMA_FROMDEVICE);
2365                         else
2366                                 pci_dma_sync_single_for_device(nic->pdev,
2367                                     (dma_addr_t) ((struct RxD3*)rxdp)->Buffer0_ptr,
2368                                     BUF0_LEN, PCI_DMA_FROMDEVICE);
2369                         rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2370                         if (nic->rxd_mode == RXD_MODE_3B) {
2371                                 /* Two buffer mode */
2372
2373                                 /*
2374                                  * Buffer2 will have L3/L4 header plus
2375                                  * L4 payload
2376                                  */
2377                                 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single
2378                                 (nic->pdev, skb->data, dev->mtu + 4,
2379                                                 PCI_DMA_FROMDEVICE);
2380
2381                                 /* Buffer-1 will be dummy buffer. Not used */
2382                                 if (!(((struct RxD3*)rxdp)->Buffer1_ptr)) {
2383                                         ((struct RxD3*)rxdp)->Buffer1_ptr =
2384                                                 pci_map_single(nic->pdev,
2385                                                 ba->ba_1, BUF1_LEN,
2386                                                 PCI_DMA_FROMDEVICE);
2387                                 }
2388                                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2389                                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2390                                                                 (dev->mtu + 4);
2391                         } else {
2392                                 /* 3 buffer mode */
2393                                 if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
2394                                         dev_kfree_skb_irq(skb);
2395                                         if (first_rxdp) {
2396                                                 wmb();
2397                                                 first_rxdp->Control_1 |=
2398                                                         RXD_OWN_XENA;
2399                                         }
2400                                         return -ENOMEM ;
2401                                 }
2402                         }
2403                         rxdp->Control_2 |= BIT(0);
2404                 }
2405                 rxdp->Host_Control = (unsigned long) (skb);
2406                 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2407                         rxdp->Control_1 |= RXD_OWN_XENA;
2408                 off++;
2409                 if (off == (rxd_count[nic->rxd_mode] + 1))
2410                         off = 0;
2411                 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2412
2413                 rxdp->Control_2 |= SET_RXD_MARKER;
2414                 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2415                         if (first_rxdp) {
2416                                 wmb();
2417                                 first_rxdp->Control_1 |= RXD_OWN_XENA;
2418                         }
2419                         first_rxdp = rxdp;
2420                 }
2421                 atomic_inc(&nic->rx_bufs_left[ring_no]);
2422                 alloc_tab++;
2423         }
2424
2425       end:
2426         /* Transfer ownership of first descriptor to adapter just before
2427          * exiting. Before that, use memory barrier so that ownership
2428          * and other fields are seen by adapter correctly.
2429          */
2430         if (first_rxdp) {
2431                 wmb();
2432                 first_rxdp->Control_1 |= RXD_OWN_XENA;
2433         }
2434
2435         return SUCCESS;
2436 }
2437
2438 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2439 {
2440         struct net_device *dev = sp->dev;
2441         int j;
2442         struct sk_buff *skb;
2443         struct RxD_t *rxdp;
2444         struct mac_info *mac_control;
2445         struct buffAdd *ba;
2446
2447         mac_control = &sp->mac_control;
2448         for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2449                 rxdp = mac_control->rings[ring_no].
2450                                 rx_blocks[blk].rxds[j].virt_addr;
2451                 skb = (struct sk_buff *)
2452                         ((unsigned long) rxdp->Host_Control);
2453                 if (!skb) {
2454                         continue;
2455                 }
2456                 if (sp->rxd_mode == RXD_MODE_1) {
2457                         pci_unmap_single(sp->pdev, (dma_addr_t)
2458                                  ((struct RxD1*)rxdp)->Buffer0_ptr,
2459                                  dev->mtu +
2460                                  HEADER_ETHERNET_II_802_3_SIZE
2461                                  + HEADER_802_2_SIZE +
2462                                  HEADER_SNAP_SIZE,
2463                                  PCI_DMA_FROMDEVICE);
2464                         memset(rxdp, 0, sizeof(struct RxD1));
2465                 } else if(sp->rxd_mode == RXD_MODE_3B) {
2466                         ba = &mac_control->rings[ring_no].
2467                                 ba[blk][j];
2468                         pci_unmap_single(sp->pdev, (dma_addr_t)
2469                                  ((struct RxD3*)rxdp)->Buffer0_ptr,
2470                                  BUF0_LEN,
2471                                  PCI_DMA_FROMDEVICE);
2472                         pci_unmap_single(sp->pdev, (dma_addr_t)
2473                                  ((struct RxD3*)rxdp)->Buffer1_ptr,
2474                                  BUF1_LEN,
2475                                  PCI_DMA_FROMDEVICE);
2476                         pci_unmap_single(sp->pdev, (dma_addr_t)
2477                                  ((struct RxD3*)rxdp)->Buffer2_ptr,
2478                                  dev->mtu + 4,
2479                                  PCI_DMA_FROMDEVICE);
2480                         memset(rxdp, 0, sizeof(struct RxD3));
2481                 } else {
2482                         pci_unmap_single(sp->pdev, (dma_addr_t)
2483                                 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2484                                 PCI_DMA_FROMDEVICE);
2485                         pci_unmap_single(sp->pdev, (dma_addr_t)
2486                                 ((struct RxD3*)rxdp)->Buffer1_ptr,
2487                                 l3l4hdr_size + 4,
2488                                 PCI_DMA_FROMDEVICE);
2489                         pci_unmap_single(sp->pdev, (dma_addr_t)
2490                                 ((struct RxD3*)rxdp)->Buffer2_ptr, dev->mtu,
2491                                 PCI_DMA_FROMDEVICE);
2492                         memset(rxdp, 0, sizeof(struct RxD3));
2493                 }
2494                 dev_kfree_skb(skb);
2495                 atomic_dec(&sp->rx_bufs_left[ring_no]);
2496         }
2497 }
2498
2499 /**
2500  *  free_rx_buffers - Frees all Rx buffers
2501  *  @sp: device private variable.
2502  *  Description:
2503  *  This function will free all Rx buffers allocated by host.
2504  *  Return Value:
2505  *  NONE.
2506  */
2507
2508 static void free_rx_buffers(struct s2io_nic *sp)
2509 {
2510         struct net_device *dev = sp->dev;
2511         int i, blk = 0, buf_cnt = 0;
2512         struct mac_info *mac_control;
2513         struct config_param *config;
2514
2515         mac_control = &sp->mac_control;
2516         config = &sp->config;
2517
2518         for (i = 0; i < config->rx_ring_num; i++) {
2519                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2520                         free_rxd_blk(sp,i,blk);
2521
2522                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2523                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2524                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2525                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2526                 atomic_set(&sp->rx_bufs_left[i], 0);
2527                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2528                           dev->name, buf_cnt, i);
2529         }
2530 }
2531
2532 /**
2533  * s2io_poll - Rx interrupt handler for NAPI support
2534  * @dev : pointer to the device structure.
2535  * @budget : The number of packets that were budgeted to be processed
2536  * during  one pass through the 'Poll" function.
2537  * Description:
2538  * Comes into picture only if NAPI support has been incorporated. It does
2539  * the same thing that rx_intr_handler does, but not in a interrupt context
2540  * also It will process only a given number of packets.
2541  * Return value:
2542  * 0 on success and 1 if there are No Rx packets to be processed.
2543  */
2544
2545 static int s2io_poll(struct net_device *dev, int *budget)
2546 {
2547         struct s2io_nic *nic = dev->priv;
2548         int pkt_cnt = 0, org_pkts_to_process;
2549         struct mac_info *mac_control;
2550         struct config_param *config;
2551         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2552         int i;
2553
2554         atomic_inc(&nic->isr_cnt);
2555         mac_control = &nic->mac_control;
2556         config = &nic->config;
2557
2558         nic->pkts_to_process = *budget;
2559         if (nic->pkts_to_process > dev->quota)
2560                 nic->pkts_to_process = dev->quota;
2561         org_pkts_to_process = nic->pkts_to_process;
2562
2563         writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2564         readl(&bar0->rx_traffic_int);
2565
2566         for (i = 0; i < config->rx_ring_num; i++) {
2567                 rx_intr_handler(&mac_control->rings[i]);
2568                 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2569                 if (!nic->pkts_to_process) {
2570                         /* Quota for the current iteration has been met */
2571                         goto no_rx;
2572                 }
2573         }
2574         if (!pkt_cnt)
2575                 pkt_cnt = 1;
2576
2577         dev->quota -= pkt_cnt;
2578         *budget -= pkt_cnt;
2579         netif_rx_complete(dev);
2580
2581         for (i = 0; i < config->rx_ring_num; i++) {
2582                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2583                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2584                         DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2585                         break;
2586                 }
2587         }
2588         /* Re enable the Rx interrupts. */
2589         writeq(0x0, &bar0->rx_traffic_mask);
2590         readl(&bar0->rx_traffic_mask);
2591         atomic_dec(&nic->isr_cnt);
2592         return 0;
2593
2594 no_rx:
2595         dev->quota -= pkt_cnt;
2596         *budget -= pkt_cnt;
2597
2598         for (i = 0; i < config->rx_ring_num; i++) {
2599                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2600                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2601                         DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2602                         break;
2603                 }
2604         }
2605         atomic_dec(&nic->isr_cnt);
2606         return 1;
2607 }
2608
2609 #ifdef CONFIG_NET_POLL_CONTROLLER
2610 /**
2611  * s2io_netpoll - netpoll event handler entry point
2612  * @dev : pointer to the device structure.
2613  * Description:
2614  *      This function will be called by upper layer to check for events on the
2615  * interface in situations where interrupts are disabled. It is used for
2616  * specific in-kernel networking tasks, such as remote consoles and kernel
2617  * debugging over the network (example netdump in RedHat).
2618  */
2619 static void s2io_netpoll(struct net_device *dev)
2620 {
2621         struct s2io_nic *nic = dev->priv;
2622         struct mac_info *mac_control;
2623         struct config_param *config;
2624         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2625         u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2626         int i;
2627
2628         disable_irq(dev->irq);
2629
2630         atomic_inc(&nic->isr_cnt);
2631         mac_control = &nic->mac_control;
2632         config = &nic->config;
2633
2634         writeq(val64, &bar0->rx_traffic_int);
2635         writeq(val64, &bar0->tx_traffic_int);
2636
2637         /* we need to free up the transmitted skbufs or else netpoll will
2638          * run out of skbs and will fail and eventually netpoll application such
2639          * as netdump will fail.
2640          */
2641         for (i = 0; i < config->tx_fifo_num; i++)
2642                 tx_intr_handler(&mac_control->fifos[i]);
2643
2644         /* check for received packet and indicate up to network */
2645         for (i = 0; i < config->rx_ring_num; i++)
2646                 rx_intr_handler(&mac_control->rings[i]);
2647
2648         for (i = 0; i < config->rx_ring_num; i++) {
2649                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2650                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2651                         DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2652                         break;
2653                 }
2654         }
2655         atomic_dec(&nic->isr_cnt);
2656         enable_irq(dev->irq);
2657         return;
2658 }
2659 #endif
2660
2661 /**
2662  *  rx_intr_handler - Rx interrupt handler
2663  *  @nic: device private variable.
2664  *  Description:
2665  *  If the interrupt is because of a received frame or if the
2666  *  receive ring contains fresh as yet un-processed frames,this function is
2667  *  called. It picks out the RxD at which place the last Rx processing had
2668  *  stopped and sends the skb to the OSM's Rx handler and then increments
2669  *  the offset.
2670  *  Return Value:
2671  *  NONE.
2672  */
2673 static void rx_intr_handler(struct ring_info *ring_data)
2674 {
2675         struct s2io_nic *nic = ring_data->nic;
2676         struct net_device *dev = (struct net_device *) nic->dev;
2677         int get_block, put_block, put_offset;
2678         struct rx_curr_get_info get_info, put_info;
2679         struct RxD_t *rxdp;
2680         struct sk_buff *skb;
2681         int pkt_cnt = 0;
2682         int i;
2683
2684         spin_lock(&nic->rx_lock);
2685         if (atomic_read(&nic->card_state) == CARD_DOWN) {
2686                 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2687                           __FUNCTION__, dev->name);
2688                 spin_unlock(&nic->rx_lock);
2689                 return;
2690         }
2691
2692         get_info = ring_data->rx_curr_get_info;
2693         get_block = get_info.block_index;
2694         memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2695         put_block = put_info.block_index;
2696         rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2697         if (!napi) {
2698                 spin_lock(&nic->put_lock);
2699                 put_offset = ring_data->put_pos;
2700                 spin_unlock(&nic->put_lock);
2701         } else
2702                 put_offset = ring_data->put_pos;
2703
2704         while (RXD_IS_UP2DT(rxdp)) {
2705                 /*
2706                  * If your are next to put index then it's
2707                  * FIFO full condition
2708                  */
2709                 if ((get_block == put_block) &&
2710                     (get_info.offset + 1) == put_info.offset) {
2711                         DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2712                         break;
2713                 }
2714                 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2715                 if (skb == NULL) {
2716                         DBG_PRINT(ERR_DBG, "%s: The skb is ",
2717                                   dev->name);
2718                         DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2719                         spin_unlock(&nic->rx_lock);
2720                         return;
2721                 }
2722                 if (nic->rxd_mode == RXD_MODE_1) {
2723                         pci_unmap_single(nic->pdev, (dma_addr_t)
2724                                  ((struct RxD1*)rxdp)->Buffer0_ptr,
2725                                  dev->mtu +
2726                                  HEADER_ETHERNET_II_802_3_SIZE +
2727                                  HEADER_802_2_SIZE +
2728                                  HEADER_SNAP_SIZE,
2729                                  PCI_DMA_FROMDEVICE);
2730                 } else if (nic->rxd_mode == RXD_MODE_3B) {
2731                         pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2732                                  ((struct RxD3*)rxdp)->Buffer0_ptr,
2733                                  BUF0_LEN, PCI_DMA_FROMDEVICE);
2734                         pci_unmap_single(nic->pdev, (dma_addr_t)
2735                                  ((struct RxD3*)rxdp)->Buffer2_ptr,
2736                                  dev->mtu + 4,
2737                                  PCI_DMA_FROMDEVICE);
2738                 } else {
2739                         pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2740                                          ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2741                                          PCI_DMA_FROMDEVICE);
2742                         pci_unmap_single(nic->pdev, (dma_addr_t)
2743                                          ((struct RxD3*)rxdp)->Buffer1_ptr,
2744                                          l3l4hdr_size + 4,
2745                                          PCI_DMA_FROMDEVICE);
2746                         pci_unmap_single(nic->pdev, (dma_addr_t)
2747                                          ((struct RxD3*)rxdp)->Buffer2_ptr,
2748                                          dev->mtu, PCI_DMA_FROMDEVICE);
2749                 }
2750                 prefetch(skb->data);
2751                 rx_osm_handler(ring_data, rxdp);
2752                 get_info.offset++;
2753                 ring_data->rx_curr_get_info.offset = get_info.offset;
2754                 rxdp = ring_data->rx_blocks[get_block].
2755                                 rxds[get_info.offset].virt_addr;
2756                 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2757                         get_info.offset = 0;
2758                         ring_data->rx_curr_get_info.offset = get_info.offset;
2759                         get_block++;
2760                         if (get_block == ring_data->block_count)
2761                                 get_block = 0;
2762                         ring_data->rx_curr_get_info.block_index = get_block;
2763                         rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2764                 }
2765
2766                 nic->pkts_to_process -= 1;
2767                 if ((napi) && (!nic->pkts_to_process))
2768                         break;
2769                 pkt_cnt++;
2770                 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2771                         break;
2772         }
2773         if (nic->lro) {
2774                 /* Clear all LRO sessions before exiting */
2775                 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2776                         struct lro *lro = &nic->lro0_n[i];
2777                         if (lro->in_use) {
2778                                 update_L3L4_header(nic, lro);
2779                                 queue_rx_frame(lro->parent);
2780                                 clear_lro_session(lro);
2781                         }
2782                 }
2783         }
2784
2785         spin_unlock(&nic->rx_lock);
2786 }
2787
2788 /**
2789  *  tx_intr_handler - Transmit interrupt handler
2790  *  @nic : device private variable
2791  *  Description:
2792  *  If an interrupt was raised to indicate DMA complete of the
2793  *  Tx packet, this function is called. It identifies the last TxD
2794  *  whose buffer was freed and frees all skbs whose data have already
2795  *  DMA'ed into the NICs internal memory.
2796  *  Return Value:
2797  *  NONE
2798  */
2799
2800 static void tx_intr_handler(struct fifo_info *fifo_data)
2801 {
2802         struct s2io_nic *nic = fifo_data->nic;
2803         struct net_device *dev = (struct net_device *) nic->dev;
2804         struct tx_curr_get_info get_info, put_info;
2805         struct sk_buff *skb;
2806         struct TxD *txdlp;
2807
2808         get_info = fifo_data->tx_curr_get_info;
2809         memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
2810         txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
2811             list_virt_addr;
2812         while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2813                (get_info.offset != put_info.offset) &&
2814                (txdlp->Host_Control)) {
2815                 /* Check for TxD errors */
2816                 if (txdlp->Control_1 & TXD_T_CODE) {
2817                         unsigned long long err;
2818                         err = txdlp->Control_1 & TXD_T_CODE;
2819                         if (err & 0x1) {
2820                                 nic->mac_control.stats_info->sw_stat.
2821                                                 parity_err_cnt++;
2822                         }
2823                         if ((err >> 48) == 0xA) {
2824                                 DBG_PRINT(TX_DBG, "TxD returned due \
2825                                                 to loss of link\n");
2826                         }
2827                         else {
2828                                 DBG_PRINT(ERR_DBG, "***TxD error %llx\n", err);
2829                         }
2830                 }
2831
2832                 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
2833                 if (skb == NULL) {
2834                         DBG_PRINT(ERR_DBG, "%s: Null skb ",
2835                         __FUNCTION__);
2836                         DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2837                         return;
2838                 }
2839
2840                 /* Updating the statistics block */
2841                 nic->stats.tx_bytes += skb->len;
2842                 dev_kfree_skb_irq(skb);
2843
2844                 get_info.offset++;
2845                 if (get_info.offset == get_info.fifo_len + 1)
2846                         get_info.offset = 0;
2847                 txdlp = (struct TxD *) fifo_data->list_info
2848                     [get_info.offset].list_virt_addr;
2849                 fifo_data->tx_curr_get_info.offset =
2850                     get_info.offset;
2851         }
2852
2853         spin_lock(&nic->tx_lock);
2854         if (netif_queue_stopped(dev))
2855                 netif_wake_queue(dev);
2856         spin_unlock(&nic->tx_lock);
2857 }
2858
2859 /**
2860  *  s2io_mdio_write - Function to write in to MDIO registers
2861  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2862  *  @addr     : address value
2863  *  @value    : data value
2864  *  @dev      : pointer to net_device structure
2865  *  Description:
2866  *  This function is used to write values to the MDIO registers
2867  *  NONE
2868  */
2869 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
2870 {
2871         u64 val64 = 0x0;
2872         struct s2io_nic *sp = dev->priv;
2873         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2874
2875         //address transaction
2876         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2877                         | MDIO_MMD_DEV_ADDR(mmd_type)
2878                         | MDIO_MMS_PRT_ADDR(0x0);
2879         writeq(val64, &bar0->mdio_control);
2880         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2881         writeq(val64, &bar0->mdio_control);
2882         udelay(100);
2883
2884         //Data transaction
2885         val64 = 0x0;
2886         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2887                         | MDIO_MMD_DEV_ADDR(mmd_type)
2888                         | MDIO_MMS_PRT_ADDR(0x0)
2889                         | MDIO_MDIO_DATA(value)
2890                         | MDIO_OP(MDIO_OP_WRITE_TRANS);
2891         writeq(val64, &bar0->mdio_control);
2892         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2893         writeq(val64, &bar0->mdio_control);
2894         udelay(100);
2895
2896         val64 = 0x0;
2897         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2898         | MDIO_MMD_DEV_ADDR(mmd_type)
2899         | MDIO_MMS_PRT_ADDR(0x0)
2900         | MDIO_OP(MDIO_OP_READ_TRANS);
2901         writeq(val64, &bar0->mdio_control);
2902         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2903         writeq(val64, &bar0->mdio_control);
2904         udelay(100);
2905
2906 }
2907
2908 /**
2909  *  s2io_mdio_read - Function to write in to MDIO registers
2910  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2911  *  @addr     : address value
2912  *  @dev      : pointer to net_device structure
2913  *  Description:
2914  *  This function is used to read values to the MDIO registers
2915  *  NONE
2916  */
2917 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
2918 {
2919         u64 val64 = 0x0;
2920         u64 rval64 = 0x0;
2921         struct s2io_nic *sp = dev->priv;
2922         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2923
2924         /* address transaction */
2925         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2926                         | MDIO_MMD_DEV_ADDR(mmd_type)
2927                         | MDIO_MMS_PRT_ADDR(0x0);
2928         writeq(val64, &bar0->mdio_control);
2929         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2930         writeq(val64, &bar0->mdio_control);
2931         udelay(100);
2932
2933         /* Data transaction */
2934         val64 = 0x0;
2935         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2936                         | MDIO_MMD_DEV_ADDR(mmd_type)
2937                         | MDIO_MMS_PRT_ADDR(0x0)
2938                         | MDIO_OP(MDIO_OP_READ_TRANS);
2939         writeq(val64, &bar0->mdio_control);
2940         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2941         writeq(val64, &bar0->mdio_control);
2942         udelay(100);
2943
2944         /* Read the value from regs */
2945         rval64 = readq(&bar0->mdio_control);
2946         rval64 = rval64 & 0xFFFF0000;
2947         rval64 = rval64 >> 16;
2948         return rval64;
2949 }
2950 /**
2951  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
2952  *  @counter      : couter value to be updated
2953  *  @flag         : flag to indicate the status
2954  *  @type         : counter type
2955  *  Description:
2956  *  This function is to check the status of the xpak counters value
2957  *  NONE
2958  */
2959
2960 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
2961 {
2962         u64 mask = 0x3;
2963         u64 val64;
2964         int i;
2965         for(i = 0; i <index; i++)
2966                 mask = mask << 0x2;
2967
2968         if(flag > 0)
2969         {
2970                 *counter = *counter + 1;
2971                 val64 = *regs_stat & mask;
2972                 val64 = val64 >> (index * 0x2);
2973                 val64 = val64 + 1;
2974                 if(val64 == 3)
2975                 {
2976                         switch(type)
2977                         {
2978                         case 1:
2979                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2980                                           "service. Excessive temperatures may "
2981                                           "result in premature transceiver "
2982                                           "failure \n");
2983                         break;
2984                         case 2:
2985                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2986                                           "service Excessive bias currents may "
2987                                           "indicate imminent laser diode "
2988                                           "failure \n");
2989                         break;
2990                         case 3:
2991                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2992                                           "service Excessive laser output "
2993                                           "power may saturate far-end "
2994                                           "receiver\n");
2995                         break;
2996                         default:
2997                                 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
2998                                           "type \n");
2999                         }
3000                         val64 = 0x0;
3001                 }
3002                 val64 = val64 << (index * 0x2);
3003                 *regs_stat = (*regs_stat & (~mask)) | (val64);
3004
3005         } else {
3006                 *regs_stat = *regs_stat & (~mask);
3007         }
3008 }
3009
3010 /**
3011  *  s2io_updt_xpak_counter - Function to update the xpak counters
3012  *  @dev         : pointer to net_device struct
3013  *  Description:
3014  *  This function is to upate the status of the xpak counters value
3015  *  NONE
3016  */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag  = 0x0;
	u16 type  = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr  = 0x0;

	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Check the communication with the MDIO slave */
	addr = 0x0000;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
	/* All-ones/all-zeros reads indicate a dead or absent MDIO slave */
	if((val64 == 0xFFFF) || (val64 == 0x0000))
	{
		DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
			  "Returned %llx\n", (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of 2040 at PMA address 0x0000 */
	if(val64 != 0x2040)
	{
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
		DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
			  (unsigned long long)val64);
		return;
	}

	/* Loading the DOM register to MDIO register
	 * (writes val16 == 0; read-back value is discarded - presumably this
	 * latches the DOM page before the flag reads below; TODO confirm) */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Reading the Alarm flags (register 0xA070) */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Bit 7: transceiver temperature high alarm (type 1, index 0x0) */
	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
				&stat_info->xpak_stat.xpak_regs_stat,
				0x0, flag, type);

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.alarm_transceiver_temp_low++;

	/* Bit 3: laser bias current high alarm (type 2, index 0x2) */
	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
				&stat_info->xpak_stat.xpak_regs_stat,
				0x2, flag, type);

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.alarm_laser_bias_current_low++;

	/* Bit 1: laser output power high alarm (type 3, index 0x4) */
	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
				&stat_info->xpak_stat.xpak_regs_stat,
				0x4, flag, type);

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.alarm_laser_output_power_low++;

	/* Reading the Warning flags (register 0xA074); warnings only bump
	 * counters - no xpak_regs_stat bookkeeping is done for them */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	if(CHECKBIT(val64, 0x7))
		stat_info->xpak_stat.warn_transceiver_temp_high++;

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.warn_transceiver_temp_low++;

	if(CHECKBIT(val64, 0x3))
		stat_info->xpak_stat.warn_laser_bias_current_high++;

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.warn_laser_bias_current_low++;

	if(CHECKBIT(val64, 0x1))
		stat_info->xpak_stat.warn_laser_output_power_high++;

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.warn_laser_output_power_low++;
}
3108
3109 /**
3110  *  alarm_intr_handler - Alarm Interrrupt handler
3111  *  @nic: device private variable
3112  *  Description: If the interrupt was neither because of Rx packet or Tx
3113  *  complete, this function is called. If the interrupt was to indicate
3114  *  a loss of link, the OSM link status handler is invoked for any other
3115  *  alarm interrupt the block that raised the interrupt is displayed
3116  *  and a H/W reset is issued.
3117  *  Return Value:
3118  *  NONE
3119 */
3120
static void alarm_intr_handler(struct s2io_nic *nic)
{
	struct net_device *dev = (struct net_device *) nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, err_reg = 0;
	u64 cnt;
	int i;
	/* Nothing to service once the adapter has been brought down */
	if (atomic_read(&nic->card_state) == CARD_DOWN)
		return;
	/* ring_full_cnt is recomputed from the bump counters at the bottom */
	nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
	/* Handling the XPAK counters update */
	if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
		/* waiting for an hour */
		nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
	} else {
		s2io_updt_xpak_counter(dev);
		/* reset the count to zero */
		nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
	}

	/* Handling link status change error Intr.
	 * The error register value is written back after reading -
	 * presumably to acknowledge/clear the latched bits; TODO confirm */
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		err_reg = readq(&bar0->mac_rmac_err_reg);
		writeq(err_reg, &bar0->mac_rmac_err_reg);
		if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
			schedule_work(&nic->set_link_task);
		}
	}

	/* Handling Ecc errors (same read/write-back acknowledge pattern) */
	val64 = readq(&bar0->mc_err_reg);
	writeq(val64, &bar0->mc_err_reg);
	if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
		if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
			nic->mac_control.stats_info->sw_stat.
				double_ecc_errs++;
			DBG_PRINT(INIT_DBG, "%s: Device indicates ",
				  dev->name);
			DBG_PRINT(INIT_DBG, "double ECC error!!\n");
			if (nic->device_type != XFRAME_II_DEVICE) {
				/* Reset XframeI only if critical error */
				if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
					     MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
					netif_stop_queue(dev);
					schedule_work(&nic->rst_timer_task);
					nic->mac_control.stats_info->sw_stat.
							soft_reset_cnt++;
				}
			}
		} else {
			/* Single-bit ECC errors are corrected; just count */
			nic->mac_control.stats_info->sw_stat.
				single_ecc_errs++;
		}
	}

	/* In case of a serious error, the device will be Reset. */
	val64 = readq(&bar0->serr_source);
	if (val64 & SERR_SOURCE_ANY) {
		nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
		DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
		DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
			  (unsigned long long)val64);
		netif_stop_queue(dev);
		schedule_work(&nic->rst_timer_task);
		nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
	}

	/*
	 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
	 * Error occurs, the adapter will be recycled by disabling the
	 * adapter enable bit and enabling it again after the device
	 * becomes Quiescent.
	 */
	val64 = readq(&bar0->pcc_err_reg);
	writeq(val64, &bar0->pcc_err_reg);
	if (val64 & PCC_FB_ECC_DB_ERR) {
		u64 ac = readq(&bar0->adapter_control);
		ac &= ~(ADAPTER_CNTL_EN);
		writeq(ac, &bar0->adapter_control);
		/* read back after the disable write before rescheduling;
		 * the set_link_task will re-enable the adapter */
		ac = readq(&bar0->adapter_control);
		schedule_work(&nic->set_link_task);
	}
	/* Check for data parity error */
	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if (val64 & GPIO_INT_REG_DP_ERR_INT) {
			nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
			schedule_work(&nic->rst_timer_task);
			nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
		}
	}

	/* Check for ring full counter. Each bump-counter register packs
	 * four 16-bit per-ring counts, most significant ring first. */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = readq(&bar0->ring_bump_counter1);
		for (i=0; i<4; i++) {
			cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
			cnt >>= 64 - ((i+1)*16);
			nic->mac_control.stats_info->sw_stat.ring_full_cnt
				+= cnt;
		}

		val64 = readq(&bar0->ring_bump_counter2);
		for (i=0; i<4; i++) {
			cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
			cnt >>= 64 - ((i+1)*16);
			nic->mac_control.stats_info->sw_stat.ring_full_cnt
				+= cnt;
		}
	}

	/* Other type of interrupts are not being handled now,  TODO */
}
3235
3236 /**
3237  *  wait_for_cmd_complete - waits for a command to complete.
3238  *  @sp : private member of the device structure, which is a pointer to the
3239  *  s2io_nic structure.
3240  *  Description: Function that waits for a command to Write into RMAC
3241  *  ADDR DATA registers to be completed and returns either success or
3242  *  error depending on whether the command was complete or not.
3243  *  Return value:
3244  *   SUCCESS on success and FAILURE on failure.
3245  */
3246
3247 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3248                                 int bit_state)
3249 {
3250         int ret = FAILURE, cnt = 0, delay = 1;
3251         u64 val64;
3252
3253         if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3254                 return FAILURE;
3255
3256         do {
3257                 val64 = readq(addr);
3258                 if (bit_state == S2IO_BIT_RESET) {
3259                         if (!(val64 & busy_bit)) {
3260                                 ret = SUCCESS;
3261                                 break;
3262                         }
3263                 } else {
3264                         if (!(val64 & busy_bit)) {
3265                                 ret = SUCCESS;
3266                                 break;
3267                         }
3268                 }
3269
3270                 if(in_interrupt())
3271                         mdelay(delay);
3272                 else
3273                         msleep(delay);
3274
3275                 if (++cnt >= 10)
3276                         delay = 50;
3277         } while (cnt < 20);
3278         return ret;
3279 }
3280 /*
3281  * check_pci_device_id - Checks if the device id is supported
3282  * @id : device id
3283  * Description: Function to check if the pci device id is supported by driver.
3284  * Return value: Actual device id if supported else PCI_ANY_ID
3285  */
3286 static u16 check_pci_device_id(u16 id)
3287 {
3288         switch (id) {
3289         case PCI_DEVICE_ID_HERC_WIN:
3290         case PCI_DEVICE_ID_HERC_UNI:
3291                 return XFRAME_II_DEVICE;
3292         case PCI_DEVICE_ID_S2IO_UNI:
3293         case PCI_DEVICE_ID_S2IO_WIN:
3294                 return XFRAME_I_DEVICE;
3295         default:
3296                 return PCI_ANY_ID;
3297         }
3298 }
3299
3300 /**
3301  *  s2io_reset - Resets the card.
3302  *  @sp : private member of the device structure.
3303  *  Description: Function to Reset the card. This function then also
3304  *  restores the previously saved PCI configuration space registers as
3305  *  the card reset also resets the configuration space.
3306  *  Return value:
3307  *  void.
3308  */
3309
static void s2io_reset(struct s2io_nic * sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long reset_cnt = 0;
	DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
			__FUNCTION__, sp->dev->name);

	/* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	/* Xframe II is reset via a D3hot -> D0 power state transition;
	 * fall back to the SW_RESET register if that fails */
	if (sp->device_type == XFRAME_II_DEVICE) {
		int ret;
		ret = pci_set_power_state(sp->pdev, 3);
		if (!ret)
			ret = pci_set_power_state(sp->pdev, 0);
		else {
			DBG_PRINT(ERR_DBG,"%s PME based SW_Reset failed!\n",
					__FUNCTION__);
			goto old_way;
		}
		msleep(20);
		goto new_way;
	}
old_way:
	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
new_way:
	/* CX4-based boards need extra settling time after reset */
	if (strstr(sp->product_name, "CX4")) {
		msleep(750);
	}
	msleep(250);
	/* Poll the device id (config offset 0x2) until the card responds
	 * with a recognised value, i.e. has come out of reset */
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
		DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
	}

	/* Put back the PCI-X command register saved above */
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof (struct net_device_stats));
	/* save reset count across the stats wipe */
	reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
	memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
	/* restore reset count */
	sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occured on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	/* restore the previously assigned mac address */
	s2io_set_mac_addr(sp->dev, (u8 *)&sp->def_mac_addr[0].mac_addr);

	sp->device_enabled_once = FALSE;
}
3414
3415 /**
3416  *  s2io_set_swapper - to set the swapper controle on the card
3417  *  @sp : private member of the device structure,
3418  *  pointer to the s2io_nic structure.
3419  *  Description: Function to set the swapper control on the card
3420  *  correctly depending on the 'endianness' of the system.
3421  *  Return value:
3422  *  SUCCESS on success and FAILURE on failure.
3423  */
3424
static int s2io_set_swapper(struct s2io_nic * sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		int i = 0;
		/* Probe the four FE/SE read-swap combinations until the
		 * feedback register returns the expected pattern */
		u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
				0x8100008181000081ULL,  /* FE=1, SE=0 */
				0x4200004242000042ULL,  /* FE=0, SE=1 */
				0};                     /* FE=0, SE=0 */

		while(i<4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
				dev->name);
			DBG_PRINT(ERR_DBG, "feedback read %llx\n",
				(unsigned long long) val64);
			return FAILURE;
		}
		/* remember the working read-side setting for the write probe */
		valr = value[i];
	} else {
		valr = readq(&bar0->swapper_ctrl);
	}

	/* Second probe: verify writes via write/read-back of xmsi_address */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if(val64 != valt) {
		int i = 0;
		/* Same four FE/SE combinations, this time for the write path,
		 * OR-ed with the read-side setting (valr) found above */
		u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
				0x0081810000818100ULL,  /* FE=1, SE=0 */
				0x0042420000424200ULL,  /* FE=0, SE=1 */
				0};                     /* FE=0, SE=0 */

		while(i<4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if(val64 == valt)
				break;
			i++;
		}
		if(i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
			DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	/* Keep only the top 16 bits probed above; the remaining swapper
	 * control bits are rebuilt below for this host's endianness */
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef  __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_R_SE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXD_W_SE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_R_SE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXD_W_SE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
			  dev->name);
		DBG_PRINT(ERR_DBG, "feedback read %llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	return SUCCESS;
}
3552
3553 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3554 {
3555         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3556         u64 val64;
3557         int ret = 0, cnt = 0;
3558
3559         do {
3560                 val64 = readq(&bar0->xmsi_access);
3561                 if (!(val64 & BIT(15)))
3562                         break;
3563                 mdelay(1);
3564                 cnt++;
3565         } while(cnt < 5);
3566         if (cnt == 5) {
3567                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3568                 ret = 1;
3569         }
3570
3571         return ret;
3572 }
3573
3574 static void restore_xmsi_data(struct s2io_nic *nic)
3575 {
3576         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3577         u64 val64;
3578         int i;
3579
3580         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3581                 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3582                 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3583                 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3584                 writeq(val64, &bar0->xmsi_access);
3585                 if (wait_for_msix_trans(nic, i)) {
3586                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3587                         continue;
3588                 }
3589         }
3590 }
3591
3592 static void store_xmsi_data(struct s2io_nic *nic)
3593 {
3594         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3595         u64 val64, addr, data;
3596         int i;
3597
3598         /* Store and display */
3599         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3600                 val64 = (BIT(15) | vBIT(i, 26, 6));
3601                 writeq(val64, &bar0->xmsi_access);
3602                 if (wait_for_msix_trans(nic, i)) {
3603                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3604                         continue;
3605                 }
3606                 addr = readq(&bar0->xmsi_address);
3607                 data = readq(&bar0->xmsi_data);
3608                 if (addr && data) {
3609                         nic->msix_info[i].addr = addr;
3610                         nic->msix_info[i].data = data;
3611                 }
3612         }
3613 }
3614
/*
 * s2io_enable_msi - switch the NIC to single-vector MSI interrupts.
 * Enables MSI on the PCI device, steers the interrupt to message 1,
 * and points every Tx FIFO and Rx ring interrupt at that vector.
 * Returns 0 on success or the pci_enable_msi() error code on failure.
 */
int s2io_enable_msi(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u16 msi_ctrl, msg_val;
	struct config_param *config = &nic->config;
	struct net_device *dev = nic->dev;
	u64 val64, tx_mat, rx_mat;
	int i, err;

	/* Clear bit 1 of pic_control before switching to MSI.
	 * NOTE(review): the exact meaning of BIT(1) here is hardware
	 * specific -- confirm against the Xframe register spec. */
	val64 = readq(&bar0->pic_control);
	val64 &= ~BIT(1);
	writeq(val64, &bar0->pic_control);

	err = pci_enable_msi(nic->pdev);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
			  nic->dev->name);
		return err;
	}

	/*
	 * Enable MSI and use MSI-1 in stead of the standard MSI-0
	 * for interrupt handling.
	 */
	/* Toggle the low bit of the word at config offset 0x4c so
	 * message 1 is signalled instead of message 0; the trailing
	 * read-back presumably flushes/verifies the write -- TODO
	 * confirm against this device's PCI capability layout. */
	pci_read_config_word(nic->pdev, 0x4c, &msg_val);
	msg_val ^= 0x1;
	pci_write_config_word(nic->pdev, 0x4c, msg_val);
	pci_read_config_word(nic->pdev, 0x4c, &msg_val);

	/* Set bit 4 of the word at config offset 0x42 -- appears to be
	 * part of the MSI control register for this device (device
	 * specific capability offset; verify before changing). */
	pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
	msi_ctrl |= 0x10;
	pci_write_config_word(nic->pdev, 0x42, msi_ctrl);

	/* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
	tx_mat = readq(&bar0->tx_mat0_n[0]);
	for (i=0; i<config->tx_fifo_num; i++) {
		tx_mat |= TX_MAT_SET(i, 1);
	}
	writeq(tx_mat, &bar0->tx_mat0_n[0]);

	rx_mat = readq(&bar0->rx_mat);
	for (i=0; i<config->rx_ring_num; i++) {
		rx_mat |= RX_MAT_SET(i, 1);
	}
	writeq(rx_mat, &bar0->rx_mat);

	/* Expose the irq number the PCI core assigned for MSI. */
	dev->irq = nic->pdev->irq;
	return 0;
}
3664
3665 static int s2io_enable_msi_x(struct s2io_nic *nic)
3666 {
3667         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3668         u64 tx_mat, rx_mat;
3669         u16 msi_control; /* Temp variable */
3670         int ret, i, j, msix_indx = 1;
3671
3672         nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3673                                GFP_KERNEL);
3674         if (nic->entries == NULL) {
3675                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3676                 return -ENOMEM;
3677         }
3678         memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3679
3680         nic->s2io_entries =
3681                 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3682                                    GFP_KERNEL);
3683         if (nic->s2io_entries == NULL) {
3684                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3685                 kfree(nic->entries);
3686                 return -ENOMEM;
3687         }
3688         memset(nic->s2io_entries, 0,
3689                MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3690
3691         for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3692                 nic->entries[i].entry = i;
3693                 nic->s2io_entries[i].entry = i;
3694                 nic->s2io_entries[i].arg = NULL;
3695                 nic->s2io_entries[i].in_use = 0;
3696         }
3697
3698         tx_mat = readq(&bar0->tx_mat0_n[0]);
3699         for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3700                 tx_mat |= TX_MAT_SET(i, msix_indx);
3701                 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3702                 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3703                 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3704         }
3705         writeq(tx_mat, &bar0->tx_mat0_n[0]);
3706
3707         if (!nic->config.bimodal) {
3708                 rx_mat = readq(&bar0->rx_mat);
3709                 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3710                         rx_mat |= RX_MAT_SET(j, msix_indx);
3711                         nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3712                         nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3713                         nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3714                 }
3715                 writeq(rx_mat, &bar0->rx_mat);
3716         } else {
3717                 tx_mat = readq(&bar0->tx_mat0_n[7]);
3718                 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3719                         tx_mat |= TX_MAT_SET(i, msix_indx);
3720                         nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3721                         nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3722                         nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3723                 }
3724                 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3725         }
3726
3727         nic->avail_msix_vectors = 0;
3728         ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3729         /* We fail init if error or we get less vectors than min required */
3730         if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3731                 nic->avail_msix_vectors = ret;
3732                 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3733         }
3734         if (ret) {
3735                 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3736                 kfree(nic->entries);
3737                 kfree(nic->s2io_entries);
3738                 nic->entries = NULL;
3739                 nic->s2io_entries = NULL;
3740                 nic->avail_msix_vectors = 0;
3741                 return -ENOMEM;
3742         }
3743         if (!nic->avail_msix_vectors)
3744                 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3745
3746         /*
3747          * To enable MSI-X, MSI also needs to be enabled, due to a bug
3748          * in the herc NIC. (Temp change, needs to be removed later)
3749          */
3750         pci_read_config_word(nic->pdev, 0x42, &msi_control);
3751         msi_control |= 0x1; /* Enable MSI */
3752         pci_write_config_word(nic->pdev, 0x42, msi_control);
3753
3754         return 0;
3755 }
3756
3757 /* ********************************************************* *
3758  * Functions defined below concern the OS part of the driver *
3759  * ********************************************************* */
3760
3761 /**
3762  *  s2io_open - open entry point of the driver
3763  *  @dev : pointer to the device structure.
3764  *  Description:
3765  *  This function is the open entry point of the driver. It mainly calls a
3766  *  function to allocate Rx buffers and inserts them into the buffer
3767  *  descriptors and then enables the Rx part of the NIC.
3768  *  Return value:
3769  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3770  *   file on failure.
3771  */
3772
3773 static int s2io_open(struct net_device *dev)
3774 {
3775         struct s2io_nic *sp = dev->priv;
3776         int err = 0;
3777
3778         /*
3779          * Make sure you have link off by default every time
3780          * Nic is initialized
3781          */
3782         netif_carrier_off(dev);
3783         sp->last_link_state = 0;
3784
3785         /* Initialize H/W and enable interrupts */
3786         err = s2io_card_up(sp);
3787         if (err) {
3788                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3789                           dev->name);
3790                 goto hw_init_failed;
3791         }
3792
3793         if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3794                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3795                 s2io_card_down(sp);
3796                 err = -ENODEV;
3797                 goto hw_init_failed;
3798         }
3799
3800         netif_start_queue(dev);
3801         return 0;
3802
3803 hw_init_failed:
3804         if (sp->intr_type == MSI_X) {
3805                 if (sp->entries)
3806                         kfree(sp->entries);
3807                 if (sp->s2io_entries)
3808                         kfree(sp->s2io_entries);
3809         }
3810         return err;
3811 }
3812
3813 /**
3814  *  s2io_close -close entry point of the driver
3815  *  @dev : device pointer.
3816  *  Description:
3817  *  This is the stop entry point of the driver. It needs to undo exactly
3818  *  whatever was done by the open entry point,thus it's usually referred to
3819  *  as the close function.Among other things this function mainly stops the
3820  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3821  *  Return value:
3822  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3823  *  file on failure.
3824  */
3825
3826 static int s2io_close(struct net_device *dev)
3827 {
3828         struct s2io_nic *sp = dev->priv;
3829
3830         netif_stop_queue(dev);
3831         /* Reset card, kill tasklet and free Tx and Rx buffers. */
3832         s2io_card_down(sp);
3833
3834         sp->device_close_flag = TRUE;   /* Device is shut down. */
3835         return 0;
3836 }
3837
3838 /**
3839  *  s2io_xmit - Tx entry point of te driver
3840  *  @skb : the socket buffer containing the Tx data.
3841  *  @dev : device pointer.
3842  *  Description :
3843  *  This function is the Tx entry point of the driver. S2IO NIC supports
3844  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
3845  *  NOTE: when device cant queue the pkt,just the trans_start variable will
3846  *  not be upadted.
3847  *  Return value:
3848  *  0 on success & 1 on failure.
3849  */
3850
static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags;
	u16 vlan_tag = 0;
	int vlan_priority = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	int offload_type;

	mac_control = &sp->mac_control;
	config = &sp->config;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
	spin_lock_irqsave(&sp->tx_lock, flags);
	/* Silently drop packets while the card is going through a reset. */
	if (atomic_read(&sp->card_state) == CARD_DOWN) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		dev_kfree_skb(skb);
		return 0;
	}

	queue = 0;

	/* Get Fifo number to Transmit based on vlan priority */
	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		/* 802.1p priority occupies the top 3 bits of the tag. */
		vlan_priority = vlan_tag >> 13;
		queue = config->fifo_mapping[vlan_priority];
	}

	/* Grab the descriptor at the current "put" position of the
	 * selected FIFO's circular TxD list. */
	put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
	get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
	txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
		list_virt_addr;

	queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	/* A non-zero Host_Control means this TxD still owns an skb that
	 * has not been reaped yet, i.e. the ring is full. */
	if (txdp->Host_Control ||
		   ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		netif_stop_queue(dev);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		return 0;
	}

	/* A buffer with no data will be dropped */
	if (!skb->len) {
		DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		return 0;
	}

	offload_type = s2io_offload_type(skb);
	/* TCP segmentation offload: program the MSS into the first TxD. */
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	/* Hardware checksum offload for IPv4/TCP/UDP. */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |=
		    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
		     TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= config->tx_intr_type;

	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	/* Length of the linear (non-paged) part of the skb. */
	frg_len = skb->len - skb->data_len;
	if (offload_type == SKB_GSO_UDP) {
		int ufo_size;

		/* UDP fragmentation offload: TxD0 carries an 8-byte
		 * in-band header holding the IPv6 fragment id (shifted
		 * on little-endian so it lands in the same byte lanes). */
		ufo_size = s2io_udp_mss(skb);
		ufo_size &= ~7;
		txdp->Control_1 |= TXD_UFO_EN;
		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
		sp->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id;
#else
		sp->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
		txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
					sp->ufo_in_band_v,
					sizeof(u64), PCI_DMA_TODEVICE);
		txdp++;
	}

	/* DMA-map the linear part of the skb into the (next) TxD.
	 * Host_Control keeps the skb pointer for the completion path. */
	txdp->Buffer_Pointer = pci_map_single
	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
	txdp->Host_Control = (unsigned long) skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
	if (offload_type == SKB_GSO_UDP)
		txdp->Control_1 |= TXD_UFO_EN;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!frag->size)
			continue;
		txdp++;
		txdp->Buffer_Pointer = (u64) pci_map_page
		    (sp->pdev, frag->page, frag->page_offset,
		     frag->size, PCI_DMA_TODEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
		if (offload_type == SKB_GSO_UDP)
			txdp->Control_1 |= TXD_UFO_EN;
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	if (offload_type == SKB_GSO_UDP)
		frg_cnt++; /* as Txd0 was used for inband header */

	/* Ring the doorbell: hand the TxD list address and control word
	 * to the hardware FIFO. */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	/* Order the MMIO writes before the spinlock is released. */
	mmiowb();

	/* Advance the put pointer, wrapping at the end of the ring. */
	put_off++;
	if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	mac_control->fifos[queue].tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		netif_stop_queue(dev);
	}

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return 0;
}
4012
4013 static void
4014 s2io_alarm_handle(unsigned long data)
4015 {
4016         struct s2io_nic *sp = (struct s2io_nic *)data;
4017
4018         alarm_intr_handler(sp);
4019         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4020 }
4021
4022 static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
4023 {
4024         int rxb_size, level;
4025
4026         if (!sp->lro) {
4027                 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4028                 level = rx_buffer_level(sp, rxb_size, rng_n);
4029
4030                 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4031                         int ret;
4032                         DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4033                         DBG_PRINT(INTR_DBG, "PANIC levels\n");
4034                         if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4035                                 DBG_PRINT(INFO_DBG, "Out of memory in %s",
4036                                           __FUNCTION__);
4037                                 clear_bit(0, (&sp->tasklet_status));
4038                                 return -1;
4039                         }
4040                         clear_bit(0, (&sp->tasklet_status));
4041                 } else if (level == LOW)
4042                         tasklet_schedule(&sp->task);
4043
4044         } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4045                         DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
4046                         DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
4047         }
4048         return 0;
4049 }
4050
4051 static irqreturn_t s2io_msi_handle(int irq, void *dev_id)
4052 {
4053         struct net_device *dev = (struct net_device *) dev_id;
4054         struct s2io_nic *sp = dev->priv;
4055         int i;
4056         struct mac_info *mac_control;
4057         struct config_param *config;
4058
4059         atomic_inc(&sp->isr_cnt);
4060         mac_control = &sp->mac_control;
4061         config = &sp->config;
4062         DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
4063
4064         /* If Intr is because of Rx Traffic */
4065         for (i = 0; i < config->rx_ring_num; i++)
4066                 rx_intr_handler(&mac_control->rings[i]);
4067
4068         /* If Intr is because of Tx Traffic */
4069         for (i = 0; i < config->tx_fifo_num; i++)
4070                 tx_intr_handler(&mac_control->fifos[i]);
4071
4072         /*
4073          * If the Rx buffer count is below the panic threshold then
4074          * reallocate the buffers from the interrupt handler itself,
4075          * else schedule a tasklet to reallocate the buffers.
4076          */
4077         for (i = 0; i < config->rx_ring_num; i++)
4078                 s2io_chk_rx_buffers(sp, i);
4079
4080         atomic_dec(&sp->isr_cnt);
4081         return IRQ_HANDLED;
4082 }
4083
4084 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4085 {
4086         struct ring_info *ring = (struct ring_info *)dev_id;
4087         struct s2io_nic *sp = ring->nic;
4088
4089         atomic_inc(&sp->isr_cnt);
4090
4091         rx_intr_handler(ring);
4092         s2io_chk_rx_buffers(sp, ring->ring_no);
4093
4094         atomic_dec(&sp->isr_cnt);
4095         return IRQ_HANDLED;
4096 }
4097
4098 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4099 {
4100         struct fifo_info *fifo = (struct fifo_info *)dev_id;
4101         struct s2io_nic *sp = fifo->nic;
4102
4103         atomic_inc(&sp->isr_cnt);
4104         tx_intr_handler(fifo);
4105         atomic_dec(&sp->isr_cnt);
4106         return IRQ_HANDLED;
4107 }
/*
 * s2io_txpic_intr_handle - service GPIO/link-change PIC interrupts.
 * Reads the PIC interrupt status; for GPIO link events it updates the
 * adapter enable/LED bits, reports the new link state via s2io_link()
 * and re-programs the GPIO interrupt masks so that only the opposite
 * transition remains unmasked.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |=  GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			/* Leave both transitions unmasked so the next
			 * event re-evaluates the link. */
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		}
		else if (val64 & GPIO_INT_REG_LINK_UP) {
			/* NOTE(review): this adapter_status read result is
			 * immediately overwritten -- possibly a
			 * read-to-clear/flush access; confirm against the
			 * Xframe spec before removing. */
			val64 = readq(&bar0->adapter_status);
				/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			/* Second write turns the LED on after enabling. */
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		}else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			/* NOTE(review): discarded adapter_status read, as
			 * above -- confirm before removing. */
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmaks link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 &(~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* Final read of gpio_int_mask; the result is unused -- presumably
	 * a posted-write flush. TODO confirm. */
	val64 = readq(&bar0->gpio_int_mask);
}
4168
4169 /**
4170  *  s2io_isr - ISR handler of the device .
4171  *  @irq: the irq of the device.
4172  *  @dev_id: a void pointer to the dev structure of the NIC.
4173  *  Description:  This function is the ISR handler of the device. It
4174  *  identifies the reason for the interrupt and calls the relevant
4175  *  service routines. As a contongency measure, this ISR allocates the
4176  *  recv buffers, if their numbers are below the panic value which is
4177  *  presently set to 25% of the original number of rcv buffers allocated.
4178  *  Return value:
4179  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4180  *   IRQ_NONE: will be returned if interrupt is not from our device
4181  */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	atomic_inc(&sp->isr_cnt);
	mac_control = &sp->mac_control;
	config = &sp->config;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be;
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 * 4. Error in any functional blocks of the NIC.
	 */
	reason = readq(&bar0->general_int_status);

	if (!reason) {
		/* The interrupt was not raised by us. */
		atomic_dec(&sp->isr_cnt);
		return IRQ_NONE;
	}
	else if (unlikely(reason == S2IO_MINUS_ONE) ) {
		/* Disable device and get out */
		/* NOTE(review): an all-ones status presumably means the
		 * device is no longer responding (e.g. removed) --
		 * confirm. */
		atomic_dec(&sp->isr_cnt);
		return IRQ_NONE;
	}

	if (napi) {
		if (reason & GEN_INTR_RXTRAFFIC) {
			/* Hand Rx work to the NAPI poll routine and mask
			 * further Rx interrupts until polling completes;
			 * if already scheduled, just ack the Rx cause. */
			if ( likely ( netif_rx_schedule_prep(dev)) ) {
				__netif_rx_schedule(dev);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
			}
			else
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
		}
	} else {
		/*
		 * Rx handler is called by default, without checking for the
		 * cause of interrupt.
		 * rx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit get's
		 * cleared and hence a read can be avoided.
		 */
		if (reason & GEN_INTR_RXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

		for (i = 0; i < config->rx_ring_num; i++) {
			rx_intr_handler(&mac_control->rings[i]);
		}
	}

	/*
	 * tx_traffic_int reg is an R1 register, writing all 1's
	 * will ensure that the actual interrupt causing bit get's
	 * cleared and hence a read can be avoided.
	 */
	if (reason & GEN_INTR_TXTRAFFIC)
		writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	if (reason & GEN_INTR_TXPIC)
		s2io_txpic_intr_handle(sp);
	/*
	 * If the Rx buffer count is below the panic threshold then
	 * reallocate the buffers from the interrupt handler itself,
	 * else schedule a tasklet to reallocate the buffers.
	 */
	if (!napi) {
		for (i = 0; i < config->rx_ring_num; i++)
			s2io_chk_rx_buffers(sp, i);
	}

	/* Write the interrupt mask; the readl presumably flushes the
	 * posted MMIO write before we return -- TODO confirm. */
	writeq(0, &bar0->general_int_mask);
	readl(&bar0->general_int_status);

	atomic_dec(&sp->isr_cnt);
	return IRQ_HANDLED;
}
4271
4272 /**
4273  * s2io_updt_stats -
4274  */
4275 static void s2io_updt_stats(struct s2io_nic *sp)
4276 {
4277         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4278         u64 val64;
4279         int cnt = 0;
4280
4281         if (atomic_read(&sp->card_state) == CARD_UP) {
4282                 /* Apprx 30us on a 133 MHz bus */
4283                 val64 = SET_UPDT_CLICKS(10) |
4284                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4285                 writeq(val64, &bar0->stat_cfg);
4286                 do {
4287                         udelay(100);
4288                         val64 = readq(&bar0->stat_cfg);
4289                         if (!(val64 & BIT(0)))
4290                                 break;
4291                         cnt++;
4292                         if (cnt == 5)
4293                                 break; /* Updt failed */
4294                 } while(1);
4295         } 
4296 }
4297
4298 /**
4299  *  s2io_get_stats - Updates the device statistics structure.
4300  *  @dev : pointer to the device structure.
4301  *  Description:
4302  *  This function updates the device statistics structure in the s2io_nic
4303  *  structure and returns a pointer to the same.
4304  *  Return value:
4305  *  pointer to the updated net_device_stats structure.
4306  */
4307
4308 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4309 {
4310         struct s2io_nic *sp = dev->priv;
4311         struct mac_info *mac_control;
4312         struct config_param *config;
4313
4314
4315         mac_control = &sp->mac_control;
4316         config = &sp->config;
4317
4318         /* Configure Stats for immediate updt */
4319         s2io_updt_stats(sp);
4320
4321         sp->stats.tx_packets =
4322                 le32_to_cpu(mac_control->stats_info->tmac_frms);
4323         sp->stats.tx_errors =
4324                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4325         sp->stats.rx_errors =
4326                 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4327         sp->stats.multicast =
4328                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4329         sp->stats.rx_length_errors =
4330                 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4331
4332         return (&sp->stats);
4333 }
4334
4335 /**
4336  *  s2io_set_multicast - entry point for multicast address enable/disable.
4337  *  @dev : pointer to the device structure
4338  *  Description:
4339  *  This function is a driver entry point which gets called by the kernel
4340  *  whenever multicast addresses must be enabled/disabled. This also gets
4341  *  called to set/reset promiscuous mode. Depending on the deivce flag, we
4342  *  determine, if multicast address must be enabled or if promiscuous mode
4343  *  is to be disabled etc.
4344  *  Return value:
4345  *  void.
4346  */
4347
static void s2io_set_multicast(struct net_device *dev)
{
        int i, j, prev_cnt;
        struct dev_mc_list *mclist;
        struct s2io_nic *sp = dev->priv;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        /* multi_mac/mask select the hardware "all multicast" filter entry;
         * dis_addr (all ones) is the pattern that disables a filter slot. */
        u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
            0xfeffffffffffULL;
        u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
        void __iomem *add;

        if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
                /* Enable all Multicast addresses: program the all-multi
                 * pattern into the RMAC address memory. */
                writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
                       &bar0->rmac_addr_data0_mem);
                writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
                       &bar0->rmac_addr_data1_mem);
                val64 = RMAC_ADDR_CMD_MEM_WE |
                    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                    RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
                writeq(val64, &bar0->rmac_addr_cmd_mem);
                /* Wait till command completes */
                wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET);

                sp->m_cast_flg = 1;
                sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
        } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
                /* Disable all Multicast addresses: overwrite the slot
                 * programmed above with the disable pattern. */
                writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
                       &bar0->rmac_addr_data0_mem);
                writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
                       &bar0->rmac_addr_data1_mem);
                val64 = RMAC_ADDR_CMD_MEM_WE |
                    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                    RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
                writeq(val64, &bar0->rmac_addr_cmd_mem);
                /* Wait till command completes */
                wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET);

                sp->m_cast_flg = 0;
                sp->all_multi_pos = 0;
        }

        if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
                /* Put the NIC into promiscuous mode.  mac_cfg is a keyed
                 * register: each 32-bit half-write must be preceded by a
                 * write of RMAC_CFG_KEY to unlock it. */
                add = &bar0->mac_cfg;
                val64 = readq(&bar0->mac_cfg);
                val64 |= MAC_CFG_RMAC_PROM_ENABLE;

                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) val64, add);
                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) (val64 >> 32), (add + 4));

                /* In promiscuous mode leave VLAN tags intact unless the
                 * module parameter explicitly forces stripping. */
                if (vlan_tag_strip != 1) {
                        val64 = readq(&bar0->rx_pa_cfg);
                        val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
                        writeq(val64, &bar0->rx_pa_cfg);
                        vlan_strip_flag = 0;
                }

                val64 = readq(&bar0->mac_cfg);
                sp->promisc_flg = 1;
                DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
                          dev->name);
        } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
                /* Remove the NIC from promiscuous mode (same keyed
                 * half-write sequence as above). */
                add = &bar0->mac_cfg;
                val64 = readq(&bar0->mac_cfg);
                val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;

                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) val64, add);
                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) (val64 >> 32), (add + 4));

                /* Restore VLAN stripping unless disabled by parameter. */
                if (vlan_tag_strip != 0) {
                        val64 = readq(&bar0->rx_pa_cfg);
                        val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
                        writeq(val64, &bar0->rx_pa_cfg);
                        vlan_strip_flag = 1;
                }

                val64 = readq(&bar0->mac_cfg);
                sp->promisc_flg = 0;
                DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
                          dev->name);
        }

        /*  Update individual M_CAST address list */
        if ((!sp->m_cast_flg) && dev->mc_count) {
                if (dev->mc_count >
                    (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
                        DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
                                  dev->name);
                        DBG_PRINT(ERR_DBG, "can be added, please enable ");
                        DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
                        return;
                }

                prev_cnt = sp->mc_addr_count;
                sp->mc_addr_count = dev->mc_count;

                /* Clear out the previous list of Mc in the H/W. */
                for (i = 0; i < prev_cnt; i++) {
                        writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
                               &bar0->rmac_addr_data0_mem);
                        writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
                                &bar0->rmac_addr_data1_mem);
                        val64 = RMAC_ADDR_CMD_MEM_WE |
                            RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                            RMAC_ADDR_CMD_MEM_OFFSET
                            (MAC_MC_ADDR_START_OFFSET + i);
                        writeq(val64, &bar0->rmac_addr_cmd_mem);

                        /* Wait till command completes */
                        if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET)) {
                                DBG_PRINT(ERR_DBG, "%s: Adding ",
                                          dev->name);
                                DBG_PRINT(ERR_DBG, "Multicasts failed\n");
                                return;
                        }
                }

                /* Create the new Rx filter list and update the same in H/W. */
                for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
                     i++, mclist = mclist->next) {
                        memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
                               ETH_ALEN);
                        mac_addr = 0;
                        /* Pack the 6 address bytes into a u64, MSB first. */
                        for (j = 0; j < ETH_ALEN; j++) {
                                mac_addr |= mclist->dmi_addr[j];
                                mac_addr <<= 8;
                        }
                        mac_addr >>= 8;
                        writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
                               &bar0->rmac_addr_data0_mem);
                        writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
                                &bar0->rmac_addr_data1_mem);
                        val64 = RMAC_ADDR_CMD_MEM_WE |
                            RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                            RMAC_ADDR_CMD_MEM_OFFSET
                            (i + MAC_MC_ADDR_START_OFFSET);
                        writeq(val64, &bar0->rmac_addr_cmd_mem);

                        /* Wait till command completes */
                        if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET)) {
                                DBG_PRINT(ERR_DBG, "%s: Adding ",
                                          dev->name);
                                DBG_PRINT(ERR_DBG, "Multicasts failed\n");
                                return;
                        }
                }
        }
}
4511
4512 /**
4513  *  s2io_set_mac_addr - Programs the Xframe mac address
4514  *  @dev : pointer to the device structure.
4515  *  @addr: a uchar pointer to the new mac address which is to be set.
4516  *  Description : This procedure will program the Xframe to receive
4517  *  frames with new Mac Address
4518  *  Return value: SUCCESS on success and an appropriate (-)ve integer
4519  *  as defined in errno.h file on failure.
4520  */
4521
4522 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4523 {
4524         struct s2io_nic *sp = dev->priv;
4525         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4526         register u64 val64, mac_addr = 0;
4527         int i;
4528         u64 old_mac_addr = 0;
4529
4530         /*
4531          * Set the new MAC address as the new unicast filter and reflect this
4532          * change on the device address registered with the OS. It will be
4533          * at offset 0.
4534          */
4535         for (i = 0; i < ETH_ALEN; i++) {
4536                 mac_addr <<= 8;
4537                 mac_addr |= addr[i];
4538                 old_mac_addr <<= 8;
4539                 old_mac_addr |= sp->def_mac_addr[0].mac_addr[i];
4540         }
4541
4542         if(0 == mac_addr)
4543                 return SUCCESS;
4544
4545         /* Update the internal structure with this new mac address */
4546         if(mac_addr != old_mac_addr) {
4547                 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
4548                 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_addr);
4549                 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_addr >> 8);
4550                 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_addr >> 16);
4551                 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_addr >> 24);
4552                 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_addr >> 32);
4553                 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_addr >> 40);
4554         }
4555
4556         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4557                &bar0->rmac_addr_data0_mem);
4558
4559         val64 =
4560             RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4561             RMAC_ADDR_CMD_MEM_OFFSET(0);
4562         writeq(val64, &bar0->rmac_addr_cmd_mem);
4563         /* Wait till command completes */
4564         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4565                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
4566                 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4567                 return FAILURE;
4568         }
4569
4570         return SUCCESS;
4571 }
4572
4573 /**
4574  * s2io_ethtool_sset - Sets different link parameters.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
4576  * @info: pointer to the structure with parameters given by ethtool to set
4577  * link information.
4578  * Description:
4579  * The function sets different link parameters provided by the user onto
4580  * the NIC.
4581  * Return value:
4582  * 0 on success.
4583 */
4584
4585 static int s2io_ethtool_sset(struct net_device *dev,
4586                              struct ethtool_cmd *info)
4587 {
4588         struct s2io_nic *sp = dev->priv;
4589         if ((info->autoneg == AUTONEG_ENABLE) ||
4590             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4591                 return -EINVAL;
4592         else {
4593                 s2io_close(sp->dev);
4594                 s2io_open(sp->dev);
4595         }
4596
4597         return 0;
4598 }
4599
4600 /**
 * s2io_ethtool_gset - Return link specific information.
4602  * @sp : private member of the device structure, pointer to the
4603  *      s2io_nic structure.
4604  * @info : pointer to the structure with parameters given by ethtool
4605  * to return link information.
4606  * Description:
4607  * Returns link specific information like speed, duplex etc.. to ethtool.
4608  * Return value :
4609  * return 0 on success.
4610  */
4611
4612 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4613 {
4614         struct s2io_nic *sp = dev->priv;
4615         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4616         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4617         info->port = PORT_FIBRE;
4618         /* info->transceiver?? TODO */
4619
4620         if (netif_carrier_ok(sp->dev)) {
4621                 info->speed = 10000;
4622                 info->duplex = DUPLEX_FULL;
4623         } else {
4624                 info->speed = -1;
4625                 info->duplex = -1;
4626         }
4627
4628         info->autoneg = AUTONEG_DISABLE;
4629         return 0;
4630 }
4631
4632 /**
4633  * s2io_ethtool_gdrvinfo - Returns driver specific information.
4634  * @sp : private member of the device structure, which is a pointer to the
4635  * s2io_nic structure.
4636  * @info : pointer to the structure with parameters given by ethtool to
4637  * return driver information.
4638  * Description:
 * Returns driver specific information like name, version etc.. to ethtool.
4640  * Return value:
4641  *  void
4642  */
4643
4644 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4645                                   struct ethtool_drvinfo *info)
4646 {
4647         struct s2io_nic *sp = dev->priv;
4648
4649         strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4650         strncpy(info->version, s2io_driver_version, sizeof(info->version));
4651         strncpy(info->fw_version, "", sizeof(info->fw_version));
4652         strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
4653         info->regdump_len = XENA_REG_SPACE;
4654         info->eedump_len = XENA_EEPROM_SPACE;
4655         info->testinfo_len = S2IO_TEST_LEN;
4656
4657         if (sp->device_type == XFRAME_I_DEVICE)
4658                 info->n_stats = XFRAME_I_STAT_LEN;
4659         else
4660                 info->n_stats = XFRAME_II_STAT_LEN;
4661 }
4662
4663 /**
 *  s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
4665  *  @sp: private member of the device structure, which is a pointer to the
4666  *  s2io_nic structure.
4667  *  @regs : pointer to the structure with parameters given by ethtool for
4668  *  dumping the registers.
 *  @reg_space: The output buffer into which all the registers are dumped.
4670  *  Description:
4671  *  Dumps the entire register space of xFrame NIC into the user given
4672  *  buffer area.
4673  * Return value :
4674  * void .
4675 */
4676
4677 static void s2io_ethtool_gregs(struct net_device *dev,
4678                                struct ethtool_regs *regs, void *space)
4679 {
4680         int i;
4681         u64 reg;
4682         u8 *reg_space = (u8 *) space;
4683         struct s2io_nic *sp = dev->priv;
4684
4685         regs->len = XENA_REG_SPACE;
4686         regs->version = sp->pdev->subsystem_device;
4687
4688         for (i = 0; i < regs->len; i += 8) {
4689                 reg = readq(sp->bar0 + i);
4690                 memcpy((reg_space + i), &reg, 8);
4691         }
4692 }
4693
4694 /**
4695  *  s2io_phy_id  - timer function that alternates adapter LED.
4696  *  @data : address of the private member of the device structure, which
4697  *  is a pointer to the s2io_nic structure, provided as an u32.
4698  * Description: This is actually the timer function that alternates the
4699  * adapter LED bit of the adapter control bit to set/reset every time on
 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
4701  *  once every second.
4702 */
4703 static void s2io_phy_id(unsigned long data)
4704 {
4705         struct s2io_nic *sp = (struct s2io_nic *) data;
4706         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4707         u64 val64 = 0;
4708         u16 subid;
4709
4710         subid = sp->pdev->subsystem_device;
4711         if ((sp->device_type == XFRAME_II_DEVICE) ||
4712                    ((subid & 0xFF) >= 0x07)) {
4713                 val64 = readq(&bar0->gpio_control);
4714                 val64 ^= GPIO_CTRL_GPIO_0;
4715                 writeq(val64, &bar0->gpio_control);
4716         } else {
4717                 val64 = readq(&bar0->adapter_control);
4718                 val64 ^= ADAPTER_LED_ON;
4719                 writeq(val64, &bar0->adapter_control);
4720         }
4721
4722         mod_timer(&sp->id_timer, jiffies + HZ / 2);
4723 }
4724
4725 /**
4726  * s2io_ethtool_idnic - To physically identify the nic on the system.
4727  * @sp : private member of the device structure, which is a pointer to the
4728  * s2io_nic structure.
4729  * @id : pointer to the structure with identification parameters given by
4730  * ethtool.
4731  * Description: Used to physically identify the NIC on the system.
4732  * The Link LED will blink for a time specified by the user for
4733  * identification.
4734  * NOTE: The Link has to be Up to be able to blink the LED. Hence
 * identification is possible only if its link is up.
4736  * Return value:
4737  * int , returns 0 on success
4738  */
4739
static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
{
        u64 val64 = 0, last_gpio_ctrl_val;
        struct s2io_nic *sp = dev->priv;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u16 subid;

        subid = sp->pdev->subsystem_device;
        /* Save GPIO state so it can be restored afterwards on boards
         * with faulty link indicators. */
        last_gpio_ctrl_val = readq(&bar0->gpio_control);
        /* Early Xframe I boards (subsystem rev < 0x07) can only drive
         * the LED while the adapter is enabled, i.e. link is up. */
        if ((sp->device_type == XFRAME_I_DEVICE) &&
                ((subid & 0xFF) < 0x07)) {
                val64 = readq(&bar0->adapter_control);
                if (!(val64 & ADAPTER_CNTL_EN)) {
                        printk(KERN_ERR
                               "Adapter Link down, cannot blink LED\n");
                        return -EFAULT;
                }
        }
        /* Lazily initialize the blink timer on first use. */
        if (sp->id_timer.function == NULL) {
                init_timer(&sp->id_timer);
                sp->id_timer.function = s2io_phy_id;
                sp->id_timer.data = (unsigned long) sp;
        }
        mod_timer(&sp->id_timer, jiffies);
        /* Sleep while s2io_phy_id() toggles the LED in the background;
         * data is the requested duration in seconds (0 => default). */
        if (data)
                msleep_interruptible(data * HZ);
        else
                msleep_interruptible(MAX_FLICKER_TIME);
        del_timer_sync(&sp->id_timer);

        /* Restore the saved GPIO state where the LED doubles as a
         * (faulty) link indicator. */
        if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
                writeq(last_gpio_ctrl_val, &bar0->gpio_control);
                last_gpio_ctrl_val = readq(&bar0->gpio_control);
        }

        return 0;
}
4777
4778 /**
 * s2io_ethtool_getpause_data - Pause frame generation and reception.
4780  * @sp : private member of the device structure, which is a pointer to the
4781  *      s2io_nic structure.
4782  * @ep : pointer to the structure with pause parameters given by ethtool.
4783  * Description:
4784  * Returns the Pause frame generation and reception capability of the NIC.
4785  * Return value:
4786  *  void
4787  */
4788 static void s2io_ethtool_getpause_data(struct net_device *dev,
4789                                        struct ethtool_pauseparam *ep)
4790 {
4791         u64 val64;
4792         struct s2io_nic *sp = dev->priv;
4793         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4794
4795         val64 = readq(&bar0->rmac_pause_cfg);
4796         if (val64 & RMAC_PAUSE_GEN_ENABLE)
4797                 ep->tx_pause = TRUE;
4798         if (val64 & RMAC_PAUSE_RX_ENABLE)
4799                 ep->rx_pause = TRUE;
4800         ep->autoneg = FALSE;
4801 }
4802
4803 /**
4804  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
4805  * @sp : private member of the device structure, which is a pointer to the
4806  *      s2io_nic structure.
4807  * @ep : pointer to the structure with pause parameters given by ethtool.
4808  * Description:
4809  * It can be used to set or reset Pause frame generation or reception
4810  * support of the NIC.
4811  * Return value:
4812  * int, returns 0 on Success
4813  */
4814
4815 static int s2io_ethtool_setpause_data(struct net_device *dev,
4816                                struct ethtool_pauseparam *ep)
4817 {
4818         u64 val64;
4819         struct s2io_nic *sp = dev->priv;
4820         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4821
4822         val64 = readq(&bar0->rmac_pause_cfg);
4823         if (ep->tx_pause)
4824                 val64 |= RMAC_PAUSE_GEN_ENABLE;
4825         else
4826                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
4827         if (ep->rx_pause)
4828                 val64 |= RMAC_PAUSE_RX_ENABLE;
4829         else
4830                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
4831         writeq(val64, &bar0->rmac_pause_cfg);
4832         return 0;
4833 }
4834
4835 /**
4836  * read_eeprom - reads 4 bytes of data from user given offset.
4837  * @sp : private member of the device structure, which is a pointer to the
4838  *      s2io_nic structure.
4839  * @off : offset at which the data must be written
4840  * @data : Its an output parameter where the data read at the given
4841  *      offset is stored.
4842  * Description:
4843  * Will read 4 bytes of data from the user given offset and return the
4844  * read data.
4845  * NOTE: Will allow to read only part of the EEPROM visible through the
4846  *   I2C bus.
4847  * Return value:
4848  *  -1 on failure and 0 on success.
4849  */
4850
#define S2IO_DEV_ID             5
static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
{
        int ret = -1;
        u32 exit_cnt = 0;
        u64 val64;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;

        /* Xframe I: the EEPROM is reached through the I2C controller. */
        if (sp->device_type == XFRAME_I_DEVICE) {
                val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
                    I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
                    I2C_CONTROL_CNTL_START;
                SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

                /* Poll for completion, up to 5 times 50 ms apart. */
                while (exit_cnt < 5) {
                        val64 = readq(&bar0->i2c_control);
                        if (I2C_CONTROL_CNTL_END(val64)) {
                                *data = I2C_CONTROL_GET_DATA(val64);
                                ret = 0;
                                break;
                        }
                        msleep(50);
                        exit_cnt++;
                }
        }

        /* Xframe II: the EEPROM is reached through the SPI controller.
         * The command is set up first, then re-issued with the REQ bit. */
        if (sp->device_type == XFRAME_II_DEVICE) {
                val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
                        SPI_CONTROL_BYTECNT(0x3) |
                        SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
                SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
                val64 |= SPI_CONTROL_REQ;
                SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
                /* Poll until the device NACKs or completes the read. */
                while (exit_cnt < 5) {
                        val64 = readq(&bar0->spi_control);
                        if (val64 & SPI_CONTROL_NACK) {
                                ret = 1;
                                break;
                        } else if (val64 & SPI_CONTROL_DONE) {
                                *data = readq(&bar0->spi_data);
                                /* Only the low 3 bytes are valid data. */
                                *data &= 0xffffff;
                                ret = 0;
                                break;
                        }
                        msleep(50);
                        exit_cnt++;
                }
        }
        return ret;
}
4901
4902 /**
4903  *  write_eeprom - actually writes the relevant part of the data value.
4904  *  @sp : private member of the device structure, which is a pointer to the
4905  *       s2io_nic structure.
4906  *  @off : offset at which the data must be written
4907  *  @data : The data that is to be written
4908  *  @cnt : Number of bytes of the data that are actually to be written into
4909  *  the Eeprom. (max of 3)
4910  * Description:
4911  *  Actually writes the relevant part of the data value into the Eeprom
4912  *  through the I2C bus.
4913  * Return value:
4914  *  0 on success, -1 on failure.
4915  */
4916
static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
{
        int exit_cnt = 0, ret = -1;
        u64 val64;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;

        /* Xframe I: write goes through the I2C controller. */
        if (sp->device_type == XFRAME_I_DEVICE) {
                val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
                    I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
                    I2C_CONTROL_CNTL_START;
                SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

                /* Poll for completion, up to 5 times 50 ms apart; a NACK
                 * from the device leaves ret at -1. */
                while (exit_cnt < 5) {
                        val64 = readq(&bar0->i2c_control);
                        if (I2C_CONTROL_CNTL_END(val64)) {
                                if (!(val64 & I2C_CONTROL_NACK))
                                        ret = 0;
                                break;
                        }
                        msleep(50);
                        exit_cnt++;
                }
        }

        /* Xframe II: write goes through the SPI controller.  A byte count
         * of 8 is encoded as 0 in the SPI command field. */
        if (sp->device_type == XFRAME_II_DEVICE) {
                int write_cnt = (cnt == 8) ? 0 : cnt;
                writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);

                val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
                        SPI_CONTROL_BYTECNT(write_cnt) |
                        SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
                SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
                val64 |= SPI_CONTROL_REQ;
                SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
                /* Poll until the device NACKs or signals completion. */
                while (exit_cnt < 5) {
                        val64 = readq(&bar0->spi_control);
                        if (val64 & SPI_CONTROL_NACK) {
                                ret = 1;
                                break;
                        } else if (val64 & SPI_CONTROL_DONE) {
                                ret = 0;
                                break;
                        }
                        msleep(50);
                        exit_cnt++;
                }
        }
        return ret;
}
4966 static void s2io_vpd_read(struct s2io_nic *nic)
4967 {
4968         u8 *vpd_data;
4969         u8 data;
4970         int i=0, cnt, fail = 0;
4971         int vpd_addr = 0x80;
4972
4973         if (nic->device_type == XFRAME_II_DEVICE) {
4974                 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
4975                 vpd_addr = 0x80;
4976         }
4977         else {
4978                 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
4979                 vpd_addr = 0x50;
4980         }
4981         strcpy(nic->serial_num, "NOT AVAILABLE");
4982
4983         vpd_data = kmalloc(256, GFP_KERNEL);
4984         if (!vpd_data)
4985                 return;
4986
4987         for (i = 0; i < 256; i +=4 ) {
4988                 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
4989                 pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
4990                 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
4991                 for (cnt = 0; cnt <5; cnt++) {
4992                         msleep(2);
4993                         pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
4994                         if (data == 0x80)
4995                                 break;
4996                 }
4997                 if (cnt >= 5) {
4998                         DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
4999                         fail = 1;
5000                         break;
5001                 }
5002                 pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
5003                                       (u32 *)&vpd_data[i]);
5004         }
5005
5006         if(!fail) {
5007                 /* read serial number of adapter */
5008                 for (cnt = 0; cnt < 256; cnt++) {
5009                 if ((vpd_data[cnt] == 'S') &&
5010                         (vpd_data[cnt+1] == 'N') &&
5011                         (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5012                                 memset(nic->serial_num, 0, VPD_STRING_LEN);
5013                                 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5014                                         vpd_data[cnt+2]);
5015                                 break;
5016                         }
5017                 }
5018         }
5019
5020         if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5021                 memset(nic->product_name, 0, vpd_data[1]);
5022                 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5023         }
5024         kfree(vpd_data);
5025 }
5026
5027 /**
5028  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
 *  @sp : private member of the device structure, which is a pointer to the
 *  s2io_nic structure.
5030  *  @eeprom : pointer to the user level structure provided by ethtool,
5031  *  containing all relevant information.
5032  *  @data_buf : user defined value to be written into Eeprom.
5033  *  Description: Reads the values stored in the Eeprom at given offset
5034  *  for a given length. Stores these values int the input argument data
5035  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5036  *  Return value:
5037  *  int  0 on success
5038  */
5039
5040 static int s2io_ethtool_geeprom(struct net_device *dev,
5041                          struct ethtool_eeprom *eeprom, u8 * data_buf)
5042 {
5043         u32 i, valid;
5044         u64 data;
5045         struct s2io_nic *sp = dev->priv;
5046
5047         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5048
5049         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5050                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5051
5052         for (i = 0; i < eeprom->len; i += 4) {
5053                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5054                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5055                         return -EFAULT;
5056                 }
5057                 valid = INV(data);
5058                 memcpy((data_buf + i), &valid, 4);
5059         }
5060         return 0;
5061 }
5062
5063 /**
5064  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5065  *  @sp : private member of the device structure, which is a pointer to the
5066  *  s2io_nic structure.
5067  *  @eeprom : pointer to the user level structure provided by ethtool,
5068  *  containing all relevant information.
 *  @data_buf : user defined value to be written into Eeprom.
5070  *  Description:
5071  *  Tries to write the user provided value in the Eeprom, at the offset
5072  *  given by the user.
5073  *  Return value:
5074  *  0 on success, -EFAULT on failure.
5075  */
5076
static int s2io_ethtool_seeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom,
				u8 * data_buf)
{
	int len = eeprom->len, cnt = 0;
	u64 valid = 0, data;
	struct s2io_nic *sp = dev->priv;

	/* The caller-supplied magic must encode this adapter's PCI IDs
	 * (vendor in the low 16 bits, device in the high 16 bits);
	 * otherwise refuse to touch the EEPROM.
	 */
	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
		DBG_PRINT(ERR_DBG,
			  "ETHTOOL_WRITE_EEPROM Err: Magic value ");
		DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
			  eeprom->magic);
		return -EFAULT;
	}

	/* Write the user buffer out one byte per EEPROM cycle. */
	while (len) {
		data = (u32) data_buf[cnt] & 0x000000FF;
		if (data) {
			/* Non-zero bytes are positioned in bits 24..31 --
			 * presumably the lane write_eeprom() expects the
			 * data byte in; TODO confirm against
			 * write_eeprom()'s contract.
			 */
			valid = (u32) (data << 24);
		} else
			valid = data;

		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
			DBG_PRINT(ERR_DBG,
				  "ETHTOOL_WRITE_EEPROM Err: Cannot ");
			DBG_PRINT(ERR_DBG,
				  "write into the specified offset\n");
			return -EFAULT;
		}
		cnt++;
		len--;
	}

	return 0;
}
5113
5114 /**
5115  * s2io_register_test - reads and writes into all clock domains.
5116  * @sp : private member of the device structure, which is a pointer to the
5117  * s2io_nic structure.
 * @data : variable that returns the result of each of the tests conducted
 * by the driver.
5120  * Description:
5121  * Read and write into all clock domains. The NIC has 3 clock domains,
5122  * see that registers in all the three regions are accessible.
5123  * Return value:
5124  * 0 on success.
5125  */
5126
static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, exp_val;
	int fail = 0;

	/* Read back registers that have fixed, known reset values and
	 * compare against the expected constants; any mismatch marks
	 * the whole test as failed (but all levels still run).
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x123456789abcdefULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
	}

	val64 = readq(&bar0->rmac_pause_cfg);
	if (val64 != 0xc000ffff00000000ULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
	}

	val64 = readq(&bar0->rx_queue_cfg);
	/* The Rx queue configuration reset value differs between
	 * Xframe I and Xframe II devices.
	 */
	if (sp->device_type == XFRAME_II_DEVICE)
		exp_val = 0x0404040404040404ULL;
	else
		exp_val = 0x0808080808080808ULL;
	if (val64 != exp_val) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
	}

	val64 = readq(&bar0->xgxs_efifo_cfg);
	if (val64 != 0x000000001923141EULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
	}

	/* Write/read-back alternating bit patterns (0x5A.., 0xA5..)
	 * through the xmsi_data register to verify write paths.
	 */
	val64 = 0x5A5A5A5A5A5A5A5AULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
	}

	val64 = 0xA5A5A5A5A5A5A5A5ULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
	}

	/* 0 => all sub-tests passed, 1 => at least one failed. */
	*data = fail;
	return fail;
}
5180
5181 /**
5182  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5183  * @sp : private member of the device structure, which is a pointer to the
5184  * s2io_nic structure.
5185  * @data:variable that returns the result of each of the test conducted by
5186  * the driver.
5187  * Description:
5188  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5189  * register.
5190  * Return value:
5191  * 0 on success.
5192  */
5193
static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
{
	int fail = 0;
	u64 ret_data, org_4F0, org_7F0;
	u8 saved_4F0 = 0, saved_7F0 = 0;
	struct net_device *dev = sp->dev;

	/* Test Write Error at offset 0 */
	/* Note that SPI interface allows write access to all areas
	 * of EEPROM. Hence doing all negative testing only for Xframe I.
	 */
	if (sp->device_type == XFRAME_I_DEVICE)
		/* Offset 0 is expected to be write-protected on Xframe I,
		 * so a successful write (return 0) is a test failure.
		 */
		if (!write_eeprom(sp, 0, 0, 3))
			fail = 1;

	/* Save current values at offsets 0x4F0 and 0x7F0, so they can
	 * be restored after the destructive write tests below.
	 */
	if (!read_eeprom(sp, 0x4F0, &org_4F0))
		saved_4F0 = 1;
	if (!read_eeprom(sp, 0x7F0, &org_7F0))
		saved_7F0 = 1;

	/* Test Write at offset 4f0 */
	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x4F0, &ret_data))
		fail = 1;

	/* Verify the pattern read back matches what was written. */
	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
			"Data written %llx Data read %llx\n",
			dev->name, (unsigned long long)0x12345,
			(unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);

	/* Test Write Request Error at offset 0x7c */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0x07C, 0, 3))
			fail = 1;

	/* Test Write Request at offset 0x7f0 */
	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x7F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
			"Data written %llx Data read %llx\n",
			dev->name, (unsigned long long)0x12345,
			(unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* More negative tests: writes to these offsets are all
		 * expected to fail on Xframe I.
		 */
		/* Test Write Error at offset 0x80 */
		if (!write_eeprom(sp, 0x080, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0xfc */
		if (!write_eeprom(sp, 0x0FC, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0x100 */
		if (!write_eeprom(sp, 0x100, 0, 3))
			fail = 1;

		/* Test Write Error at offset 4ec */
		if (!write_eeprom(sp, 0x4EC, 0, 3))
			fail = 1;
	}

	/* Restore values at offsets 0x4F0 and 0x7F0 */
	if (saved_4F0)
		write_eeprom(sp, 0x4F0, org_4F0, 3);
	if (saved_7F0)
		write_eeprom(sp, 0x7F0, org_7F0, 3);

	/* 0 => passed, 1 => at least one sub-test failed. */
	*data = fail;
	return fail;
}
5281
5282 /**
5283  * s2io_bist_test - invokes the MemBist test of the card .
5284  * @sp : private member of the device structure, which is a pointer to the
5285  * s2io_nic structure.
5286  * @data:variable that returns the result of each of the test conducted by
5287  * the driver.
5288  * Description:
5289  * This invokes the MemBist test of the card. We give around
5290  * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
5292  * Return value:
5293  * 0 on success and -1 on failure.
5294  */
5295
5296 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5297 {
5298         u8 bist = 0;
5299         int cnt = 0, ret = -1;
5300
5301         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5302         bist |= PCI_BIST_START;
5303         pci_write_config_word(sp->pdev, PCI_BIST, bist);
5304
5305         while (cnt < 20) {
5306                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5307                 if (!(bist & PCI_BIST_START)) {
5308                         *data = (bist & PCI_BIST_CODE_MASK);
5309                         ret = 0;
5310                         break;
5311                 }
5312                 msleep(100);
5313                 cnt++;
5314         }
5315
5316         return ret;
5317 }
5318
5319 /**
 * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
5322  * s2io_nic structure.
5323  * @data: variable that returns the result of each of the test conducted by
5324  * the driver.
5325  * Description:
5326  * The function verifies the link state of the NIC and updates the input
5327  * argument 'data' appropriately.
5328  * Return value:
5329  * 0 on success.
5330  */
5331
5332 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5333 {
5334         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5335         u64 val64;
5336
5337         val64 = readq(&bar0->adapter_status);
5338         if(!(LINK_IS_UP(val64)))
5339                 *data = 1;
5340         else
5341                 *data = 0;
5342
5343         return *data;
5344 }
5345
5346 /**
5347  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of each of the test
5351  * conducted by the driver.
5352  * Description:
5353  *  This is one of the offline test that tests the read and write
5354  *  access to the RldRam chip on the NIC.
5355  * Return value:
5356  *  0 on success.
5357  */
5358
static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* Disable ECC so the raw test patterns are not corrected. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/* Put the RLDRAM controller into test mode. */
	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Two passes: the second uses the same patterns with the upper
	 * 48 bits inverted.
	 */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d2);

		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Start the write pass and poll for completion
		 * (up to 5 x 200 ms).
		 */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		/* Timed out waiting for DONE -- abandon the test. */
		if (cnt == 5)
			break;

		/* Start the read-back/compare pass and poll again
		 * (up to 5 x 500 ms).
		 */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		if (cnt == 5)
			break;

		/* Hardware sets PASS only if the compare succeeded. */
		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
5443
5444 /**
 *  s2io_ethtool_test - conducts 6 tests to determine the health of card.
5446  *  @sp : private member of the device structure, which is a pointer to the
5447  *  s2io_nic structure.
5448  *  @ethtest : pointer to a ethtool command specific structure that will be
5449  *  returned to the user.
5450  *  @data : variable that returns the result of each of the test
5451  * conducted by the driver.
5452  * Description:
5453  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
5454  *  the health of the card.
5455  * Return value:
5456  *  void
5457  */
5458
static void s2io_ethtool_test(struct net_device *dev,
			      struct ethtool_test *ethtest,
			      uint64_t * data)
{
	struct s2io_nic *sp = dev->priv;
	int orig_state = netif_running(sp->dev);

	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline Tests. */
		/* The interface must be brought down before running the
		 * offline tests; it is restored afterwards.
		 */
		if (orig_state)
			s2io_close(sp->dev);

		/* data[0]: register test, data[3]: RLDRAM test,
		 * data[1]: EEPROM test, data[4]: BIST -- with a device
		 * reset between the register/RLDRAM tests.
		 */
		if (s2io_register_test(sp, &data[0]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		if (s2io_rldram_test(sp, &data[3]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		if (s2io_eeprom_test(sp, &data[1]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (s2io_bist_test(sp, &data[4]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (orig_state)
			s2io_open(sp->dev);

		/* Link test (data[2]) is online-only. */
		data[2] = 0;
	} else {
		/* Online Tests. */
		if (!orig_state) {
			DBG_PRINT(ERR_DBG,
				  "%s: is not up, cannot run test\n",
				  dev->name);
			/* NOTE(review): these -1 markers are overwritten
			 * unconditionally below, and the link test still
			 * runs on a down interface -- looks like a
			 * missing early return; confirm intended
			 * behavior before changing.
			 */
			data[0] = -1;
			data[1] = -1;
			data[2] = -1;
			data[3] = -1;
			data[4] = -1;
		}

		if (s2io_link_test(sp, &data[2]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		/* Offline-only test slots are reported as 0 online. */
		data[0] = 0;
		data[1] = 0;
		data[3] = 0;
		data[4] = 0;
	}
}
5513
/**
 *  s2io_get_ethtool_stats - returns the extended statistics dump.
 *  @dev : device pointer.
 *  @estats : ethtool stats request structure (unused here).
 *  @tmp_stats : output array, filled in the same order as the ethtool
 *  strings tables: Xena hardware stats, then Hercules-only enhanced
 *  stats, then driver software stats.  The order below is an ABI with
 *  s2io_ethtool_get_strings() and must not be changed independently.
 */
static void s2io_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats,
				   u64 * tmp_stats)
{
	int i = 0;
	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Refresh the DMA'd hardware statistics block first. */
	s2io_updt_stats(sp);

	/* Tx MAC statistics.  Many hardware counters are split into a
	 * 32-bit value plus a 32-bit overflow counter; they are merged
	 * into one 64-bit value here.
	 */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
		le32_to_cpu(stat_info->tmac_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_bcst_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_any_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_vld_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_icmp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_rst_tcp);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
	tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_udp);
	/* Rx MAC statistics. */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_bcst_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
		<< 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
		 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_discarded_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
		 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_usized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_osized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_frag_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_jabber_frms);
	/* Received frame-size histogram. */
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_ip);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_icmp);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_udp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_err_drp_udp);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
	/* Per-queue received frame and queue-full counters. */
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_pause_cnt);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_accepted_ip);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
	/* PCI bus read/write transaction counters. */
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);

	/* Enhanced statistics exist only for Hercules */
	if(sp->device_type == XFRAME_II_DEVICE) {
		tmp_stats[i++] =
				le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
		tmp_stats[i++] =
				le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
		tmp_stats[i++] =
				le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
		tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
	}

	/* Driver-maintained software statistics (CPU-endian, no
	 * conversion needed).  The leading 0 is a placeholder slot.
	 */
	tmp_stats[i++] = 0;
	tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
	tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
	tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
	tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
	tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
	tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
	tmp_stats[i++] = stat_info->sw_stat.sending_both;
	tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
	tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
	/* Average LRO-aggregated packets = sum / num_aggregations. */
	if (stat_info->sw_stat.num_aggregations) {
		u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
		int count = 0;
		/*
		 * Since 64-bit divide does not work on all platforms,
		 * do repeated subtraction.
		 */
		while (tmp >= stat_info->sw_stat.num_aggregations) {
			tmp -= stat_info->sw_stat.num_aggregations;
			count++;
		}
		tmp_stats[i++] = count;
	}
	else
		tmp_stats[i++] = 0;
}
5746
5747 static int s2io_ethtool_get_regs_len(struct net_device *dev)
5748 {
5749         return (XENA_REG_SPACE);
5750 }
5751
5752
5753 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5754 {
5755         struct s2io_nic *sp = dev->priv;
5756
5757         return (sp->rx_csum);
5758 }
5759
5760 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5761 {
5762         struct s2io_nic *sp = dev->priv;
5763
5764         if (data)
5765                 sp->rx_csum = 1;
5766         else
5767                 sp->rx_csum = 0;
5768
5769         return 0;
5770 }
5771
5772 static int s2io_get_eeprom_len(struct net_device *dev)
5773 {
5774         return (XENA_EEPROM_SPACE);
5775 }
5776
5777 static int s2io_ethtool_self_test_count(struct net_device *dev)
5778 {
5779         return (S2IO_TEST_LEN);
5780 }
5781
5782 static void s2io_ethtool_get_strings(struct net_device *dev,
5783                                      u32 stringset, u8 * data)
5784 {
5785         int stat_size = 0;
5786         struct s2io_nic *sp = dev->priv;
5787
5788         switch (stringset) {
5789         case ETH_SS_TEST:
5790                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5791                 break;
5792         case ETH_SS_STATS:
5793                 stat_size = sizeof(ethtool_xena_stats_keys);
5794                 memcpy(data, &ethtool_xena_stats_keys,stat_size);
5795                 if(sp->device_type == XFRAME_II_DEVICE) {
5796                         memcpy(data + stat_size,
5797                                 &ethtool_enhanced_stats_keys,
5798                                 sizeof(ethtool_enhanced_stats_keys));
5799                         stat_size += sizeof(ethtool_enhanced_stats_keys);
5800                 }
5801
5802                 memcpy(data + stat_size, &ethtool_driver_stats_keys,
5803                         sizeof(ethtool_driver_stats_keys));
5804         }
5805 }
5806 static int s2io_ethtool_get_stats_count(struct net_device *dev)
5807 {
5808         struct s2io_nic *sp = dev->priv;
5809         int stat_count = 0;
5810         switch(sp->device_type) {
5811         case XFRAME_I_DEVICE:
5812                 stat_count = XFRAME_I_STAT_LEN;
5813         break;
5814
5815         case XFRAME_II_DEVICE:
5816                 stat_count = XFRAME_II_STAT_LEN;
5817         break;
5818         }
5819
5820         return stat_count;
5821 }
5822
5823 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
5824 {
5825         if (data)
5826                 dev->features |= NETIF_F_IP_CSUM;
5827         else
5828                 dev->features &= ~NETIF_F_IP_CSUM;
5829
5830         return 0;
5831 }
5832
5833 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
5834 {
5835         return (dev->features & NETIF_F_TSO) != 0;
5836 }
5837 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
5838 {
5839         if (data)
5840                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
5841         else
5842                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
5843
5844         return 0;
5845 }
5846
/*
 * ethtool operations exported by this driver.  Hooks with no
 * hardware-specific behavior use the generic ethtool_op_* helpers;
 * the rest are implemented above.  The table is registered with the
 * net_device elsewhere in this file.
 */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.get_rx_csum = s2io_ethtool_get_rx_csum,
	.set_rx_csum = s2io_ethtool_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = s2io_ethtool_op_get_tso,
	.set_tso = s2io_ethtool_op_set_tso,
	.get_ufo = ethtool_op_get_ufo,
	.set_ufo = ethtool_op_set_ufo,
	.self_test_count = s2io_ethtool_self_test_count,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.phys_id = s2io_ethtool_idnic,
	.get_stats_count = s2io_ethtool_get_stats_count,
	.get_ethtool_stats = s2io_get_ethtool_stats
};
5876
/**
 *  s2io_ioctl - Entry point for the Ioctl
 *  @dev :  Device pointer.
 *  @rq :   An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd :  This is used to distinguish between the different commands that
 *  can be passed to the IOCTL functions.
 *  Description:
 *  Currently there is no special functionality supported in IOCTL, hence
 *  this function always returns -EOPNOTSUPP.
 */

static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	return -EOPNOTSUPP;
}
5893
/**
 *  s2io_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: A driver entry point to change MTU size for the device.
 *  If the interface is running, the card is taken down and brought back
 *  up so the receive buffers are re-allocated for the new MTU; if it is
 *  down, the new size is programmed straight into the RMAC max payload
 *  length register.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */

static int s2io_change_mtu(struct net_device *dev, int new_mtu)
{
	struct s2io_nic *sp = dev->priv;

	/* Reject sizes outside the supported range (up to jumbo frames). */
	if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
		DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
			  dev->name);
		/* NOTE(review): -EINVAL would be the conventional errno for
		 * an out-of-range MTU; -EPERM kept as-is to preserve the
		 * userspace-visible behavior. */
		return -EPERM;
	}

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		/* Full down/up cycle: s2io_card_down frees the Rx buffers
		 * and s2io_card_up refills them using the new dev->mtu. */
		s2io_card_down(sp);
		netif_stop_queue(dev);
		if (s2io_card_up(sp)) {
			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
				  __FUNCTION__);
		}
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} else { /* Device is down */
		struct XENA_dev_config __iomem *bar0 = sp->bar0;
		u64 val64 = new_mtu;

		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
	}

	return 0;
}
5934
5935 /**
5936  *  s2io_tasklet - Bottom half of the ISR.
5937  *  @dev_adr : address of the device structure in dma_addr_t format.
5938  *  Description:
5939  *  This is the tasklet or the bottom half of the ISR. This is
5940  *  an extension of the ISR which is scheduled by the scheduler to be run
5941  *  when the load on the CPU is low. All low priority tasks of the ISR can
5942  *  be pushed into the tasklet. For now the tasklet is used only to
5943  *  replenish the Rx buffers in the Rx buffer descriptors.
5944  *  Return value:
5945  *  void.
5946  */
5947
5948 static void s2io_tasklet(unsigned long dev_addr)
5949 {
5950         struct net_device *dev = (struct net_device *) dev_addr;
5951         struct s2io_nic *sp = dev->priv;
5952         int i, ret;
5953         struct mac_info *mac_control;
5954         struct config_param *config;
5955
5956         mac_control = &sp->mac_control;
5957         config = &sp->config;
5958
5959         if (!TASKLET_IN_USE) {
5960                 for (i = 0; i < config->rx_ring_num; i++) {
5961                         ret = fill_rx_buffers(sp, i);
5962                         if (ret == -ENOMEM) {
5963                                 DBG_PRINT(INFO_DBG, "%s: Out of ",
5964                                           dev->name);
5965                                 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
5966                                 break;
5967                         } else if (ret == -EFILL) {
5968                                 DBG_PRINT(INFO_DBG,
5969                                           "%s: Rx Ring %d is full\n",
5970                                           dev->name, i);
5971                                 break;
5972                         }
5973                 }
5974                 clear_bit(0, (&sp->tasklet_status));
5975         }
5976 }
5977
/**
 * s2io_set_link - Set the link status
 * @work: work_struct embedded in the s2io_nic private structure
 * Description: Deferred task that reads the adapter status register and
 * drives the software link state (and the activity LED / GPIO on some
 * boards) up or down to match.  Runs under rtnl_lock and is serialized
 * against card reset via bit 0 of nic->link_state.
 */

static void s2io_set_link(struct work_struct *work)
{
	struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
	struct net_device *dev = nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64;
	u16 subid;

	rtnl_lock();

	/* Device was closed before the work item ran - nothing to do. */
	if (!netif_running(dev))
		goto out_unlock;

	if (test_and_set_bit(0, &(nic->link_state))) {
		/* The card is being reset, no point doing anything */
		goto out_unlock;
	}

	subid = nic->pdev->subsystem_device;
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Allow a small delay for the NICs self initiated
		 * cleanup to complete.
		 */
		msleep(100);
	}

	val64 = readq(&bar0->adapter_status);
	if (LINK_IS_UP(val64)) {
		/* Enable the adapter the first time the link comes up,
		 * but only once the hardware reports quiescence. */
		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
			if (verify_xena_quiescence(nic)) {
				val64 = readq(&bar0->adapter_control);
				val64 |= ADAPTER_CNTL_EN;
				writeq(val64, &bar0->adapter_control);
				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
					nic->device_type, subid)) {
					/* Drive the link LED via GPIO on
					 * boards with faulty indicators;
					 * read back to flush the write. */
					val64 = readq(&bar0->gpio_control);
					val64 |= GPIO_CTRL_GPIO_0;
					writeq(val64, &bar0->gpio_control);
					val64 = readq(&bar0->gpio_control);
				} else {
					val64 |= ADAPTER_LED_ON;
					writeq(val64, &bar0->adapter_control);
				}
				nic->device_enabled_once = TRUE;
			} else {
				DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
				DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
				netif_stop_queue(dev);
			}
		}
		/* Re-check: the link may have dropped while enabling. */
		val64 = readq(&bar0->adapter_status);
		if (!LINK_IS_UP(val64)) {
			DBG_PRINT(ERR_DBG, "%s:", dev->name);
			DBG_PRINT(ERR_DBG, " Link down after enabling ");
			DBG_PRINT(ERR_DBG, "device \n");
		} else
			s2io_link(nic, LINK_UP);
	} else {
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
						      subid)) {
			val64 = readq(&bar0->gpio_control);
			val64 &= ~GPIO_CTRL_GPIO_0;
			writeq(val64, &bar0->gpio_control);
			val64 = readq(&bar0->gpio_control);
		}
		s2io_link(nic, LINK_DOWN);
	}
	clear_bit(0, &(nic->link_state));

out_unlock:
	rtnl_unlock();
}
6057
6058 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6059                                 struct buffAdd *ba,
6060                                 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6061                                 u64 *temp2, int size)
6062 {
6063         struct net_device *dev = sp->dev;
6064         struct sk_buff *frag_list;
6065
6066         if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6067                 /* allocate skb */
6068                 if (*skb) {
6069                         DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6070                         /*
6071                          * As Rx frame are not going to be processed,
6072                          * using same mapped address for the Rxd
6073                          * buffer pointer
6074                          */
6075                         ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0;
6076                 } else {
6077                         *skb = dev_alloc_skb(size);
6078                         if (!(*skb)) {
6079                                 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6080                                 DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
6081                                 return -ENOMEM ;
6082                         }
6083                         /* storing the mapped addr in a temp variable
6084                          * such it will be used for next rxd whose
6085                          * Host Control is NULL
6086                          */
6087                         ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0 =
6088                                 pci_map_single( sp->pdev, (*skb)->data,
6089                                         size - NET_IP_ALIGN,
6090                                         PCI_DMA_FROMDEVICE);
6091                         rxdp->Host_Control = (unsigned long) (*skb);
6092                 }
6093         } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6094                 /* Two buffer Mode */
6095                 if (*skb) {
6096                         ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
6097                         ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
6098                         ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
6099                 } else {
6100                         *skb = dev_alloc_skb(size);
6101                         if (!(*skb)) {
6102                                 DBG_PRINT(INFO_DBG, "%s: dev_alloc_skb failed\n",
6103                                         dev->name);
6104                                 return -ENOMEM;
6105                         }
6106                         ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
6107                                 pci_map_single(sp->pdev, (*skb)->data,
6108                                                dev->mtu + 4,
6109                                                PCI_DMA_FROMDEVICE);
6110                         ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
6111                                 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6112                                                 PCI_DMA_FROMDEVICE);
6113                         rxdp->Host_Control = (unsigned long) (*skb);
6114
6115                         /* Buffer-1 will be dummy buffer not used */
6116                         ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
6117                                 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6118                                                PCI_DMA_FROMDEVICE);
6119                 }
6120         } else if ((rxdp->Host_Control == 0)) {
6121                 /* Three buffer mode */
6122                 if (*skb) {
6123                         ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
6124                         ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
6125                         ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
6126                 } else {
6127                         *skb = dev_alloc_skb(size);
6128                         if (!(*skb)) {
6129                                 DBG_PRINT(INFO_DBG, "%s: dev_alloc_skb failed\n",
6130                                           dev->name);
6131                                 return -ENOMEM;
6132                         }
6133                         ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
6134                                 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6135                                                PCI_DMA_FROMDEVICE);
6136                         /* Buffer-1 receives L3/L4 headers */
6137                         ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
6138                                 pci_map_single( sp->pdev, (*skb)->data,
6139                                                 l3l4hdr_size + 4,
6140                                                 PCI_DMA_FROMDEVICE);
6141                         /*
6142                          * skb_shinfo(skb)->frag_list will have L4
6143                          * data payload
6144                          */
6145                         skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu +
6146                                                                    ALIGN_SIZE);
6147                         if (skb_shinfo(*skb)->frag_list == NULL) {
6148                                 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb \
6149                                           failed\n ", dev->name);
6150                                 return -ENOMEM ;
6151                         }
6152                         frag_list = skb_shinfo(*skb)->frag_list;
6153                         frag_list->next = NULL;
6154                         /*
6155                          * Buffer-2 receives L4 data payload
6156                          */
6157                         ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
6158                                 pci_map_single( sp->pdev, frag_list->data,
6159                                                 dev->mtu, PCI_DMA_FROMDEVICE);
6160                 }
6161         }
6162         return 0;
6163 }
6164 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6165                                 int size)
6166 {
6167         struct net_device *dev = sp->dev;
6168         if (sp->rxd_mode == RXD_MODE_1) {
6169                 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6170         } else if (sp->rxd_mode == RXD_MODE_3B) {
6171                 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6172                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6173                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6174         } else {
6175                 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6176                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
6177                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
6178         }
6179 }
6180
6181 static  int rxd_owner_bit_reset(struct s2io_nic *sp)
6182 {
6183         int i, j, k, blk_cnt = 0, size;
6184         struct mac_info * mac_control = &sp->mac_control;
6185         struct config_param *config = &sp->config;
6186         struct net_device *dev = sp->dev;
6187         struct RxD_t *rxdp = NULL;
6188         struct sk_buff *skb = NULL;
6189         struct buffAdd *ba = NULL;
6190         u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6191
6192         /* Calculate the size based on ring mode */
6193         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6194                 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6195         if (sp->rxd_mode == RXD_MODE_1)
6196                 size += NET_IP_ALIGN;
6197         else if (sp->rxd_mode == RXD_MODE_3B)
6198                 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6199         else
6200                 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
6201
6202         for (i = 0; i < config->rx_ring_num; i++) {
6203                 blk_cnt = config->rx_cfg[i].num_rxd /
6204                         (rxd_count[sp->rxd_mode] +1);
6205
6206                 for (j = 0; j < blk_cnt; j++) {
6207                         for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6208                                 rxdp = mac_control->rings[i].
6209                                         rx_blocks[j].rxds[k].virt_addr;
6210                                 if(sp->rxd_mode >= RXD_MODE_3A)
6211                                         ba = &mac_control->rings[i].ba[j][k];
6212                                 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6213                                                        &skb,(u64 *)&temp0_64,
6214                                                        (u64 *)&temp1_64,
6215                                                        (u64 *)&temp2_64,
6216                                                         size) == ENOMEM) {
6217                                         return 0;
6218                                 }
6219
6220                                 set_rxd_buffer_size(sp, rxdp, size);
6221                                 wmb();
6222                                 /* flip the Ownership bit to Hardware */
6223                                 rxdp->Control_1 |= RXD_OWN_XENA;
6224                         }
6225                 }
6226         }
6227         return 0;
6228
6229 }
6230
/*
 * s2io_add_isr - Enable the configured interrupt mode and register ISRs.
 * @sp: device private structure.
 *
 * Tries to enable MSI or MSI-X as configured, falling back to INTA if
 * enabling fails, then registers the matching interrupt handler(s):
 * one shared handler for MSI/INTA, or one vector per fifo/ring for
 * MSI-X.  Returns 0 on success, -1 on any registration failure.
 */
static int s2io_add_isr(struct s2io_nic * sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->intr_type == MSI)
		ret = s2io_enable_msi(sp);
	else if (sp->intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		/* Could not enable the requested mode - fall back to
		 * legacy line interrupts. */
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->intr_type = INTA;
	}

	/* Store the values of the MSIX table in the struct s2io_nic structure */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->intr_type == MSI) {
		err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
			IRQF_SHARED, sp->name, dev);
		if (err) {
			pci_disable_msi(sp->pdev);
			DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n",
				  dev->name);
			return -1;
		}
	}
	if (sp->intr_type == MSI_X) {
		int i, msix_tx_cnt=0,msix_rx_cnt=0;

		/* Entry 0 is the alarm vector; fifo/ring vectors start
		 * at 1 and the table is terminated by in_use != MSIX_FLG. */
		for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
			if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
				sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_fifo_handle, 0, sp->desc[i],
						  sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_tx_cnt++;
				}
			} else {
				sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					  s2io_msix_ring_handle, 0, sp->desc[i],
						  sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_rx_cnt++;
				}
			}
			/* err covers whichever request_irq ran above. */
			if (err) {
				DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
					  "failed\n", dev->name, i);
				DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
				return -1;
			}
			sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
		}
		/* NOTE(review): bare printk without a KERN_ level, unlike
		 * the DBG_PRINT wrappers used elsewhere in this file. */
		printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
		printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
	}
	if (sp->intr_type == INTA) {
		err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
				sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
/*
 * s2io_rem_isr - Free registered IRQs and disable MSI / MSI-X.
 * @sp: device private structure.
 *
 * Mirrors s2io_add_isr: releases every registered vector, turns the
 * message-signalled interrupt mode back off, then polls up to ~50ms
 * for in-flight handlers (tracked in sp->isr_cnt) to drain.
 */
static void s2io_rem_isr(struct s2io_nic * sp)
{
	int cnt = 0;
	struct net_device *dev = sp->dev;

	if (sp->intr_type == MSI_X) {
		int i;
		u16 msi_control;

		for (i=1; (sp->s2io_entries[i].in_use ==
			MSIX_REGISTERED_SUCCESS); i++) {
			int vector = sp->entries[i].vector;
			void *arg = sp->s2io_entries[i].arg;

			free_irq(vector, arg);
		}
		/* NOTE(review): 0x42 is presumably the MSI-X control word
		 * offset in this device's config space - confirm against
		 * the PCI capability list rather than hard-coding. */
		pci_read_config_word(sp->pdev, 0x42, &msi_control);
		msi_control &= 0xFFFE; /* Disable MSI */
		pci_write_config_word(sp->pdev, 0x42, msi_control);

		pci_disable_msix(sp->pdev);
	} else {
		free_irq(sp->pdev->irq, dev);
		if (sp->intr_type == MSI) {
			u16 val;

			/* NOTE(review): 0x4c likewise looks like the MSI
			 * control word offset; the xor toggles the enable
			 * bit - TODO confirm. */
			pci_disable_msi(sp->pdev);
			pci_read_config_word(sp->pdev, 0x4c, &val);
			val ^= 0x1;
			pci_write_config_word(sp->pdev, 0x4c, val);
		}
	}
	/* Waiting till all Interrupt handlers are complete */
	cnt = 0;
	do {
		msleep(10);
		if (!atomic_read(&sp->isr_cnt))
			break;
		cnt++;
	} while(cnt < 5);
}
6364
/*
 * s2io_card_down - Bring the NIC down.
 * @sp: device private structure.
 *
 * Stops the alarm timer, traffic, interrupts and the tasklet, waits
 * for the hardware to become quiescent, resets the adapter, and frees
 * all Tx/Rx buffers.  Serialized against s2io_set_link via bit 0 of
 * sp->link_state.  The ordering of these steps matters; do not reorder.
 */
static void s2io_card_down(struct s2io_nic * sp)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	unsigned long flags;
	register u64 val64 = 0;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(0, &(sp->link_state))) {
		msleep(50);
	}
	atomic_set(&sp->card_state, CARD_DOWN);

	/* disable Tx and Rx traffic on the NIC */
	stop_nic(sp);

	s2io_rem_isr(sp);

	/* Kill tasklet. */
	tasklet_kill(&sp->task);

	/* Check if the device is Quiescent and then Reset the NIC */
	do {
		/* As per the HW requirement we need to replenish the
		 * receive buffer to avoid the ring bump. Since there is
		 * no intention of processing the Rx frame at this point we are
		 * just setting the ownership bit of rxd in Each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if(verify_pcc_quiescent(sp, sp->device_enabled_once))
			break;
		}

		msleep(50);
		cnt++;
		/* Give up after ~500ms and reset anyway. */
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG,
				  "s2io_close:Device not Quiescent ");
			DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
				  (unsigned long long) val64);
			break;
		}
	} while (1);
	s2io_reset(sp);

	spin_lock_irqsave(&sp->tx_lock, flags);
	/* Free all Tx buffers */
	free_tx_buffers(sp);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	/* Free all Rx buffers */
	spin_lock_irqsave(&sp->rx_lock, flags);
	free_rx_buffers(sp);
	spin_unlock_irqrestore(&sp->rx_lock, flags);

	clear_bit(0, &(sp->link_state));
}
6428
/*
 * s2io_card_up - Bring the NIC up.
 * @sp: device private structure.
 *
 * Initializes the hardware registers, fills every Rx ring, restores
 * the receive mode, starts traffic, registers the interrupt handlers,
 * arms the alarm timer and tasklet, and finally enables interrupts.
 * On any failure the card is reset and already-allocated Rx buffers
 * are freed.  Returns 0 on success or a negative errno.
 */
static int s2io_card_up(struct s2io_nic * sp)
{
	int i, ret = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	struct net_device *dev = (struct net_device *) sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	if (init_nic(sp) != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		s2io_reset(sp);
		return -ENODEV;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		if ((ret = fill_rx_buffers(sp, i))) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  atomic_read(&sp->rx_bufs_left[i]));
	}
	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos= 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	if (sp->lro) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use(if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		/* MSI-X may have partially registered vectors; undo them. */
		if (sp->intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	/* Enable tasklet for the device */
	tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);

	/*  Enable select interrupts */
	if (sp->intr_type != INTA)
		en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
	else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR | RX_PIC_INTR;
		interruptible |= TX_MAC_INTR | RX_MAC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}


	atomic_set(&sp->card_state, CARD_UP);
	return 0;
}
6518
6519 /**
6520  * s2io_restart_nic - Resets the NIC.
6521  * @data : long pointer to the device private structure
6522  * Description:
6523  * This function is scheduled to be run by the s2io_tx_watchdog
6524  * function after 0.5 secs to reset the NIC. The idea is to reduce
6525  * the run time of the watch dog routine which is run holding a
6526  * spin lock.
6527  */
6528
6529 static void s2io_restart_nic(struct work_struct *work)
6530 {
6531         struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
6532         struct net_device *dev = sp->dev;
6533
6534         rtnl_lock();
6535
6536         if (!netif_running(dev))
6537                 goto out_unlock;
6538
6539         s2io_card_down(sp);
6540         if (s2io_card_up(sp)) {
6541                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6542                           dev->name);
6543         }
6544         netif_wake_queue(dev);
6545         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6546                   dev->name);
6547 out_unlock:
6548         rtnl_unlock();
6549 }
6550
6551 /**
6552  *  s2io_tx_watchdog - Watchdog for transmit side.
6553  *  @dev : Pointer to net device structure
6554  *  Description:
6555  *  This function is triggered if the Tx Queue is stopped
6556  *  for a pre-defined amount of time when the Interface is still up.
6557  *  If the Interface is jammed in such a situation, the hardware is
6558  *  reset (by s2io_close) and restarted again (by s2io_open) to
6559  *  overcome any problem that might have been caused in the hardware.
6560  *  Return value:
6561  *  void
6562  */
6563
6564 static void s2io_tx_watchdog(struct net_device *dev)
6565 {
6566         struct s2io_nic *sp = dev->priv;
6567
6568         if (netif_carrier_ok(dev)) {
6569                 schedule_work(&sp->rst_timer_task);
6570                 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
6571         }
6572 }
6573
6574 /**
6575  *   rx_osm_handler - To perform some OS related operations on SKB.
6576  *   @sp: private member of the device structure,pointer to s2io_nic structure.
6577  *   @skb : the socket buffer pointer.
6578  *   @len : length of the packet
6579  *   @cksum : FCS checksum of the frame.
6580  *   @ring_no : the ring from which this RxD was extracted.
6581  *   Description:
6582  *   This function is called by the Rx interrupt serivce routine to perform
6583  *   some OS related operations on the SKB before passing it to the upper
6584  *   layers. It mainly checks if the checksum is OK, if so adds it to the
6585  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
6586  *   to the upper layer. If the checksum is wrong, it increments the Rx
6587  *   packet error count, frees the SKB and returns error.
6588  *   Return value:
6589  *   SUCCESS on success and -1 on failure.
6590  */
6591 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
6592 {
6593         struct s2io_nic *sp = ring_data->nic;
6594         struct net_device *dev = (struct net_device *) sp->dev;
6595         struct sk_buff *skb = (struct sk_buff *)
6596                 ((unsigned long) rxdp->Host_Control);
6597         int ring_no = ring_data->ring_no;
6598         u16 l3_csum, l4_csum;
6599         unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
6600         struct lro *lro;
6601
6602         skb->dev = dev;
6603
6604         if (err) {
6605                 /* Check for parity error */
6606                 if (err & 0x1) {
6607                         sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
6608                 }
6609
6610                 /*
6611                 * Drop the packet if bad transfer code. Exception being
6612                 * 0x5, which could be due to unsupported IPv6 extension header.
6613                 * In this case, we let stack handle the packet.
6614                 * Note that in this case, since checksum will be incorrect,
6615                 * stack will validate the same.
6616                 */
6617                 if (err && ((err >> 48) != 0x5)) {
6618                         DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
6619                                 dev->name, err);
6620                         sp->stats.rx_crc_errors++;
6621                         dev_kfree_skb(skb);
6622                         atomic_dec(&sp->rx_bufs_left[ring_no]);
6623                         rxdp->Host_Control = 0;
6624                         return 0;
6625                 }
6626         }
6627
6628         /* Updating statistics */
6629         rxdp->Host_Control = 0;
6630         sp->stats.rx_packets++;
6631         if (sp->rxd_mode == RXD_MODE_1) {
6632                 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
6633
6634                 sp->stats.rx_bytes += len;
6635                 skb_put(skb, len);
6636
6637         } else if (sp->rxd_mode >= RXD_MODE_3A) {
6638                 int get_block = ring_data->rx_curr_get_info.block_index;
6639                 int get_off = ring_data->rx_curr_get_info.offset;
6640                 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
6641                 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
6642                 unsigned char *buff = skb_push(skb, buf0_len);
6643
6644                 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
6645                 sp->stats.rx_bytes += buf0_len + buf2_len;
6646                 memcpy(buff, ba->ba_0, buf0_len);
6647
6648                 if (sp->rxd_mode == RXD_MODE_3A) {
6649                         int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);
6650
6651                         skb_put(skb, buf1_len);
6652                         skb->len += buf2_len;
6653                         skb->data_len += buf2_len;
6654                         skb_put(skb_shinfo(skb)->frag_list, buf2_len);
6655                         sp->stats.rx_bytes += buf1_len;
6656
6657                 } else
6658                         skb_put(skb, buf2_len);
6659         }
6660
6661         if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
6662             (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
6663             (sp->rx_csum)) {
6664                 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
6665                 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
6666                 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
6667                         /*
6668                          * NIC verifies if the Checksum of the received
6669                          * frame is Ok or not and accordingly returns
6670                          * a flag in the RxD.
6671                          */
6672                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6673                         if (sp->lro) {
6674                                 u32 tcp_len;
6675                                 u8 *tcp;
6676                                 int ret = 0;
6677
6678                                 ret = s2io_club_tcp_session(skb->data, &tcp,
6679                                                 &tcp_len, &lro, rxdp, sp);
6680                                 switch (ret) {
6681                                         case 3: /* Begin anew */
6682                                                 lro->parent = skb;
6683                                                 goto aggregate;
6684                                         case 1: /* Aggregate */
6685                                         {
6686                                                 lro_append_pkt(sp, lro,
6687                                                         skb, tcp_len);
6688                                                 goto aggregate;
6689                                         }
6690                                         case 4: /* Flush session */
6691                                         {
6692                                                 lro_append_pkt(sp, lro,
6693                                                         skb, tcp_len);
6694                                                 queue_rx_frame(lro->parent);
6695                                                 clear_lro_session(lro);
6696                                                 sp->mac_control.stats_info->
6697                                                     sw_stat.flush_max_pkts++;
6698                                                 goto aggregate;
6699                                         }
6700                                         case 2: /* Flush both */
6701                                                 lro->parent->data_len =
6702                                                         lro->frags_len;
6703                                                 sp->mac_control.stats_info->
6704                                                      sw_stat.sending_both++;
6705                                                 queue_rx_frame(lro->parent);
6706                                                 clear_lro_session(lro);
6707                                                 goto send_up;
6708                                         case 0: /* sessions exceeded */
6709                                         case -1: /* non-TCP or not
6710                                                   * L2 aggregatable
6711                                                   */
6712                                         case 5: /*
6713                                                  * First pkt in session not
6714                                                  * L3/L4 aggregatable
6715                                                  */
6716                                                 break;
6717                                         default:
6718                                                 DBG_PRINT(ERR_DBG,
6719                                                         "%s: Samadhana!!\n",
6720                                                          __FUNCTION__);
6721                                                 BUG();
6722                                 }
6723                         }
6724                 } else {
6725                         /*
6726                          * Packet with erroneous checksum, let the
6727                          * upper layers deal with it.
6728                          */
6729                         skb->ip_summed = CHECKSUM_NONE;
6730                 }
6731         } else {
6732                 skb->ip_summed = CHECKSUM_NONE;
6733         }
6734
6735         if (!sp->lro) {
6736                 skb->protocol = eth_type_trans(skb, dev);
6737                 if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
6738                         vlan_strip_flag)) {
6739                         /* Queueing the vlan frame to the upper layer */
6740                         if (napi)
6741                                 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
6742                                         RXD_GET_VLAN_TAG(rxdp->Control_2));
6743                         else
6744                                 vlan_hwaccel_rx(skb, sp->vlgrp,
6745                                         RXD_GET_VLAN_TAG(rxdp->Control_2));
6746                 } else {
6747                         if (napi)
6748                                 netif_receive_skb(skb);
6749                         else
6750                                 netif_rx(skb);
6751                 }
6752         } else {
6753 send_up:
6754                 queue_rx_frame(skb);
6755         }
6756         dev->last_rx = jiffies;
6757 aggregate:
6758         atomic_dec(&sp->rx_bufs_left[ring_no]);
6759         return SUCCESS;
6760 }
6761
6762 /**
6763  *  s2io_link - stops/starts the Tx queue.
6764  *  @sp : private member of the device structure, which is a pointer to the
6765  *  s2io_nic structure.
6766  *  @link : inidicates whether link is UP/DOWN.
6767  *  Description:
6768  *  This function stops/starts the Tx queue depending on whether the link
6769  *  status of the NIC is is down or up. This is called by the Alarm
6770  *  interrupt handler whenever a link change interrupt comes up.
6771  *  Return value:
6772  *  void.
6773  */
6774
6775 static void s2io_link(struct s2io_nic * sp, int link)
6776 {
6777         struct net_device *dev = (struct net_device *) sp->dev;
6778
6779         if (link != sp->last_link_state) {
6780                 if (link == LINK_DOWN) {
6781                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
6782                         netif_carrier_off(dev);
6783                 } else {
6784                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
6785                         netif_carrier_on(dev);
6786                 }
6787         }
6788         sp->last_link_state = link;
6789 }
6790
6791 /**
6792  *  get_xena_rev_id - to identify revision ID of xena.
6793  *  @pdev : PCI Dev structure
6794  *  Description:
6795  *  Function to identify the Revision ID of xena.
6796  *  Return value:
6797  *  returns the revision ID of the device.
6798  */
6799
6800 static int get_xena_rev_id(struct pci_dev *pdev)
6801 {
6802         u8 id = 0;
6803         int ret;
6804         ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
6805         return id;
6806 }
6807
6808 /**
6809  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
6810  *  @sp : private member of the device structure, which is a pointer to the
6811  *  s2io_nic structure.
6812  *  Description:
6813  *  This function initializes a few of the PCI and PCI-X configuration registers
6814  *  with recommended values.
6815  *  Return value:
6816  *  void
6817  */
6818
6819 static void s2io_init_pci(struct s2io_nic * sp)
6820 {
6821         u16 pci_cmd = 0, pcix_cmd = 0;
6822
6823         /* Enable Data Parity Error Recovery in PCI-X command register. */
6824         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6825                              &(pcix_cmd));
6826         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6827                               (pcix_cmd | 1));
6828         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6829                              &(pcix_cmd));
6830
6831         /* Set the PErr Response bit in PCI command register. */
6832         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6833         pci_write_config_word(sp->pdev, PCI_COMMAND,
6834                               (pci_cmd | PCI_COMMAND_PARITY));
6835         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6836 }
6837
6838 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6839 {
6840         if ( tx_fifo_num > 8) {
6841                 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
6842                          "supported\n");
6843                 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
6844                 tx_fifo_num = 8;
6845         }
6846         if ( rx_ring_num > 8) {
6847                 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
6848                          "supported\n");
6849                 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
6850                 rx_ring_num = 8;
6851         }
6852         if (*dev_intr_type != INTA)
6853                 napi = 0;
6854
6855 #ifndef CONFIG_PCI_MSI
6856         if (*dev_intr_type != INTA) {
6857                 DBG_PRINT(ERR_DBG, "s2io: This kernel does not support"
6858                           "MSI/MSI-X. Defaulting to INTA\n");
6859                 *dev_intr_type = INTA;
6860         }
6861 #else
6862         if (*dev_intr_type > MSI_X) {
6863                 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
6864                           "Defaulting to INTA\n");
6865                 *dev_intr_type = INTA;
6866         }
6867 #endif
6868         if ((*dev_intr_type == MSI_X) &&
6869                         ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
6870                         (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
6871                 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
6872                                         "Defaulting to INTA\n");
6873                 *dev_intr_type = INTA;
6874         }
6875
6876         if (rx_ring_mode > 3) {
6877                 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
6878                 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
6879                 rx_ring_mode = 3;
6880         }
6881         return SUCCESS;
6882 }
6883
6884 /**
6885  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
6886  * or Traffic class respectively.
6887  * @nic: device peivate variable
6888  * Description: The function configures the receive steering to
6889  * desired receive ring.
6890  * Return Value:  SUCCESS on success and
6891  * '-1' on failure (endian settings incorrect).
6892  */
6893 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
6894 {
6895         struct XENA_dev_config __iomem *bar0 = nic->bar0;
6896         register u64 val64 = 0;
6897
6898         if (ds_codepoint > 63)
6899                 return FAILURE;
6900
6901         val64 = RTS_DS_MEM_DATA(ring);
6902         writeq(val64, &bar0->rts_ds_mem_data);
6903
6904         val64 = RTS_DS_MEM_CTRL_WE |
6905                 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
6906                 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
6907
6908         writeq(val64, &bar0->rts_ds_mem_ctrl);
6909
6910         return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
6911                                 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
6912                                 S2IO_BIT_RESET);
6913 }
6914
6915 /**
6916  *  s2io_init_nic - Initialization of the adapter .
6917  *  @pdev : structure containing the PCI related information of the device.
6918  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
6919  *  Description:
6920  *  The function initializes an adapter identified by the pci_dec structure.
6921  *  All OS related initialization including memory and device structure and
6922  *  initlaization of the device private variable is done. Also the swapper
6923  *  control register is initialized to enable read and write into the I/O
6924  *  registers of the device.
6925  *  Return value:
6926  *  returns 0 on success and negative on failure.
6927  */
6928
6929 static int __devinit
6930 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6931 {
6932         struct s2io_nic *sp;
6933         struct net_device *dev;
6934         int i, j, ret;
6935         int dma_flag = FALSE;
6936         u32 mac_up, mac_down;
6937         u64 val64 = 0, tmp64 = 0;
6938         struct XENA_dev_config __iomem *bar0 = NULL;
6939         u16 subid;
6940         struct mac_info *mac_control;
6941         struct config_param *config;
6942         int mode;
6943         u8 dev_intr_type = intr_type;
6944
6945         if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
6946                 return ret;
6947
6948         if ((ret = pci_enable_device(pdev))) {
6949                 DBG_PRINT(ERR_DBG,
6950                           "s2io_init_nic: pci_enable_device failed\n");
6951                 return ret;
6952         }
6953
6954         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
6955                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
6956                 dma_flag = TRUE;
6957                 if (pci_set_consistent_dma_mask
6958                     (pdev, DMA_64BIT_MASK)) {
6959                         DBG_PRINT(ERR_DBG,
6960                                   "Unable to obtain 64bit DMA for \
6961                                         consistent allocations\n");
6962                         pci_disable_device(pdev);
6963                         return -ENOMEM;
6964                 }
6965         } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
6966                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
6967         } else {
6968                 pci_disable_device(pdev);
6969                 return -ENOMEM;
6970         }
6971         if (dev_intr_type != MSI_X) {
6972                 if (pci_request_regions(pdev, s2io_driver_name)) {
6973                         DBG_PRINT(ERR_DBG, "Request Regions failed\n");
6974                         pci_disable_device(pdev);
6975                         return -ENODEV;
6976                 }
6977         }
6978         else {
6979                 if (!(request_mem_region(pci_resource_start(pdev, 0),
6980                          pci_resource_len(pdev, 0), s2io_driver_name))) {
6981                         DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
6982                         pci_disable_device(pdev);
6983                         return -ENODEV;
6984                 }
6985                 if (!(request_mem_region(pci_resource_start(pdev, 2),
6986                          pci_resource_len(pdev, 2), s2io_driver_name))) {
6987                         DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
6988                         release_mem_region(pci_resource_start(pdev, 0),
6989                                    pci_resource_len(pdev, 0));
6990                         pci_disable_device(pdev);
6991                         return -ENODEV;
6992                 }
6993         }
6994
6995         dev = alloc_etherdev(sizeof(struct s2io_nic));
6996         if (dev == NULL) {
6997                 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
6998                 pci_disable_device(pdev);
6999                 pci_release_regions(pdev);
7000                 return -ENODEV;
7001         }
7002
7003         pci_set_master(pdev);
7004         pci_set_drvdata(pdev, dev);
7005         SET_MODULE_OWNER(dev);
7006         SET_NETDEV_DEV(dev, &pdev->dev);
7007
7008         /*  Private member variable initialized to s2io NIC structure */
7009         sp = dev->priv;
7010         memset(sp, 0, sizeof(struct s2io_nic));
7011         sp->dev = dev;
7012         sp->pdev = pdev;
7013         sp->high_dma_flag = dma_flag;
7014         sp->device_enabled_once = FALSE;
7015         if (rx_ring_mode == 1)
7016                 sp->rxd_mode = RXD_MODE_1;
7017         if (rx_ring_mode == 2)
7018                 sp->rxd_mode = RXD_MODE_3B;
7019         if (rx_ring_mode == 3)
7020                 sp->rxd_mode = RXD_MODE_3A;
7021
7022         sp->intr_type = dev_intr_type;
7023
7024         if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7025                 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7026                 sp->device_type = XFRAME_II_DEVICE;
7027         else
7028                 sp->device_type = XFRAME_I_DEVICE;
7029
7030         sp->lro = lro;
7031
7032         /* Initialize some PCI/PCI-X fields of the NIC. */
7033         s2io_init_pci(sp);
7034
7035         /*
7036          * Setting the device configuration parameters.
7037          * Most of these parameters can be specified by the user during
7038          * module insertion as they are module loadable parameters. If
7039          * these parameters are not not specified during load time, they
7040          * are initialized with default values.
7041          */
7042         mac_control = &sp->mac_control;
7043         config = &sp->config;
7044
7045         /* Tx side parameters. */
7046         config->tx_fifo_num = tx_fifo_num;
7047         for (i = 0; i < MAX_TX_FIFOS; i++) {
7048                 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7049                 config->tx_cfg[i].fifo_priority = i;
7050         }
7051
7052         /* mapping the QoS priority to the configured fifos */
7053         for (i = 0; i < MAX_TX_FIFOS; i++)
7054                 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
7055
7056         config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7057         for (i = 0; i < config->tx_fifo_num; i++) {
7058                 config->tx_cfg[i].f_no_snoop =
7059                     (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7060                 if (config->tx_cfg[i].fifo_len < 65) {
7061                         config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7062                         break;
7063                 }
7064         }
7065         /* + 2 because one Txd for skb->data and one Txd for UFO */
7066         config->max_txds = MAX_SKB_FRAGS + 2;
7067
7068         /* Rx side parameters. */
7069         config->rx_ring_num = rx_ring_num;
7070         for (i = 0; i < MAX_RX_RINGS; i++) {
7071                 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7072                     (rxd_count[sp->rxd_mode] + 1);
7073                 config->rx_cfg[i].ring_priority = i;
7074         }
7075
7076         for (i = 0; i < rx_ring_num; i++) {
7077                 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7078                 config->rx_cfg[i].f_no_snoop =
7079                     (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7080         }
7081
7082         /*  Setting Mac Control parameters */
7083         mac_control->rmac_pause_time = rmac_pause_time;
7084         mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7085         mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7086
7087
7088         /* Initialize Ring buffer parameters. */
7089         for (i = 0; i < config->rx_ring_num; i++)
7090                 atomic_set(&sp->rx_bufs_left[i], 0);
7091
7092         /* Initialize the number of ISRs currently running */
7093         atomic_set(&sp->isr_cnt, 0);
7094
7095         /*  initialize the shared memory used by the NIC and the host */
7096         if (init_shared_mem(sp)) {
7097                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7098                           dev->name);
7099                 ret = -ENOMEM;
7100                 goto mem_alloc_failed;
7101         }
7102
7103         sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7104                                      pci_resource_len(pdev, 0));
7105         if (!sp->bar0) {
7106                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7107                           dev->name);
7108                 ret = -ENOMEM;
7109                 goto bar0_remap_failed;
7110         }
7111
7112         sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7113                                      pci_resource_len(pdev, 2));
7114         if (!sp->bar1) {
7115                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7116                           dev->name);
7117                 ret = -ENOMEM;
7118                 goto bar1_remap_failed;
7119         }
7120
7121         dev->irq = pdev->irq;
7122         dev->base_addr = (unsigned long) sp->bar0;
7123
7124         /* Initializing the BAR1 address as the start of the FIFO pointer. */
7125         for (j = 0; j < MAX_TX_FIFOS; j++) {
7126                 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7127                     (sp->bar1 + (j * 0x00020000));
7128         }
7129
7130         /*  Driver entry points */
7131         dev->open = &s2io_open;
7132         dev->stop = &s2io_close;
7133         dev->hard_start_xmit = &s2io_xmit;
7134         dev->get_stats = &s2io_get_stats;
7135         dev->set_multicast_list = &s2io_set_multicast;
7136         dev->do_ioctl = &s2io_ioctl;
7137         dev->change_mtu = &s2io_change_mtu;
7138         SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7139         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7140         dev->vlan_rx_register = s2io_vlan_rx_register;
7141         dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
7142
7143         /*
7144          * will use eth_mac_addr() for  dev->set_mac_address
7145          * mac address will be set every time dev->open() is called
7146          */
7147         dev->poll = s2io_poll;
7148         dev->weight = 32;
7149
7150 #ifdef CONFIG_NET_POLL_CONTROLLER
7151         dev->poll_controller = s2io_netpoll;
7152 #endif
7153
7154         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7155         if (sp->high_dma_flag == TRUE)
7156                 dev->features |= NETIF_F_HIGHDMA;
7157         dev->features |= NETIF_F_TSO;
7158         dev->features |= NETIF_F_TSO6;
7159         if ((sp->device_type & XFRAME_II_DEVICE) && (ufo))  {
7160                 dev->features |= NETIF_F_UFO;
7161                 dev->features |= NETIF_F_HW_CSUM;
7162         }
7163
7164         dev->tx_timeout = &s2io_tx_watchdog;
7165         dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7166         INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7167         INIT_WORK(&sp->set_link_task, s2io_set_link);
7168
7169         pci_save_state(sp->pdev);
7170
7171         /* Setting swapper control on the NIC, for proper reset operation */
7172         if (s2io_set_swapper(sp)) {
7173                 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7174                           dev->name);
7175                 ret = -EAGAIN;
7176                 goto set_swap_failed;
7177         }
7178
7179         /* Verify if the Herc works on the slot its placed into */
7180         if (sp->device_type & XFRAME_II_DEVICE) {
7181                 mode = s2io_verify_pci_mode(sp);
7182                 if (mode < 0) {
7183                         DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7184                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7185                         ret = -EBADSLT;
7186                         goto set_swap_failed;
7187                 }
7188         }
7189
7190         /* Not needed for Herc */
7191         if (sp->device_type & XFRAME_I_DEVICE) {
7192                 /*
7193                  * Fix for all "FFs" MAC address problems observed on
7194                  * Alpha platforms
7195                  */
7196                 fix_mac_address(sp);
7197                 s2io_reset(sp);
7198         }
7199
7200         /*
7201          * MAC address initialization.
7202          * For now only one mac address will be read and used.
7203          */
7204         bar0 = sp->bar0;
7205         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7206             RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
7207         writeq(val64, &bar0->rmac_addr_cmd_mem);
7208         wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7209                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
7210         tmp64 = readq(&bar0->rmac_addr_data0_mem);
7211         mac_down = (u32) tmp64;
7212         mac_up = (u32) (tmp64 >> 32);
7213
7214         sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7215         sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7216         sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7217         sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7218         sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7219         sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7220
7221         /*  Set the factory defined MAC address initially   */
7222         dev->addr_len = ETH_ALEN;
7223         memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7224
7225         /* reset Nic and bring it to known state */
7226         s2io_reset(sp);
7227
7228         /*
7229          * Initialize the tasklet status and link state flags
7230          * and the card state parameter
7231          */
7232         atomic_set(&(sp->card_state), 0);
7233         sp->tasklet_status = 0;
7234         sp->link_state = 0;
7235
7236         /* Initialize spinlocks */
7237         spin_lock_init(&sp->tx_lock);
7238
7239         if (!napi)
7240                 spin_lock_init(&sp->put_lock);
7241         spin_lock_init(&sp->rx_lock);
7242
7243         /*
7244          * SXE-002: Configure link and activity LED to init state
7245          * on driver load.
7246          */
7247         subid = sp->pdev->subsystem_device;
7248         if ((subid & 0xFF) >= 0x07) {
7249                 val64 = readq(&bar0->gpio_control);
7250                 val64 |= 0x0000800000000000ULL;
7251                 writeq(val64, &bar0->gpio_control);
7252                 val64 = 0x0411040400000000ULL;
7253                 writeq(val64, (void __iomem *) bar0 + 0x2700);
7254                 val64 = readq(&bar0->gpio_control);
7255         }
7256
7257         sp->rx_csum = 1;        /* Rx chksum verify enabled by default */
7258
7259         if (register_netdev(dev)) {
7260                 DBG_PRINT(ERR_DBG, "Device registration failed\n");
7261                 ret = -ENODEV;
7262                 goto register_failed;
7263         }
7264         s2io_vpd_read(sp);
7265         DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
7266         DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7267                   sp->product_name, get_xena_rev_id(sp->pdev));
7268         DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7269                   s2io_driver_version);
7270         DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
7271                           "%02x:%02x:%02x:%02x:%02x:%02x", dev->name,
7272                           sp->def_mac_addr[0].mac_addr[0],
7273                           sp->def_mac_addr[0].mac_addr[1],
7274                           sp->def_mac_addr[0].mac_addr[2],
7275                           sp->def_mac_addr[0].mac_addr[3],
7276                           sp->def_mac_addr[0].mac_addr[4],
7277                           sp->def_mac_addr[0].mac_addr[5]);
7278         DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
7279         if (sp->device_type & XFRAME_II_DEVICE) {
7280                 mode = s2io_print_pci_mode(sp);
7281                 if (mode < 0) {
7282                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7283                         ret = -EBADSLT;
7284                         unregister_netdev(dev);
7285                         goto set_swap_failed;
7286                 }
7287         }
7288         switch(sp->rxd_mode) {
7289                 case RXD_MODE_1:
7290                     DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
7291                                                 dev->name);
7292                     break;
7293                 case RXD_MODE_3B:
7294                     DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7295                                                 dev->name);
7296                     break;
7297                 case RXD_MODE_3A:
7298                     DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n",
7299                                                 dev->name);
7300                     break;
7301         }
7302
7303         if (napi)
7304                 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
7305         switch(sp->intr_type) {
7306                 case INTA:
7307                     DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7308                     break;
7309                 case MSI:
7310                     DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name);
7311                     break;
7312                 case MSI_X:
7313                     DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7314                     break;
7315         }
7316         if (sp->lro)
7317                 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
7318                           dev->name);
7319         if (ufo)
7320                 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
7321                                         " enabled\n", dev->name);
7322         /* Initialize device name */
7323         sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7324
7325         /* Initialize bimodal Interrupts */
7326         sp->config.bimodal = bimodal;
7327         if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
7328                 sp->config.bimodal = 0;
7329                 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
7330                         dev->name);
7331         }
7332
7333         /*
7334          * Make Link state as off at this point, when the Link change
7335          * interrupt comes the state will be automatically changed to
7336          * the right state.
7337          */
7338         netif_carrier_off(dev);
7339
7340         return 0;
7341
7342       register_failed:
7343       set_swap_failed:
7344         iounmap(sp->bar1);
7345       bar1_remap_failed:
7346         iounmap(sp->bar0);
7347       bar0_remap_failed:
7348       mem_alloc_failed:
7349         free_shared_mem(sp);
7350         pci_disable_device(pdev);
7351         if (dev_intr_type != MSI_X)
7352                 pci_release_regions(pdev);
7353         else {
7354                 release_mem_region(pci_resource_start(pdev, 0),
7355                         pci_resource_len(pdev, 0));
7356                 release_mem_region(pci_resource_start(pdev, 2),
7357                         pci_resource_len(pdev, 2));
7358         }
7359         pci_set_drvdata(pdev, NULL);
7360         free_netdev(dev);
7361
7362         return ret;
7363 }
7364
7365 /**
7366  * s2io_rem_nic - Free the PCI device
7367  * @pdev: structure containing the PCI related information of the device.
7368  * Description: This function is called by the Pci subsystem to release a
7369  * PCI device and free up all resource held up by the device. This could
7370  * be in response to a Hot plug event or when the driver is to be removed
7371  * from memory.
7372  */
7373
7374 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7375 {
7376         struct net_device *dev =
7377             (struct net_device *) pci_get_drvdata(pdev);
7378         struct s2io_nic *sp;
7379
7380         if (dev == NULL) {
7381                 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
7382                 return;
7383         }
7384
7385         flush_scheduled_work();
7386
7387         sp = dev->priv;
7388         unregister_netdev(dev);
7389
7390         free_shared_mem(sp);
7391         iounmap(sp->bar0);
7392         iounmap(sp->bar1);
7393         if (sp->intr_type != MSI_X)
7394                 pci_release_regions(pdev);
7395         else {
7396                 release_mem_region(pci_resource_start(pdev, 0),
7397                         pci_resource_len(pdev, 0));
7398                 release_mem_region(pci_resource_start(pdev, 2),
7399                         pci_resource_len(pdev, 2));
7400         }
7401         pci_set_drvdata(pdev, NULL);
7402         free_netdev(dev);
7403         pci_disable_device(pdev);
7404 }
7405
/**
 * s2io_starter - Entry point for the driver
 * Description: This function is the entry point for the driver. It
 * registers the driver with the PCI subsystem, which then invokes the
 * probe routine for every matching Xframe adapter in the system.
 */

int __init s2io_starter(void)
{
	return pci_register_driver(&s2io_driver);
}
7416
/**
 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver from the PCI subsystem.
 */

static __exit void s2io_closer(void)
{
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}
7427
/* Hook the entry/exit routines above into the module loader */
module_init(s2io_starter);
module_exit(s2io_closer);
7430
7431 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7432                 struct tcphdr **tcp, struct RxD_t *rxdp)
7433 {
7434         int ip_off;
7435         u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7436
7437         if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7438                 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7439                           __FUNCTION__);
7440                 return -1;
7441         }
7442
7443         /* TODO:
7444          * By default the VLAN field in the MAC is stripped by the card, if this
7445          * feature is turned off in rx_pa_cfg register, then the ip_off field
7446          * has to be shifted by a further 2 bytes
7447          */
7448         switch (l2_type) {
7449                 case 0: /* DIX type */
7450                 case 4: /* DIX type with VLAN */
7451                         ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7452                         break;
7453                 /* LLC, SNAP etc are considered non-mergeable */
7454                 default:
7455                         return -1;
7456         }
7457
7458         *ip = (struct iphdr *)((u8 *)buffer + ip_off);
7459         ip_len = (u8)((*ip)->ihl);
7460         ip_len <<= 2;
7461         *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
7462
7463         return 0;
7464 }
7465
7466 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7467                                   struct tcphdr *tcp)
7468 {
7469         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7470         if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7471            (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7472                 return -1;
7473         return 0;
7474 }
7475
7476 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7477 {
7478         return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
7479 }
7480
7481 static void initiate_new_session(struct lro *lro, u8 *l2h,
7482                      struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7483 {
7484         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7485         lro->l2h = l2h;
7486         lro->iph = ip;
7487         lro->tcph = tcp;
7488         lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
7489         lro->tcp_ack = ntohl(tcp->ack_seq);
7490         lro->sg_num = 1;
7491         lro->total_len = ntohs(ip->tot_len);
7492         lro->frags_len = 0;
7493         /*
7494          * check if we saw TCP timestamp. Other consistency checks have
7495          * already been done.
7496          */
7497         if (tcp->doff == 8) {
7498                 u32 *ptr;
7499                 ptr = (u32 *)(tcp+1);
7500                 lro->saw_ts = 1;
7501                 lro->cur_tsval = *(ptr+1);
7502                 lro->cur_tsecr = *(ptr+2);
7503         }
7504         lro->in_use = 1;
7505 }
7506
7507 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7508 {
7509         struct iphdr *ip = lro->iph;
7510         struct tcphdr *tcp = lro->tcph;
7511         __sum16 nchk;
7512         struct stat_block *statinfo = sp->mac_control.stats_info;
7513         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7514
7515         /* Update L3 header */
7516         ip->tot_len = htons(lro->total_len);
7517         ip->check = 0;
7518         nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
7519         ip->check = nchk;
7520
7521         /* Update L4 header */
7522         tcp->ack_seq = lro->tcp_ack;
7523         tcp->window = lro->window;
7524
7525         /* Update tsecr field if this session has timestamps enabled */
7526         if (lro->saw_ts) {
7527                 u32 *ptr = (u32 *)(tcp + 1);
7528                 *(ptr+2) = lro->cur_tsecr;
7529         }
7530
7531         /* Update counters required for calculation of
7532          * average no. of packets aggregated.
7533          */
7534         statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
7535         statinfo->sw_stat.num_aggregations++;
7536 }
7537
7538 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7539                 struct tcphdr *tcp, u32 l4_pyld)
7540 {
7541         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7542         lro->total_len += l4_pyld;
7543         lro->frags_len += l4_pyld;
7544         lro->tcp_next_seq += l4_pyld;
7545         lro->sg_num++;
7546
7547         /* Update ack seq no. and window ad(from this pkt) in LRO object */
7548         lro->tcp_ack = tcp->ack_seq;
7549         lro->window = tcp->window;
7550
7551         if (lro->saw_ts) {
7552                 u32 *ptr;
7553                 /* Update tsecr and tsval from this packet */
7554                 ptr = (u32 *) (tcp + 1);
7555                 lro->cur_tsval = *(ptr + 1);
7556                 lro->cur_tsecr = *(ptr + 2);
7557         }
7558 }
7559
7560 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7561                                     struct tcphdr *tcp, u32 tcp_pyld_len)
7562 {
7563         u8 *ptr;
7564
7565         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7566
7567         if (!tcp_pyld_len) {
7568                 /* Runt frame or a pure ack */
7569                 return -1;
7570         }
7571
7572         if (ip->ihl != 5) /* IP has options */
7573                 return -1;
7574
7575         /* If we see CE codepoint in IP header, packet is not mergeable */
7576         if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
7577                 return -1;
7578
7579         /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7580         if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
7581                                     tcp->ece || tcp->cwr || !tcp->ack) {
7582                 /*
7583                  * Currently recognize only the ack control word and
7584                  * any other control field being set would result in
7585                  * flushing the LRO session
7586                  */
7587                 return -1;
7588         }
7589
7590         /*
7591          * Allow only one TCP timestamp option. Don't aggregate if
7592          * any other options are detected.
7593          */
7594         if (tcp->doff != 5 && tcp->doff != 8)
7595                 return -1;
7596
7597         if (tcp->doff == 8) {
7598                 ptr = (u8 *)(tcp + 1);
7599                 while (*ptr == TCPOPT_NOP)
7600                         ptr++;
7601                 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
7602                         return -1;
7603
7604                 /* Ensure timestamp value increases monotonically */
7605                 if (l_lro)
7606                         if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
7607                                 return -1;
7608
7609                 /* timestamp echo reply should be non-zero */
7610                 if (*((u32 *)(ptr+6)) == 0)
7611                         return -1;
7612         }
7613
7614         return 0;
7615 }
7616
/*
 * s2io_club_tcp_session - decide how a received TCP segment interacts
 * with this NIC's LRO sessions.
 *
 * On success *tcp points at the segment's TCP header, *tcp_len at its
 * payload length, and *lro at the session involved (where applicable).
 *
 * Return codes:
 *   <0 - frame is not L2/TCP LRO-capable (from check_L2_lro_capable)
 *    0 - all LRO session slots are in use; *lro is set to NULL
 *    1 - segment was aggregated into an existing session
 *    2 - matched session must be flushed (out of order / not mergeable);
 *        its headers have been finalized via update_L3L4_header
 *    3 - a new session was initiated for this segment
 *    4 - segment aggregated and session reached max size; flush it
 *    5 - segment is not L3/L4 aggregatable; send it up as-is
 */
static int
s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
		      struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;

	/* Locate the IP/TCP headers; bail out if L2 framing forbids LRO */
	if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
					 rxdp))) {
		DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
			  ip->saddr, ip->daddr);
	} else {
		return ret;
	}

	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* First, look for an existing session this segment belongs to */
	for (i=0; i<MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &sp->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				/* Sequence gap: flush rather than merge */
				DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
					  "0x%x, actual 0x%x\n", __FUNCTION__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				sp->mac_control.stats_info->
				   sw_stat.outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
			return 5;
		}

		/* Claim the first free session slot for this flow */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &sp->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
			  __FUNCTION__);
		*lro = NULL;
		return ret;
	}

	/* Act on the decision made above */
	switch (ret) {
		case 3:
			initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
			break;
		case 2:
			/* Finalize headers of the session being flushed */
			update_L3L4_header(sp, *lro);
			break;
		case 1:
			aggregate_new_rx(*lro, ip, tcph, *tcp_len);
			if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
				update_L3L4_header(sp, *lro);
				ret = 4; /* Flush the LRO */
			}
			break;
		default:
			DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
				__FUNCTION__);
			break;
	}

	return ret;
}
7712
7713 static void clear_lro_session(struct lro *lro)
7714 {
7715         static u16 lro_struct_size = sizeof(struct lro);
7716
7717         memset(lro, 0, lro_struct_size);
7718 }
7719
7720 static void queue_rx_frame(struct sk_buff *skb)
7721 {
7722         struct net_device *dev = skb->dev;
7723
7724         skb->protocol = eth_type_trans(skb, dev);
7725         if (napi)
7726                 netif_receive_skb(skb);
7727         else
7728                 netif_rx(skb);
7729 }
7730
7731 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
7732                            struct sk_buff *skb,
7733                            u32 tcp_len)
7734 {
7735         struct sk_buff *first = lro->parent;
7736
7737         first->len += tcp_len;
7738         first->data_len = lro->frags_len;
7739         skb_pull(skb, (skb->len - tcp_len));
7740         if (skb_shinfo(first)->frag_list)
7741                 lro->last_frag->next = skb;
7742         else
7743                 skb_shinfo(first)->frag_list = skb;
7744         first->truesize += skb->truesize;
7745         lro->last_frag = skb;
7746         sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
7747         return;
7748 }