Merge branch 'master' into 83xx
[linux-2.6] / drivers / net / s2io.c
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2005 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regaring the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explaination of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *              values are 1, 2 and 3.
36  * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     1(MSI), 2(MSI_X). Default value is '0(INTA)'
41  * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42  *     Possible values '1' for enable '0' for disable. Default is '0'
43  * lro_max_pkts: This parameter defines maximum number of packets can be
44  *     aggregated as a single large packet
45  ************************************************************************/
46
47 #include <linux/module.h>
48 #include <linux/types.h>
49 #include <linux/errno.h>
50 #include <linux/ioport.h>
51 #include <linux/pci.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/kernel.h>
54 #include <linux/netdevice.h>
55 #include <linux/etherdevice.h>
56 #include <linux/skbuff.h>
57 #include <linux/init.h>
58 #include <linux/delay.h>
59 #include <linux/stddef.h>
60 #include <linux/ioctl.h>
61 #include <linux/timex.h>
62 #include <linux/sched.h>
63 #include <linux/ethtool.h>
64 #include <linux/workqueue.h>
65 #include <linux/if_vlan.h>
66 #include <linux/ip.h>
67 #include <linux/tcp.h>
68 #include <net/tcp.h>
69
70 #include <asm/system.h>
71 #include <asm/uaccess.h>
72 #include <asm/io.h>
73 #include <asm/div64.h>
74 #include <asm/irq.h>
75
76 /* local include */
77 #include "s2io.h"
78 #include "s2io-regs.h"
79
#define DRV_VERSION "2.0.16.1"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

/* Indexed by nic->rxd_mode: bytes per RxD and RxDs per block.
 * NOTE(review): presumably the four entries correspond to the 1-, 2-,
 * 3- and 5-buffer descriptor modes -- confirm against s2io.h.
 */
static int rxd_size[4] = {32,48,48,64};
static int rxd_count[4] = {127,85,85,63};
88
89 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
90 {
91         int ret;
92
93         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
94                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
95
96         return ret;
97 }
98
/*
 * Cards with the following subsystem_ids have a link state indication
 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
 * The macro below identifies these cards given the subsystem_id.
 * Evaluates to 1 only for an Xframe-I device whose subsystem id falls
 * in either faulty range.  Arguments and the full expansion are
 * parenthesized so the macro is safe inside larger expressions.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)		\
	(((dev_type) == XFRAME_I_DEVICE) ?				\
		((((subid) >= 0x600B) && ((subid) <= 0x600D)) ||	\
		 (((subid) >= 0x640B) && ((subid) <= 0x640D)) ? 1 : 0) : 0)
108
/* Link is up when neither the remote- nor local-fault bit is set in the
 * adapter status register value.
 */
#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
                                      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically claim the tasklet; non-zero means it was already in use. */
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/* Rx buffer depletion levels returned by rx_buffer_level(). */
#define PANIC   1
#define LOW     2
114 static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
115 {
116         struct mac_info *mac_control;
117
118         mac_control = &sp->mac_control;
119         if (rxb_size <= rxd_count[sp->rxd_mode])
120                 return PANIC;
121         else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
122                 return  LOW;
123         return 0;
124 }
125
/* Ethtool related variables and Macros. */
/* Self-test names reported through ethtool.
 * NOTE(review): presumably the order must match the order in which the
 * tests are executed by the ethtool self-test handler -- verify there.
 */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
        "Register test\t(offline)",
        "Eeprom test\t(offline)",
        "Link test\t(online)",
        "RLDRAM test\t(offline)",
        "BIST Test\t(offline)"
};
134
135 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
136         {"tmac_frms"},
137         {"tmac_data_octets"},
138         {"tmac_drop_frms"},
139         {"tmac_mcst_frms"},
140         {"tmac_bcst_frms"},
141         {"tmac_pause_ctrl_frms"},
142         {"tmac_ttl_octets"},
143         {"tmac_ucst_frms"},
144         {"tmac_nucst_frms"},
145         {"tmac_any_err_frms"},
146         {"tmac_ttl_less_fb_octets"},
147         {"tmac_vld_ip_octets"},
148         {"tmac_vld_ip"},
149         {"tmac_drop_ip"},
150         {"tmac_icmp"},
151         {"tmac_rst_tcp"},
152         {"tmac_tcp"},
153         {"tmac_udp"},
154         {"rmac_vld_frms"},
155         {"rmac_data_octets"},
156         {"rmac_fcs_err_frms"},
157         {"rmac_drop_frms"},
158         {"rmac_vld_mcst_frms"},
159         {"rmac_vld_bcst_frms"},
160         {"rmac_in_rng_len_err_frms"},
161         {"rmac_out_rng_len_err_frms"},
162         {"rmac_long_frms"},
163         {"rmac_pause_ctrl_frms"},
164         {"rmac_unsup_ctrl_frms"},
165         {"rmac_ttl_octets"},
166         {"rmac_accepted_ucst_frms"},
167         {"rmac_accepted_nucst_frms"},
168         {"rmac_discarded_frms"},
169         {"rmac_drop_events"},
170         {"rmac_ttl_less_fb_octets"},
171         {"rmac_ttl_frms"},
172         {"rmac_usized_frms"},
173         {"rmac_osized_frms"},
174         {"rmac_frag_frms"},
175         {"rmac_jabber_frms"},
176         {"rmac_ttl_64_frms"},
177         {"rmac_ttl_65_127_frms"},
178         {"rmac_ttl_128_255_frms"},
179         {"rmac_ttl_256_511_frms"},
180         {"rmac_ttl_512_1023_frms"},
181         {"rmac_ttl_1024_1518_frms"},
182         {"rmac_ip"},
183         {"rmac_ip_octets"},
184         {"rmac_hdr_err_ip"},
185         {"rmac_drop_ip"},
186         {"rmac_icmp"},
187         {"rmac_tcp"},
188         {"rmac_udp"},
189         {"rmac_err_drp_udp"},
190         {"rmac_xgmii_err_sym"},
191         {"rmac_frms_q0"},
192         {"rmac_frms_q1"},
193         {"rmac_frms_q2"},
194         {"rmac_frms_q3"},
195         {"rmac_frms_q4"},
196         {"rmac_frms_q5"},
197         {"rmac_frms_q6"},
198         {"rmac_frms_q7"},
199         {"rmac_full_q0"},
200         {"rmac_full_q1"},
201         {"rmac_full_q2"},
202         {"rmac_full_q3"},
203         {"rmac_full_q4"},
204         {"rmac_full_q5"},
205         {"rmac_full_q6"},
206         {"rmac_full_q7"},
207         {"rmac_pause_cnt"},
208         {"rmac_xgmii_data_err_cnt"},
209         {"rmac_xgmii_ctrl_err_cnt"},
210         {"rmac_accepted_ip"},
211         {"rmac_err_tcp"},
212         {"rd_req_cnt"},
213         {"new_rd_req_cnt"},
214         {"new_rd_req_rtry_cnt"},
215         {"rd_rtry_cnt"},
216         {"wr_rtry_rd_ack_cnt"},
217         {"wr_req_cnt"},
218         {"new_wr_req_cnt"},
219         {"new_wr_req_rtry_cnt"},
220         {"wr_rtry_cnt"},
221         {"wr_disc_cnt"},
222         {"rd_rtry_wr_ack_cnt"},
223         {"txp_wr_cnt"},
224         {"txd_rd_cnt"},
225         {"txd_wr_cnt"},
226         {"rxd_rd_cnt"},
227         {"rxd_wr_cnt"},
228         {"txf_rd_cnt"},
229         {"rxf_wr_cnt"},
230         {"rmac_ttl_1519_4095_frms"},
231         {"rmac_ttl_4096_8191_frms"},
232         {"rmac_ttl_8192_max_frms"},
233         {"rmac_ttl_gt_max_frms"},
234         {"rmac_osized_alt_frms"},
235         {"rmac_jabber_alt_frms"},
236         {"rmac_gt_max_alt_frms"},
237         {"rmac_vlan_frms"},
238         {"rmac_len_discard"},
239         {"rmac_fcs_discard"},
240         {"rmac_pf_discard"},
241         {"rmac_da_discard"},
242         {"rmac_red_discard"},
243         {"rmac_rts_discard"},
244         {"rmac_ingm_full_discard"},
245         {"link_fault_cnt"},
246         {"\n DRIVER STATISTICS"},
247         {"single_bit_ecc_errs"},
248         {"double_bit_ecc_errs"},
249         {"parity_err_cnt"},
250         {"serious_err_cnt"},
251         {"soft_reset_cnt"},
252         {"fifo_full_cnt"},
253         {"ring_full_cnt"},
254         ("alarm_transceiver_temp_high"),
255         ("alarm_transceiver_temp_low"),
256         ("alarm_laser_bias_current_high"),
257         ("alarm_laser_bias_current_low"),
258         ("alarm_laser_output_power_high"),
259         ("alarm_laser_output_power_low"),
260         ("warn_transceiver_temp_high"),
261         ("warn_transceiver_temp_low"),
262         ("warn_laser_bias_current_high"),
263         ("warn_laser_bias_current_low"),
264         ("warn_laser_output_power_high"),
265         ("warn_laser_output_power_low"),
266         ("lro_aggregated_pkts"),
267         ("lro_flush_both_count"),
268         ("lro_out_of_sequence_pkts"),
269         ("lro_flush_due_to_max_pkts"),
270         ("lro_avg_aggr_pkts"),
271 };
272
/* Entry counts and total byte lengths of the ethtool string tables
 * above.  Macro bodies are fully parenthesized so expressions such as
 * `x / S2IO_STAT_LEN` or `S2IO_STAT_STRINGS_LEN + n` parse correctly.
 */
#define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN	(sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
278
/* Initialize @timer to invoke @handle with @arg and arm it to expire
 * @exp jiffies from now.  Wrapped in do { } while (0) so the multi-
 * statement macro behaves as a single statement in unbraced if/else
 * bodies; callers still terminate it with a semicolon as before.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)		\
	do {							\
		init_timer(&timer);				\
		timer.function = handle;			\
		timer.data = (unsigned long) arg;		\
		mod_timer(&timer, (jiffies + exp));		\
	} while (0)
284
285 /* Add the vlan */
286 static void s2io_vlan_rx_register(struct net_device *dev,
287                                         struct vlan_group *grp)
288 {
289         struct s2io_nic *nic = dev->priv;
290         unsigned long flags;
291
292         spin_lock_irqsave(&nic->tx_lock, flags);
293         nic->vlgrp = grp;
294         spin_unlock_irqrestore(&nic->tx_lock, flags);
295 }
296
297 /* Unregister the vlan */
298 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
299 {
300         struct s2io_nic *nic = dev->priv;
301         unsigned long flags;
302
303         spin_lock_irqsave(&nic->tx_lock, flags);
304         if (nic->vlgrp)
305                 nic->vlgrp->vlan_devices[vid] = NULL;
306         spin_unlock_irqrestore(&nic->tx_lock, flags);
307 }
308
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

/* Terminates the register-value tables below. */
#define END_SIGN        0x0
/* DTX configuration sequence for Xframe-II (Herc) adapters: alternating
 * address-select and data-write values consumed pairwise until END_SIGN.
 */
static const u64 herc_act_dtx_cfg[] = {
        /* Set address */
        0x8000051536750000ULL, 0x80000515367500E0ULL,
        /* Write data */
        0x8000051536750004ULL, 0x80000515367500E4ULL,
        /* Set address */
        0x80010515003F0000ULL, 0x80010515003F00E0ULL,
        /* Write data */
        0x80010515003F0004ULL, 0x80010515003F00E4ULL,
        /* Set address */
        0x801205150D440000ULL, 0x801205150D4400E0ULL,
        /* Write data */
        0x801205150D440004ULL, 0x801205150D4400E4ULL,
        /* Set address */
        0x80020515F2100000ULL, 0x80020515F21000E0ULL,
        /* Write data */
        0x80020515F2100004ULL, 0x80020515F21000E4ULL,
        /* Done */
        END_SIGN
};

/* DTX configuration sequence for Xframe-I (Xena) adapters. */
static const u64 xena_dtx_cfg[] = {
        /* Set address */
        0x8000051500000000ULL, 0x80000515000000E0ULL,
        /* Write data */
        0x80000515D9350004ULL, 0x80000515D93500E4ULL,
        /* Set address */
        0x8001051500000000ULL, 0x80010515000000E0ULL,
        /* Write data */
        0x80010515001E0004ULL, 0x80010515001E00E4ULL,
        /* Set address */
        0x8002051500000000ULL, 0x80020515000000E0ULL,
        /* Write data */
        0x80020515F2100004ULL, 0x80020515F21000E4ULL,
        END_SIGN
};

/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.
 */
static const u64 fix_mac[] = {
        0x0060000000000000ULL, 0x0060600000000000ULL,
        0x0040600000000000ULL, 0x0000600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0060600000000000ULL,
        0x0020600000000000ULL, 0x0000600000000000ULL,
        0x0040600000000000ULL, 0x0060600000000000ULL,
        END_SIGN
};
373
MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, 1);
S2IO_PARM_INT(rx_ring_num, 1);


S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(bimodal, 0);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 0);
/* Large receive offload feature */
S2IO_PARM_INT(lro, 0);
/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

/* NAPI polling on by default; UDP fragmentation offload off by default. */
S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);

/* Per-FIFO Tx descriptor counts, per-ring Rx block counts and per-ring
 * RTS frame lengths; overridable as module parameter arrays below.
 */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
419
/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 * Matches the Xframe-I (WIN/UNI) and Xframe-II Herc (WIN/UNI) device
 * ids with any subsystem vendor/device.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
         PCI_ANY_ID, PCI_ANY_ID},
        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
         PCI_ANY_ID, PCI_ANY_ID},
        {0,}    /* terminating entry */
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);
437
/* PCI driver definition: binds s2io_init_nic/s2io_rem_nic to the ids
 * listed in s2io_tbl.
 */
static struct pci_driver s2io_driver = {
      .name = "S2IO",
      .id_table = s2io_tbl,
      .probe = s2io_init_nic,
      .remove = __devexit_p(s2io_rem_nic),
};
444
/* A simplifier macro used both by init and free shared_mem Fns().
 * Ceiling division: number of whole pages needed to hold @len items at
 * @per_each items per page.  Arguments are parenthesized so expressions
 * such as TXD_MEM_PAGE_CNT(a << 1, n) parse correctly; note @per_each
 * is evaluated twice, so avoid side effects in it.
 */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
447
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 * Return: SUCCESS, or -EINVAL / -ENOMEM / FAILURE on error.  On
 * failure, partially-allocated memory is left in place for the caller
 * to release via free_shared_mem().
 */

static int init_shared_mem(struct s2io_nic *nic)
{
        u32 size;
        void *tmp_v_addr, *tmp_v_addr_next;
        dma_addr_t tmp_p_addr, tmp_p_addr_next;
        struct RxD_block *pre_rxd_blk = NULL;
        int i, j, blk_cnt;
        int lst_size, lst_per_page;
        struct net_device *dev = nic->dev;
        unsigned long tmp;
        struct buffAdd *ba;

        struct mac_info *mac_control;
        struct config_param *config;

        mac_control = &nic->mac_control;
        config = &nic->config;


        /* Allocation and initialization of TXDLs in FIOFs */
        /* Sum the requested TxDs across all FIFOs and reject
         * configurations exceeding the hardware limit.
         */
        size = 0;
        for (i = 0; i < config->tx_fifo_num; i++) {
                size += config->tx_cfg[i].fifo_len;
        }
        if (size > MAX_AVAILABLE_TXDS) {
                DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
                DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
                return -EINVAL;
        }

        /* One TxDL is max_txds descriptors; several TxDLs are packed
         * per DMA-coherent page below.
         */
        lst_size = (sizeof(struct TxD) * config->max_txds);
        lst_per_page = PAGE_SIZE / lst_size;

        /* Per-FIFO bookkeeping array, one entry per Tx slot. */
        for (i = 0; i < config->tx_fifo_num; i++) {
                int fifo_len = config->tx_cfg[i].fifo_len;
                int list_holder_size = fifo_len * sizeof(struct list_info_hold);
                mac_control->fifos[i].list_info = kmalloc(list_holder_size,
                                                          GFP_KERNEL);
                if (!mac_control->fifos[i].list_info) {
                        DBG_PRINT(ERR_DBG,
                                  "Malloc failed for list_info\n");
                        return -ENOMEM;
                }
                memset(mac_control->fifos[i].list_info, 0, list_holder_size);
        }
        for (i = 0; i < config->tx_fifo_num; i++) {
                int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
                                                lst_per_page);
                mac_control->fifos[i].tx_curr_put_info.offset = 0;
                mac_control->fifos[i].tx_curr_put_info.fifo_len =
                    config->tx_cfg[i].fifo_len - 1;
                mac_control->fifos[i].tx_curr_get_info.offset = 0;
                mac_control->fifos[i].tx_curr_get_info.fifo_len =
                    config->tx_cfg[i].fifo_len - 1;
                mac_control->fifos[i].fifo_no = i;
                mac_control->fifos[i].nic = nic;
                mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;

                for (j = 0; j < page_num; j++) {
                        int k = 0;
                        dma_addr_t tmp_p;
                        void *tmp_v;
                        tmp_v = pci_alloc_consistent(nic->pdev,
                                                     PAGE_SIZE, &tmp_p);
                        if (!tmp_v) {
                                DBG_PRINT(ERR_DBG,
                                          "pci_alloc_consistent ");
                                DBG_PRINT(ERR_DBG, "failed for TxDL\n");
                                return -ENOMEM;
                        }
                        /* If we got a zero DMA address(can happen on
                         * certain platforms like PPC), reallocate.
                         * Store virtual address of page we don't want,
                         * to be freed later.
                         */
                        if (!tmp_p) {
                                mac_control->zerodma_virt_addr = tmp_v;
                                DBG_PRINT(INIT_DBG,
                                "%s: Zero DMA address for TxDL. ", dev->name);
                                DBG_PRINT(INIT_DBG,
                                "Virtual address %p\n", tmp_v);
                                tmp_v = pci_alloc_consistent(nic->pdev,
                                                     PAGE_SIZE, &tmp_p);
                                if (!tmp_v) {
                                        DBG_PRINT(ERR_DBG,
                                          "pci_alloc_consistent ");
                                        DBG_PRINT(ERR_DBG, "failed for TxDL\n");
                                        return -ENOMEM;
                                }
                        }
                        /* Carve the page into per-slot TxDL virtual and
                         * DMA addresses.
                         */
                        while (k < lst_per_page) {
                                int l = (j * lst_per_page) + k;
                                if (l == config->tx_cfg[i].fifo_len)
                                        break;
                                mac_control->fifos[i].list_info[l].list_virt_addr =
                                    tmp_v + (k * lst_size);
                                mac_control->fifos[i].list_info[l].list_phy_addr =
                                    tmp_p + (k * lst_size);
                                k++;
                        }
                }
        }

        /* Scratch area used by the UFO path: one u64 per TxD (size
         * still holds the total TxD count from the loop above).
         */
        nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
        if (!nic->ufo_in_band_v)
                return -ENOMEM;

        /* Allocation and initialization of RXDs in Rings */
        size = 0;
        for (i = 0; i < config->rx_ring_num; i++) {
                /* Each ring's RxD count must be a whole number of Rx
                 * blocks (block = rxd_count + 1 descriptors, the +1
                 * being the link descriptor to the next block).
                 */
                if (config->rx_cfg[i].num_rxd %
                    (rxd_count[nic->rxd_mode] + 1)) {
                        DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
                        DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
                                  i);
                        DBG_PRINT(ERR_DBG, "RxDs per Block");
                        return FAILURE;
                }
                size += config->rx_cfg[i].num_rxd;
                mac_control->rings[i].block_count =
                        config->rx_cfg[i].num_rxd /
                        (rxd_count[nic->rxd_mode] + 1 );
                /* Usable packet slots exclude the one link RxD consumed
                 * per block.
                 */
                mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
                        mac_control->rings[i].block_count;
        }
        if (nic->rxd_mode == RXD_MODE_1)
                size = (size * (sizeof(struct RxD1)));
        else
                size = (size * (sizeof(struct RxD3)));

        for (i = 0; i < config->rx_ring_num; i++) {
                mac_control->rings[i].rx_curr_get_info.block_index = 0;
                mac_control->rings[i].rx_curr_get_info.offset = 0;
                mac_control->rings[i].rx_curr_get_info.ring_len =
                    config->rx_cfg[i].num_rxd - 1;
                mac_control->rings[i].rx_curr_put_info.block_index = 0;
                mac_control->rings[i].rx_curr_put_info.offset = 0;
                mac_control->rings[i].rx_curr_put_info.ring_len =
                    config->rx_cfg[i].num_rxd - 1;
                mac_control->rings[i].nic = nic;
                mac_control->rings[i].ring_no = i;

                blk_cnt = config->rx_cfg[i].num_rxd /
                                (rxd_count[nic->rxd_mode] + 1);
                /*  Allocating all the Rx blocks */
                for (j = 0; j < blk_cnt; j++) {
                        struct rx_block_info *rx_blocks;
                        int l;

                        rx_blocks = &mac_control->rings[i].rx_blocks[j];
                        size = SIZE_OF_BLOCK; //size is always page size
                        tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
                                                          &tmp_p_addr);
                        if (tmp_v_addr == NULL) {
                                /*
                                 * In case of failure, free_shared_mem()
                                 * is called, which should free any
                                 * memory that was alloced till the
                                 * failure happened.
                                 */
                                rx_blocks->block_virt_addr = tmp_v_addr;
                                return -ENOMEM;
                        }
                        memset(tmp_v_addr, 0, size);
                        rx_blocks->block_virt_addr = tmp_v_addr;
                        rx_blocks->block_dma_addr = tmp_p_addr;
                        rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
                                                  rxd_count[nic->rxd_mode],
                                                  GFP_KERNEL);
                        if (!rx_blocks->rxds)
                                return -ENOMEM;
                        /* Record the virtual and DMA address of every
                         * RxD within the block.
                         */
                        for (l=0; l<rxd_count[nic->rxd_mode];l++) {
                                rx_blocks->rxds[l].virt_addr =
                                        rx_blocks->block_virt_addr +
                                        (rxd_size[nic->rxd_mode] * l);
                                rx_blocks->rxds[l].dma_addr =
                                        rx_blocks->block_dma_addr +
                                        (rxd_size[nic->rxd_mode] * l);
                        }
                }
                /* Interlinking all Rx Blocks */
                /* The (j + 1) % blk_cnt wraps the last block back to
                 * the first, forming a circular descriptor chain.
                 */
                for (j = 0; j < blk_cnt; j++) {
                        tmp_v_addr =
                                mac_control->rings[i].rx_blocks[j].block_virt_addr;
                        tmp_v_addr_next =
                                mac_control->rings[i].rx_blocks[(j + 1) %
                                              blk_cnt].block_virt_addr;
                        tmp_p_addr =
                                mac_control->rings[i].rx_blocks[j].block_dma_addr;
                        tmp_p_addr_next =
                                mac_control->rings[i].rx_blocks[(j + 1) %
                                              blk_cnt].block_dma_addr;

                        pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
                        pre_rxd_blk->reserved_2_pNext_RxD_block =
                            (unsigned long) tmp_v_addr_next;
                        pre_rxd_blk->pNext_RxD_Blk_physical =
                            (u64) tmp_p_addr_next;
                }
        }
        if (nic->rxd_mode >= RXD_MODE_3A) {
                /*
                 * Allocation of Storages for buffer addresses in 2BUFF mode
                 * and the buffers as well.
                 */
                for (i = 0; i < config->rx_ring_num; i++) {
                        blk_cnt = config->rx_cfg[i].num_rxd /
                           (rxd_count[nic->rxd_mode]+ 1);
                        mac_control->rings[i].ba =
                                kmalloc((sizeof(struct buffAdd *) * blk_cnt),
                                     GFP_KERNEL);
                        if (!mac_control->rings[i].ba)
                                return -ENOMEM;
                        for (j = 0; j < blk_cnt; j++) {
                                int k = 0;
                                mac_control->rings[i].ba[j] =
                                        kmalloc((sizeof(struct buffAdd) *
                                                (rxd_count[nic->rxd_mode] + 1)),
                                                GFP_KERNEL);
                                if (!mac_control->rings[i].ba[j])
                                        return -ENOMEM;
                                while (k != rxd_count[nic->rxd_mode]) {
                                        ba = &mac_control->rings[i].ba[j][k];

                                        /* Buffers are over-allocated by
                                         * ALIGN_SIZE; the aligned pointer
                                         * (ba_0/ba_1) is stored separately
                                         * from the original (ba_*_org) so
                                         * the latter can be freed.
                                         */
                                        ba->ba_0_org = (void *) kmalloc
                                            (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
                                        if (!ba->ba_0_org)
                                                return -ENOMEM;
                                        tmp = (unsigned long)ba->ba_0_org;
                                        tmp += ALIGN_SIZE;
                                        tmp &= ~((unsigned long) ALIGN_SIZE);
                                        ba->ba_0 = (void *) tmp;

                                        ba->ba_1_org = (void *) kmalloc
                                            (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
                                        if (!ba->ba_1_org)
                                                return -ENOMEM;
                                        tmp = (unsigned long) ba->ba_1_org;
                                        tmp += ALIGN_SIZE;
                                        tmp &= ~((unsigned long) ALIGN_SIZE);
                                        ba->ba_1 = (void *) tmp;
                                        k++;
                                }
                        }
                }
        }

        /* Allocation and initialization of Statistics block */
        size = sizeof(struct stat_block);
        mac_control->stats_mem = pci_alloc_consistent
            (nic->pdev, size, &mac_control->stats_mem_phy);

        if (!mac_control->stats_mem) {
                /*
                 * In case of failure, free_shared_mem() is called, which
                 * should free any memory that was alloced till the
                 * failure happened.
                 */
                return -ENOMEM;
        }
        mac_control->stats_mem_sz = size;

        tmp_v_addr = mac_control->stats_mem;
        mac_control->stats_info = (struct stat_block *) tmp_v_addr;
        memset(tmp_v_addr, 0, size);
        /* NOTE(review): tmp_p_addr still holds the DMA address of the
         * last Rx block allocated above, not of the stats block; the
         * "Ring Mem PHY" label only describes the last ring -- confirm
         * this debug print is intentional.
         */
        DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
                  (unsigned long long) tmp_p_addr);

        return SUCCESS;
}
726
727 /**
728  * free_shared_mem - Free the allocated Memory
729  * @nic:  Device private variable.
730  * Description: This function is to free all memory locations allocated by
731  * the init_shared_mem() function and return it to the kernel.
732  */
733
734 static void free_shared_mem(struct s2io_nic *nic)
735 {
736         int i, j, blk_cnt, size;
737         void *tmp_v_addr;
738         dma_addr_t tmp_p_addr;
739         struct mac_info *mac_control;
740         struct config_param *config;
741         int lst_size, lst_per_page;
742         struct net_device *dev = nic->dev;
743
744         if (!nic)
745                 return;
746
747         mac_control = &nic->mac_control;
748         config = &nic->config;
749
750         lst_size = (sizeof(struct TxD) * config->max_txds);
751         lst_per_page = PAGE_SIZE / lst_size;
752
753         for (i = 0; i < config->tx_fifo_num; i++) {
754                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
755                                                 lst_per_page);
756                 for (j = 0; j < page_num; j++) {
757                         int mem_blks = (j * lst_per_page);
758                         if (!mac_control->fifos[i].list_info)
759                                 return;
760                         if (!mac_control->fifos[i].list_info[mem_blks].
761                                  list_virt_addr)
762                                 break;
763                         pci_free_consistent(nic->pdev, PAGE_SIZE,
764                                             mac_control->fifos[i].
765                                             list_info[mem_blks].
766                                             list_virt_addr,
767                                             mac_control->fifos[i].
768                                             list_info[mem_blks].
769                                             list_phy_addr);
770                 }
771                 /* If we got a zero DMA address during allocation,
772                  * free the page now
773                  */
774                 if (mac_control->zerodma_virt_addr) {
775                         pci_free_consistent(nic->pdev, PAGE_SIZE,
776                                             mac_control->zerodma_virt_addr,
777                                             (dma_addr_t)0);
778                         DBG_PRINT(INIT_DBG,
779                                 "%s: Freeing TxDL with zero DMA addr. ",
780                                 dev->name);
781                         DBG_PRINT(INIT_DBG, "Virtual address %p\n",
782                                 mac_control->zerodma_virt_addr);
783                 }
784                 kfree(mac_control->fifos[i].list_info);
785         }
786
787         size = SIZE_OF_BLOCK;
788         for (i = 0; i < config->rx_ring_num; i++) {
789                 blk_cnt = mac_control->rings[i].block_count;
790                 for (j = 0; j < blk_cnt; j++) {
791                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
792                                 block_virt_addr;
793                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
794                                 block_dma_addr;
795                         if (tmp_v_addr == NULL)
796                                 break;
797                         pci_free_consistent(nic->pdev, size,
798                                             tmp_v_addr, tmp_p_addr);
799                         kfree(mac_control->rings[i].rx_blocks[j].rxds);
800                 }
801         }
802
803         if (nic->rxd_mode >= RXD_MODE_3A) {
804                 /* Freeing buffer storage addresses in 2BUFF mode. */
805                 for (i = 0; i < config->rx_ring_num; i++) {
806                         blk_cnt = config->rx_cfg[i].num_rxd /
807                             (rxd_count[nic->rxd_mode] + 1);
808                         for (j = 0; j < blk_cnt; j++) {
809                                 int k = 0;
810                                 if (!mac_control->rings[i].ba[j])
811                                         continue;
812                                 while (k != rxd_count[nic->rxd_mode]) {
813                                         struct buffAdd *ba =
814                                                 &mac_control->rings[i].ba[j][k];
815                                         kfree(ba->ba_0_org);
816                                         kfree(ba->ba_1_org);
817                                         k++;
818                                 }
819                                 kfree(mac_control->rings[i].ba[j]);
820                         }
821                         kfree(mac_control->rings[i].ba);
822                 }
823         }
824
825         if (mac_control->stats_mem) {
826                 pci_free_consistent(nic->pdev,
827                                     mac_control->stats_mem_sz,
828                                     mac_control->stats_mem,
829                                     mac_control->stats_mem_phy);
830         }
831         if (nic->ufo_in_band_v)
832                 kfree(nic->ufo_in_band_v);
833 }
834
835 /**
836  * s2io_verify_pci_mode -
837  */
838
839 static int s2io_verify_pci_mode(struct s2io_nic *nic)
840 {
841         struct XENA_dev_config __iomem *bar0 = nic->bar0;
842         register u64 val64 = 0;
843         int     mode;
844
845         val64 = readq(&bar0->pci_mode);
846         mode = (u8)GET_PCI_MODE(val64);
847
848         if ( val64 & PCI_MODE_UNKNOWN_MODE)
849                 return -1;      /* Unknown PCI mode */
850         return mode;
851 }
852
853 #define NEC_VENID   0x1033
854 #define NEC_DEVID   0x0125
855 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
856 {
857         struct pci_dev *tdev = NULL;
858         while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
859                 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
860                         if (tdev->bus == s2io_pdev->bus->parent)
861                                 pci_dev_put(tdev);
862                                 return 1;
863                 }
864         }
865         return 0;
866 }
867
868 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
869 /**
870  * s2io_print_pci_mode -
871  */
872 static int s2io_print_pci_mode(struct s2io_nic *nic)
873 {
874         struct XENA_dev_config __iomem *bar0 = nic->bar0;
875         register u64 val64 = 0;
876         int     mode;
877         struct config_param *config = &nic->config;
878
879         val64 = readq(&bar0->pci_mode);
880         mode = (u8)GET_PCI_MODE(val64);
881
882         if ( val64 & PCI_MODE_UNKNOWN_MODE)
883                 return -1;      /* Unknown PCI mode */
884
885         config->bus_speed = bus_speed[mode];
886
887         if (s2io_on_nec_bridge(nic->pdev)) {
888                 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
889                                                         nic->dev->name);
890                 return mode;
891         }
892
893         if (val64 & PCI_MODE_32_BITS) {
894                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
895         } else {
896                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
897         }
898
899         switch(mode) {
900                 case PCI_MODE_PCI_33:
901                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
902                         break;
903                 case PCI_MODE_PCI_66:
904                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
905                         break;
906                 case PCI_MODE_PCIX_M1_66:
907                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
908                         break;
909                 case PCI_MODE_PCIX_M1_100:
910                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
911                         break;
912                 case PCI_MODE_PCIX_M1_133:
913                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
914                         break;
915                 case PCI_MODE_PCIX_M2_66:
916                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
917                         break;
918                 case PCI_MODE_PCIX_M2_100:
919                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
920                         break;
921                 case PCI_MODE_PCIX_M2_133:
922                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
923                         break;
924                 default:
925                         return -1;      /* Unsupported bus speed */
926         }
927
928         return mode;
929 }
930
931 /**
932  *  init_nic - Initialization of hardware
933  *  @nic: device peivate variable
934  *  Description: The function sequentially configures every block
935  *  of the H/W from their reset values.
936  *  Return Value:  SUCCESS on success and
937  *  '-1' on failure (endian settings incorrect).
938  */
939
940 static int init_nic(struct s2io_nic *nic)
941 {
942         struct XENA_dev_config __iomem *bar0 = nic->bar0;
943         struct net_device *dev = nic->dev;
944         register u64 val64 = 0;
945         void __iomem *add;
946         u32 time;
947         int i, j;
948         struct mac_info *mac_control;
949         struct config_param *config;
950         int dtx_cnt = 0;
951         unsigned long long mem_share;
952         int mem_size;
953
954         mac_control = &nic->mac_control;
955         config = &nic->config;
956
957         /* to set the swapper controle on the card */
958         if(s2io_set_swapper(nic)) {
959                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
960                 return -1;
961         }
962
963         /*
964          * Herc requires EOI to be removed from reset before XGXS, so..
965          */
966         if (nic->device_type & XFRAME_II_DEVICE) {
967                 val64 = 0xA500000000ULL;
968                 writeq(val64, &bar0->sw_reset);
969                 msleep(500);
970                 val64 = readq(&bar0->sw_reset);
971         }
972
973         /* Remove XGXS from reset state */
974         val64 = 0;
975         writeq(val64, &bar0->sw_reset);
976         msleep(500);
977         val64 = readq(&bar0->sw_reset);
978
979         /*  Enable Receiving broadcasts */
980         add = &bar0->mac_cfg;
981         val64 = readq(&bar0->mac_cfg);
982         val64 |= MAC_RMAC_BCAST_ENABLE;
983         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
984         writel((u32) val64, add);
985         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
986         writel((u32) (val64 >> 32), (add + 4));
987
988         /* Read registers in all blocks */
989         val64 = readq(&bar0->mac_int_mask);
990         val64 = readq(&bar0->mc_int_mask);
991         val64 = readq(&bar0->xgxs_int_mask);
992
993         /*  Set MTU */
994         val64 = dev->mtu;
995         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
996
997         if (nic->device_type & XFRAME_II_DEVICE) {
998                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
999                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1000                                           &bar0->dtx_control, UF);
1001                         if (dtx_cnt & 0x1)
1002                                 msleep(1); /* Necessary!! */
1003                         dtx_cnt++;
1004                 }
1005         } else {
1006                 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1007                         SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1008                                           &bar0->dtx_control, UF);
1009                         val64 = readq(&bar0->dtx_control);
1010                         dtx_cnt++;
1011                 }
1012         }
1013
1014         /*  Tx DMA Initialization */
1015         val64 = 0;
1016         writeq(val64, &bar0->tx_fifo_partition_0);
1017         writeq(val64, &bar0->tx_fifo_partition_1);
1018         writeq(val64, &bar0->tx_fifo_partition_2);
1019         writeq(val64, &bar0->tx_fifo_partition_3);
1020
1021
1022         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1023                 val64 |=
1024                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1025                          13) | vBIT(config->tx_cfg[i].fifo_priority,
1026                                     ((i * 32) + 5), 3);
1027
1028                 if (i == (config->tx_fifo_num - 1)) {
1029                         if (i % 2 == 0)
1030                                 i++;
1031                 }
1032
1033                 switch (i) {
1034                 case 1:
1035                         writeq(val64, &bar0->tx_fifo_partition_0);
1036                         val64 = 0;
1037                         break;
1038                 case 3:
1039                         writeq(val64, &bar0->tx_fifo_partition_1);
1040                         val64 = 0;
1041                         break;
1042                 case 5:
1043                         writeq(val64, &bar0->tx_fifo_partition_2);
1044                         val64 = 0;
1045                         break;
1046                 case 7:
1047                         writeq(val64, &bar0->tx_fifo_partition_3);
1048                         break;
1049                 }
1050         }
1051
1052         /*
1053          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1054          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1055          */
1056         if ((nic->device_type == XFRAME_I_DEVICE) &&
1057                 (get_xena_rev_id(nic->pdev) < 4))
1058                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1059
1060         val64 = readq(&bar0->tx_fifo_partition_0);
1061         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1062                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1063
1064         /*
1065          * Initialization of Tx_PA_CONFIG register to ignore packet
1066          * integrity checking.
1067          */
1068         val64 = readq(&bar0->tx_pa_cfg);
1069         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1070             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1071         writeq(val64, &bar0->tx_pa_cfg);
1072
1073         /* Rx DMA intialization. */
1074         val64 = 0;
1075         for (i = 0; i < config->rx_ring_num; i++) {
1076                 val64 |=
1077                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1078                          3);
1079         }
1080         writeq(val64, &bar0->rx_queue_priority);
1081
1082         /*
1083          * Allocating equal share of memory to all the
1084          * configured Rings.
1085          */
1086         val64 = 0;
1087         if (nic->device_type & XFRAME_II_DEVICE)
1088                 mem_size = 32;
1089         else
1090                 mem_size = 64;
1091
1092         for (i = 0; i < config->rx_ring_num; i++) {
1093                 switch (i) {
1094                 case 0:
1095                         mem_share = (mem_size / config->rx_ring_num +
1096                                      mem_size % config->rx_ring_num);
1097                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1098                         continue;
1099                 case 1:
1100                         mem_share = (mem_size / config->rx_ring_num);
1101                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1102                         continue;
1103                 case 2:
1104                         mem_share = (mem_size / config->rx_ring_num);
1105                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1106                         continue;
1107                 case 3:
1108                         mem_share = (mem_size / config->rx_ring_num);
1109                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1110                         continue;
1111                 case 4:
1112                         mem_share = (mem_size / config->rx_ring_num);
1113                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1114                         continue;
1115                 case 5:
1116                         mem_share = (mem_size / config->rx_ring_num);
1117                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1118                         continue;
1119                 case 6:
1120                         mem_share = (mem_size / config->rx_ring_num);
1121                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1122                         continue;
1123                 case 7:
1124                         mem_share = (mem_size / config->rx_ring_num);
1125                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1126                         continue;
1127                 }
1128         }
1129         writeq(val64, &bar0->rx_queue_cfg);
1130
1131         /*
1132          * Filling Tx round robin registers
1133          * as per the number of FIFOs
1134          */
1135         switch (config->tx_fifo_num) {
1136         case 1:
1137                 val64 = 0x0000000000000000ULL;
1138                 writeq(val64, &bar0->tx_w_round_robin_0);
1139                 writeq(val64, &bar0->tx_w_round_robin_1);
1140                 writeq(val64, &bar0->tx_w_round_robin_2);
1141                 writeq(val64, &bar0->tx_w_round_robin_3);
1142                 writeq(val64, &bar0->tx_w_round_robin_4);
1143                 break;
1144         case 2:
1145                 val64 = 0x0000010000010000ULL;
1146                 writeq(val64, &bar0->tx_w_round_robin_0);
1147                 val64 = 0x0100000100000100ULL;
1148                 writeq(val64, &bar0->tx_w_round_robin_1);
1149                 val64 = 0x0001000001000001ULL;
1150                 writeq(val64, &bar0->tx_w_round_robin_2);
1151                 val64 = 0x0000010000010000ULL;
1152                 writeq(val64, &bar0->tx_w_round_robin_3);
1153                 val64 = 0x0100000000000000ULL;
1154                 writeq(val64, &bar0->tx_w_round_robin_4);
1155                 break;
1156         case 3:
1157                 val64 = 0x0001000102000001ULL;
1158                 writeq(val64, &bar0->tx_w_round_robin_0);
1159                 val64 = 0x0001020000010001ULL;
1160                 writeq(val64, &bar0->tx_w_round_robin_1);
1161                 val64 = 0x0200000100010200ULL;
1162                 writeq(val64, &bar0->tx_w_round_robin_2);
1163                 val64 = 0x0001000102000001ULL;
1164                 writeq(val64, &bar0->tx_w_round_robin_3);
1165                 val64 = 0x0001020000000000ULL;
1166                 writeq(val64, &bar0->tx_w_round_robin_4);
1167                 break;
1168         case 4:
1169                 val64 = 0x0001020300010200ULL;
1170                 writeq(val64, &bar0->tx_w_round_robin_0);
1171                 val64 = 0x0100000102030001ULL;
1172                 writeq(val64, &bar0->tx_w_round_robin_1);
1173                 val64 = 0x0200010000010203ULL;
1174                 writeq(val64, &bar0->tx_w_round_robin_2);
1175                 val64 = 0x0001020001000001ULL;
1176                 writeq(val64, &bar0->tx_w_round_robin_3);
1177                 val64 = 0x0203000100000000ULL;
1178                 writeq(val64, &bar0->tx_w_round_robin_4);
1179                 break;
1180         case 5:
1181                 val64 = 0x0001000203000102ULL;
1182                 writeq(val64, &bar0->tx_w_round_robin_0);
1183                 val64 = 0x0001020001030004ULL;
1184                 writeq(val64, &bar0->tx_w_round_robin_1);
1185                 val64 = 0x0001000203000102ULL;
1186                 writeq(val64, &bar0->tx_w_round_robin_2);
1187                 val64 = 0x0001020001030004ULL;
1188                 writeq(val64, &bar0->tx_w_round_robin_3);
1189                 val64 = 0x0001000000000000ULL;
1190                 writeq(val64, &bar0->tx_w_round_robin_4);
1191                 break;
1192         case 6:
1193                 val64 = 0x0001020304000102ULL;
1194                 writeq(val64, &bar0->tx_w_round_robin_0);
1195                 val64 = 0x0304050001020001ULL;
1196                 writeq(val64, &bar0->tx_w_round_robin_1);
1197                 val64 = 0x0203000100000102ULL;
1198                 writeq(val64, &bar0->tx_w_round_robin_2);
1199                 val64 = 0x0304000102030405ULL;
1200                 writeq(val64, &bar0->tx_w_round_robin_3);
1201                 val64 = 0x0001000200000000ULL;
1202                 writeq(val64, &bar0->tx_w_round_robin_4);
1203                 break;
1204         case 7:
1205                 val64 = 0x0001020001020300ULL;
1206                 writeq(val64, &bar0->tx_w_round_robin_0);
1207                 val64 = 0x0102030400010203ULL;
1208                 writeq(val64, &bar0->tx_w_round_robin_1);
1209                 val64 = 0x0405060001020001ULL;
1210                 writeq(val64, &bar0->tx_w_round_robin_2);
1211                 val64 = 0x0304050000010200ULL;
1212                 writeq(val64, &bar0->tx_w_round_robin_3);
1213                 val64 = 0x0102030000000000ULL;
1214                 writeq(val64, &bar0->tx_w_round_robin_4);
1215                 break;
1216         case 8:
1217                 val64 = 0x0001020300040105ULL;
1218                 writeq(val64, &bar0->tx_w_round_robin_0);
1219                 val64 = 0x0200030106000204ULL;
1220                 writeq(val64, &bar0->tx_w_round_robin_1);
1221                 val64 = 0x0103000502010007ULL;
1222                 writeq(val64, &bar0->tx_w_round_robin_2);
1223                 val64 = 0x0304010002060500ULL;
1224                 writeq(val64, &bar0->tx_w_round_robin_3);
1225                 val64 = 0x0103020400000000ULL;
1226                 writeq(val64, &bar0->tx_w_round_robin_4);
1227                 break;
1228         }
1229
1230         /* Enable all configured Tx FIFO partitions */
1231         val64 = readq(&bar0->tx_fifo_partition_0);
1232         val64 |= (TX_FIFO_PARTITION_EN);
1233         writeq(val64, &bar0->tx_fifo_partition_0);
1234
1235         /* Filling the Rx round robin registers as per the
1236          * number of Rings and steering based on QoS.
1237          */
1238         switch (config->rx_ring_num) {
1239         case 1:
1240                 val64 = 0x8080808080808080ULL;
1241                 writeq(val64, &bar0->rts_qos_steering);
1242                 break;
1243         case 2:
1244                 val64 = 0x0000010000010000ULL;
1245                 writeq(val64, &bar0->rx_w_round_robin_0);
1246                 val64 = 0x0100000100000100ULL;
1247                 writeq(val64, &bar0->rx_w_round_robin_1);
1248                 val64 = 0x0001000001000001ULL;
1249                 writeq(val64, &bar0->rx_w_round_robin_2);
1250                 val64 = 0x0000010000010000ULL;
1251                 writeq(val64, &bar0->rx_w_round_robin_3);
1252                 val64 = 0x0100000000000000ULL;
1253                 writeq(val64, &bar0->rx_w_round_robin_4);
1254
1255                 val64 = 0x8080808040404040ULL;
1256                 writeq(val64, &bar0->rts_qos_steering);
1257                 break;
1258         case 3:
1259                 val64 = 0x0001000102000001ULL;
1260                 writeq(val64, &bar0->rx_w_round_robin_0);
1261                 val64 = 0x0001020000010001ULL;
1262                 writeq(val64, &bar0->rx_w_round_robin_1);
1263                 val64 = 0x0200000100010200ULL;
1264                 writeq(val64, &bar0->rx_w_round_robin_2);
1265                 val64 = 0x0001000102000001ULL;
1266                 writeq(val64, &bar0->rx_w_round_robin_3);
1267                 val64 = 0x0001020000000000ULL;
1268                 writeq(val64, &bar0->rx_w_round_robin_4);
1269
1270                 val64 = 0x8080804040402020ULL;
1271                 writeq(val64, &bar0->rts_qos_steering);
1272                 break;
1273         case 4:
1274                 val64 = 0x0001020300010200ULL;
1275                 writeq(val64, &bar0->rx_w_round_robin_0);
1276                 val64 = 0x0100000102030001ULL;
1277                 writeq(val64, &bar0->rx_w_round_robin_1);
1278                 val64 = 0x0200010000010203ULL;
1279                 writeq(val64, &bar0->rx_w_round_robin_2);
1280                 val64 = 0x0001020001000001ULL;
1281                 writeq(val64, &bar0->rx_w_round_robin_3);
1282                 val64 = 0x0203000100000000ULL;
1283                 writeq(val64, &bar0->rx_w_round_robin_4);
1284
1285                 val64 = 0x8080404020201010ULL;
1286                 writeq(val64, &bar0->rts_qos_steering);
1287                 break;
1288         case 5:
1289                 val64 = 0x0001000203000102ULL;
1290                 writeq(val64, &bar0->rx_w_round_robin_0);
1291                 val64 = 0x0001020001030004ULL;
1292                 writeq(val64, &bar0->rx_w_round_robin_1);
1293                 val64 = 0x0001000203000102ULL;
1294                 writeq(val64, &bar0->rx_w_round_robin_2);
1295                 val64 = 0x0001020001030004ULL;
1296                 writeq(val64, &bar0->rx_w_round_robin_3);
1297                 val64 = 0x0001000000000000ULL;
1298                 writeq(val64, &bar0->rx_w_round_robin_4);
1299
1300                 val64 = 0x8080404020201008ULL;
1301                 writeq(val64, &bar0->rts_qos_steering);
1302                 break;
1303         case 6:
1304                 val64 = 0x0001020304000102ULL;
1305                 writeq(val64, &bar0->rx_w_round_robin_0);
1306                 val64 = 0x0304050001020001ULL;
1307                 writeq(val64, &bar0->rx_w_round_robin_1);
1308                 val64 = 0x0203000100000102ULL;
1309                 writeq(val64, &bar0->rx_w_round_robin_2);
1310                 val64 = 0x0304000102030405ULL;
1311                 writeq(val64, &bar0->rx_w_round_robin_3);
1312                 val64 = 0x0001000200000000ULL;
1313                 writeq(val64, &bar0->rx_w_round_robin_4);
1314
1315                 val64 = 0x8080404020100804ULL;
1316                 writeq(val64, &bar0->rts_qos_steering);
1317                 break;
1318         case 7:
1319                 val64 = 0x0001020001020300ULL;
1320                 writeq(val64, &bar0->rx_w_round_robin_0);
1321                 val64 = 0x0102030400010203ULL;
1322                 writeq(val64, &bar0->rx_w_round_robin_1);
1323                 val64 = 0x0405060001020001ULL;
1324                 writeq(val64, &bar0->rx_w_round_robin_2);
1325                 val64 = 0x0304050000010200ULL;
1326                 writeq(val64, &bar0->rx_w_round_robin_3);
1327                 val64 = 0x0102030000000000ULL;
1328                 writeq(val64, &bar0->rx_w_round_robin_4);
1329
1330                 val64 = 0x8080402010080402ULL;
1331                 writeq(val64, &bar0->rts_qos_steering);
1332                 break;
1333         case 8:
1334                 val64 = 0x0001020300040105ULL;
1335                 writeq(val64, &bar0->rx_w_round_robin_0);
1336                 val64 = 0x0200030106000204ULL;
1337                 writeq(val64, &bar0->rx_w_round_robin_1);
1338                 val64 = 0x0103000502010007ULL;
1339                 writeq(val64, &bar0->rx_w_round_robin_2);
1340                 val64 = 0x0304010002060500ULL;
1341                 writeq(val64, &bar0->rx_w_round_robin_3);
1342                 val64 = 0x0103020400000000ULL;
1343                 writeq(val64, &bar0->rx_w_round_robin_4);
1344
1345                 val64 = 0x8040201008040201ULL;
1346                 writeq(val64, &bar0->rts_qos_steering);
1347                 break;
1348         }
1349
1350         /* UDP Fix */
1351         val64 = 0;
1352         for (i = 0; i < 8; i++)
1353                 writeq(val64, &bar0->rts_frm_len_n[i]);
1354
1355         /* Set the default rts frame length for the rings configured */
1356         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1357         for (i = 0 ; i < config->rx_ring_num ; i++)
1358                 writeq(val64, &bar0->rts_frm_len_n[i]);
1359
1360         /* Set the frame length for the configured rings
1361          * desired by the user
1362          */
1363         for (i = 0; i < config->rx_ring_num; i++) {
1364                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1365                  * specified frame length steering.
1366                  * If the user provides the frame length then program
1367                  * the rts_frm_len register for those values or else
1368                  * leave it as it is.
1369                  */
1370                 if (rts_frm_len[i] != 0) {
1371                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1372                                 &bar0->rts_frm_len_n[i]);
1373                 }
1374         }
1375
1376         /* Program statistics memory */
1377         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1378
1379         if (nic->device_type == XFRAME_II_DEVICE) {
1380                 val64 = STAT_BC(0x320);
1381                 writeq(val64, &bar0->stat_byte_cnt);
1382         }
1383
1384         /*
1385          * Initializing the sampling rate for the device to calculate the
1386          * bandwidth utilization.
1387          */
1388         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1389             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1390         writeq(val64, &bar0->mac_link_util);
1391
1392
1393         /*
1394          * Initializing the Transmit and Receive Traffic Interrupt
1395          * Scheme.
1396          */
1397         /*
1398          * TTI Initialization. Default Tx timer gets us about
1399          * 250 interrupts per sec. Continuous interrupts are enabled
1400          * by default.
1401          */
1402         if (nic->device_type == XFRAME_II_DEVICE) {
1403                 int count = (nic->config.bus_speed * 125)/2;
1404                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1405         } else {
1406
1407                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1408         }
1409         val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1410             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1411             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1412                 if (use_continuous_tx_intrs)
1413                         val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1414         writeq(val64, &bar0->tti_data1_mem);
1415
1416         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1417             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1418             TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1419         writeq(val64, &bar0->tti_data2_mem);
1420
1421         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1422         writeq(val64, &bar0->tti_command_mem);
1423
1424         /*
1425          * Once the operation completes, the Strobe bit of the command
1426          * register will be reset. We poll for this particular condition
1427          * We wait for a maximum of 500ms for the operation to complete,
1428          * if it's not complete by then we return error.
1429          */
1430         time = 0;
1431         while (TRUE) {
1432                 val64 = readq(&bar0->tti_command_mem);
1433                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1434                         break;
1435                 }
1436                 if (time > 10) {
1437                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1438                                   dev->name);
1439                         return -1;
1440                 }
1441                 msleep(50);
1442                 time++;
1443         }
1444
1445         if (nic->config.bimodal) {
1446                 int k = 0;
1447                 for (k = 0; k < config->rx_ring_num; k++) {
1448                         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1449                         val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1450                         writeq(val64, &bar0->tti_command_mem);
1451
                        /*
                         * Once the operation completes, the Strobe bit of
                         * the command register will be reset. We poll for
                         * this particular condition. We wait for a maximum
                         * of 500ms for the operation to complete; if it's
                         * not complete by then we return an error.
                         */
1458                         time = 0;
1459                         while (TRUE) {
1460                                 val64 = readq(&bar0->tti_command_mem);
1461                                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1462                                         break;
1463                                 }
1464                                 if (time > 10) {
1465                                         DBG_PRINT(ERR_DBG,
1466                                                 "%s: TTI init Failed\n",
1467                                         dev->name);
1468                                         return -1;
1469                                 }
1470                                 time++;
1471                                 msleep(50);
1472                         }
1473                 }
1474         } else {
1475
1476                 /* RTI Initialization */
1477                 if (nic->device_type == XFRAME_II_DEVICE) {
1478                         /*
1479                          * Programmed to generate Apprx 500 Intrs per
1480                          * second
1481                          */
1482                         int count = (nic->config.bus_speed * 125)/4;
1483                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1484                 } else {
1485                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1486                 }
1487                 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1488                     RTI_DATA1_MEM_RX_URNG_B(0x10) |
1489                     RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1490
1491                 writeq(val64, &bar0->rti_data1_mem);
1492
1493                 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1494                     RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1495                 if (nic->intr_type == MSI_X)
1496                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1497                                 RTI_DATA2_MEM_RX_UFC_D(0x40));
1498                 else
1499                     val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1500                                 RTI_DATA2_MEM_RX_UFC_D(0x80));
1501                 writeq(val64, &bar0->rti_data2_mem);
1502
1503                 for (i = 0; i < config->rx_ring_num; i++) {
1504                         val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1505                                         | RTI_CMD_MEM_OFFSET(i);
1506                         writeq(val64, &bar0->rti_command_mem);
1507
1508                         /*
1509                          * Once the operation completes, the Strobe bit of the
1510                          * command register will be reset. We poll for this
1511                          * particular condition. We wait for a maximum of 500ms
1512                          * for the operation to complete, if it's not complete
1513                          * by then we return error.
1514                          */
1515                         time = 0;
1516                         while (TRUE) {
1517                                 val64 = readq(&bar0->rti_command_mem);
1518                                 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1519                                         break;
1520                                 }
1521                                 if (time > 10) {
1522                                         DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1523                                                   dev->name);
1524                                         return -1;
1525                                 }
1526                                 time++;
1527                                 msleep(50);
1528                         }
1529                 }
1530         }
1531
1532         /*
1533          * Initializing proper values as Pause threshold into all
1534          * the 8 Queues on Rx side.
1535          */
1536         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1537         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1538
1539         /* Disable RMAC PAD STRIPPING */
1540         add = &bar0->mac_cfg;
1541         val64 = readq(&bar0->mac_cfg);
1542         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1543         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1544         writel((u32) (val64), add);
1545         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1546         writel((u32) (val64 >> 32), (add + 4));
1547         val64 = readq(&bar0->mac_cfg);
1548
1549         /* Enable FCS stripping by adapter */
1550         add = &bar0->mac_cfg;
1551         val64 = readq(&bar0->mac_cfg);
1552         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1553         if (nic->device_type == XFRAME_II_DEVICE)
1554                 writeq(val64, &bar0->mac_cfg);
1555         else {
1556                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1557                 writel((u32) (val64), add);
1558                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1559                 writel((u32) (val64 >> 32), (add + 4));
1560         }
1561
1562         /*
1563          * Set the time value to be inserted in the pause frame
1564          * generated by xena.
1565          */
1566         val64 = readq(&bar0->rmac_pause_cfg);
1567         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1568         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1569         writeq(val64, &bar0->rmac_pause_cfg);
1570
1571         /*
1572          * Set the Threshold Limit for Generating the pause frame
1573          * If the amount of data in any Queue exceeds ratio of
1574          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1575          * pause frame is generated
1576          */
1577         val64 = 0;
1578         for (i = 0; i < 4; i++) {
1579                 val64 |=
1580                     (((u64) 0xFF00 | nic->mac_control.
1581                       mc_pause_threshold_q0q3)
1582                      << (i * 2 * 8));
1583         }
1584         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1585
1586         val64 = 0;
1587         for (i = 0; i < 4; i++) {
1588                 val64 |=
1589                     (((u64) 0xFF00 | nic->mac_control.
1590                       mc_pause_threshold_q4q7)
1591                      << (i * 2 * 8));
1592         }
1593         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1594
1595         /*
1596          * TxDMA will stop Read request if the number of read split has
1597          * exceeded the limit pointed by shared_splits
1598          */
1599         val64 = readq(&bar0->pic_control);
1600         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1601         writeq(val64, &bar0->pic_control);
1602
1603         if (nic->config.bus_speed == 266) {
1604                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1605                 writeq(0x0, &bar0->read_retry_delay);
1606                 writeq(0x0, &bar0->write_retry_delay);
1607         }
1608
1609         /*
1610          * Programming the Herc to split every write transaction
1611          * that does not start on an ADB to reduce disconnects.
1612          */
1613         if (nic->device_type == XFRAME_II_DEVICE) {
1614                 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1615                         MISC_LINK_STABILITY_PRD(3);
1616                 writeq(val64, &bar0->misc_control);
1617                 val64 = readq(&bar0->pic_control2);
1618                 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1619                 writeq(val64, &bar0->pic_control2);
1620         }
1621         if (strstr(nic->product_name, "CX4")) {
1622                 val64 = TMAC_AVG_IPG(0x17);
1623                 writeq(val64, &bar0->tmac_avg_ipg);
1624         }
1625
1626         return SUCCESS;
1627 }
1628 #define LINK_UP_DOWN_INTERRUPT          1
1629 #define MAC_RMAC_ERR_TIMER              2
1630
1631 static int s2io_link_fault_indication(struct s2io_nic *nic)
1632 {
1633         if (nic->intr_type != INTA)
1634                 return MAC_RMAC_ERR_TIMER;
1635         if (nic->device_type == XFRAME_II_DEVICE)
1636                 return LINK_UP_DOWN_INTERRUPT;
1637         else
1638                 return MAC_RMAC_ERR_TIMER;
1639 }
1640
1641 /**
1642  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1643  *  @nic: device private variable,
1644  *  @mask: A mask indicating which Intr block must be modified and,
1645  *  @flag: A flag indicating whether to enable or disable the Intrs.
1646  *  Description: This function will either disable or enable the interrupts
1647  *  depending on the flag argument. The mask argument can be used to
1648  *  enable/disable any Intr block.
1649  *  Return Value: NONE.
1650  */
1651
static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        register u64 val64 = 0, temp64 = 0;

        /*
         * Mask-register polarity throughout this function: clearing a bit
         * in general_int_mask (or a per-block mask register) enables that
         * interrupt source; writing DISABLE_ALL_INTRS or OR-ing bits back
         * in masks it off.
         */

        /*  Top level interrupt classification */
        /*  PIC Interrupts */
        if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
                /*  Enable PIC Intrs in the general intr mask register */
                val64 = TXPIC_INT_M;
                if (flag == ENABLE_INTRS) {
                        temp64 = readq(&bar0->general_int_mask);
                        temp64 &= ~((u64) val64);
                        writeq(temp64, &bar0->general_int_mask);
                        /*
                         * If Hercules adapter enable GPIO otherwise
                         * disable all PCIX, Flash, MDIO, IIC and GPIO
                         * interrupts for now.
                         * TODO
                         */
                        if (s2io_link_fault_indication(nic) ==
                                        LINK_UP_DOWN_INTERRUPT ) {
                                /* Unmask only the GPIO link-up source. */
                                temp64 = readq(&bar0->pic_int_mask);
                                temp64 &= ~((u64) PIC_INT_GPIO);
                                writeq(temp64, &bar0->pic_int_mask);
                                temp64 = readq(&bar0->gpio_int_mask);
                                temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
                                writeq(temp64, &bar0->gpio_int_mask);
                        } else {
                                writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
                        }
                        /*
                         * No MSI Support is available presently, so TTI and
                         * RTI interrupts are also disabled.
                         */
                } else if (flag == DISABLE_INTRS) {
                        /*
                         * Disable PIC Intrs in the general
                         * intr mask register
                         */
                        writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
                        temp64 = readq(&bar0->general_int_mask);
                        val64 |= temp64;
                        writeq(val64, &bar0->general_int_mask);
                }
        }

        /*  MAC Interrupts */
        /*  Enabling/Disabling MAC interrupts */
        if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
                val64 = TXMAC_INT_M | RXMAC_INT_M;
                if (flag == ENABLE_INTRS) {
                        temp64 = readq(&bar0->general_int_mask);
                        temp64 &= ~((u64) val64);
                        writeq(temp64, &bar0->general_int_mask);
                        /*
                         * All MAC block error interrupts are disabled for now
                         * TODO
                         */
                } else if (flag == DISABLE_INTRS) {
                        /*
                         * Disable MAC Intrs in the general intr mask register
                         */
                        writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
                        writeq(DISABLE_ALL_INTRS,
                               &bar0->mac_rmac_err_mask);

                        temp64 = readq(&bar0->general_int_mask);
                        val64 |= temp64;
                        writeq(val64, &bar0->general_int_mask);
                }
        }

        /*  Tx traffic interrupts */
        if (mask & TX_TRAFFIC_INTR) {
                val64 = TXTRAFFIC_INT_M;
                if (flag == ENABLE_INTRS) {
                        temp64 = readq(&bar0->general_int_mask);
                        temp64 &= ~((u64) val64);
                        writeq(temp64, &bar0->general_int_mask);
                        /*
                         * Enable all the Tx side interrupts
                         * writing 0 Enables all 64 TX interrupt levels
                         */
                        writeq(0x0, &bar0->tx_traffic_mask);
                } else if (flag == DISABLE_INTRS) {
                        /*
                         * Disable Tx Traffic Intrs in the general intr mask
                         * register.
                         */
                        writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
                        temp64 = readq(&bar0->general_int_mask);
                        val64 |= temp64;
                        writeq(val64, &bar0->general_int_mask);
                }
        }

        /*  Rx traffic interrupts */
        if (mask & RX_TRAFFIC_INTR) {
                val64 = RXTRAFFIC_INT_M;
                if (flag == ENABLE_INTRS) {
                        temp64 = readq(&bar0->general_int_mask);
                        temp64 &= ~((u64) val64);
                        writeq(temp64, &bar0->general_int_mask);
                        /* writing 0 Enables all 8 RX interrupt levels */
                        writeq(0x0, &bar0->rx_traffic_mask);
                } else if (flag == DISABLE_INTRS) {
                        /*
                         * Disable Rx Traffic Intrs in the general intr mask
                         * register.
                         */
                        writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
                        temp64 = readq(&bar0->general_int_mask);
                        val64 |= temp64;
                        writeq(val64, &bar0->general_int_mask);
                }
        }
}
1770
1771 /**
1772  *  verify_pcc_quiescent- Checks for PCC quiescent state
1773  *  Return: 1 If PCC is quiescence
1774  *          0 If PCC is not quiescence
1775  */
1776 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
1777 {
1778         int ret = 0, herc;
1779         struct XENA_dev_config __iomem *bar0 = sp->bar0;
1780         u64 val64 = readq(&bar0->adapter_status);
1781         
1782         herc = (sp->device_type == XFRAME_II_DEVICE);
1783
1784         if (flag == FALSE) {
1785                 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1786                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
1787                                 ret = 1;
1788                 } else {
1789                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1790                                 ret = 1;
1791                 }
1792         } else {
1793                 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1794                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1795                              ADAPTER_STATUS_RMAC_PCC_IDLE))
1796                                 ret = 1;
1797                 } else {
1798                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1799                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1800                                 ret = 1;
1801                 }
1802         }
1803
1804         return ret;
1805 }
1806 /**
1807  *  verify_xena_quiescence - Checks whether the H/W is ready
1808  *  Description: Returns whether the H/W is ready to go or not. Depending
1809  *  on whether adapter enable bit was written or not the comparison
1810  *  differs and the calling function passes the input argument flag to
1811  *  indicate this.
1812  *  Return: 1 If xena is quiescence
1813  *          0 If Xena is not quiescence
1814  */
1815
static int verify_xena_quiescence(struct s2io_nic *sp)
{
        int  mode;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64 = readq(&bar0->adapter_status);
        mode = s2io_verify_pci_mode(sp);

        /*
         * Each check below fails fast: the first block that is not ready
         * logs a diagnostic and the function returns 0 immediately.
         */
        if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
                DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
                return 0;
        }
        if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
                DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
                return 0;
        }
        if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
                DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
                return 0;
        }
        if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
                DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
                return 0;
        }
        if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
                DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
                return 0;
        }
        if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
                DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
                return 0;
        }
        if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
                DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
                return 0;
        }
        if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
                DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
                return 0;
        }

        /*
         * In PCI 33 mode, the P_PLL is not used, and therefore,
         * the P_PLL_LOCK bit in the adapter_status register will
         * not be asserted.
         */
        if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
                sp->device_type == XFRAME_II_DEVICE && mode !=
                PCI_MODE_PCI_33) {
                DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
                return 0;
        }
        /* RC_PRC is a multi-bit field: every bit must be set. */
        if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
                        ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
                DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
                return 0;
        }
        return 1;
}
1874
1875 /**
1876  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
1877  * @sp: Pointer to device specifc structure
1878  * Description :
1879  * New procedure to clear mac address reading  problems on Alpha platforms
1880  *
1881  */
1882
1883 static void fix_mac_address(struct s2io_nic * sp)
1884 {
1885         struct XENA_dev_config __iomem *bar0 = sp->bar0;
1886         u64 val64;
1887         int i = 0;
1888
1889         while (fix_mac[i] != END_SIGN) {
1890                 writeq(fix_mac[i++], &bar0->gpio_control);
1891                 udelay(10);
1892                 val64 = readq(&bar0->gpio_control);
1893         }
1894 }
1895
1896 /**
1897  *  start_nic - Turns the device on
1898  *  @nic : device private variable.
1899  *  Description:
1900  *  This function actually turns the device on. Before this  function is
1901  *  called,all Registers are configured from their reset states
1902  *  and shared memory is allocated but the NIC is still quiescent. On
1903  *  calling this function, the device interrupts are cleared and the NIC is
1904  *  literally switched on by writing into the adapter control register.
1905  *  Return Value:
1906  *  SUCCESS on success and -1 on failure.
1907  */
1908
static int start_nic(struct s2io_nic *nic)
{
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        struct net_device *dev = nic->dev;
        register u64 val64 = 0;
        u16 subid, i;
        struct mac_info *mac_control;
        struct config_param *config;

        mac_control = &nic->mac_control;
        config = &nic->config;

        /*  PRC Initialization and configuration */
        for (i = 0; i < config->rx_ring_num; i++) {
                /* Point each PRC at the first Rx block of its ring. */
                writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
                       &bar0->prc_rxd0_n[i]);

                val64 = readq(&bar0->prc_ctrl_n[i]);
                if (nic->config.bimodal)
                        val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
                if (nic->rxd_mode == RXD_MODE_1)
                        val64 |= PRC_CTRL_RC_ENABLED;
                else
                        val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
                if (nic->device_type == XFRAME_II_DEVICE)
                        val64 |= PRC_CTRL_GROUP_READS;
                /* Replace the backoff interval field with 0x1000. */
                val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
                val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
                writeq(val64, &bar0->prc_ctrl_n[i]);
        }

        if (nic->rxd_mode == RXD_MODE_3B) {
                /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
                val64 = readq(&bar0->rx_pa_cfg);
                val64 |= RX_PA_CFG_IGNORE_L2_ERR;
                writeq(val64, &bar0->rx_pa_cfg);
        }

        /*
         * Enabling MC-RLDRAM. After enabling the device, we timeout
         * for around 100ms, which is approximately the time required
         * for the device to be ready for operation.
         */
        val64 = readq(&bar0->mc_rldram_mrs);
        val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
        val64 = readq(&bar0->mc_rldram_mrs);

        msleep(100);    /* Delay by around 100 ms. */

        /* Enabling ECC Protection. */
        /* NOTE(review): the code *clears* ADAPTER_ECC_EN here while the
         * comment above says "Enabling"; whether the bit is active-low
         * ("ECC disable") is not visible from this file -- confirm
         * against the Xframe register specification. */
        val64 = readq(&bar0->adapter_control);
        val64 &= ~ADAPTER_ECC_EN;
        writeq(val64, &bar0->adapter_control);

        /*
         * Clearing any possible Link state change interrupts that
         * could have popped up just before Enabling the card.
         */
        val64 = readq(&bar0->mac_rmac_err_reg);
        if (val64)
                writeq(val64, &bar0->mac_rmac_err_reg);

        /*
         * Verify if the device is ready to be enabled, if so enable
         * it.
         */
        val64 = readq(&bar0->adapter_status);
        if (!verify_xena_quiescence(nic)) {
                DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
                DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
                          (unsigned long long) val64);
                return FAILURE;
        }

        /*
         * With some switches, link might be already up at this point.
         * Because of this weird behavior, when we enable laser,
         * we may not get link. We need to handle this. We cannot
         * figure out which switch is misbehaving. So we are forced to
         * make a global change.
         */

        /* Enabling Laser. */
        val64 = readq(&bar0->adapter_control);
        val64 |= ADAPTER_EOI_TX_ON;
        writeq(val64, &bar0->adapter_control);

        if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
                /*
                 * Don't see link state interrupts initially on some
                 * switches, so directly scheduling the link state task here.
                 */
                schedule_work(&nic->set_link_task);
        }
        /* SXE-002: Initialize link and activity LED */
        subid = nic->pdev->subsystem_device;
        if (((subid & 0xFF) >= 0x07) &&
            (nic->device_type == XFRAME_I_DEVICE)) {
                val64 = readq(&bar0->gpio_control);
                val64 |= 0x0000800000000000ULL;
                writeq(val64, &bar0->gpio_control);
                val64 = 0x0411040400000000ULL;
                /* Magic LED programming value at BAR0 offset 0x2700. */
                writeq(val64, (void __iomem *)bar0 + 0x2700);
        }

        return SUCCESS;
}
2017 /**
2018  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2019  */
2020 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2021                                         TxD *txdlp, int get_off)
2022 {
2023         struct s2io_nic *nic = fifo_data->nic;
2024         struct sk_buff *skb;
2025         struct TxD *txds;
2026         u16 j, frg_cnt;
2027
2028         txds = txdlp;
2029         if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2030                 pci_unmap_single(nic->pdev, (dma_addr_t)
2031                         txds->Buffer_Pointer, sizeof(u64),
2032                         PCI_DMA_TODEVICE);
2033                 txds++;
2034         }
2035
2036         skb = (struct sk_buff *) ((unsigned long)
2037                         txds->Host_Control);
2038         if (!skb) {
2039                 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2040                 return NULL;
2041         }
2042         pci_unmap_single(nic->pdev, (dma_addr_t)
2043                          txds->Buffer_Pointer,
2044                          skb->len - skb->data_len,
2045                          PCI_DMA_TODEVICE);
2046         frg_cnt = skb_shinfo(skb)->nr_frags;
2047         if (frg_cnt) {
2048                 txds++;
2049                 for (j = 0; j < frg_cnt; j++, txds++) {
2050                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2051                         if (!txds->Buffer_Pointer)
2052                                 break;
2053                         pci_unmap_page(nic->pdev, (dma_addr_t)
2054                                         txds->Buffer_Pointer,
2055                                        frag->size, PCI_DMA_TODEVICE);
2056                 }
2057         }
2058         memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2059         return(skb);
2060 }
2061
2062 /**
2063  *  free_tx_buffers - Free all queued Tx buffers
2064  *  @nic : device private variable.
2065  *  Description:
2066  *  Free all queued Tx buffers.
2067  *  Return Value: void
2068 */
2069
2070 static void free_tx_buffers(struct s2io_nic *nic)
2071 {
2072         struct net_device *dev = nic->dev;
2073         struct sk_buff *skb;
2074         struct TxD *txdp;
2075         int i, j;
2076         struct mac_info *mac_control;
2077         struct config_param *config;
2078         int cnt = 0;
2079
2080         mac_control = &nic->mac_control;
2081         config = &nic->config;
2082
2083         for (i = 0; i < config->tx_fifo_num; i++) {
2084                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2085                         txdp = (struct TxD *) mac_control->fifos[i].list_info[j].
2086                             list_virt_addr;
2087                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2088                         if (skb) {
2089                                 dev_kfree_skb(skb);
2090                                 cnt++;
2091                         }
2092                 }
2093                 DBG_PRINT(INTR_DBG,
2094                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2095                           dev->name, cnt, i);
2096                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2097                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2098         }
2099 }
2100
2101 /**
2102  *   stop_nic -  To stop the nic
2103  *   @nic ; device private variable.
2104  *   Description:
2105  *   This function does exactly the opposite of what the start_nic()
2106  *   function does. This function is called to stop the device.
2107  *   Return Value:
2108  *   void.
2109  */
2110
2111 static void stop_nic(struct s2io_nic *nic)
2112 {
2113         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2114         register u64 val64 = 0;
2115         u16 interruptible;
2116         struct mac_info *mac_control;
2117         struct config_param *config;
2118
2119         mac_control = &nic->mac_control;
2120         config = &nic->config;
2121
2122         /*  Disable all interrupts */
2123         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2124         interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2125         interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2126         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2127
2128         /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2129         val64 = readq(&bar0->adapter_control);
2130         val64 &= ~(ADAPTER_CNTL_EN);
2131         writeq(val64, &bar0->adapter_control);
2132 }
2133
static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \
                                sk_buff *skb)
{
        struct net_device *dev = nic->dev;
        struct sk_buff *frag_list;
        void *tmp;

        /* Buffer-1 receives L3/L4 headers */
        /* NOTE(review): neither pci_map_single() result in this function
         * is checked for a DMA mapping error -- worth confirming whether
         * the platform/driver combination can fail here. */
        ((struct RxD3*)rxdp)->Buffer1_ptr = pci_map_single
                        (nic->pdev, skb->data, l3l4hdr_size + 4,
                        PCI_DMA_FROMDEVICE);

        /* skb_shinfo(skb)->frag_list will have L4 data payload */
        skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
        if (skb_shinfo(skb)->frag_list == NULL) {
                DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
                return -ENOMEM ;
        }
        frag_list = skb_shinfo(skb)->frag_list;
        /* Account the child skb's memory against the parent. */
        skb->truesize += frag_list->truesize;
        frag_list->next = NULL;
        /* Align the payload start; the skb was over-allocated by
         * ALIGN_SIZE above to leave room for this adjustment.
         * (ALIGN to ALIGN_SIZE + 1 -- presumably ALIGN_SIZE is one less
         * than a power of two; TODO confirm its definition.) */
        tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
        frag_list->data = tmp;
        frag_list->tail = tmp;

        /* Buffer-2 receives L4 data payload */
        ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
                                frag_list->data, dev->mtu,
                                PCI_DMA_FROMDEVICE);
        /* Publish both buffer sizes in the descriptor's Control_2 field. */
        rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
        rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);

        return SUCCESS;
}
2168
2169 /**
2170  *  fill_rx_buffers - Allocates the Rx side skbs
2171  *  @nic:  device private variable
2172  *  @ring_no: ring number
2173  *  Description:
2174  *  The function allocates Rx side skbs and puts the physical
2175  *  address of these buffers into the RxD buffer pointers, so that the NIC
2176  *  can DMA the received frame into these locations.
 *  The NIC supports three receive modes, viz
 *  1. single buffer,
 *  2. two buffer and
 *  3. three buffer modes.
 *  Each mode defines how many fragments the received frame will be split
 *  up into by the NIC. In single buffer mode the entire frame is DMA'ed
 *  into one buffer; in two buffer mode the headers and payload share one
 *  aligned buffer alongside a small header buffer; in three buffer mode
 *  the L3/L4 headers and the L4 payload are placed in separate buffers.
 *  All three modes are handled by this function.
2186  *   Return Value:
2187  *  SUCCESS on success or an appropriate -ve value on failure.
2188  */
2189
static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
{
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, off1, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	struct mac_info *mac_control;
	struct config_param *config;
	u64 tmp;
	struct buffAdd *ba;
	unsigned long flags;
	struct RxD_t *first_rxdp = NULL;

	mac_control = &nic->mac_control;
	config = &nic->config;
	/* Replenish only what has been consumed since the last fill:
	 * ring capacity minus the buffers still posted on the ring. */
	alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
	    atomic_read(&nic->rx_bufs_left[ring_no]);

	block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
	off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
	while (alloc_tab < alloc_cnt) {
		block_no = mac_control->rings[ring_no].rx_curr_put_info.
		    block_index;
		off = mac_control->rings[ring_no].rx_curr_put_info.offset;

		rxdp = mac_control->rings[ring_no].
				rx_blocks[block_no].rxds[off].virt_addr;

		/* Put pointer has caught up with the get pointer while the
		 * descriptor still carries an skb: the ring is full. */
		if ((block_no == block_no1) && (off == off1) &&
					(rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put",
				  dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		/* End of a block reached: advance (and wrap) the put block
		 * index and restart at offset 0 of the next block. */
		if (off && (off == rxd_count[nic->rxd_mode])) {
			mac_control->rings[ring_no].rx_curr_put_info.
			    block_index++;
			if (mac_control->rings[ring_no].rx_curr_put_info.
			    block_index == mac_control->rings[ring_no].
					block_count)
				mac_control->rings[ring_no].rx_curr_put_info.
					block_index = 0;
			block_no = mac_control->rings[ring_no].
					rx_curr_put_info.block_index;
			if (off == rxd_count[nic->rxd_mode])
				off = 0;
			mac_control->rings[ring_no].rx_curr_put_info.
				offset = off;
			rxdp = mac_control->rings[ring_no].
				rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  dev->name, rxdp);
		}
		/* Publish the linear put position. Outside NAPI this can
		 * race with rx_intr_handler(), hence put_lock. */
		if(!napi) {
			spin_lock_irqsave(&nic->put_lock, flags);
			mac_control->rings[ring_no].put_pos =
			(block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
			spin_unlock_irqrestore(&nic->put_lock, flags);
		} else {
			mac_control->rings[ring_no].put_pos =
			(block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
		}
		/* Descriptor is still owned by the adapter and (in 2/3
		 * buffer modes) flagged in-use via BIT(0): stop filling. */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
			((nic->rxd_mode >= RXD_MODE_3A) &&
				(rxdp->Control_2 & BIT(0)))) {
			mac_control->rings[ring_no].rx_curr_put_info.
					offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (nic->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else if (nic->rxd_mode == RXD_MODE_3B)
			size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
		else
			size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if(!skb) {
			DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
			DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
			/* Hand any batched descriptors to the NIC before
			 * bailing out so they are not lost. */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			return -ENOMEM ;
		}
		if (nic->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			((struct RxD1*)rxdp)->Buffer0_ptr = pci_map_single
			    (nic->pdev, skb->data, size - NET_IP_ALIGN,
				PCI_DMA_FROMDEVICE);
			rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);

		} else if (nic->rxd_mode >= RXD_MODE_3A) {
			/*
			 * 2 or 3 buffer mode -
			 * Both 2 buffer mode and 3 buffer mode provides 128
			 * byte aligned receive buffers.
			 *
			 * 3 buffer mode provides header separation where in
			 * skb->data will have L3/L4 headers where as
			 * skb_shinfo(skb)->frag_list will have the L4 data
			 * payload
			 */

			memset(rxdp, 0, sizeof(struct RxD3));
			ba = &mac_control->rings[ring_no].ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Round skb->data up to the next (ALIGN_SIZE + 1)
			 * boundary; assumes ALIGN_SIZE is 2^n - 1. */
			tmp = (u64)(unsigned long) skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb->tail = (void *) (unsigned long)tmp;

			/* Buffer0 (small header buffer) is mapped once and
			 * only synced back to the device on reuse. */
			if (!(((struct RxD3*)rxdp)->Buffer0_ptr))
				((struct RxD3*)rxdp)->Buffer0_ptr =
				   pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
					   PCI_DMA_FROMDEVICE);
			else
				pci_dma_sync_single_for_device(nic->pdev,
				    (dma_addr_t) ((struct RxD3*)rxdp)->Buffer0_ptr,
				    BUF0_LEN, PCI_DMA_FROMDEVICE);
			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (nic->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single
				(nic->pdev, skb->data, dev->mtu + 4,
						PCI_DMA_FROMDEVICE);

				/* Buffer-1 will be dummy buffer. Not used */
				if (!(((struct RxD3*)rxdp)->Buffer1_ptr)) {
					((struct RxD3*)rxdp)->Buffer1_ptr =
						pci_map_single(nic->pdev,
						ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
								(dev->mtu + 4);
			} else {
				/* 3 buffer mode */
				if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
					dev_kfree_skb_irq(skb);
					if (first_rxdp) {
						wmb();
						first_rxdp->Control_1 |=
							RXD_OWN_XENA;
					}
					return -ENOMEM ;
				}
			}
			/* BIT(0) marks this descriptor as carrying buffers
			 * (checked together with RXD_OWN_XENA above). */
			rxdp->Control_2 |= BIT(0);
		}
		rxdp->Host_Control = (unsigned long) (skb);
		/* Give ownership to the NIC immediately except for every
		 * (1 << rxsync_frequency)-th descriptor; those are batched
		 * and released behind a write barrier below. */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (rxd_count[nic->rxd_mode] + 1))
			off = 0;
		mac_control->rings[ring_no].rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		atomic_inc(&nic->rx_bufs_left[ring_no]);
		alloc_tab++;
	}

      end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;
}
2389
2390 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2391 {
2392         struct net_device *dev = sp->dev;
2393         int j;
2394         struct sk_buff *skb;
2395         struct RxD_t *rxdp;
2396         struct mac_info *mac_control;
2397         struct buffAdd *ba;
2398
2399         mac_control = &sp->mac_control;
2400         for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2401                 rxdp = mac_control->rings[ring_no].
2402                                 rx_blocks[blk].rxds[j].virt_addr;
2403                 skb = (struct sk_buff *)
2404                         ((unsigned long) rxdp->Host_Control);
2405                 if (!skb) {
2406                         continue;
2407                 }
2408                 if (sp->rxd_mode == RXD_MODE_1) {
2409                         pci_unmap_single(sp->pdev, (dma_addr_t)
2410                                  ((struct RxD1*)rxdp)->Buffer0_ptr,
2411                                  dev->mtu +
2412                                  HEADER_ETHERNET_II_802_3_SIZE
2413                                  + HEADER_802_2_SIZE +
2414                                  HEADER_SNAP_SIZE,
2415                                  PCI_DMA_FROMDEVICE);
2416                         memset(rxdp, 0, sizeof(struct RxD1));
2417                 } else if(sp->rxd_mode == RXD_MODE_3B) {
2418                         ba = &mac_control->rings[ring_no].
2419                                 ba[blk][j];
2420                         pci_unmap_single(sp->pdev, (dma_addr_t)
2421                                  ((struct RxD3*)rxdp)->Buffer0_ptr,
2422                                  BUF0_LEN,
2423                                  PCI_DMA_FROMDEVICE);
2424                         pci_unmap_single(sp->pdev, (dma_addr_t)
2425                                  ((struct RxD3*)rxdp)->Buffer1_ptr,
2426                                  BUF1_LEN,
2427                                  PCI_DMA_FROMDEVICE);
2428                         pci_unmap_single(sp->pdev, (dma_addr_t)
2429                                  ((struct RxD3*)rxdp)->Buffer2_ptr,
2430                                  dev->mtu + 4,
2431                                  PCI_DMA_FROMDEVICE);
2432                         memset(rxdp, 0, sizeof(struct RxD3));
2433                 } else {
2434                         pci_unmap_single(sp->pdev, (dma_addr_t)
2435                                 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2436                                 PCI_DMA_FROMDEVICE);
2437                         pci_unmap_single(sp->pdev, (dma_addr_t)
2438                                 ((struct RxD3*)rxdp)->Buffer1_ptr,
2439                                 l3l4hdr_size + 4,
2440                                 PCI_DMA_FROMDEVICE);
2441                         pci_unmap_single(sp->pdev, (dma_addr_t)
2442                                 ((struct RxD3*)rxdp)->Buffer2_ptr, dev->mtu,
2443                                 PCI_DMA_FROMDEVICE);
2444                         memset(rxdp, 0, sizeof(struct RxD3));
2445                 }
2446                 dev_kfree_skb(skb);
2447                 atomic_dec(&sp->rx_bufs_left[ring_no]);
2448         }
2449 }
2450
2451 /**
2452  *  free_rx_buffers - Frees all Rx buffers
2453  *  @sp: device private variable.
2454  *  Description:
2455  *  This function will free all Rx buffers allocated by host.
2456  *  Return Value:
2457  *  NONE.
2458  */
2459
2460 static void free_rx_buffers(struct s2io_nic *sp)
2461 {
2462         struct net_device *dev = sp->dev;
2463         int i, blk = 0, buf_cnt = 0;
2464         struct mac_info *mac_control;
2465         struct config_param *config;
2466
2467         mac_control = &sp->mac_control;
2468         config = &sp->config;
2469
2470         for (i = 0; i < config->rx_ring_num; i++) {
2471                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2472                         free_rxd_blk(sp,i,blk);
2473
2474                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2475                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2476                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2477                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2478                 atomic_set(&sp->rx_bufs_left[i], 0);
2479                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2480                           dev->name, buf_cnt, i);
2481         }
2482 }
2483
/**
 * s2io_poll - Rx interrupt handler for NAPI support
 * @dev : pointer to the device structure.
 * @budget : The number of packets that were budgeted to be processed
 * during one pass through the 'Poll' function.
 * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in an interrupt
 * context; it will also process only a given number of packets.
 * Return value:
 * 0 on success and 1 if there are No Rx packets to be processed.
 */
2496
static int s2io_poll(struct net_device *dev, int *budget)
{
	struct s2io_nic *nic = dev->priv;
	int pkt_cnt = 0, org_pkts_to_process;
	struct mac_info *mac_control;
	struct config_param *config;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int i;

	/* isr_cnt tracks in-flight handler activity for the driver. */
	atomic_inc(&nic->isr_cnt);
	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Process at most min(*budget, dev->quota) packets this pass;
	 * rx_intr_handler() decrements pkts_to_process as it goes. */
	nic->pkts_to_process = *budget;
	if (nic->pkts_to_process > dev->quota)
		nic->pkts_to_process = dev->quota;
	org_pkts_to_process = nic->pkts_to_process;

	/* Acknowledge all pending Rx traffic interrupts; the readl
	 * flushes the posted write to the device. */
	writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
	readl(&bar0->rx_traffic_int);

	for (i = 0; i < config->rx_ring_num; i++) {
		rx_intr_handler(&mac_control->rings[i]);
		pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
		if (!nic->pkts_to_process) {
			/* Quota for the current iteration has been met */
			goto no_rx;
		}
	}
	if (!pkt_cnt)
		pkt_cnt = 1;

	dev->quota -= pkt_cnt;
	*budget -= pkt_cnt;
	/* All rings drained under quota: leave polling mode. */
	netif_rx_complete(dev);

	/* Replenish the Rx descriptors consumed during this poll. */
	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	/* Re enable the Rx interrupts. */
	writeq(0x0, &bar0->rx_traffic_mask);
	readl(&bar0->rx_traffic_mask);
	atomic_dec(&nic->isr_cnt);
	return 0;

no_rx:
	/* Budget exhausted: stay in polling mode (Rx interrupts remain
	 * masked) and return 1 to signal there is work left. */
	dev->quota -= pkt_cnt;
	*budget -= pkt_cnt;

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	atomic_dec(&nic->isr_cnt);
	return 1;
}
2560
2561 #ifdef CONFIG_NET_POLL_CONTROLLER
2562 /**
2563  * s2io_netpoll - netpoll event handler entry point
2564  * @dev : pointer to the device structure.
2565  * Description:
2566  *      This function will be called by upper layer to check for events on the
2567  * interface in situations where interrupts are disabled. It is used for
2568  * specific in-kernel networking tasks, such as remote consoles and kernel
2569  * debugging over the network (example netdump in RedHat).
2570  */
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = dev->priv;
	struct mac_info *mac_control;
	struct config_param *config;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;

	/* The device irq is disabled for the whole poll so the regular
	 * interrupt handler cannot run concurrently with us. */
	disable_irq(dev->irq);

	atomic_inc(&nic->isr_cnt);
	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Acknowledge all pending Rx and Tx traffic interrupts. */
	writeq(val64, &bar0->rx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	/* we need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and will fail and eventually netpoll application such
	 * as netdump will fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packet and indicate up to network */
	for (i = 0; i < config->rx_ring_num; i++)
		rx_intr_handler(&mac_control->rings[i]);

	/* Replenish the Rx descriptors consumed above. */
	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Netpoll!!\n");
			break;
		}
	}
	atomic_dec(&nic->isr_cnt);
	enable_irq(dev->irq);
	return;
}
2611 #endif
2612
/**
 *  rx_intr_handler - Rx interrupt handler
 *  @ring_data: per-ring private data (carries a pointer back to the NIC).
 *  Description:
 *  If the interrupt is because of a received frame or if the
 *  receive ring contains fresh as yet un-processed frames, this function is
 *  called. It picks out the RxD at which place the last Rx processing had
 *  stopped and sends the skb to the OSM's Rx handler and then increments
 *  the offset.
 *  Return Value:
 *  NONE.
 */
static void rx_intr_handler(struct ring_info *ring_data)
{
	struct s2io_nic *nic = ring_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	int get_block, put_block, put_offset;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0;
	int i;

	/* rx_lock serialises Rx processing against a card reset. */
	spin_lock(&nic->rx_lock);
	if (atomic_read(&nic->card_state) == CARD_DOWN) {
		DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
			  __FUNCTION__, dev->name);
		spin_unlock(&nic->rx_lock);
		return;
	}

	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
	/* put_pos is written by fill_rx_buffers(); outside NAPI that can
	 * race with us, hence put_lock. */
	if (!napi) {
		spin_lock(&nic->put_lock);
		put_offset = ring_data->put_pos;
		spin_unlock(&nic->put_lock);
	} else
		put_offset = ring_data->put_pos;

	/* Walk descriptors until we hit one the NIC still owns. */
	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If we are right behind the put index, it's a
		 * FIFO full condition.
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
			break;
		}
		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			/* Should not happen: descriptor is marked done but
			 * carries no skb; bail out of the walk. */
			DBG_PRINT(ERR_DBG, "%s: The skb is ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
			spin_unlock(&nic->rx_lock);
			return;
		}
		/* Undo the DMA mappings made in fill_rx_buffers(); the
		 * lengths must match per rxd_mode. */
		if (nic->rxd_mode == RXD_MODE_1) {
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((struct RxD1*)rxdp)->Buffer0_ptr,
				 dev->mtu +
				 HEADER_ETHERNET_II_802_3_SIZE +
				 HEADER_802_2_SIZE +
				 HEADER_SNAP_SIZE,
				 PCI_DMA_FROMDEVICE);
		} else if (nic->rxd_mode == RXD_MODE_3B) {
			/* Buffer0 stays mapped for reuse; only sync it. */
			pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
				 ((struct RxD3*)rxdp)->Buffer0_ptr,
				 BUF0_LEN, PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
				 ((struct RxD3*)rxdp)->Buffer2_ptr,
				 dev->mtu + 4,
				 PCI_DMA_FROMDEVICE);
		} else {
			pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
					 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
					 PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
					 ((struct RxD3*)rxdp)->Buffer1_ptr,
					 l3l4hdr_size + 4,
					 PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
					 ((struct RxD3*)rxdp)->Buffer2_ptr,
					 dev->mtu, PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		rx_osm_handler(ring_data, rxdp);
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
				rxds[get_info.offset].virt_addr;
		/* End of block: wrap to the next block, and back to block 0
		 * at the end of the ring. */
		if (get_info.offset == rxd_count[nic->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		nic->pkts_to_process -= 1;
		/* Under NAPI, stop as soon as the poll budget is spent. */
		if ((napi) && (!nic->pkts_to_process))
			break;
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (nic->lro) {
		/* Clear all LRO sessions before exiting */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &nic->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(nic, lro);
				queue_rx_frame(lro->parent);
				clear_lro_session(lro);
			}
		}
	}

	spin_unlock(&nic->rx_lock);
}
2739
2740 /**
2741  *  tx_intr_handler - Transmit interrupt handler
2742  *  @nic : device private variable
2743  *  Description:
2744  *  If an interrupt was raised to indicate DMA complete of the
2745  *  Tx packet, this function is called. It identifies the last TxD
2746  *  whose buffer was freed and frees all skbs whose data have already
2747  *  DMA'ed into the NICs internal memory.
2748  *  Return Value:
2749  *  NONE
2750  */
2751
2752 static void tx_intr_handler(struct fifo_info *fifo_data)
2753 {
2754         struct s2io_nic *nic = fifo_data->nic;
2755         struct net_device *dev = (struct net_device *) nic->dev;
2756         struct tx_curr_get_info get_info, put_info;
2757         struct sk_buff *skb;
2758         struct TxD *txdlp;
2759
2760         get_info = fifo_data->tx_curr_get_info;
2761         memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
2762         txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
2763             list_virt_addr;
2764         while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2765                (get_info.offset != put_info.offset) &&
2766                (txdlp->Host_Control)) {
2767                 /* Check for TxD errors */
2768                 if (txdlp->Control_1 & TXD_T_CODE) {
2769                         unsigned long long err;
2770                         err = txdlp->Control_1 & TXD_T_CODE;
2771                         if (err & 0x1) {
2772                                 nic->mac_control.stats_info->sw_stat.
2773                                                 parity_err_cnt++;
2774                         }
2775                         if ((err >> 48) == 0xA) {
2776                                 DBG_PRINT(TX_DBG, "TxD returned due \
2777                                                 to loss of link\n");
2778                         }
2779                         else {
2780                                 DBG_PRINT(ERR_DBG, "***TxD error %llx\n", err);
2781                         }
2782                 }
2783
2784                 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
2785                 if (skb == NULL) {
2786                         DBG_PRINT(ERR_DBG, "%s: Null skb ",
2787                         __FUNCTION__);
2788                         DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2789                         return;
2790                 }
2791
2792                 /* Updating the statistics block */
2793                 nic->stats.tx_bytes += skb->len;
2794                 dev_kfree_skb_irq(skb);
2795
2796                 get_info.offset++;
2797                 if (get_info.offset == get_info.fifo_len + 1)
2798                         get_info.offset = 0;
2799                 txdlp = (struct TxD *) fifo_data->list_info
2800                     [get_info.offset].list_virt_addr;
2801                 fifo_data->tx_curr_get_info.offset =
2802                     get_info.offset;
2803         }
2804
2805         spin_lock(&nic->tx_lock);
2806         if (netif_queue_stopped(dev))
2807                 netif_wake_queue(dev);
2808         spin_unlock(&nic->tx_lock);
2809 }
2810
2811 /**
2812  *  s2io_mdio_write - Function to write in to MDIO registers
2813  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2814  *  @addr     : address value
2815  *  @value    : data value
2816  *  @dev      : pointer to net_device structure
2817  *  Description:
2818  *  This function is used to write values to the MDIO registers
2819  *  NONE
2820  */
2821 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
2822 {
2823         u64 val64 = 0x0;
2824         struct s2io_nic *sp = dev->priv;
2825         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2826
2827         //address transaction
2828         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2829                         | MDIO_MMD_DEV_ADDR(mmd_type)
2830                         | MDIO_MMS_PRT_ADDR(0x0);
2831         writeq(val64, &bar0->mdio_control);
2832         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2833         writeq(val64, &bar0->mdio_control);
2834         udelay(100);
2835
2836         //Data transaction
2837         val64 = 0x0;
2838         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2839                         | MDIO_MMD_DEV_ADDR(mmd_type)
2840                         | MDIO_MMS_PRT_ADDR(0x0)
2841                         | MDIO_MDIO_DATA(value)
2842                         | MDIO_OP(MDIO_OP_WRITE_TRANS);
2843         writeq(val64, &bar0->mdio_control);
2844         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2845         writeq(val64, &bar0->mdio_control);
2846         udelay(100);
2847
2848         val64 = 0x0;
2849         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2850         | MDIO_MMD_DEV_ADDR(mmd_type)
2851         | MDIO_MMS_PRT_ADDR(0x0)
2852         | MDIO_OP(MDIO_OP_READ_TRANS);
2853         writeq(val64, &bar0->mdio_control);
2854         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2855         writeq(val64, &bar0->mdio_control);
2856         udelay(100);
2857
2858 }
2859
/**
 *  s2io_mdio_read - Function to read from MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to read values from the MDIO registers
 *  Return Value:
 *  The 16-bit value read from the addressed MDIO register.
 */
2869 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
2870 {
2871         u64 val64 = 0x0;
2872         u64 rval64 = 0x0;
2873         struct s2io_nic *sp = dev->priv;
2874         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2875
        /* Latch register index and MMD device address, then start the
         * transaction; completion is assumed after a fixed 100us delay. */
2876         /* address transaction */
2877         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2878                         | MDIO_MMD_DEV_ADDR(mmd_type)
2879                         | MDIO_MMS_PRT_ADDR(0x0);
2880         writeq(val64, &bar0->mdio_control);
2881         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2882         writeq(val64, &bar0->mdio_control);
2883         udelay(100);
2884
2885         /* Data transaction */
2886         val64 = 0x0;
2887         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2888                         | MDIO_MMD_DEV_ADDR(mmd_type)
2889                         | MDIO_MMS_PRT_ADDR(0x0)
2890                         | MDIO_OP(MDIO_OP_READ_TRANS);
2891         writeq(val64, &bar0->mdio_control);
2892         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2893         writeq(val64, &bar0->mdio_control);
2894         udelay(100);
2895
        /* The read data sits in bits 16..31 of mdio_control; mask and
         * shift to return just the 16-bit register value. */
2896         /* Read the value from regs */
2897         rval64 = readq(&bar0->mdio_control);
2898         rval64 = rval64 & 0xFFFF0000;
2899         rval64 = rval64 >> 16;
2900         return rval64;
2901 }
2902 /**
2903  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
2904  *  @counter      : counter value to be updated
2905  *  @regs_stat    : bit-field holding the per-alarm 2-bit persistence state
2906  *  @index        : index of this alarm's 2-bit field within @regs_stat
2907  *  @flag         : flag to indicate the status
2908  *  @type         : counter type
2909  *  Description:
2910  *  This function checks the status of the xpak counters value
2911  *  NONE
2912  */
2913
2912 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
2913 {
2914         u64 mask = 0x3;
2915         u64 val64;
2916         int i;
2917         for(i = 0; i <index; i++)
2918                 mask = mask << 0x2;
2919
2920         if(flag > 0)
2921         {
2922                 *counter = *counter + 1;
2923                 val64 = *regs_stat & mask;
2924                 val64 = val64 >> (index * 0x2);
2925                 val64 = val64 + 1;
2926                 if(val64 == 3)
2927                 {
2928                         switch(type)
2929                         {
2930                         case 1:
2931                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2932                                           "service. Excessive temperatures may "
2933                                           "result in premature transceiver "
2934                                           "failure \n");
2935                         break;
2936                         case 2:
2937                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2938                                           "service Excessive bias currents may "
2939                                           "indicate imminent laser diode "
2940                                           "failure \n");
2941                         break;
2942                         case 3:
2943                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2944                                           "service Excessive laser output "
2945                                           "power may saturate far-end "
2946                                           "receiver\n");
2947                         break;
2948                         default:
2949                                 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
2950                                           "type \n");
2951                         }
2952                         val64 = 0x0;
2953                 }
2954                 val64 = val64 << (index * 0x2);
2955                 *regs_stat = (*regs_stat & (~mask)) | (val64);
2956
2957         } else {
2958                 *regs_stat = *regs_stat & (~mask);
2959         }
2960 }
2961
2962 /**
2963  *  s2io_updt_xpak_counter - Function to update the xpak counters
2964  *  @dev         : pointer to net_device struct
2965  *  Description:
2966  *  This function updates the status of the xpak counters value
2967  *  NONE
2968  */
2969 static void s2io_updt_xpak_counter(struct net_device *dev)
2970 {
2971         u16 flag  = 0x0;
2972         u16 type  = 0x0;
2973         u16 val16 = 0x0;
2974         u64 val64 = 0x0;
2975         u64 addr  = 0x0;
2976
2977         struct s2io_nic *sp = dev->priv;
2978         struct stat_block *stat_info = sp->mac_control.stats_info;
2979
        /* A read of all-ones or all-zeroes from PMA register 0 means the
         * MDIO slave did not respond; bail out without touching counters. */
2980         /* Check the communication with the MDIO slave */
2981         addr = 0x0000;
2982         val64 = 0x0;
2983         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
2984         if((val64 == 0xFFFF) || (val64 == 0x0000))
2985         {
2986                 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
2987                           "Returned %llx\n", (unsigned long long)val64);
2988                 return;
2989         }
2990
2991         /* Check for the expected value of 0x2040 at PMA address 0x0000 */
2992         if(val64 != 0x2040)
2993         {
2994                 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
2995                 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
2996                           (unsigned long long)val64);
2997                 return;
2998         }
2999
        /* Writing val16 (0) to 0xA100 triggers the DOM snapshot load;
         * the follow-up read result is intentionally unused here. */
3000         /* Loading the DOM register to MDIO register */
3001         addr = 0xA100;
3002         s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3003         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3004
        /* Alarm flag register: bits 7/6 = temp high/low, 3/2 = laser bias
         * high/low, 1/0 = laser output power high/low (per usage below). */
3005         /* Reading the Alarm flags */
3006         addr = 0xA070;
3007         val64 = 0x0;
3008         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3009
3010         flag = CHECKBIT(val64, 0x7);
3011         type = 1;
3012         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3013                                 &stat_info->xpak_stat.xpak_regs_stat,
3014                                 0x0, flag, type);
3015
3016         if(CHECKBIT(val64, 0x6))
3017                 stat_info->xpak_stat.alarm_transceiver_temp_low++;
3018
3019         flag = CHECKBIT(val64, 0x3);
3020         type = 2;
3021         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3022                                 &stat_info->xpak_stat.xpak_regs_stat,
3023                                 0x2, flag, type);
3024
3025         if(CHECKBIT(val64, 0x2))
3026                 stat_info->xpak_stat.alarm_laser_bias_current_low++;
3027
3028         flag = CHECKBIT(val64, 0x1);
3029         type = 3;
3030         s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3031                                 &stat_info->xpak_stat.xpak_regs_stat,
3032                                 0x4, flag, type);
3033
3034         if(CHECKBIT(val64, 0x0))
3035                 stat_info->xpak_stat.alarm_laser_output_power_low++;
3036
        /* Warning flags use the same bit layout as the alarm register but
         * only bump simple counters (no persistence tracking). */
3037         /* Reading the Warning flags */
3038         addr = 0xA074;
3039         val64 = 0x0;
3040         val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3041
3042         if(CHECKBIT(val64, 0x7))
3043                 stat_info->xpak_stat.warn_transceiver_temp_high++;
3044
3045         if(CHECKBIT(val64, 0x6))
3046                 stat_info->xpak_stat.warn_transceiver_temp_low++;
3047
3048         if(CHECKBIT(val64, 0x3))
3049                 stat_info->xpak_stat.warn_laser_bias_current_high++;
3050
3051         if(CHECKBIT(val64, 0x2))
3052                 stat_info->xpak_stat.warn_laser_bias_current_low++;
3053
3054         if(CHECKBIT(val64, 0x1))
3055                 stat_info->xpak_stat.warn_laser_output_power_high++;
3056
3057         if(CHECKBIT(val64, 0x0))
3058                 stat_info->xpak_stat.warn_laser_output_power_low++;
3059 }
3060
3061 /**
3062  *  alarm_intr_handler - Alarm Interrupt handler
3063  *  @nic: device private variable
3064  *  Description: If the interrupt was neither because of Rx packet or Tx
3065  *  complete, this function is called. If the interrupt was to indicate
3066  *  a loss of link, the OSM link status handler is invoked for any other
3067  *  alarm interrupt the block that raised the interrupt is displayed
3068  *  and a H/W reset is issued.
3069  *  Return Value:
3070  *  NONE
3071 */
3072
3073 static void alarm_intr_handler(struct s2io_nic *nic)
3074 {
3075         struct net_device *dev = (struct net_device *) nic->dev;
3076         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3077         register u64 val64 = 0, err_reg = 0;
3078         u64 cnt;
3079         int i;
        /* Ignore alarms raised while the card is administratively down. */
3080         if (atomic_read(&nic->card_state) == CARD_DOWN)
3081                 return;
        /* ring_full_cnt is recomputed from the hardware bump counters
         * below, so start from zero on every invocation. */
3082         nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
3083         /* Handling the XPAK counters update */
3084         if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
3085                 /* waiting for an hour */
3086                 nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
3087         } else {
3088                 s2io_updt_xpak_counter(dev);
3089                 /* reset the count to zero */
3090                 nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
3091         }
3092
        /* Read-then-writeback clears the latched error bits (write-1-to-clear
         * pattern used for all the error registers below). */
3093         /* Handling link status change error Intr */
3094         if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
3095                 err_reg = readq(&bar0->mac_rmac_err_reg);
3096                 writeq(err_reg, &bar0->mac_rmac_err_reg);
3097                 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
3098                         schedule_work(&nic->set_link_task);
3099                 }
3100         }
3101
3102         /* Handling Ecc errors */
3103         val64 = readq(&bar0->mc_err_reg);
3104         writeq(val64, &bar0->mc_err_reg);
3105         if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
3106                 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
3107                         nic->mac_control.stats_info->sw_stat.
3108                                 double_ecc_errs++;
3109                         DBG_PRINT(INIT_DBG, "%s: Device indicates ",
3110                                   dev->name);
3111                         DBG_PRINT(INIT_DBG, "double ECC error!!\n");
3112                         if (nic->device_type != XFRAME_II_DEVICE) {
3113                                 /* Reset XframeI only if critical error */
3114                                 if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
3115                                              MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
3116                                         netif_stop_queue(dev);
3117                                         schedule_work(&nic->rst_timer_task);
3118                                         nic->mac_control.stats_info->sw_stat.
3119                                                         soft_reset_cnt++;
3120                                 }
3121                         }
3122                 } else {
                        /* Single-bit ECC errors are corrected by hardware;
                         * just account for them. */
3123                         nic->mac_control.stats_info->sw_stat.
3124                                 single_ecc_errs++;
3125                 }
3126         }
3127
3128         /* In case of a serious error, the device will be Reset. */
3129         val64 = readq(&bar0->serr_source);
3130         if (val64 & SERR_SOURCE_ANY) {
3131                 nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
3132                 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
3133                 DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
3134                           (unsigned long long)val64);
3135                 netif_stop_queue(dev);
3136                 schedule_work(&nic->rst_timer_task);
3137                 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3138         }
3139
3140         /*
3141          * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
3142          * Error occurs, the adapter will be recycled by disabling the
3143          * adapter enable bit and enabling it again after the device
3144          * becomes Quiescent.
3145          */
3146         val64 = readq(&bar0->pcc_err_reg);
3147         writeq(val64, &bar0->pcc_err_reg);
3148         if (val64 & PCC_FB_ECC_DB_ERR) {
3149                 u64 ac = readq(&bar0->adapter_control);
3150                 ac &= ~(ADAPTER_CNTL_EN);
3151                 writeq(ac, &bar0->adapter_control);
                /* Read-back flushes the disable before re-enable is
                 * scheduled via the link task. */
3152                 ac = readq(&bar0->adapter_control);
3153                 schedule_work(&nic->set_link_task);
3154         }
3155         /* Check for data parity error */
3156         val64 = readq(&bar0->pic_int_status);
3157         if (val64 & PIC_INT_GPIO) {
3158                 val64 = readq(&bar0->gpio_int_reg);
3159                 if (val64 & GPIO_INT_REG_DP_ERR_INT) {
3160                         nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
3161                         schedule_work(&nic->rst_timer_task);
3162                         nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3163                 }
3164         }
3165
        /* Each bump counter register packs four 16-bit per-ring counts;
         * extract each field and accumulate into ring_full_cnt. */
3166         /* Check for ring full counter */
3167         if (nic->device_type & XFRAME_II_DEVICE) {
3168                 val64 = readq(&bar0->ring_bump_counter1);
3169                 for (i=0; i<4; i++) {
3170                         cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3171                         cnt >>= 64 - ((i+1)*16);
3172                         nic->mac_control.stats_info->sw_stat.ring_full_cnt
3173                                 += cnt;
3174                 }
3175
3176                 val64 = readq(&bar0->ring_bump_counter2);
3177                 for (i=0; i<4; i++) {
3178                         cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3179                         cnt >>= 64 - ((i+1)*16);
3180                         nic->mac_control.stats_info->sw_stat.ring_full_cnt
3181                                 += cnt;
3182                 }
3183         }
3184
3185         /* Other type of interrupts are not being handled now,  TODO */
3186 }
3187
3188 /**
3189  *  wait_for_cmd_complete - waits for a command to complete.
3190  *  @sp : private member of the device structure, which is a pointer to the
3191  *  s2io_nic structure.
3192  *  Description: Function that waits for a command to Write into RMAC
3193  *  ADDR DATA registers to be completed and returns either success or
3194  *  error depending on whether the command was complete or not.
3195  *  Return value:
3196  *   SUCCESS on success and FAILURE on failure.
3197  */
3198
3199 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit)
3200 {
3201         int ret = FAILURE, cnt = 0;
3202         u64 val64;
3203
3204         while (TRUE) {
3205                 val64 = readq(addr);
3206                 if (!(val64 & busy_bit)) {
3207                         ret = SUCCESS;
3208                         break;
3209                 }
3210
3211                 if(in_interrupt())
3212                         mdelay(50);
3213                 else
3214                         msleep(50);
3215
3216                 if (cnt++ > 10)
3217                         break;
3218         }
3219         return ret;
3220 }
3221 /*
3222  * check_pci_device_id - Checks if the device id is supported
3223  * @id : device id
3224  * Description: Function to check if the pci device id is supported by driver.
3225  * Return value: Actual device id if supported else PCI_ANY_ID
3226  */
3227 static u16 check_pci_device_id(u16 id)
3228 {
3229         switch (id) {
3230         case PCI_DEVICE_ID_HERC_WIN:
3231         case PCI_DEVICE_ID_HERC_UNI:
3232                 return XFRAME_II_DEVICE;
3233         case PCI_DEVICE_ID_S2IO_UNI:
3234         case PCI_DEVICE_ID_S2IO_WIN:
3235                 return XFRAME_I_DEVICE;
3236         default:
3237                 return PCI_ANY_ID;
3238         }
3239 }
3240
3241 /**
3242  *  s2io_reset - Resets the card.
3243  *  @sp : private member of the device structure.
3244  *  Description: Function to Reset the card. This function then also
3245  *  restores the previously saved PCI configuration space registers as
3246  *  the card reset also resets the configuration space.
3247  *  Return value:
3248  *  void.
3249  */
3250
3251 static void s2io_reset(struct s2io_nic * sp)
3252 {
3253         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3254         u64 val64;
3255         u16 subid, pci_cmd;
3256         int i;
3257         u16 val16;
3258         DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3259                         __FUNCTION__, sp->dev->name);
3260
3261         /* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3262         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3263
        /* Xframe II: prefer a PME-based reset (D3hot -> D0 transition);
         * fall back to the sw_reset register if the transition fails. */
3264         if (sp->device_type == XFRAME_II_DEVICE) {
3265                 int ret;
3266                 ret = pci_set_power_state(sp->pdev, 3);
3267                 if (!ret)
3268                         ret = pci_set_power_state(sp->pdev, 0);
3269                 else {
3270                         DBG_PRINT(ERR_DBG,"%s PME based SW_Reset failed!\n",
3271                                         __FUNCTION__);
3272                         goto old_way;
3273                 }
3274                 msleep(20);
3275                 goto new_way;
3276         }
3277 old_way:
3278         val64 = SW_RESET_ALL;
3279         writeq(val64, &bar0->sw_reset);
3280 new_way:
        /* CX4 transceivers need extra settle time after reset. */
3281         if (strstr(sp->product_name, "CX4")) {
3282                 msleep(750);
3283         }
3284         msleep(250);
        /* Retry config-space restore until the device id register (offset
         * 0x2) reads back a supported id, i.e. the device is reachable. */
3285         for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3286
3287                 /* Restore the PCI state saved during initialization. */
3288                 pci_restore_state(sp->pdev);
3289                 pci_read_config_word(sp->pdev, 0x2, &val16);
3290                 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3291                         break;
3292                 msleep(200);
3293         }
3294
3295         if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3296                 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3297         }
3298
3299         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3300
3301         s2io_init_pci(sp);
3302
3303         /* Set swapper to enable I/O register access */
3304         s2io_set_swapper(sp);
3305
3306         /* Restore the MSIX table entries from local variables */
3307         restore_xmsi_data(sp);
3308
3309         /* Clear certain PCI/PCI-X fields after reset */
3310         if (sp->device_type == XFRAME_II_DEVICE) {
3311                 /* Clear "detected parity error" bit */
3312                 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3313
                /* NOTE(review): 0x68 is presumably the PCI-X ECC status
                 * register in this device's config space -- confirm
                 * against the Xframe II spec. */
3314                 /* Clearing PCIX Ecc status register */
3315                 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3316
3317                 /* Clearing PCI_STATUS error reflected here */
3318                 writeq(BIT(62), &bar0->txpic_int_reg);
3319         }
3320
3321         /* Reset device statistics maintained by OS */
3322         memset(&sp->stats, 0, sizeof (struct net_device_stats));
3323
3324         /* SXE-002: Configure link and activity LED to turn it off */
3325         subid = sp->pdev->subsystem_device;
3326         if (((subid & 0xFF) >= 0x07) &&
3327             (sp->device_type == XFRAME_I_DEVICE)) {
3328                 val64 = readq(&bar0->gpio_control);
3329                 val64 |= 0x0000800000000000ULL;
3330                 writeq(val64, &bar0->gpio_control);
3331                 val64 = 0x0411040400000000ULL;
3332                 writeq(val64, (void __iomem *)bar0 + 0x2700);
3333         }
3334
3335         /*
3336          * Clear spurious ECC interrupts that would have occured on
3337          * XFRAME II cards after reset.
3338          */
3339         if (sp->device_type == XFRAME_II_DEVICE) {
3340                 val64 = readq(&bar0->pcc_err_reg);
3341                 writeq(val64, &bar0->pcc_err_reg);
3342         }
3343
3344         sp->device_enabled_once = FALSE;
3345 }
3346
3347 /**
3348  *  s2io_set_swapper - to set the swapper control on the card
3349  *  @sp : private member of the device structure,
3350  *  pointer to the s2io_nic structure.
3351  *  Description: Function to set the swapper control on the card
3352  *  correctly depending on the 'endianness' of the system.
3353  *  Return value:
3354  *  SUCCESS on success and FAILURE on failure.
3355  */
3356
3357 static int s2io_set_swapper(struct s2io_nic * sp)
3358 {
3359         struct net_device *dev = sp->dev;
3360         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3361         u64 val64, valt, valr;
3362
3363         /*
3364          * Set proper endian settings and verify the same by reading
3365          * the PIF Feed-back register.
3366          */
3367
        /* The feedback register returns the known pattern only when the
         * read path byte/bit swapping is configured correctly. */
3368         val64 = readq(&bar0->pif_rd_swapper_fb);
3369         if (val64 != 0x0123456789ABCDEFULL) {
3370                 int i = 0;
3371                 u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
3372                                 0x8100008181000081ULL,  /* FE=1, SE=0 */
3373                                 0x4200004242000042ULL,  /* FE=0, SE=1 */
3374                                 0};                     /* FE=0, SE=0 */
3375
                /* Try each read-path swapper candidate until the feedback
                 * pattern matches. */
3376                 while(i<4) {
3377                         writeq(value[i], &bar0->swapper_ctrl);
3378                         val64 = readq(&bar0->pif_rd_swapper_fb);
3379                         if (val64 == 0x0123456789ABCDEFULL)
3380                                 break;
3381                         i++;
3382                 }
3383                 if (i == 4) {
3384                         DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3385                                 dev->name);
3386                         DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3387                                 (unsigned long long) val64);
3388                         return FAILURE;
3389                 }
3390                 valr = value[i];
3391         } else {
3392                 valr = readq(&bar0->swapper_ctrl);
3393         }
3394
        /* Now verify the write path by round-tripping a known pattern
         * through the xmsi_address register. */
3395         valt = 0x0123456789ABCDEFULL;
3396         writeq(valt, &bar0->xmsi_address);
3397         val64 = readq(&bar0->xmsi_address);
3398
3399         if(val64 != valt) {
3400                 int i = 0;
3401                 u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
3402                                 0x0081810000818100ULL,  /* FE=1, SE=0 */
3403                                 0x0042420000424200ULL,  /* FE=0, SE=1 */
3404                                 0};                     /* FE=0, SE=0 */
3405
                /* Combine each write-path candidate with the already
                 * validated read-path setting (valr). */
3406                 while(i<4) {
3407                         writeq((value[i] | valr), &bar0->swapper_ctrl);
3408                         writeq(valt, &bar0->xmsi_address);
3409                         val64 = readq(&bar0->xmsi_address);
3410                         if(val64 == valt)
3411                                 break;
3412                         i++;
3413                 }
3414                 if(i == 4) {
3415                         unsigned long long x = val64;
3416                         DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3417                         DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3418                         return FAILURE;
3419                 }
3420         }
        /* Keep only the validated PIF bits (top 16), then program the
         * per-path swapper enables below. */
3421         val64 = readq(&bar0->swapper_ctrl);
3422         val64 &= 0xFFFF000000000000ULL;
3423
3424 #ifdef  __BIG_ENDIAN
3425         /*
3426          * The device by default set to a big endian format, so a
3427          * big endian driver need not set anything.
3428          */
3429         val64 |= (SWAPPER_CTRL_TXP_FE |
3430                  SWAPPER_CTRL_TXP_SE |
3431                  SWAPPER_CTRL_TXD_R_FE |
3432                  SWAPPER_CTRL_TXD_W_FE |
3433                  SWAPPER_CTRL_TXF_R_FE |
3434                  SWAPPER_CTRL_RXD_R_FE |
3435                  SWAPPER_CTRL_RXD_W_FE |
3436                  SWAPPER_CTRL_RXF_W_FE |
3437                  SWAPPER_CTRL_XMSI_FE |
3438                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3439         if (sp->intr_type == INTA)
3440                 val64 |= SWAPPER_CTRL_XMSI_SE;
3441         writeq(val64, &bar0->swapper_ctrl);
3442 #else
3443         /*
3444          * Initially we enable all bits to make it accessible by the
3445          * driver, then we selectively enable only those bits that
3446          * we want to set.
3447          */
3448         val64 |= (SWAPPER_CTRL_TXP_FE |
3449                  SWAPPER_CTRL_TXP_SE |
3450                  SWAPPER_CTRL_TXD_R_FE |
3451                  SWAPPER_CTRL_TXD_R_SE |
3452                  SWAPPER_CTRL_TXD_W_FE |
3453                  SWAPPER_CTRL_TXD_W_SE |
3454                  SWAPPER_CTRL_TXF_R_FE |
3455                  SWAPPER_CTRL_RXD_R_FE |
3456                  SWAPPER_CTRL_RXD_R_SE |
3457                  SWAPPER_CTRL_RXD_W_FE |
3458                  SWAPPER_CTRL_RXD_W_SE |
3459                  SWAPPER_CTRL_RXF_W_FE |
3460                  SWAPPER_CTRL_XMSI_FE |
3461                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3462         if (sp->intr_type == INTA)
3463                 val64 |= SWAPPER_CTRL_XMSI_SE;
3464         writeq(val64, &bar0->swapper_ctrl);
3465 #endif
3466         val64 = readq(&bar0->swapper_ctrl);
3467
3468         /*
3469          * Verifying if endian settings are accurate by reading a
3470          * feedback register.
3471          */
3472         val64 = readq(&bar0->pif_rd_swapper_fb);
3473         if (val64 != 0x0123456789ABCDEFULL) {
3474                 /* Endian settings are incorrect, calls for another dekko. */
3475                 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3476                           dev->name);
3477                 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3478                           (unsigned long long) val64);
3479                 return FAILURE;
3480         }
3481
3482         return SUCCESS;
3483 }
3484
3485 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3486 {
3487         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3488         u64 val64;
3489         int ret = 0, cnt = 0;
3490
3491         do {
3492                 val64 = readq(&bar0->xmsi_access);
3493                 if (!(val64 & BIT(15)))
3494                         break;
3495                 mdelay(1);
3496                 cnt++;
3497         } while(cnt < 5);
3498         if (cnt == 5) {
3499                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3500                 ret = 1;
3501         }
3502
3503         return ret;
3504 }
3505
3506 static void restore_xmsi_data(struct s2io_nic *nic)
3507 {
3508         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3509         u64 val64;
3510         int i;
3511
3512         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3513                 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3514                 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3515                 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3516                 writeq(val64, &bar0->xmsi_access);
3517                 if (wait_for_msix_trans(nic, i)) {
3518                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3519                         continue;
3520                 }
3521         }
3522 }
3523
3524 static void store_xmsi_data(struct s2io_nic *nic)
3525 {
3526         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3527         u64 val64, addr, data;
3528         int i;
3529
3530         /* Store and display */
3531         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3532                 val64 = (BIT(15) | vBIT(i, 26, 6));
3533                 writeq(val64, &bar0->xmsi_access);
3534                 if (wait_for_msix_trans(nic, i)) {
3535                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3536                         continue;
3537                 }
3538                 addr = readq(&bar0->xmsi_address);
3539                 data = readq(&bar0->xmsi_data);
3540                 if (addr && data) {
3541                         nic->msix_info[i].addr = addr;
3542                         nic->msix_info[i].data = data;
3543                 }
3544         }
3545 }
3546
3547 int s2io_enable_msi(struct s2io_nic *nic)
3548 {
3549         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3550         u16 msi_ctrl, msg_val;
3551         struct config_param *config = &nic->config;
3552         struct net_device *dev = nic->dev;
3553         u64 val64, tx_mat, rx_mat;
3554         int i, err;
3555
        /* Clear BIT(1) in pic_control before switching to MSI.
         * NOTE(review): the semantics of this bit are not visible here --
         * confirm against the Xframe register spec. */
3556         val64 = readq(&bar0->pic_control);
3557         val64 &= ~BIT(1);
3558         writeq(val64, &bar0->pic_control);
3559
3560         err = pci_enable_msi(nic->pdev);
3561         if (err) {
3562                 DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
3563                           nic->dev->name);
3564                 return err;
3565         }
3566
3567         /*
3568          * Enable MSI and use MSI-1 in stead of the standard MSI-0
3569          * for interrupt handling.
3570          */
        /* NOTE(review): 0x4c / 0x42 are raw PCI config offsets, presumably
         * the MSI message-data and MSI-control fields of this device's
         * capability structure -- verify against the config layout. */
3571         pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3572         msg_val ^= 0x1;
3573         pci_write_config_word(nic->pdev, 0x4c, msg_val);
3574         pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3575
3576         pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
3577         msi_ctrl |= 0x10;
3578         pci_write_config_word(nic->pdev, 0x42, msi_ctrl);
3579
3580         /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
3581         tx_mat = readq(&bar0->tx_mat0_n[0]);
3582         for (i=0; i<config->tx_fifo_num; i++) {
3583                 tx_mat |= TX_MAT_SET(i, 1);
3584         }
3585         writeq(tx_mat, &bar0->tx_mat0_n[0]);
3586
3587         rx_mat = readq(&bar0->rx_mat);
3588         for (i=0; i<config->rx_ring_num; i++) {
3589                 rx_mat |= RX_MAT_SET(i, 1);
3590         }
3591         writeq(rx_mat, &bar0->rx_mat);
3592
        /* Publish the (possibly re-routed) vector to the net_device. */
3593         dev->irq = nic->pdev->irq;
3594         return 0;
3595 }
3596
3597 static int s2io_enable_msi_x(struct s2io_nic *nic)
3598 {
3599         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3600         u64 tx_mat, rx_mat;
3601         u16 msi_control; /* Temp variable */
3602         int ret, i, j, msix_indx = 1;
3603
3604         nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3605                                GFP_KERNEL);
3606         if (nic->entries == NULL) {
3607                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3608                 return -ENOMEM;
3609         }
3610         memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3611
3612         nic->s2io_entries =
3613                 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3614                                    GFP_KERNEL);
3615         if (nic->s2io_entries == NULL) {
3616                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3617                 kfree(nic->entries);
3618                 return -ENOMEM;
3619         }
3620         memset(nic->s2io_entries, 0,
3621                MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3622
3623         for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3624                 nic->entries[i].entry = i;
3625                 nic->s2io_entries[i].entry = i;
3626                 nic->s2io_entries[i].arg = NULL;
3627                 nic->s2io_entries[i].in_use = 0;
3628         }
3629
3630         tx_mat = readq(&bar0->tx_mat0_n[0]);
3631         for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3632                 tx_mat |= TX_MAT_SET(i, msix_indx);
3633                 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3634                 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3635                 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3636         }
3637         writeq(tx_mat, &bar0->tx_mat0_n[0]);
3638
3639         if (!nic->config.bimodal) {
3640                 rx_mat = readq(&bar0->rx_mat);
3641                 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3642                         rx_mat |= RX_MAT_SET(j, msix_indx);
3643                         nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3644                         nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3645                         nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3646                 }
3647                 writeq(rx_mat, &bar0->rx_mat);
3648         } else {
3649                 tx_mat = readq(&bar0->tx_mat0_n[7]);
3650                 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3651                         tx_mat |= TX_MAT_SET(i, msix_indx);
3652                         nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3653                         nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3654                         nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3655                 }
3656                 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3657         }
3658
3659         nic->avail_msix_vectors = 0;
3660         ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3661         /* We fail init if error or we get less vectors than min required */
3662         if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3663                 nic->avail_msix_vectors = ret;
3664                 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3665         }
3666         if (ret) {
3667                 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3668                 kfree(nic->entries);
3669                 kfree(nic->s2io_entries);
3670                 nic->entries = NULL;
3671                 nic->s2io_entries = NULL;
3672                 nic->avail_msix_vectors = 0;
3673                 return -ENOMEM;
3674         }
3675         if (!nic->avail_msix_vectors)
3676                 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3677
3678         /*
3679          * To enable MSI-X, MSI also needs to be enabled, due to a bug
3680          * in the herc NIC. (Temp change, needs to be removed later)
3681          */
3682         pci_read_config_word(nic->pdev, 0x42, &msi_control);
3683         msi_control |= 0x1; /* Enable MSI */
3684         pci_write_config_word(nic->pdev, 0x42, msi_control);
3685
3686         return 0;
3687 }
3688
3689 /* ********************************************************* *
3690  * Functions defined below concern the OS part of the driver *
3691  * ********************************************************* */
3692
3693 /**
3694  *  s2io_open - open entry point of the driver
3695  *  @dev : pointer to the device structure.
3696  *  Description:
3697  *  This function is the open entry point of the driver. It mainly calls a
3698  *  function to allocate Rx buffers and inserts them into the buffer
3699  *  descriptors and then enables the Rx part of the NIC.
3700  *  Return value:
3701  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3702  *   file on failure.
3703  */
3704
3705 static int s2io_open(struct net_device *dev)
3706 {
3707         struct s2io_nic *sp = dev->priv;
3708         int err = 0;
3709
3710         /*
3711          * Make sure you have link off by default every time
3712          * Nic is initialized
3713          */
3714         netif_carrier_off(dev);
3715         sp->last_link_state = 0;
3716
3717         /* Initialize H/W and enable interrupts */
3718         err = s2io_card_up(sp);
3719         if (err) {
3720                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3721                           dev->name);
3722                 goto hw_init_failed;
3723         }
3724
3725         if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3726                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3727                 s2io_card_down(sp);
3728                 err = -ENODEV;
3729                 goto hw_init_failed;
3730         }
3731
3732         netif_start_queue(dev);
3733         return 0;
3734
3735 hw_init_failed:
3736         if (sp->intr_type == MSI_X) {
3737                 if (sp->entries)
3738                         kfree(sp->entries);
3739                 if (sp->s2io_entries)
3740                         kfree(sp->s2io_entries);
3741         }
3742         return err;
3743 }
3744
3745 /**
3746  *  s2io_close -close entry point of the driver
3747  *  @dev : device pointer.
3748  *  Description:
3749  *  This is the stop entry point of the driver. It needs to undo exactly
3750  *  whatever was done by the open entry point,thus it's usually referred to
3751  *  as the close function.Among other things this function mainly stops the
3752  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3753  *  Return value:
3754  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3755  *  file on failure.
3756  */
3757
3758 static int s2io_close(struct net_device *dev)
3759 {
3760         struct s2io_nic *sp = dev->priv;
3761
3762         flush_scheduled_work();
3763         netif_stop_queue(dev);
3764         /* Reset card, kill tasklet and free Tx and Rx buffers. */
3765         s2io_card_down(sp);
3766
3767         sp->device_close_flag = TRUE;   /* Device is shut down. */
3768         return 0;
3769 }
3770
3771 /**
 *  s2io_xmit - Tx entry point of the driver
3773  *  @skb : the socket buffer containing the Tx data.
3774  *  @dev : device pointer.
3775  *  Description :
3776  *  This function is the Tx entry point of the driver. S2IO NIC supports
3777  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
 *  NOTE: when the device can't queue the pkt, just the trans_start variable
 *  will not be updated.
3780  *  Return value:
3781  *  0 on success & 1 on failure.
3782  */
3783
/*
 * Build a TxD descriptor list for @skb (applying LSO/UFO and checksum
 * offload flags as needed), DMA-map the linear part and every page
 * fragment, then post the list to the FIFO selected by VLAN priority.
 * Runs entirely under sp->tx_lock.
 */
static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags;
	u16 vlan_tag = 0;
	int vlan_priority = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	int offload_type;

	mac_control = &sp->mac_control;
	config = &sp->config;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
	spin_lock_irqsave(&sp->tx_lock, flags);
	/* Card is being reset: silently drop the packet. */
	if (atomic_read(&sp->card_state) == CARD_DOWN) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		dev_kfree_skb(skb);
		return 0;
	}

	queue = 0;

	/* Get Fifo number to Transmit based on vlan priority */
	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		/* Top 3 bits of the VLAN tag carry the 802.1p priority. */
		vlan_priority = vlan_tag >> 13;
		queue = config->fifo_mapping[vlan_priority];
	}

	put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
	get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
	txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
		list_virt_addr;

	queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (txdp->Host_Control ||
		   ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		netif_stop_queue(dev);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		return 0;
	}

	/* A buffer with no data will be dropped */
	if (!skb->len) {
		DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&sp->tx_lock, flags);
		return 0;
	}

	/* TCP segmentation offload: program the MSS into the first TxD. */
	offload_type = s2io_offload_type(skb);
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	/* Hardware checksum offload for IPv4/TCP/UDP. */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |=
		    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
		     TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= config->tx_intr_type;

	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	/* Length of the linear (non-paged) part of the skb. */
	frg_len = skb->len - skb->data_len;
	if (offload_type == SKB_GSO_UDP) {
		int ufo_size;

		/* UDP fragmentation offload: TxD0 carries an 8-byte in-band
		 * header holding the IPv6 fragment id; the real packet data
		 * starts in the next TxD.
		 */
		ufo_size = s2io_udp_mss(skb);
		ufo_size &= ~7;
		txdp->Control_1 |= TXD_UFO_EN;
		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
		sp->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id;
#else
		sp->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
		txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
					sp->ufo_in_band_v,
					sizeof(u64), PCI_DMA_TODEVICE);
		txdp++;
	}

	/* DMA-map the linear part into the (next) descriptor. */
	txdp->Buffer_Pointer = pci_map_single
	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
	txdp->Host_Control = (unsigned long) skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
	if (offload_type == SKB_GSO_UDP)
		txdp->Control_1 |= TXD_UFO_EN;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!frag->size)
			continue;
		txdp++;
		txdp->Buffer_Pointer = (u64) pci_map_page
		    (sp->pdev, frag->page, frag->page_offset,
		     frag->size, PCI_DMA_TODEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
		if (offload_type == SKB_GSO_UDP)
			txdp->Control_1 |= TXD_UFO_EN;
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	if (offload_type == SKB_GSO_UDP)
		frg_cnt++; /* as Txd0 was used for inband header */

	/* Hand the descriptor list to the hardware FIFO. */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	/* Order the MMIO writes before the lock is released. */
	mmiowb();

	/* Advance the put pointer, wrapping at the end of the fifo. */
	put_off++;
	if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	mac_control->fifos[queue].tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		netif_stop_queue(dev);
	}

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return 0;
}
3945
3946 static void
3947 s2io_alarm_handle(unsigned long data)
3948 {
3949         struct s2io_nic *sp = (struct s2io_nic *)data;
3950
3951         alarm_intr_handler(sp);
3952         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
3953 }
3954
/*
 * Replenish Rx buffers for ring @rng_n if they are running low.
 *
 * Non-LRO mode: refill inline only when the buffer level hits PANIC
 * (and the tasklet is not already active); at LOW level the refill is
 * deferred to the tasklet.  LRO mode: always refill inline.
 *
 * Returns 0 normally, -1 when an inline PANIC refill runs out of memory.
 */
static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
{
	int rxb_size, level;

	if (!sp->lro) {
		rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
		level = rx_buffer_level(sp, rxb_size, rng_n);

		/* NOTE(review): TASKLET_IN_USE appears to test-and-set bit 0
		 * of tasklet_status, which is why both exits below clear that
		 * bit -- confirm against the macro's definition.
		 */
		if ((level == PANIC) && (!TASKLET_IN_USE)) {
			int ret;
			DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
			DBG_PRINT(INTR_DBG, "PANIC levels\n");
			if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
				DBG_PRINT(ERR_DBG, "Out of memory in %s",
					  __FUNCTION__);
				clear_bit(0, (&sp->tasklet_status));
				return -1;
			}
			clear_bit(0, (&sp->tasklet_status));
		} else if (level == LOW)
			tasklet_schedule(&sp->task);

	} else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s:Out of memory", sp->dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
	}
	return 0;
}
3983
3984 static irqreturn_t s2io_msi_handle(int irq, void *dev_id)
3985 {
3986         struct net_device *dev = (struct net_device *) dev_id;
3987         struct s2io_nic *sp = dev->priv;
3988         int i;
3989         struct mac_info *mac_control;
3990         struct config_param *config;
3991
3992         atomic_inc(&sp->isr_cnt);
3993         mac_control = &sp->mac_control;
3994         config = &sp->config;
3995         DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
3996
3997         /* If Intr is because of Rx Traffic */
3998         for (i = 0; i < config->rx_ring_num; i++)
3999                 rx_intr_handler(&mac_control->rings[i]);
4000
4001         /* If Intr is because of Tx Traffic */
4002         for (i = 0; i < config->tx_fifo_num; i++)
4003                 tx_intr_handler(&mac_control->fifos[i]);
4004
4005         /*
4006          * If the Rx buffer count is below the panic threshold then
4007          * reallocate the buffers from the interrupt handler itself,
4008          * else schedule a tasklet to reallocate the buffers.
4009          */
4010         for (i = 0; i < config->rx_ring_num; i++)
4011                 s2io_chk_rx_buffers(sp, i);
4012
4013         atomic_dec(&sp->isr_cnt);
4014         return IRQ_HANDLED;
4015 }
4016
4017 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4018 {
4019         struct ring_info *ring = (struct ring_info *)dev_id;
4020         struct s2io_nic *sp = ring->nic;
4021
4022         atomic_inc(&sp->isr_cnt);
4023
4024         rx_intr_handler(ring);
4025         s2io_chk_rx_buffers(sp, ring->ring_no);
4026
4027         atomic_dec(&sp->isr_cnt);
4028         return IRQ_HANDLED;
4029 }
4030
4031 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4032 {
4033         struct fifo_info *fifo = (struct fifo_info *)dev_id;
4034         struct s2io_nic *sp = fifo->nic;
4035
4036         atomic_inc(&sp->isr_cnt);
4037         tx_intr_handler(fifo);
4038         atomic_dec(&sp->isr_cnt);
4039         return IRQ_HANDLED;
4040 }
/*
 * Handle a TXPIC interrupt: decode GPIO link-up/link-down events,
 * update the adapter control/LED state, and flip the GPIO interrupt
 * masks so the opposite link transition becomes interrupt-driven.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |=  GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		}
		else if (val64 & GPIO_INT_REG_LINK_UP) {
			/* NOTE(review): the adapter_status read result is
			 * discarded -- presumably a deliberate register read
			 * (e.g. to latch state); confirm before removing.
			 */
			val64 = readq(&bar0->adapter_status);
				/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		}else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			/* Link is down so unmask link-up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);
		}
	}
	/* Final read of gpio_int_mask; value unused -- likely a posted-write
	 * flush. NOTE(review): confirm intent before cleaning up.
	 */
	val64 = readq(&bar0->gpio_int_mask);
}
4096
4097 /**
4098  *  s2io_isr - ISR handler of the device .
4099  *  @irq: the irq of the device.
4100  *  @dev_id: a void pointer to the dev structure of the NIC.
4101  *  Description:  This function is the ISR handler of the device. It
4102  *  identifies the reason for the interrupt and calls the relevant
 *  service routines. As a contingency measure, this ISR allocates the
4104  *  recv buffers, if their numbers are below the panic value which is
4105  *  presently set to 25% of the original number of rcv buffers allocated.
4106  *  Return value:
4107  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4108  *   IRQ_NONE: will be returned if interrupt is not from our device
4109  */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	atomic_inc(&sp->isr_cnt);
	mac_control = &sp->mac_control;
	config = &sp->config;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be;
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 * 4. Error in any functional blocks of the NIC.
	 */
	reason = readq(&bar0->general_int_status);

	if (!reason) {
		/* The interrupt was not raised by us. */
		atomic_dec(&sp->isr_cnt);
		return IRQ_NONE;
	}
	else if (unlikely(reason == S2IO_MINUS_ONE) ) {
		/* All-ones read: the device is likely gone (e.g. hot-unplug
		 * or bus error), so bail out without touching it further.
		 */
		atomic_dec(&sp->isr_cnt);
		return IRQ_NONE;
	}

	if (napi) {
		/* NAPI mode: mask further Rx interrupts and let the poll
		 * routine do the Rx work; if scheduling fails, just ack
		 * the Rx interrupt.
		 */
		if (reason & GEN_INTR_RXTRAFFIC) {
			if ( likely ( netif_rx_schedule_prep(dev)) ) {
				__netif_rx_schedule(dev);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
			}
			else
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
		}
	} else {
		/*
		 * Rx handler is called by default, without checking for the
		 * cause of interrupt.
		 * rx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit get's
		 * cleared and hence a read can be avoided.
		 */
		if (reason & GEN_INTR_RXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

		for (i = 0; i < config->rx_ring_num; i++) {
			rx_intr_handler(&mac_control->rings[i]);
		}
	}

	/*
	 * tx_traffic_int reg is an R1 register, writing all 1's
	 * will ensure that the actual interrupt causing bit get's
	 * cleared and hence a read can be avoided.
	 */
	if (reason & GEN_INTR_TXTRAFFIC)
		writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	if (reason & GEN_INTR_TXPIC)
		s2io_txpic_intr_handle(sp);
	/*
	 * If the Rx buffer count is below the panic threshold then
	 * reallocate the buffers from the interrupt handler itself,
	 * else schedule a tasklet to reallocate the buffers.
	 */
	if (!napi) {
		for (i = 0; i < config->rx_ring_num; i++)
			s2io_chk_rx_buffers(sp, i);
	}

	/* Re-enable all interrupts; the readl flushes the posted write. */
	writeq(0, &bar0->general_int_mask);
	readl(&bar0->general_int_status);

	atomic_dec(&sp->isr_cnt);
	return IRQ_HANDLED;
}
4199
4200 /**
4201  * s2io_updt_stats -
4202  */
4203 static void s2io_updt_stats(struct s2io_nic *sp)
4204 {
4205         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4206         u64 val64;
4207         int cnt = 0;
4208
4209         if (atomic_read(&sp->card_state) == CARD_UP) {
4210                 /* Apprx 30us on a 133 MHz bus */
4211                 val64 = SET_UPDT_CLICKS(10) |
4212                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4213                 writeq(val64, &bar0->stat_cfg);
4214                 do {
4215                         udelay(100);
4216                         val64 = readq(&bar0->stat_cfg);
4217                         if (!(val64 & BIT(0)))
4218                                 break;
4219                         cnt++;
4220                         if (cnt == 5)
4221                                 break; /* Updt failed */
4222                 } while(1);
4223         } else {
4224                 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
4225         }
4226 }
4227
4228 /**
4229  *  s2io_get_stats - Updates the device statistics structure.
4230  *  @dev : pointer to the device structure.
4231  *  Description:
4232  *  This function updates the device statistics structure in the s2io_nic
4233  *  structure and returns a pointer to the same.
4234  *  Return value:
4235  *  pointer to the updated net_device_stats structure.
4236  */
4237
4238 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4239 {
4240         struct s2io_nic *sp = dev->priv;
4241         struct mac_info *mac_control;
4242         struct config_param *config;
4243
4244
4245         mac_control = &sp->mac_control;
4246         config = &sp->config;
4247
4248         /* Configure Stats for immediate updt */
4249         s2io_updt_stats(sp);
4250
4251         sp->stats.tx_packets =
4252                 le32_to_cpu(mac_control->stats_info->tmac_frms);
4253         sp->stats.tx_errors =
4254                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4255         sp->stats.rx_errors =
4256                 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4257         sp->stats.multicast =
4258                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4259         sp->stats.rx_length_errors =
4260                 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4261
4262         return (&sp->stats);
4263 }
4264
4265 /**
4266  *  s2io_set_multicast - entry point for multicast address enable/disable.
4267  *  @dev : pointer to the device structure
4268  *  Description:
4269  *  This function is a driver entry point which gets called by the kernel
4270  *  whenever multicast addresses must be enabled/disabled. This also gets
 *  called to set/reset promiscuous mode. Depending on the device flag, we
4272  *  determine, if multicast address must be enabled or if promiscuous mode
4273  *  is to be disabled etc.
4274  *  Return value:
4275  *  void.
4276  */
4277
4278 static void s2io_set_multicast(struct net_device *dev)
4279 {
4280         int i, j, prev_cnt;
4281         struct dev_mc_list *mclist;
4282         struct s2io_nic *sp = dev->priv;
4283         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4284         u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4285             0xfeffffffffffULL;
4286         u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
4287         void __iomem *add;
4288
4289         if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4290                 /*  Enable all Multicast addresses */
4291                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4292                        &bar0->rmac_addr_data0_mem);
4293                 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4294                        &bar0->rmac_addr_data1_mem);
4295                 val64 = RMAC_ADDR_CMD_MEM_WE |
4296                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4297                     RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
4298                 writeq(val64, &bar0->rmac_addr_cmd_mem);
4299                 /* Wait till command completes */
4300                 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4301                                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
4302
4303                 sp->m_cast_flg = 1;
4304                 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
4305         } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4306                 /*  Disable all Multicast addresses */
4307                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4308                        &bar0->rmac_addr_data0_mem);
4309                 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4310                        &bar0->rmac_addr_data1_mem);
4311                 val64 = RMAC_ADDR_CMD_MEM_WE |
4312                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4313                     RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4314                 writeq(val64, &bar0->rmac_addr_cmd_mem);
4315                 /* Wait till command completes */
4316                 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4317                                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
4318
4319                 sp->m_cast_flg = 0;
4320                 sp->all_multi_pos = 0;
4321         }
4322
4323         if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4324                 /*  Put the NIC into promiscuous mode */
4325                 add = &bar0->mac_cfg;
4326                 val64 = readq(&bar0->mac_cfg);
4327                 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4328
4329                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4330                 writel((u32) val64, add);
4331                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4332                 writel((u32) (val64 >> 32), (add + 4));
4333
4334                 val64 = readq(&bar0->mac_cfg);
4335                 sp->promisc_flg = 1;
4336                 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4337                           dev->name);
4338         } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4339                 /*  Remove the NIC from promiscuous mode */
4340                 add = &bar0->mac_cfg;
4341                 val64 = readq(&bar0->mac_cfg);
4342                 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4343
4344                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4345                 writel((u32) val64, add);
4346                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4347                 writel((u32) (val64 >> 32), (add + 4));
4348
4349                 val64 = readq(&bar0->mac_cfg);
4350                 sp->promisc_flg = 0;
4351                 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
4352                           dev->name);
4353         }
4354
4355         /*  Update individual M_CAST address list */
4356         if ((!sp->m_cast_flg) && dev->mc_count) {
4357                 if (dev->mc_count >
4358                     (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4359                         DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4360                                   dev->name);
4361                         DBG_PRINT(ERR_DBG, "can be added, please enable ");
4362                         DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4363                         return;
4364                 }
4365
4366                 prev_cnt = sp->mc_addr_count;
4367                 sp->mc_addr_count = dev->mc_count;
4368
4369                 /* Clear out the previous list of Mc in the H/W. */
4370                 for (i = 0; i < prev_cnt; i++) {
4371                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4372                                &bar0->rmac_addr_data0_mem);
4373                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4374                                 &bar0->rmac_addr_data1_mem);
4375                         val64 = RMAC_ADDR_CMD_MEM_WE |
4376                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4377                             RMAC_ADDR_CMD_MEM_OFFSET
4378                             (MAC_MC_ADDR_START_OFFSET + i);
4379                         writeq(val64, &bar0->rmac_addr_cmd_mem);
4380
4381                         /* Wait for command completes */
4382                         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4383                                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
4384                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
4385                                           dev->name);
4386                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4387                                 return;
4388                         }
4389                 }
4390
4391                 /* Create the new Rx filter list and update the same in H/W. */
4392                 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4393                      i++, mclist = mclist->next) {
4394                         memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
4395                                ETH_ALEN);
4396                         mac_addr = 0;
4397                         for (j = 0; j < ETH_ALEN; j++) {
4398                                 mac_addr |= mclist->dmi_addr[j];
4399                                 mac_addr <<= 8;
4400                         }
4401                         mac_addr >>= 8;
4402                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4403                                &bar0->rmac_addr_data0_mem);
4404                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4405                                 &bar0->rmac_addr_data1_mem);
4406                         val64 = RMAC_ADDR_CMD_MEM_WE |
4407                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4408                             RMAC_ADDR_CMD_MEM_OFFSET
4409                             (i + MAC_MC_ADDR_START_OFFSET);
4410                         writeq(val64, &bar0->rmac_addr_cmd_mem);
4411
4412                         /* Wait for command completes */
4413                         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4414                                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
4415                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
4416                                           dev->name);
4417                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4418                                 return;
4419                         }
4420                 }
4421         }
4422 }
4423
4424 /**
4425  *  s2io_set_mac_addr - Programs the Xframe mac address
4426  *  @dev : pointer to the device structure.
4427  *  @addr: a uchar pointer to the new mac address which is to be set.
4428  *  Description : This procedure will program the Xframe to receive
4429  *  frames with new Mac Address
4430  *  Return value: SUCCESS on success and an appropriate (-)ve integer
4431  *  as defined in errno.h file on failure.
4432  */
4433
4434 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4435 {
4436         struct s2io_nic *sp = dev->priv;
4437         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4438         register u64 val64, mac_addr = 0;
4439         int i;
4440
4441         /*
4442          * Set the new MAC address as the new unicast filter and reflect this
4443          * change on the device address registered with the OS. It will be
4444          * at offset 0.
4445          */
4446         for (i = 0; i < ETH_ALEN; i++) {
4447                 mac_addr <<= 8;
4448                 mac_addr |= addr[i];
4449         }
4450
4451         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4452                &bar0->rmac_addr_data0_mem);
4453
4454         val64 =
4455             RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4456             RMAC_ADDR_CMD_MEM_OFFSET(0);
4457         writeq(val64, &bar0->rmac_addr_cmd_mem);
4458         /* Wait till command completes */
4459         if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4460                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
4461                 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4462                 return FAILURE;
4463         }
4464
4465         return SUCCESS;
4466 }
4467
4468 /**
4469  * s2io_ethtool_sset - Sets different link parameters.
4470  * @sp : private member of the device structure, which is a pointer to the  * s2io_nic structure.
4471  * @info: pointer to the structure with parameters given by ethtool to set
4472  * link information.
4473  * Description:
4474  * The function sets different link parameters provided by the user onto
4475  * the NIC.
4476  * Return value:
4477  * 0 on success.
4478 */
4479
4480 static int s2io_ethtool_sset(struct net_device *dev,
4481                              struct ethtool_cmd *info)
4482 {
4483         struct s2io_nic *sp = dev->priv;
4484         if ((info->autoneg == AUTONEG_ENABLE) ||
4485             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4486                 return -EINVAL;
4487         else {
4488                 s2io_close(sp->dev);
4489                 s2io_open(sp->dev);
4490         }
4491
4492         return 0;
4493 }
4494
4495 /**
4496  * s2io_ethtol_gset - Return link specific information.
4497  * @sp : private member of the device structure, pointer to the
4498  *      s2io_nic structure.
4499  * @info : pointer to the structure with parameters given by ethtool
4500  * to return link information.
4501  * Description:
4502  * Returns link specific information like speed, duplex etc.. to ethtool.
4503  * Return value :
4504  * return 0 on success.
4505  */
4506
4507 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4508 {
4509         struct s2io_nic *sp = dev->priv;
4510         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4511         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4512         info->port = PORT_FIBRE;
4513         /* info->transceiver?? TODO */
4514
4515         if (netif_carrier_ok(sp->dev)) {
4516                 info->speed = 10000;
4517                 info->duplex = DUPLEX_FULL;
4518         } else {
4519                 info->speed = -1;
4520                 info->duplex = -1;
4521         }
4522
4523         info->autoneg = AUTONEG_DISABLE;
4524         return 0;
4525 }
4526
4527 /**
4528  * s2io_ethtool_gdrvinfo - Returns driver specific information.
4529  * @sp : private member of the device structure, which is a pointer to the
4530  * s2io_nic structure.
4531  * @info : pointer to the structure with parameters given by ethtool to
4532  * return driver information.
4533  * Description:
4534  * Returns driver specefic information like name, version etc.. to ethtool.
4535  * Return value:
4536  *  void
4537  */
4538
4539 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4540                                   struct ethtool_drvinfo *info)
4541 {
4542         struct s2io_nic *sp = dev->priv;
4543
4544         strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4545         strncpy(info->version, s2io_driver_version, sizeof(info->version));
4546         strncpy(info->fw_version, "", sizeof(info->fw_version));
4547         strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
4548         info->regdump_len = XENA_REG_SPACE;
4549         info->eedump_len = XENA_EEPROM_SPACE;
4550         info->testinfo_len = S2IO_TEST_LEN;
4551         info->n_stats = S2IO_STAT_LEN;
4552 }
4553
4554 /**
4555  *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
4556  *  @sp: private member of the device structure, which is a pointer to the
4557  *  s2io_nic structure.
4558  *  @regs : pointer to the structure with parameters given by ethtool for
4559  *  dumping the registers.
4560  *  @reg_space: The input argumnet into which all the registers are dumped.
4561  *  Description:
4562  *  Dumps the entire register space of xFrame NIC into the user given
4563  *  buffer area.
4564  * Return value :
4565  * void .
4566 */
4567
4568 static void s2io_ethtool_gregs(struct net_device *dev,
4569                                struct ethtool_regs *regs, void *space)
4570 {
4571         int i;
4572         u64 reg;
4573         u8 *reg_space = (u8 *) space;
4574         struct s2io_nic *sp = dev->priv;
4575
4576         regs->len = XENA_REG_SPACE;
4577         regs->version = sp->pdev->subsystem_device;
4578
4579         for (i = 0; i < regs->len; i += 8) {
4580                 reg = readq(sp->bar0 + i);
4581                 memcpy((reg_space + i), &reg, 8);
4582         }
4583 }
4584
4585 /**
4586  *  s2io_phy_id  - timer function that alternates adapter LED.
4587  *  @data : address of the private member of the device structure, which
4588  *  is a pointer to the s2io_nic structure, provided as an u32.
4589  * Description: This is actually the timer function that alternates the
4590  * adapter LED bit of the adapter control bit to set/reset every time on
4591  * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
4592  *  once every second.
4593 */
4594 static void s2io_phy_id(unsigned long data)
4595 {
4596         struct s2io_nic *sp = (struct s2io_nic *) data;
4597         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4598         u64 val64 = 0;
4599         u16 subid;
4600
4601         subid = sp->pdev->subsystem_device;
4602         if ((sp->device_type == XFRAME_II_DEVICE) ||
4603                    ((subid & 0xFF) >= 0x07)) {
4604                 val64 = readq(&bar0->gpio_control);
4605                 val64 ^= GPIO_CTRL_GPIO_0;
4606                 writeq(val64, &bar0->gpio_control);
4607         } else {
4608                 val64 = readq(&bar0->adapter_control);
4609                 val64 ^= ADAPTER_LED_ON;
4610                 writeq(val64, &bar0->adapter_control);
4611         }
4612
4613         mod_timer(&sp->id_timer, jiffies + HZ / 2);
4614 }
4615
4616 /**
4617  * s2io_ethtool_idnic - To physically identify the nic on the system.
4618  * @sp : private member of the device structure, which is a pointer to the
4619  * s2io_nic structure.
4620  * @id : pointer to the structure with identification parameters given by
4621  * ethtool.
4622  * Description: Used to physically identify the NIC on the system.
4623  * The Link LED will blink for a time specified by the user for
4624  * identification.
4625  * NOTE: The Link has to be Up to be able to blink the LED. Hence
4626  * identification is possible only if it's link is up.
4627  * Return value:
4628  * int , returns 0 on success
4629  */
4630
4631 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
4632 {
4633         u64 val64 = 0, last_gpio_ctrl_val;
4634         struct s2io_nic *sp = dev->priv;
4635         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4636         u16 subid;
4637
4638         subid = sp->pdev->subsystem_device;
4639         last_gpio_ctrl_val = readq(&bar0->gpio_control);
4640         if ((sp->device_type == XFRAME_I_DEVICE) &&
4641                 ((subid & 0xFF) < 0x07)) {
4642                 val64 = readq(&bar0->adapter_control);
4643                 if (!(val64 & ADAPTER_CNTL_EN)) {
4644                         printk(KERN_ERR
4645                                "Adapter Link down, cannot blink LED\n");
4646                         return -EFAULT;
4647                 }
4648         }
4649         if (sp->id_timer.function == NULL) {
4650                 init_timer(&sp->id_timer);
4651                 sp->id_timer.function = s2io_phy_id;
4652                 sp->id_timer.data = (unsigned long) sp;
4653         }
4654         mod_timer(&sp->id_timer, jiffies);
4655         if (data)
4656                 msleep_interruptible(data * HZ);
4657         else
4658                 msleep_interruptible(MAX_FLICKER_TIME);
4659         del_timer_sync(&sp->id_timer);
4660
4661         if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
4662                 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
4663                 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4664         }
4665
4666         return 0;
4667 }
4668
4669 /**
4670  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
4671  * @sp : private member of the device structure, which is a pointer to the
4672  *      s2io_nic structure.
4673  * @ep : pointer to the structure with pause parameters given by ethtool.
4674  * Description:
4675  * Returns the Pause frame generation and reception capability of the NIC.
4676  * Return value:
4677  *  void
4678  */
4679 static void s2io_ethtool_getpause_data(struct net_device *dev,
4680                                        struct ethtool_pauseparam *ep)
4681 {
4682         u64 val64;
4683         struct s2io_nic *sp = dev->priv;
4684         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4685
4686         val64 = readq(&bar0->rmac_pause_cfg);
4687         if (val64 & RMAC_PAUSE_GEN_ENABLE)
4688                 ep->tx_pause = TRUE;
4689         if (val64 & RMAC_PAUSE_RX_ENABLE)
4690                 ep->rx_pause = TRUE;
4691         ep->autoneg = FALSE;
4692 }
4693
4694 /**
4695  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
4696  * @sp : private member of the device structure, which is a pointer to the
4697  *      s2io_nic structure.
4698  * @ep : pointer to the structure with pause parameters given by ethtool.
4699  * Description:
4700  * It can be used to set or reset Pause frame generation or reception
4701  * support of the NIC.
4702  * Return value:
4703  * int, returns 0 on Success
4704  */
4705
4706 static int s2io_ethtool_setpause_data(struct net_device *dev,
4707                                struct ethtool_pauseparam *ep)
4708 {
4709         u64 val64;
4710         struct s2io_nic *sp = dev->priv;
4711         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4712
4713         val64 = readq(&bar0->rmac_pause_cfg);
4714         if (ep->tx_pause)
4715                 val64 |= RMAC_PAUSE_GEN_ENABLE;
4716         else
4717                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
4718         if (ep->rx_pause)
4719                 val64 |= RMAC_PAUSE_RX_ENABLE;
4720         else
4721                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
4722         writeq(val64, &bar0->rmac_pause_cfg);
4723         return 0;
4724 }
4725
4726 /**
4727  * read_eeprom - reads 4 bytes of data from user given offset.
4728  * @sp : private member of the device structure, which is a pointer to the
4729  *      s2io_nic structure.
4730  * @off : offset at which the data must be written
4731  * @data : Its an output parameter where the data read at the given
4732  *      offset is stored.
4733  * Description:
4734  * Will read 4 bytes of data from the user given offset and return the
4735  * read data.
4736  * NOTE: Will allow to read only part of the EEPROM visible through the
4737  *   I2C bus.
4738  * Return value:
4739  *  -1 on failure and 0 on success.
4740  */
4741
4742 #define S2IO_DEV_ID             5
4743 static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
4744 {
4745         int ret = -1;
4746         u32 exit_cnt = 0;
4747         u64 val64;
4748         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4749
4750         if (sp->device_type == XFRAME_I_DEVICE) {
4751                 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4752                     I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
4753                     I2C_CONTROL_CNTL_START;
4754                 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4755
4756                 while (exit_cnt < 5) {
4757                         val64 = readq(&bar0->i2c_control);
4758                         if (I2C_CONTROL_CNTL_END(val64)) {
4759                                 *data = I2C_CONTROL_GET_DATA(val64);
4760                                 ret = 0;
4761                                 break;
4762                         }
4763                         msleep(50);
4764                         exit_cnt++;
4765                 }
4766         }
4767
4768         if (sp->device_type == XFRAME_II_DEVICE) {
4769                 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
4770                         SPI_CONTROL_BYTECNT(0x3) |
4771                         SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
4772                 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4773                 val64 |= SPI_CONTROL_REQ;
4774                 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4775                 while (exit_cnt < 5) {
4776                         val64 = readq(&bar0->spi_control);
4777                         if (val64 & SPI_CONTROL_NACK) {
4778                                 ret = 1;
4779                                 break;
4780                         } else if (val64 & SPI_CONTROL_DONE) {
4781                                 *data = readq(&bar0->spi_data);
4782                                 *data &= 0xffffff;
4783                                 ret = 0;
4784                                 break;
4785                         }
4786                         msleep(50);
4787                         exit_cnt++;
4788                 }
4789         }
4790         return ret;
4791 }
4792
4793 /**
4794  *  write_eeprom - actually writes the relevant part of the data value.
4795  *  @sp : private member of the device structure, which is a pointer to the
4796  *       s2io_nic structure.
4797  *  @off : offset at which the data must be written
4798  *  @data : The data that is to be written
4799  *  @cnt : Number of bytes of the data that are actually to be written into
4800  *  the Eeprom. (max of 3)
4801  * Description:
4802  *  Actually writes the relevant part of the data value into the Eeprom
4803  *  through the I2C bus.
4804  * Return value:
4805  *  0 on success, -1 on failure.
4806  */
4807
4808 static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
4809 {
4810         int exit_cnt = 0, ret = -1;
4811         u64 val64;
4812         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4813
4814         if (sp->device_type == XFRAME_I_DEVICE) {
4815                 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4816                     I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
4817                     I2C_CONTROL_CNTL_START;
4818                 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4819
4820                 while (exit_cnt < 5) {
4821                         val64 = readq(&bar0->i2c_control);
4822                         if (I2C_CONTROL_CNTL_END(val64)) {
4823                                 if (!(val64 & I2C_CONTROL_NACK))
4824                                         ret = 0;
4825                                 break;
4826                         }
4827                         msleep(50);
4828                         exit_cnt++;
4829                 }
4830         }
4831
4832         if (sp->device_type == XFRAME_II_DEVICE) {
4833                 int write_cnt = (cnt == 8) ? 0 : cnt;
4834                 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
4835
4836                 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
4837                         SPI_CONTROL_BYTECNT(write_cnt) |
4838                         SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
4839                 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4840                 val64 |= SPI_CONTROL_REQ;
4841                 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4842                 while (exit_cnt < 5) {
4843                         val64 = readq(&bar0->spi_control);
4844                         if (val64 & SPI_CONTROL_NACK) {
4845                                 ret = 1;
4846                                 break;
4847                         } else if (val64 & SPI_CONTROL_DONE) {
4848                                 ret = 0;
4849                                 break;
4850                         }
4851                         msleep(50);
4852                         exit_cnt++;
4853                 }
4854         }
4855         return ret;
4856 }
4857 static void s2io_vpd_read(struct s2io_nic *nic)
4858 {
4859         u8 *vpd_data;
4860         u8 data;
4861         int i=0, cnt, fail = 0;
4862         int vpd_addr = 0x80;
4863
4864         if (nic->device_type == XFRAME_II_DEVICE) {
4865                 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
4866                 vpd_addr = 0x80;
4867         }
4868         else {
4869                 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
4870                 vpd_addr = 0x50;
4871         }
4872         strcpy(nic->serial_num, "NOT AVAILABLE");
4873
4874         vpd_data = kmalloc(256, GFP_KERNEL);
4875         if (!vpd_data)
4876                 return;
4877
4878         for (i = 0; i < 256; i +=4 ) {
4879                 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
4880                 pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
4881                 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
4882                 for (cnt = 0; cnt <5; cnt++) {
4883                         msleep(2);
4884                         pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
4885                         if (data == 0x80)
4886                                 break;
4887                 }
4888                 if (cnt >= 5) {
4889                         DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
4890                         fail = 1;
4891                         break;
4892                 }
4893                 pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
4894                                       (u32 *)&vpd_data[i]);
4895         }
4896
4897         if(!fail) {
4898                 /* read serial number of adapter */
4899                 for (cnt = 0; cnt < 256; cnt++) {
4900                 if ((vpd_data[cnt] == 'S') &&
4901                         (vpd_data[cnt+1] == 'N') &&
4902                         (vpd_data[cnt+2] < VPD_STRING_LEN)) {
4903                                 memset(nic->serial_num, 0, VPD_STRING_LEN);
4904                                 memcpy(nic->serial_num, &vpd_data[cnt + 3],
4905                                         vpd_data[cnt+2]);
4906                                 break;
4907                         }
4908                 }
4909         }
4910
4911         if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
4912                 memset(nic->product_name, 0, vpd_data[1]);
4913                 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
4914         }
4915         kfree(vpd_data);
4916 }
4917
4918 /**
4919  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
4920  *  @sp : private member of the device structure, which is a pointer to the *       s2io_nic structure.
4921  *  @eeprom : pointer to the user level structure provided by ethtool,
4922  *  containing all relevant information.
4923  *  @data_buf : user defined value to be written into Eeprom.
4924  *  Description: Reads the values stored in the Eeprom at given offset
4925  *  for a given length. Stores these values int the input argument data
4926  *  buffer 'data_buf' and returns these to the caller (ethtool.)
4927  *  Return value:
4928  *  int  0 on success
4929  */
4930
4931 static int s2io_ethtool_geeprom(struct net_device *dev,
4932                          struct ethtool_eeprom *eeprom, u8 * data_buf)
4933 {
4934         u32 i, valid;
4935         u64 data;
4936         struct s2io_nic *sp = dev->priv;
4937
4938         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
4939
4940         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
4941                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
4942
4943         for (i = 0; i < eeprom->len; i += 4) {
4944                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
4945                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
4946                         return -EFAULT;
4947                 }
4948                 valid = INV(data);
4949                 memcpy((data_buf + i), &valid, 4);
4950         }
4951         return 0;
4952 }
4953
4954 /**
4955  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
4956  *  @sp : private member of the device structure, which is a pointer to the
4957  *  s2io_nic structure.
4958  *  @eeprom : pointer to the user level structure provided by ethtool,
4959  *  containing all relevant information.
4960  *  @data_buf ; user defined value to be written into Eeprom.
4961  *  Description:
4962  *  Tries to write the user provided value in the Eeprom, at the offset
4963  *  given by the user.
4964  *  Return value:
4965  *  0 on success, -EFAULT on failure.
4966  */
4967
4968 static int s2io_ethtool_seeprom(struct net_device *dev,
4969                                 struct ethtool_eeprom *eeprom,
4970                                 u8 * data_buf)
4971 {
4972         int len = eeprom->len, cnt = 0;
4973         u64 valid = 0, data;
4974         struct s2io_nic *sp = dev->priv;
4975
4976         if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
4977                 DBG_PRINT(ERR_DBG,
4978                           "ETHTOOL_WRITE_EEPROM Err: Magic value ");
4979                 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
4980                           eeprom->magic);
4981                 return -EFAULT;
4982         }
4983
4984         while (len) {
4985                 data = (u32) data_buf[cnt] & 0x000000FF;
4986                 if (data) {
4987                         valid = (u32) (data << 24);
4988                 } else
4989                         valid = data;
4990
4991                 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
4992                         DBG_PRINT(ERR_DBG,
4993                                   "ETHTOOL_WRITE_EEPROM Err: Cannot ");
4994                         DBG_PRINT(ERR_DBG,
4995                                   "write into the specified offset\n");
4996                         return -EFAULT;
4997                 }
4998                 cnt++;
4999                 len--;
5000         }
5001
5002         return 0;
5003 }
5004
5005 /**
5006  * s2io_register_test - reads and writes into all clock domains.
5007  * @sp : private member of the device structure, which is a pointer to the
5008  * s2io_nic structure.
5009  * @data : variable that returns the result of each of the test conducted b
5010  * by the driver.
5011  * Description:
5012  * Read and write into all clock domains. The NIC has 3 clock domains,
5013  * see that registers in all the three regions are accessible.
5014  * Return value:
5015  * 0 on success.
5016  */
5017
5018 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5019 {
5020         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5021         u64 val64 = 0, exp_val;
5022         int fail = 0;
5023
5024         val64 = readq(&bar0->pif_rd_swapper_fb);
5025         if (val64 != 0x123456789abcdefULL) {
5026                 fail = 1;
5027                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5028         }
5029
5030         val64 = readq(&bar0->rmac_pause_cfg);
5031         if (val64 != 0xc000ffff00000000ULL) {
5032                 fail = 1;
5033                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5034         }
5035
5036         val64 = readq(&bar0->rx_queue_cfg);
5037         if (sp->device_type == XFRAME_II_DEVICE)
5038                 exp_val = 0x0404040404040404ULL;
5039         else
5040                 exp_val = 0x0808080808080808ULL;
5041         if (val64 != exp_val) {
5042                 fail = 1;
5043                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5044         }
5045
5046         val64 = readq(&bar0->xgxs_efifo_cfg);
5047         if (val64 != 0x000000001923141EULL) {
5048                 fail = 1;
5049                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5050         }
5051
5052         val64 = 0x5A5A5A5A5A5A5A5AULL;
5053         writeq(val64, &bar0->xmsi_data);
5054         val64 = readq(&bar0->xmsi_data);
5055         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5056                 fail = 1;
5057                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5058         }
5059
5060         val64 = 0xA5A5A5A5A5A5A5A5ULL;
5061         writeq(val64, &bar0->xmsi_data);
5062         val64 = readq(&bar0->xmsi_data);
5063         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5064                 fail = 1;
5065                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5066         }
5067
5068         *data = fail;
5069         return fail;
5070 }
5071
5072 /**
5073  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5074  * @sp : private member of the device structure, which is a pointer to the
5075  * s2io_nic structure.
5076  * @data:variable that returns the result of each of the test conducted by
5077  * the driver.
5078  * Description:
5079  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5080  * register.
5081  * Return value:
5082  * 0 on success.
5083  */
5084
5085 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5086 {
5087         int fail = 0;
5088         u64 ret_data, org_4F0, org_7F0;
5089         u8 saved_4F0 = 0, saved_7F0 = 0;
5090         struct net_device *dev = sp->dev;
5091
5092         /* Test Write Error at offset 0 */
5093         /* Note that SPI interface allows write access to all areas
5094          * of EEPROM. Hence doing all negative testing only for Xframe I.
5095          */
5096         if (sp->device_type == XFRAME_I_DEVICE)
5097                 if (!write_eeprom(sp, 0, 0, 3))
5098                         fail = 1;
5099
5100         /* Save current values at offsets 0x4F0 and 0x7F0 */
5101         if (!read_eeprom(sp, 0x4F0, &org_4F0))
5102                 saved_4F0 = 1;
5103         if (!read_eeprom(sp, 0x7F0, &org_7F0))
5104                 saved_7F0 = 1;
5105
5106         /* Test Write at offset 4f0 */
5107         if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5108                 fail = 1;
5109         if (read_eeprom(sp, 0x4F0, &ret_data))
5110                 fail = 1;
5111
5112         if (ret_data != 0x012345) {
5113                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5114                         "Data written %llx Data read %llx\n",
5115                         dev->name, (unsigned long long)0x12345,
5116                         (unsigned long long)ret_data);
5117                 fail = 1;
5118         }
5119
5120         /* Reset the EEPROM data go FFFF */
5121         write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5122
5123         /* Test Write Request Error at offset 0x7c */
5124         if (sp->device_type == XFRAME_I_DEVICE)
5125                 if (!write_eeprom(sp, 0x07C, 0, 3))
5126                         fail = 1;
5127
5128         /* Test Write Request at offset 0x7f0 */
5129         if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5130                 fail = 1;
5131         if (read_eeprom(sp, 0x7F0, &ret_data))
5132                 fail = 1;
5133
5134         if (ret_data != 0x012345) {
5135                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5136                         "Data written %llx Data read %llx\n",
5137                         dev->name, (unsigned long long)0x12345,
5138                         (unsigned long long)ret_data);
5139                 fail = 1;
5140         }
5141
5142         /* Reset the EEPROM data go FFFF */
5143         write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5144
5145         if (sp->device_type == XFRAME_I_DEVICE) {
5146                 /* Test Write Error at offset 0x80 */
5147                 if (!write_eeprom(sp, 0x080, 0, 3))
5148                         fail = 1;
5149
5150                 /* Test Write Error at offset 0xfc */
5151                 if (!write_eeprom(sp, 0x0FC, 0, 3))
5152                         fail = 1;
5153
5154                 /* Test Write Error at offset 0x100 */
5155                 if (!write_eeprom(sp, 0x100, 0, 3))
5156                         fail = 1;
5157
5158                 /* Test Write Error at offset 4ec */
5159                 if (!write_eeprom(sp, 0x4EC, 0, 3))
5160                         fail = 1;
5161         }
5162
5163         /* Restore values at offsets 0x4F0 and 0x7F0 */
5164         if (saved_4F0)
5165                 write_eeprom(sp, 0x4F0, org_4F0, 3);
5166         if (saved_7F0)
5167                 write_eeprom(sp, 0x7F0, org_7F0, 3);
5168
5169         *data = fail;
5170         return fail;
5171 }
5172
5173 /**
5174  * s2io_bist_test - invokes the MemBist test of the card .
5175  * @sp : private member of the device structure, which is a pointer to the
5176  * s2io_nic structure.
5177  * @data:variable that returns the result of each of the test conducted by
5178  * the driver.
5179  * Description:
5180  * This invokes the MemBist test of the card. We give around
5181  * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
5183  * Return value:
5184  * 0 on success and -1 on failure.
5185  */
5186
5187 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5188 {
5189         u8 bist = 0;
5190         int cnt = 0, ret = -1;
5191
5192         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5193         bist |= PCI_BIST_START;
5194         pci_write_config_word(sp->pdev, PCI_BIST, bist);
5195
5196         while (cnt < 20) {
5197                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5198                 if (!(bist & PCI_BIST_START)) {
5199                         *data = (bist & PCI_BIST_CODE_MASK);
5200                         ret = 0;
5201                         break;
5202                 }
5203                 msleep(100);
5204                 cnt++;
5205         }
5206
5207         return ret;
5208 }
5209
5210 /**
 * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
5213  * s2io_nic structure.
5214  * @data: variable that returns the result of each of the test conducted by
5215  * the driver.
5216  * Description:
5217  * The function verifies the link state of the NIC and updates the input
5218  * argument 'data' appropriately.
5219  * Return value:
5220  * 0 on success.
5221  */
5222
5223 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5224 {
5225         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5226         u64 val64;
5227
5228         val64 = readq(&bar0->adapter_status);
5229         if(!(LINK_IS_UP(val64)))
5230                 *data = 1;
5231         else
5232                 *data = 0;
5233
5234         return *data;
5235 }
5236
5237 /**
5238  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5239  * @sp - private member of the device structure, which is a pointer to the
5240  * s2io_nic structure.
5241  * @data - variable that returns the result of each of the test
5242  * conducted by the driver.
5243  * Description:
5244  *  This is one of the offline test that tests the read and write
5245  *  access to the RldRam chip on the NIC.
5246  * Return value:
5247  *  0 on success.
5248  */
5249
static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* Disable ECC so ECC events do not interfere with the test. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/* Put the memory controller into RLDRAM test mode. */
	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	/* Program the RLDRAM mode register set: queue size first, then
	 * set the MRS enable bit in a second write. */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Two passes: pass 0 writes the patterns as-is, pass 1 writes them
	 * with the upper 48 bits inverted, exercising both polarities. */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d2);

		/* Target RLDRAM address for the test patterns. */
		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Start the write phase and poll for completion
		 * (up to 5 x 200ms). */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		/* NOTE(review): on timeout we bail out with test_fail still
		 * 0, i.e. a timed-out test is reported as a pass — confirm
		 * whether that is intended. */
		if (cnt == 5)
			break;

		/* Start the read-back phase and poll for completion
		 * (up to 5 x 500ms). */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		if (cnt == 5)
			break;

		/* Hardware compares the read data against what was written
		 * and reports the verdict in the PASS bit. */
		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
5334
5335 /**
 *  s2io_ethtool_test - conducts 6 tests to determine the health of the card.
5337  *  @sp : private member of the device structure, which is a pointer to the
5338  *  s2io_nic structure.
5339  *  @ethtest : pointer to a ethtool command specific structure that will be
5340  *  returned to the user.
5341  *  @data : variable that returns the result of each of the test
5342  * conducted by the driver.
5343  * Description:
5344  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
5345  *  the health of the card.
5346  * Return value:
5347  *  void
5348  */
5349
5350 static void s2io_ethtool_test(struct net_device *dev,
5351                               struct ethtool_test *ethtest,
5352                               uint64_t * data)
5353 {
5354         struct s2io_nic *sp = dev->priv;
5355         int orig_state = netif_running(sp->dev);
5356
5357         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5358                 /* Offline Tests. */
5359                 if (orig_state)
5360                         s2io_close(sp->dev);
5361
5362                 if (s2io_register_test(sp, &data[0]))
5363                         ethtest->flags |= ETH_TEST_FL_FAILED;
5364
5365                 s2io_reset(sp);
5366
5367                 if (s2io_rldram_test(sp, &data[3]))
5368                         ethtest->flags |= ETH_TEST_FL_FAILED;
5369
5370                 s2io_reset(sp);
5371
5372                 if (s2io_eeprom_test(sp, &data[1]))
5373                         ethtest->flags |= ETH_TEST_FL_FAILED;
5374
5375                 if (s2io_bist_test(sp, &data[4]))
5376                         ethtest->flags |= ETH_TEST_FL_FAILED;
5377
5378                 if (orig_state)
5379                         s2io_open(sp->dev);
5380
5381                 data[2] = 0;
5382         } else {
5383                 /* Online Tests. */
5384                 if (!orig_state) {
5385                         DBG_PRINT(ERR_DBG,
5386                                   "%s: is not up, cannot run test\n",
5387                                   dev->name);
5388                         data[0] = -1;
5389                         data[1] = -1;
5390                         data[2] = -1;
5391                         data[3] = -1;
5392                         data[4] = -1;
5393                 }
5394
5395                 if (s2io_link_test(sp, &data[2]))
5396                         ethtest->flags |= ETH_TEST_FL_FAILED;
5397
5398                 data[0] = 0;
5399                 data[1] = 0;
5400                 data[3] = 0;
5401                 data[4] = 0;
5402         }
5403 }
5404
/*
 * s2io_get_ethtool_stats - ethtool get_ethtool_stats hook.
 * Copies the adapter's hardware statistics block plus driver software
 * counters into @tmp_stats, in the same order as ethtool_stats_keys.
 * 32-bit hardware counters that have a separate overflow word are
 * stitched into one 64-bit value as (oflow << 32) | low32.
 */
static void s2io_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats,
				   u64 * tmp_stats)
{
	int i = 0;
	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Refresh the DMA'd statistics block before copying. */
	s2io_updt_stats(sp);
	/* Tx MAC statistics */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
		le32_to_cpu(stat_info->tmac_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_bcst_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_any_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_vld_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_icmp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_rst_tcp);
	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
	tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
		le32_to_cpu(stat_info->tmac_udp);
	/* Rx MAC statistics */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_vld_bcst_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
		<< 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
		 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_discarded_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
		 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_usized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_osized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_frag_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_jabber_frms);
	/* Rx frame-size histogram */
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_ip);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_icmp);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_udp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_err_drp_udp);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
	/* Per-queue Rx frame counters */
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
	tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_pause_cnt);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
		le32_to_cpu(stat_info->rmac_accepted_ip);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
	/* PCI/PCI-X transaction counters */
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
	tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
	/* Placeholder slot, always zero (keeps key table aligned). */
	tmp_stats[i++] = 0;
	/* Driver software statistics */
	tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
	tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
	tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
	tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
	tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
	tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
	/* XPAK transceiver alarm/warning counters */
	tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
	/* Receive-offload (aggregation) statistics */
	tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
	tmp_stats[i++] = stat_info->sw_stat.sending_both;
	tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
	tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
	if (stat_info->sw_stat.num_aggregations) {
		u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
		int count = 0;
		/*
		 * Since 64-bit divide does not work on all platforms,
		 * do repeated subtraction.
		 */
		while (tmp >= stat_info->sw_stat.num_aggregations) {
			tmp -= stat_info->sw_stat.num_aggregations;
			count++;
		}
		tmp_stats[i++] = count;
	}
	else
		tmp_stats[i++] = 0;
}
5629
5630 static int s2io_ethtool_get_regs_len(struct net_device *dev)
5631 {
5632         return (XENA_REG_SPACE);
5633 }
5634
5635
5636 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5637 {
5638         struct s2io_nic *sp = dev->priv;
5639
5640         return (sp->rx_csum);
5641 }
5642
5643 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5644 {
5645         struct s2io_nic *sp = dev->priv;
5646
5647         if (data)
5648                 sp->rx_csum = 1;
5649         else
5650                 sp->rx_csum = 0;
5651
5652         return 0;
5653 }
5654
5655 static int s2io_get_eeprom_len(struct net_device *dev)
5656 {
5657         return (XENA_EEPROM_SPACE);
5658 }
5659
5660 static int s2io_ethtool_self_test_count(struct net_device *dev)
5661 {
5662         return (S2IO_TEST_LEN);
5663 }
5664
5665 static void s2io_ethtool_get_strings(struct net_device *dev,
5666                                      u32 stringset, u8 * data)
5667 {
5668         switch (stringset) {
5669         case ETH_SS_TEST:
5670                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5671                 break;
5672         case ETH_SS_STATS:
5673                 memcpy(data, &ethtool_stats_keys,
5674                        sizeof(ethtool_stats_keys));
5675         }
5676 }
5677 static int s2io_ethtool_get_stats_count(struct net_device *dev)
5678 {
5679         return (S2IO_STAT_LEN);
5680 }
5681
5682 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
5683 {
5684         if (data)
5685                 dev->features |= NETIF_F_IP_CSUM;
5686         else
5687                 dev->features &= ~NETIF_F_IP_CSUM;
5688
5689         return 0;
5690 }
5691
5692 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
5693 {
5694         return (dev->features & NETIF_F_TSO) != 0;
5695 }
5696 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
5697 {
5698         if (data)
5699                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
5700         else
5701                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
5702
5703         return 0;
5704 }
5705
/*
 * ethtool entry points for the s2io driver.  Generic ethtool_op_*
 * helpers are used where no hardware-specific handling is needed;
 * everything else maps to the s2io_ethtool_* handlers above.
 */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.get_rx_csum = s2io_ethtool_get_rx_csum,
	.set_rx_csum = s2io_ethtool_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = s2io_ethtool_op_get_tso,
	.set_tso = s2io_ethtool_op_set_tso,
	.get_ufo = ethtool_op_get_ufo,
	.set_ufo = ethtool_op_set_ufo,
	.self_test_count = s2io_ethtool_self_test_count,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.phys_id = s2io_ethtool_idnic,
	.get_stats_count = s2io_ethtool_get_stats_count,
	.get_ethtool_stats = s2io_get_ethtool_stats
};
5735
5736 /**
5737  *  s2io_ioctl - Entry point for the Ioctl
5738  *  @dev :  Device pointer.
 *  @ifr :  An IOCTL specific structure, that can contain a pointer to
5740  *  a proprietary structure used to pass information to the driver.
5741  *  @cmd :  This is used to distinguish between the different commands that
5742  *  can be passed to the IOCTL functions.
5743  *  Description:
 *  Currently there is no special functionality supported in IOCTL, hence
 *  the function always returns -EOPNOTSUPP.
5746  */
5747
/* No private ioctls are implemented; reject every request. */
static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	return -EOPNOTSUPP;
}
5752
5753 /**
5754  *  s2io_change_mtu - entry point to change MTU size for the device.
5755  *   @dev : device pointer.
5756  *   @new_mtu : the new MTU size for the device.
5757  *   Description: A driver entry point to change MTU size for the device.
5758  *   Before changing the MTU the device must be stopped.
5759  *  Return value:
5760  *   0 on success and an appropriate (-)ve integer as defined in errno.h
5761  *   file on failure.
5762  */
5763
5764 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
5765 {
5766         struct s2io_nic *sp = dev->priv;
5767
5768         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
5769                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
5770                           dev->name);
5771                 return -EPERM;
5772         }
5773
5774         dev->mtu = new_mtu;
5775         if (netif_running(dev)) {
5776                 s2io_card_down(sp);
5777                 netif_stop_queue(dev);
5778                 if (s2io_card_up(sp)) {
5779                         DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
5780                                   __FUNCTION__);
5781                 }
5782                 if (netif_queue_stopped(dev))
5783                         netif_wake_queue(dev);
5784         } else { /* Device is down */
5785                 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5786                 u64 val64 = new_mtu;
5787
5788                 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
5789         }
5790
5791         return 0;
5792 }
5793
5794 /**
5795  *  s2io_tasklet - Bottom half of the ISR.
 *  @dev_addr : address of the net_device structure, passed as an unsigned long.
5797  *  Description:
5798  *  This is the tasklet or the bottom half of the ISR. This is
5799  *  an extension of the ISR which is scheduled by the scheduler to be run
5800  *  when the load on the CPU is low. All low priority tasks of the ISR can
5801  *  be pushed into the tasklet. For now the tasklet is used only to
5802  *  replenish the Rx buffers in the Rx buffer descriptors.
5803  *  Return value:
5804  *  void.
5805  */
5806
/*
 * Bottom half: replenish the Rx descriptor rings for every configured
 * ring.  TASKLET_IN_USE presumably test-and-sets bit 0 of
 * sp->tasklet_status so only one instance runs at a time — confirm
 * against the macro definition; the bit is cleared on the way out.
 */
static void s2io_tasklet(unsigned long dev_addr)
{
	struct net_device *dev = (struct net_device *) dev_addr;
	struct s2io_nic *sp = dev->priv;
	int i, ret;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &sp->mac_control;
	config = &sp->config;

	if (!TASKLET_IN_USE) {
		for (i = 0; i < config->rx_ring_num; i++) {
			/* Stop early on allocation failure or full ring;
			 * remaining rings will be refilled next run. */
			ret = fill_rx_buffers(sp, i);
			if (ret == -ENOMEM) {
				DBG_PRINT(ERR_DBG, "%s: Out of ",
					  dev->name);
				DBG_PRINT(ERR_DBG, "memory in tasklet\n");
				break;
			} else if (ret == -EFILL) {
				DBG_PRINT(ERR_DBG,
					  "%s: Rx Ring %d is full\n",
					  dev->name, i);
				break;
			}
		}
		/* Release the in-use bit for the next scheduled run. */
		clear_bit(0, (&sp->tasklet_status));
	}
}
5836
5837 /**
 * s2io_set_link - Set the Link status
 * @work: work queue entry embedded in the device private structure
5840  * Description: Sets the link status for the adapter
5841  */
5842
static void s2io_set_link(struct work_struct *work)
{
	struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
	struct net_device *dev = nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64;
	u16 subid;

	/* Bit 0 of link_state serializes this worker against
	 * s2io_card_down(); it is cleared again on exit below.
	 */
	if (test_and_set_bit(0, &(nic->link_state))) {
		/* The card is being reset, no point doing anything */
		return;
	}

	subid = nic->pdev->subsystem_device;
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Allow a small delay for the NICs self initiated
		 * cleanup to complete.
		 */
		msleep(100);
	}

	val64 = readq(&bar0->adapter_status);
	if (LINK_IS_UP(val64)) {
		/* Link reported up: enable the adapter if it is not
		 * already enabled, but only when it is quiescent.
		 */
		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
			if (verify_xena_quiescence(nic)) {
				val64 = readq(&bar0->adapter_control);
				val64 |= ADAPTER_CNTL_EN;
				writeq(val64, &bar0->adapter_control);
				/* Drive the link LED: some cards need
				 * GPIO 0 instead of the adapter_control
				 * LED bit.
				 */
				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
					nic->device_type, subid)) {
					val64 = readq(&bar0->gpio_control);
					val64 |= GPIO_CTRL_GPIO_0;
					writeq(val64, &bar0->gpio_control);
					/* readback -- presumably to flush
					 * the posted write; confirm */
					val64 = readq(&bar0->gpio_control);
				} else {
					val64 |= ADAPTER_LED_ON;
					writeq(val64, &bar0->adapter_control);
				}
				nic->device_enabled_once = TRUE;
			} else {
				DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
				DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
				netif_stop_queue(dev);
			}
		}
		/* Re-check the link after (possibly) enabling the adapter */
		val64 = readq(&bar0->adapter_status);
		if (!LINK_IS_UP(val64)) {
			DBG_PRINT(ERR_DBG, "%s:", dev->name);
			DBG_PRINT(ERR_DBG, " Link down after enabling ");
			DBG_PRINT(ERR_DBG, "device \n");
		} else
			s2io_link(nic, LINK_UP);
	} else {
		/* Link down: turn the GPIO-driven link LED off on cards
		 * with faulty link indicators, then report link down.
		 */
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
						      subid)) {
			val64 = readq(&bar0->gpio_control);
			val64 &= ~GPIO_CTRL_GPIO_0;
			writeq(val64, &bar0->gpio_control);
			/* readback -- presumably to flush the posted
			 * write; confirm */
			val64 = readq(&bar0->gpio_control);
		}
		s2io_link(nic, LINK_DOWN);
	}
	/* Release the serialization bit taken at entry */
	clear_bit(0, &(nic->link_state));
}
5908
/*
 * set_rxd_buffer_pointer - attach (or re-attach) data buffers to an RxD
 * @sp:    device private structure
 * @rxdp:  receive descriptor to populate
 * @ba:    per-descriptor buffer-address bookkeeping (3-buffer modes only)
 * @skb:   in/out: skb to reuse; a fresh skb is allocated here and
 *         returned through this pointer when *skb is NULL
 * @temp0: in/out: cached DMA address of buffer 0
 * @temp1: in/out: cached DMA address of buffer 1
 * @temp2: in/out: cached DMA address of buffer 2
 * @size:  skb size to allocate, precomputed for the ring mode
 *
 * Used while quiescing the card (see rxd_owner_bit_reset()): the Rx
 * frames will never be processed, so a single skb/mapping set is
 * created once and shared by every descriptor whose Host_Control is
 * still NULL -- the cached DMA addresses in temp0..temp2 make that
 * reuse possible.
 *
 * Returns 0 on success, -ENOMEM if an skb allocation fails.
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				struct buffAdd *ba,
				struct sk_buff **skb, u64 *temp0, u64 *temp1,
				u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct sk_buff *frag_list;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		/* One buffer mode: single buffer carries the whole frame */
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frame are not going to be processed,
			 * using same mapped address for the Rxd
			 * buffer pointer
			 */
			((struct RxD1*)rxdp)->Buffer0_ptr = *temp0;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
				return -ENOMEM ;
			}
			/* storing the mapped addr in a temp variable
			 * such it will be used for next rxd whose
			 * Host Control is NULL
			 */
			((struct RxD1*)rxdp)->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, (*skb)->data,
					size - NET_IP_ALIGN,
					PCI_DMA_FROMDEVICE);
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		/* Two buffer Mode */
		if (*skb) {
			/* Reuse the mappings created for an earlier RxD */
			((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
			((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
			((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
					dev->name);
				return -ENOMEM;
			}
			/* Buffer 2: frame payload (mtu + 4) */
			((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
				pci_map_single(sp->pdev, (*skb)->data,
					       dev->mtu + 4,
					       PCI_DMA_FROMDEVICE);
			/* Buffer 0: pre-allocated header buffer */
			((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
						PCI_DMA_FROMDEVICE);
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
					       PCI_DMA_FROMDEVICE);
		}
	} else if ((rxdp->Host_Control == 0)) {
		/* Three buffer mode */
		if (*skb) {
			/* Reuse the mappings created for an earlier RxD */
			((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
			((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
			((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
					  dev->name);
				return -ENOMEM;
			}
			/* Buffer 0: pre-allocated header buffer */
			((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
				pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
					       PCI_DMA_FROMDEVICE);
			/* Buffer-1 receives L3/L4 headers */
			((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
				pci_map_single( sp->pdev, (*skb)->data,
						l3l4hdr_size + 4,
						PCI_DMA_FROMDEVICE);
			/*
			 * skb_shinfo(skb)->frag_list will have L4
			 * data payload
			 */
			skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu +
								   ALIGN_SIZE);
			if (skb_shinfo(*skb)->frag_list == NULL) {
				/* NOTE(review): the Buffer0/Buffer1 mappings
				 * made above are not unmapped on this path;
				 * presumably tolerable because the card is
				 * about to be reset -- confirm.
				 */
				DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb \
					  failed\n ", dev->name);
				return -ENOMEM ;
			}
			frag_list = skb_shinfo(*skb)->frag_list;
			frag_list->next = NULL;
			/*
			 * Buffer-2 receives L4 data payload
			 */
			((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
				pci_map_single( sp->pdev, frag_list->data,
						dev->mtu, PCI_DMA_FROMDEVICE);
		}
	}
	return 0;
}
6015 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6016                                 int size)
6017 {
6018         struct net_device *dev = sp->dev;
6019         if (sp->rxd_mode == RXD_MODE_1) {
6020                 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6021         } else if (sp->rxd_mode == RXD_MODE_3B) {
6022                 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6023                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6024                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6025         } else {
6026                 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6027                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
6028                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
6029         }
6030 }
6031
6032 static  int rxd_owner_bit_reset(struct s2io_nic *sp)
6033 {
6034         int i, j, k, blk_cnt = 0, size;
6035         struct mac_info * mac_control = &sp->mac_control;
6036         struct config_param *config = &sp->config;
6037         struct net_device *dev = sp->dev;
6038         struct RxD_t *rxdp = NULL;
6039         struct sk_buff *skb = NULL;
6040         struct buffAdd *ba = NULL;
6041         u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6042
6043         /* Calculate the size based on ring mode */
6044         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6045                 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6046         if (sp->rxd_mode == RXD_MODE_1)
6047                 size += NET_IP_ALIGN;
6048         else if (sp->rxd_mode == RXD_MODE_3B)
6049                 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6050         else
6051                 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
6052
6053         for (i = 0; i < config->rx_ring_num; i++) {
6054                 blk_cnt = config->rx_cfg[i].num_rxd /
6055                         (rxd_count[sp->rxd_mode] +1);
6056
6057                 for (j = 0; j < blk_cnt; j++) {
6058                         for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6059                                 rxdp = mac_control->rings[i].
6060                                         rx_blocks[j].rxds[k].virt_addr;
6061                                 if(sp->rxd_mode >= RXD_MODE_3A)
6062                                         ba = &mac_control->rings[i].ba[j][k];
6063                                 set_rxd_buffer_pointer(sp, rxdp, ba,
6064                                                        &skb,(u64 *)&temp0_64,
6065                                                        (u64 *)&temp1_64,
6066                                                        (u64 *)&temp2_64, size);
6067
6068                                 set_rxd_buffer_size(sp, rxdp, size);
6069                                 wmb();
6070                                 /* flip the Ownership bit to Hardware */
6071                                 rxdp->Control_1 |= RXD_OWN_XENA;
6072                         }
6073                 }
6074         }
6075         return 0;
6076
6077 }
6078
6079 static int s2io_add_isr(struct s2io_nic * sp)
6080 {
6081         int ret = 0;
6082         struct net_device *dev = sp->dev;
6083         int err = 0;
6084
6085         if (sp->intr_type == MSI)
6086                 ret = s2io_enable_msi(sp);
6087         else if (sp->intr_type == MSI_X)
6088                 ret = s2io_enable_msi_x(sp);
6089         if (ret) {
6090                 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6091                 sp->intr_type = INTA;
6092         }
6093
6094         /* Store the values of the MSIX table in the struct s2io_nic structure */
6095         store_xmsi_data(sp);
6096
6097         /* After proper initialization of H/W, register ISR */
6098         if (sp->intr_type == MSI) {
6099                 err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
6100                         IRQF_SHARED, sp->name, dev);
6101                 if (err) {
6102                         pci_disable_msi(sp->pdev);
6103                         DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n",
6104                                   dev->name);
6105                         return -1;
6106                 }
6107         }
6108         if (sp->intr_type == MSI_X) {
6109                 int i;
6110
6111                 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
6112                         if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
6113                                 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
6114                                         dev->name, i);
6115                                 err = request_irq(sp->entries[i].vector,
6116                                           s2io_msix_fifo_handle, 0, sp->desc[i],
6117                                                   sp->s2io_entries[i].arg);
6118                                 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i],
6119                                 (unsigned long long)sp->msix_info[i].addr);
6120                         } else {
6121                                 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
6122                                         dev->name, i);
6123                                 err = request_irq(sp->entries[i].vector,
6124                                           s2io_msix_ring_handle, 0, sp->desc[i],
6125                                                   sp->s2io_entries[i].arg);
6126                                 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i],
6127                                 (unsigned long long)sp->msix_info[i].addr);
6128                         }
6129                         if (err) {
6130                                 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
6131                                           "failed\n", dev->name, i);
6132                                 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
6133                                 return -1;
6134                         }
6135                         sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
6136                 }
6137         }
6138         if (sp->intr_type == INTA) {
6139                 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
6140                                 sp->name, dev);
6141                 if (err) {
6142                         DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
6143                                   dev->name);
6144                         return -1;
6145                 }
6146         }
6147         return 0;
6148 }
/*
 * s2io_rem_isr - unregister the device's interrupt handler(s)
 * @sp: device private structure
 *
 * Frees the registered IRQ(s), disables MSI/MSI-X if it was in use,
 * then waits (bounded, ~50ms) for in-flight handlers to complete.
 */
static void s2io_rem_isr(struct s2io_nic * sp)
{
	int cnt = 0;
	struct net_device *dev = sp->dev;

	if (sp->intr_type == MSI_X) {
		int i;
		u16 msi_control;

		/* Free every vector that registered successfully;
		 * the loop stops at the first entry not flagged so.
		 */
		for (i=1; (sp->s2io_entries[i].in_use ==
			MSIX_REGISTERED_SUCCESS); i++) {
			int vector = sp->entries[i].vector;
			void *arg = sp->s2io_entries[i].arg;

			free_irq(vector, arg);
		}
		/* Clear bit 0 of the config word at offset 0x42 --
		 * presumably the MSI-X capability's enable bit for
		 * this device; confirm against the config layout.
		 */
		pci_read_config_word(sp->pdev, 0x42, &msi_control);
		msi_control &= 0xFFFE; /* Disable MSI */
		pci_write_config_word(sp->pdev, 0x42, msi_control);

		pci_disable_msix(sp->pdev);
	} else {
		free_irq(sp->pdev->irq, dev);
		if (sp->intr_type == MSI) {
			u16 val;

			pci_disable_msi(sp->pdev);
			/* NOTE(review): XOR toggles rather than clears
			 * bit 0 of the word at config offset 0x4c --
			 * this relies on the bit being set here; confirm.
			 */
			pci_read_config_word(sp->pdev, 0x4c, &val);
			val ^= 0x1;
			pci_write_config_word(sp->pdev, 0x4c, val);
		}
	}
	/* Waiting till all Interrupt handlers are complete */
	cnt = 0;
	do {
		msleep(10);
		if (!atomic_read(&sp->isr_cnt))
			break;
		cnt++;
	} while(cnt < 5);
}
6190
/*
 * s2io_card_down - bring the NIC down into a quiescent, reset state
 * @sp: device private structure
 *
 * Stops the alarm timer, NIC traffic, interrupts and the tasklet,
 * waits for the device to go quiescent, resets it and frees all
 * Tx/Rx buffers.  Serializes with s2io_set_link() via bit 0 of
 * link_state.
 */
static void s2io_card_down(struct s2io_nic * sp)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	unsigned long flags;
	register u64 val64 = 0;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(0, &(sp->link_state))) {
		msleep(50);
	}
	atomic_set(&sp->card_state, CARD_DOWN);

	/* disable Tx and Rx traffic on the NIC */
	stop_nic(sp);

	s2io_rem_isr(sp);

	/* Kill tasklet. */
	tasklet_kill(&sp->task);

	/* Check if the device is Quiescent and then Reset the NIC */
	do {
		/* As per the HW requirement we need to replenish the
		 * receive buffer to avoid the ring bump. Since there is
		 * no intention of processing the Rx frame at this point
		 * we are just setting the ownership bit of rxd in each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if(verify_pcc_quiescent(sp, sp->device_enabled_once))
			break;
		}

		/* Not quiescent yet: retry up to 10 times, 50ms apart */
		msleep(50);
		cnt++;
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG,
				  "s2io_close:Device not Quiescent ");
			DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
				  (unsigned long long) val64);
			break;
		}
	} while (1);
	s2io_reset(sp);

	spin_lock_irqsave(&sp->tx_lock, flags);
	/* Free all Tx buffers */
	free_tx_buffers(sp);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	/* Free all Rx buffers */
	spin_lock_irqsave(&sp->rx_lock, flags);
	free_rx_buffers(sp);
	spin_unlock_irqrestore(&sp->rx_lock, flags);

	/* Allow s2io_set_link() to run again */
	clear_bit(0, &(sp->link_state));
}
6254
/*
 * s2io_card_up - initialize and start the NIC
 * @sp: device private structure
 *
 * Programs the H/W registers, fills the Rx rings, restores the
 * receive mode, starts NIC traffic, registers the ISR(s), arms the
 * alarm timer and tasklet, and enables interrupts.  Each failure
 * path resets the card and releases whatever was set up before it.
 *
 * Returns 0 on success, -ENODEV or -ENOMEM on failure.
 */
static int s2io_card_up(struct s2io_nic * sp)
{
	int i, ret = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	struct net_device *dev = (struct net_device *) sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	if (init_nic(sp) != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		s2io_reset(sp);
		return -ENODEV;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		if ((ret = fill_rx_buffers(sp, i))) {
			/* Undo the rings filled so far before failing */
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  atomic_read(&sp->rx_bufs_left[i]));
	}
	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos= 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	if (sp->lro) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use(if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		/* For MSI-X, s2io_rem_isr() also disables the MSI-X
		 * capability enabled by s2io_add_isr() above.
		 */
		if (sp->intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Arm the alarm timer to fire in half a second */
	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	/* Enable tasklet for the device */
	tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);

	/*  Enable select interrupts */
	if (sp->intr_type != INTA)
		en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
	else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR | RX_PIC_INTR;
		interruptible |= TX_MAC_INTR | RX_MAC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}


	atomic_set(&sp->card_state, CARD_UP);
	return 0;
}
6344
6345 /**
6346  * s2io_restart_nic - Resets the NIC.
6347  * @data : long pointer to the device private structure
6348  * Description:
6349  * This function is scheduled to be run by the s2io_tx_watchdog
6350  * function after 0.5 secs to reset the NIC. The idea is to reduce
6351  * the run time of the watch dog routine which is run holding a
6352  * spin lock.
6353  */
6354
6355 static void s2io_restart_nic(struct work_struct *work)
6356 {
6357         struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
6358         struct net_device *dev = sp->dev;
6359
6360         s2io_card_down(sp);
6361         if (s2io_card_up(sp)) {
6362                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6363                           dev->name);
6364         }
6365         netif_wake_queue(dev);
6366         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6367                   dev->name);
6368
6369 }
6370
6371 /**
6372  *  s2io_tx_watchdog - Watchdog for transmit side.
6373  *  @dev : Pointer to net device structure
6374  *  Description:
6375  *  This function is triggered if the Tx Queue is stopped
6376  *  for a pre-defined amount of time when the Interface is still up.
6377  *  If the Interface is jammed in such a situation, the hardware is
6378  *  reset (by s2io_close) and restarted again (by s2io_open) to
6379  *  overcome any problem that might have been caused in the hardware.
6380  *  Return value:
6381  *  void
6382  */
6383
6384 static void s2io_tx_watchdog(struct net_device *dev)
6385 {
6386         struct s2io_nic *sp = dev->priv;
6387
6388         if (netif_carrier_ok(dev)) {
6389                 schedule_work(&sp->rst_timer_task);
6390                 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
6391         }
6392 }
6393
6394 /**
6395  *   rx_osm_handler - To perform some OS related operations on SKB.
6396  *   @sp: private member of the device structure,pointer to s2io_nic structure.
6397  *   @skb : the socket buffer pointer.
6398  *   @len : length of the packet
6399  *   @cksum : FCS checksum of the frame.
6400  *   @ring_no : the ring from which this RxD was extracted.
6401  *   Description:
6402  *   This function is called by the Rx interrupt serivce routine to perform
6403  *   some OS related operations on the SKB before passing it to the upper
6404  *   layers. It mainly checks if the checksum is OK, if so adds it to the
6405  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
6406  *   to the upper layer. If the checksum is wrong, it increments the Rx
6407  *   packet error count, frees the SKB and returns error.
6408  *   Return value:
6409  *   SUCCESS on success and -1 on failure.
6410  */
6411 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
6412 {
6413         struct s2io_nic *sp = ring_data->nic;
6414         struct net_device *dev = (struct net_device *) sp->dev;
6415         struct sk_buff *skb = (struct sk_buff *)
6416                 ((unsigned long) rxdp->Host_Control);
6417         int ring_no = ring_data->ring_no;
6418         u16 l3_csum, l4_csum;
6419         unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
6420         struct lro *lro;
6421
6422         skb->dev = dev;
6423
6424         if (err) {
6425                 /* Check for parity error */
6426                 if (err & 0x1) {
6427                         sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
6428                 }
6429
6430                 /*
6431                 * Drop the packet if bad transfer code. Exception being
6432                 * 0x5, which could be due to unsupported IPv6 extension header.
6433                 * In this case, we let stack handle the packet.
6434                 * Note that in this case, since checksum will be incorrect,
6435                 * stack will validate the same.
6436                 */
6437                 if (err && ((err >> 48) != 0x5)) {
6438                         DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
6439                                 dev->name, err);
6440                         sp->stats.rx_crc_errors++;
6441                         dev_kfree_skb(skb);
6442                         atomic_dec(&sp->rx_bufs_left[ring_no]);
6443                         rxdp->Host_Control = 0;
6444                         return 0;
6445                 }
6446         }
6447
6448         /* Updating statistics */
6449         rxdp->Host_Control = 0;
6450         sp->rx_pkt_count++;
6451         sp->stats.rx_packets++;
6452         if (sp->rxd_mode == RXD_MODE_1) {
6453                 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
6454
6455                 sp->stats.rx_bytes += len;
6456                 skb_put(skb, len);
6457
6458         } else if (sp->rxd_mode >= RXD_MODE_3A) {
6459                 int get_block = ring_data->rx_curr_get_info.block_index;
6460                 int get_off = ring_data->rx_curr_get_info.offset;
6461                 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
6462                 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
6463                 unsigned char *buff = skb_push(skb, buf0_len);
6464
6465                 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
6466                 sp->stats.rx_bytes += buf0_len + buf2_len;
6467                 memcpy(buff, ba->ba_0, buf0_len);
6468
6469                 if (sp->rxd_mode == RXD_MODE_3A) {
6470                         int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);
6471
6472                         skb_put(skb, buf1_len);
6473                         skb->len += buf2_len;
6474                         skb->data_len += buf2_len;
6475                         skb_put(skb_shinfo(skb)->frag_list, buf2_len);
6476                         sp->stats.rx_bytes += buf1_len;
6477
6478                 } else
6479                         skb_put(skb, buf2_len);
6480         }
6481
6482         if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
6483             (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
6484             (sp->rx_csum)) {
6485                 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
6486                 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
6487                 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
6488                         /*
6489                          * NIC verifies if the Checksum of the received
6490                          * frame is Ok or not and accordingly returns
6491                          * a flag in the RxD.
6492                          */
6493                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6494                         if (sp->lro) {
6495                                 u32 tcp_len;
6496                                 u8 *tcp;
6497                                 int ret = 0;
6498
6499                                 ret = s2io_club_tcp_session(skb->data, &tcp,
6500                                                 &tcp_len, &lro, rxdp, sp);
6501                                 switch (ret) {
6502                                         case 3: /* Begin anew */
6503                                                 lro->parent = skb;
6504                                                 goto aggregate;
6505                                         case 1: /* Aggregate */
6506                                         {
6507                                                 lro_append_pkt(sp, lro,
6508                                                         skb, tcp_len);
6509                                                 goto aggregate;
6510                                         }
6511                                         case 4: /* Flush session */
6512                                         {
6513                                                 lro_append_pkt(sp, lro,
6514                                                         skb, tcp_len);
6515                                                 queue_rx_frame(lro->parent);
6516                                                 clear_lro_session(lro);
6517                                                 sp->mac_control.stats_info->
6518                                                     sw_stat.flush_max_pkts++;
6519                                                 goto aggregate;
6520                                         }
6521                                         case 2: /* Flush both */
6522                                                 lro->parent->data_len =
6523                                                         lro->frags_len;
6524                                                 sp->mac_control.stats_info->
6525                                                      sw_stat.sending_both++;
6526                                                 queue_rx_frame(lro->parent);
6527                                                 clear_lro_session(lro);
6528                                                 goto send_up;
6529                                         case 0: /* sessions exceeded */
6530                                         case -1: /* non-TCP or not
6531                                                   * L2 aggregatable
6532                                                   */
6533                                         case 5: /*
6534                                                  * First pkt in session not
6535                                                  * L3/L4 aggregatable
6536                                                  */
6537                                                 break;
6538                                         default:
6539                                                 DBG_PRINT(ERR_DBG,
6540                                                         "%s: Samadhana!!\n",
6541                                                          __FUNCTION__);
6542                                                 BUG();
6543                                 }
6544                         }
6545                 } else {
6546                         /*
6547                          * Packet with erroneous checksum, let the
6548                          * upper layers deal with it.
6549                          */
6550                         skb->ip_summed = CHECKSUM_NONE;
6551                 }
6552         } else {
6553                 skb->ip_summed = CHECKSUM_NONE;
6554         }
6555
6556         if (!sp->lro) {
6557                 skb->protocol = eth_type_trans(skb, dev);
6558                 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
6559                         /* Queueing the vlan frame to the upper layer */
6560                         if (napi)
6561                                 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
6562                                         RXD_GET_VLAN_TAG(rxdp->Control_2));
6563                         else
6564                                 vlan_hwaccel_rx(skb, sp->vlgrp,
6565                                         RXD_GET_VLAN_TAG(rxdp->Control_2));
6566                 } else {
6567                         if (napi)
6568                                 netif_receive_skb(skb);
6569                         else
6570                                 netif_rx(skb);
6571                 }
6572         } else {
6573 send_up:
6574                 queue_rx_frame(skb);
6575         }
6576         dev->last_rx = jiffies;
6577 aggregate:
6578         atomic_dec(&sp->rx_bufs_left[ring_no]);
6579         return SUCCESS;
6580 }
6581
6582 /**
6583  *  s2io_link - stops/starts the Tx queue.
6584  *  @sp : private member of the device structure, which is a pointer to the
6585  *  s2io_nic structure.
 *  @link : indicates whether link is UP/DOWN.
6587  *  Description:
6588  *  This function stops/starts the Tx queue depending on whether the link
 *  status of the NIC is down or up. This is called by the Alarm
6590  *  interrupt handler whenever a link change interrupt comes up.
6591  *  Return value:
6592  *  void.
6593  */
6594
6595 static void s2io_link(struct s2io_nic * sp, int link)
6596 {
6597         struct net_device *dev = (struct net_device *) sp->dev;
6598
6599         if (link != sp->last_link_state) {
6600                 if (link == LINK_DOWN) {
6601                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
6602                         netif_carrier_off(dev);
6603                 } else {
6604                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
6605                         netif_carrier_on(dev);
6606                 }
6607         }
6608         sp->last_link_state = link;
6609 }
6610
6611 /**
6612  *  get_xena_rev_id - to identify revision ID of xena.
6613  *  @pdev : PCI Dev structure
6614  *  Description:
6615  *  Function to identify the Revision ID of xena.
6616  *  Return value:
6617  *  returns the revision ID of the device.
6618  */
6619
6620 static int get_xena_rev_id(struct pci_dev *pdev)
6621 {
6622         u8 id = 0;
6623         int ret;
6624         ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
6625         return id;
6626 }
6627
6628 /**
6629  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
6630  *  @sp : private member of the device structure, which is a pointer to the
6631  *  s2io_nic structure.
6632  *  Description:
6633  *  This function initializes a few of the PCI and PCI-X configuration registers
6634  *  with recommended values.
6635  *  Return value:
6636  *  void
6637  */
6638
static void s2io_init_pci(struct s2io_nic * sp)
{
	/* Program recommended values into the PCI and PCI-X command
	 * registers; each write is followed by a read-back, presumably to
	 * flush the posted config write -- TODO confirm intent. */
	u16 pci_cmd = 0, pcix_cmd = 0;

	/* Enable Data Parity Error Recovery in PCI-X command register. */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			     &(pcix_cmd));
	/* Bit 0 of the PCI-X command register is the recovery enable. */
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			      (pcix_cmd | 1));
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			     &(pcix_cmd));

	/* Set the PErr Response bit in PCI command register. */
	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(sp->pdev, PCI_COMMAND,
			      (pci_cmd | PCI_COMMAND_PARITY));
	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
}
6657
6658 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6659 {
6660         if ( tx_fifo_num > 8) {
6661                 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
6662                          "supported\n");
6663                 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
6664                 tx_fifo_num = 8;
6665         }
6666         if ( rx_ring_num > 8) {
6667                 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
6668                          "supported\n");
6669                 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
6670                 rx_ring_num = 8;
6671         }
6672         if (*dev_intr_type != INTA)
6673                 napi = 0;
6674
6675 #ifndef CONFIG_PCI_MSI
6676         if (*dev_intr_type != INTA) {
6677                 DBG_PRINT(ERR_DBG, "s2io: This kernel does not support"
6678                           "MSI/MSI-X. Defaulting to INTA\n");
6679                 *dev_intr_type = INTA;
6680         }
6681 #else
6682         if (*dev_intr_type > MSI_X) {
6683                 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
6684                           "Defaulting to INTA\n");
6685                 *dev_intr_type = INTA;
6686         }
6687 #endif
6688         if ((*dev_intr_type == MSI_X) &&
6689                         ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
6690                         (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
6691                 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
6692                                         "Defaulting to INTA\n");
6693                 *dev_intr_type = INTA;
6694         }
6695         if ( (rx_ring_num > 1) && (*dev_intr_type != INTA) )
6696                 napi = 0;
6697         if (rx_ring_mode > 3) {
6698                 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
6699                 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
6700                 rx_ring_mode = 3;
6701         }
6702         return SUCCESS;
6703 }
6704
6705 /**
6706  *  s2io_init_nic - Initialization of the adapter .
6707  *  @pdev : structure containing the PCI related information of the device.
6708  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
6709  *  Description:
 *  The function initializes an adapter identified by the pci_dev structure.
6711  *  All OS related initialization including memory and device structure and
 *  initialization of the device private variable is done. Also the swapper
6713  *  control register is initialized to enable read and write into the I/O
6714  *  registers of the device.
6715  *  Return value:
6716  *  returns 0 on success and negative on failure.
6717  */
6718
6719 static int __devinit
6720 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6721 {
6722         struct s2io_nic *sp;
6723         struct net_device *dev;
6724         int i, j, ret;
6725         int dma_flag = FALSE;
6726         u32 mac_up, mac_down;
6727         u64 val64 = 0, tmp64 = 0;
6728         struct XENA_dev_config __iomem *bar0 = NULL;
6729         u16 subid;
6730         struct mac_info *mac_control;
6731         struct config_param *config;
6732         int mode;
6733         u8 dev_intr_type = intr_type;
6734
6735         if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
6736                 return ret;
6737
6738         if ((ret = pci_enable_device(pdev))) {
6739                 DBG_PRINT(ERR_DBG,
6740                           "s2io_init_nic: pci_enable_device failed\n");
6741                 return ret;
6742         }
6743
6744         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
6745                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
6746                 dma_flag = TRUE;
6747                 if (pci_set_consistent_dma_mask
6748                     (pdev, DMA_64BIT_MASK)) {
6749                         DBG_PRINT(ERR_DBG,
6750                                   "Unable to obtain 64bit DMA for \
6751                                         consistent allocations\n");
6752                         pci_disable_device(pdev);
6753                         return -ENOMEM;
6754                 }
6755         } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
6756                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
6757         } else {
6758                 pci_disable_device(pdev);
6759                 return -ENOMEM;
6760         }
6761         if (dev_intr_type != MSI_X) {
6762                 if (pci_request_regions(pdev, s2io_driver_name)) {
6763                         DBG_PRINT(ERR_DBG, "Request Regions failed\n");
6764                         pci_disable_device(pdev);
6765                         return -ENODEV;
6766                 }
6767         }
6768         else {
6769                 if (!(request_mem_region(pci_resource_start(pdev, 0),
6770                          pci_resource_len(pdev, 0), s2io_driver_name))) {
6771                         DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
6772                         pci_disable_device(pdev);
6773                         return -ENODEV;
6774                 }
6775                 if (!(request_mem_region(pci_resource_start(pdev, 2),
6776                          pci_resource_len(pdev, 2), s2io_driver_name))) {
6777                         DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
6778                         release_mem_region(pci_resource_start(pdev, 0),
6779                                    pci_resource_len(pdev, 0));
6780                         pci_disable_device(pdev);
6781                         return -ENODEV;
6782                 }
6783         }
6784
6785         dev = alloc_etherdev(sizeof(struct s2io_nic));
6786         if (dev == NULL) {
6787                 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
6788                 pci_disable_device(pdev);
6789                 pci_release_regions(pdev);
6790                 return -ENODEV;
6791         }
6792
6793         pci_set_master(pdev);
6794         pci_set_drvdata(pdev, dev);
6795         SET_MODULE_OWNER(dev);
6796         SET_NETDEV_DEV(dev, &pdev->dev);
6797
6798         /*  Private member variable initialized to s2io NIC structure */
6799         sp = dev->priv;
6800         memset(sp, 0, sizeof(struct s2io_nic));
6801         sp->dev = dev;
6802         sp->pdev = pdev;
6803         sp->high_dma_flag = dma_flag;
6804         sp->device_enabled_once = FALSE;
6805         if (rx_ring_mode == 1)
6806                 sp->rxd_mode = RXD_MODE_1;
6807         if (rx_ring_mode == 2)
6808                 sp->rxd_mode = RXD_MODE_3B;
6809         if (rx_ring_mode == 3)
6810                 sp->rxd_mode = RXD_MODE_3A;
6811
6812         sp->intr_type = dev_intr_type;
6813
6814         if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
6815                 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
6816                 sp->device_type = XFRAME_II_DEVICE;
6817         else
6818                 sp->device_type = XFRAME_I_DEVICE;
6819
6820         sp->lro = lro;
6821
6822         /* Initialize some PCI/PCI-X fields of the NIC. */
6823         s2io_init_pci(sp);
6824
6825         /*
6826          * Setting the device configuration parameters.
6827          * Most of these parameters can be specified by the user during
6828          * module insertion as they are module loadable parameters. If
6829          * these parameters are not not specified during load time, they
6830          * are initialized with default values.
6831          */
6832         mac_control = &sp->mac_control;
6833         config = &sp->config;
6834
6835         /* Tx side parameters. */
6836         config->tx_fifo_num = tx_fifo_num;
6837         for (i = 0; i < MAX_TX_FIFOS; i++) {
6838                 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
6839                 config->tx_cfg[i].fifo_priority = i;
6840         }
6841
6842         /* mapping the QoS priority to the configured fifos */
6843         for (i = 0; i < MAX_TX_FIFOS; i++)
6844                 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
6845
6846         config->tx_intr_type = TXD_INT_TYPE_UTILZ;
6847         for (i = 0; i < config->tx_fifo_num; i++) {
6848                 config->tx_cfg[i].f_no_snoop =
6849                     (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
6850                 if (config->tx_cfg[i].fifo_len < 65) {
6851                         config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
6852                         break;
6853                 }
6854         }
6855         /* + 2 because one Txd for skb->data and one Txd for UFO */
6856         config->max_txds = MAX_SKB_FRAGS + 2;
6857
6858         /* Rx side parameters. */
6859         config->rx_ring_num = rx_ring_num;
6860         for (i = 0; i < MAX_RX_RINGS; i++) {
6861                 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
6862                     (rxd_count[sp->rxd_mode] + 1);
6863                 config->rx_cfg[i].ring_priority = i;
6864         }
6865
6866         for (i = 0; i < rx_ring_num; i++) {
6867                 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
6868                 config->rx_cfg[i].f_no_snoop =
6869                     (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
6870         }
6871
6872         /*  Setting Mac Control parameters */
6873         mac_control->rmac_pause_time = rmac_pause_time;
6874         mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
6875         mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
6876
6877
6878         /* Initialize Ring buffer parameters. */
6879         for (i = 0; i < config->rx_ring_num; i++)
6880                 atomic_set(&sp->rx_bufs_left[i], 0);
6881
6882         /* Initialize the number of ISRs currently running */
6883         atomic_set(&sp->isr_cnt, 0);
6884
6885         /*  initialize the shared memory used by the NIC and the host */
6886         if (init_shared_mem(sp)) {
6887                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
6888                           dev->name);
6889                 ret = -ENOMEM;
6890                 goto mem_alloc_failed;
6891         }
6892
6893         sp->bar0 = ioremap(pci_resource_start(pdev, 0),
6894                                      pci_resource_len(pdev, 0));
6895         if (!sp->bar0) {
6896                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
6897                           dev->name);
6898                 ret = -ENOMEM;
6899                 goto bar0_remap_failed;
6900         }
6901
6902         sp->bar1 = ioremap(pci_resource_start(pdev, 2),
6903                                      pci_resource_len(pdev, 2));
6904         if (!sp->bar1) {
6905                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
6906                           dev->name);
6907                 ret = -ENOMEM;
6908                 goto bar1_remap_failed;
6909         }
6910
6911         dev->irq = pdev->irq;
6912         dev->base_addr = (unsigned long) sp->bar0;
6913
6914         /* Initializing the BAR1 address as the start of the FIFO pointer. */
6915         for (j = 0; j < MAX_TX_FIFOS; j++) {
6916                 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
6917                     (sp->bar1 + (j * 0x00020000));
6918         }
6919
6920         /*  Driver entry points */
6921         dev->open = &s2io_open;
6922         dev->stop = &s2io_close;
6923         dev->hard_start_xmit = &s2io_xmit;
6924         dev->get_stats = &s2io_get_stats;
6925         dev->set_multicast_list = &s2io_set_multicast;
6926         dev->do_ioctl = &s2io_ioctl;
6927         dev->change_mtu = &s2io_change_mtu;
6928         SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
6929         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6930         dev->vlan_rx_register = s2io_vlan_rx_register;
6931         dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
6932
6933         /*
6934          * will use eth_mac_addr() for  dev->set_mac_address
6935          * mac address will be set every time dev->open() is called
6936          */
6937         dev->poll = s2io_poll;
6938         dev->weight = 32;
6939
6940 #ifdef CONFIG_NET_POLL_CONTROLLER
6941         dev->poll_controller = s2io_netpoll;
6942 #endif
6943
6944         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
6945         if (sp->high_dma_flag == TRUE)
6946                 dev->features |= NETIF_F_HIGHDMA;
6947         dev->features |= NETIF_F_TSO;
6948         dev->features |= NETIF_F_TSO6;
6949         if ((sp->device_type & XFRAME_II_DEVICE) && (ufo))  {
6950                 dev->features |= NETIF_F_UFO;
6951                 dev->features |= NETIF_F_HW_CSUM;
6952         }
6953
6954         dev->tx_timeout = &s2io_tx_watchdog;
6955         dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
6956         INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
6957         INIT_WORK(&sp->set_link_task, s2io_set_link);
6958
6959         pci_save_state(sp->pdev);
6960
6961         /* Setting swapper control on the NIC, for proper reset operation */
6962         if (s2io_set_swapper(sp)) {
6963                 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
6964                           dev->name);
6965                 ret = -EAGAIN;
6966                 goto set_swap_failed;
6967         }
6968
6969         /* Verify if the Herc works on the slot its placed into */
6970         if (sp->device_type & XFRAME_II_DEVICE) {
6971                 mode = s2io_verify_pci_mode(sp);
6972                 if (mode < 0) {
6973                         DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
6974                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
6975                         ret = -EBADSLT;
6976                         goto set_swap_failed;
6977                 }
6978         }
6979
6980         /* Not needed for Herc */
6981         if (sp->device_type & XFRAME_I_DEVICE) {
6982                 /*
6983                  * Fix for all "FFs" MAC address problems observed on
6984                  * Alpha platforms
6985                  */
6986                 fix_mac_address(sp);
6987                 s2io_reset(sp);
6988         }
6989
6990         /*
6991          * MAC address initialization.
6992          * For now only one mac address will be read and used.
6993          */
6994         bar0 = sp->bar0;
6995         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
6996             RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
6997         writeq(val64, &bar0->rmac_addr_cmd_mem);
6998         wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
6999                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
7000         tmp64 = readq(&bar0->rmac_addr_data0_mem);
7001         mac_down = (u32) tmp64;
7002         mac_up = (u32) (tmp64 >> 32);
7003
7004         memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
7005
7006         sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7007         sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7008         sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7009         sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7010         sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7011         sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7012
7013         /*  Set the factory defined MAC address initially   */
7014         dev->addr_len = ETH_ALEN;
7015         memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7016
7017         /* reset Nic and bring it to known state */
7018         s2io_reset(sp);
7019
7020         /*
7021          * Initialize the tasklet status and link state flags
7022          * and the card state parameter
7023          */
7024         atomic_set(&(sp->card_state), 0);
7025         sp->tasklet_status = 0;
7026         sp->link_state = 0;
7027
7028         /* Initialize spinlocks */
7029         spin_lock_init(&sp->tx_lock);
7030
7031         if (!napi)
7032                 spin_lock_init(&sp->put_lock);
7033         spin_lock_init(&sp->rx_lock);
7034
7035         /*
7036          * SXE-002: Configure link and activity LED to init state
7037          * on driver load.
7038          */
7039         subid = sp->pdev->subsystem_device;
7040         if ((subid & 0xFF) >= 0x07) {
7041                 val64 = readq(&bar0->gpio_control);
7042                 val64 |= 0x0000800000000000ULL;
7043                 writeq(val64, &bar0->gpio_control);
7044                 val64 = 0x0411040400000000ULL;
7045                 writeq(val64, (void __iomem *) bar0 + 0x2700);
7046                 val64 = readq(&bar0->gpio_control);
7047         }
7048
7049         sp->rx_csum = 1;        /* Rx chksum verify enabled by default */
7050
7051         if (register_netdev(dev)) {
7052                 DBG_PRINT(ERR_DBG, "Device registration failed\n");
7053                 ret = -ENODEV;
7054                 goto register_failed;
7055         }
7056         s2io_vpd_read(sp);
7057         DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n");
7058         DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7059                   sp->product_name, get_xena_rev_id(sp->pdev));
7060         DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7061                   s2io_driver_version);
7062         DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
7063                           "%02x:%02x:%02x:%02x:%02x:%02x", dev->name,
7064                           sp->def_mac_addr[0].mac_addr[0],
7065                           sp->def_mac_addr[0].mac_addr[1],
7066                           sp->def_mac_addr[0].mac_addr[2],
7067                           sp->def_mac_addr[0].mac_addr[3],
7068                           sp->def_mac_addr[0].mac_addr[4],
7069                           sp->def_mac_addr[0].mac_addr[5]);
7070         DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
7071         if (sp->device_type & XFRAME_II_DEVICE) {
7072                 mode = s2io_print_pci_mode(sp);
7073                 if (mode < 0) {
7074                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7075                         ret = -EBADSLT;
7076                         unregister_netdev(dev);
7077                         goto set_swap_failed;
7078                 }
7079         }
7080         switch(sp->rxd_mode) {
7081                 case RXD_MODE_1:
7082                     DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
7083                                                 dev->name);
7084                     break;
7085                 case RXD_MODE_3B:
7086                     DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7087                                                 dev->name);
7088                     break;
7089                 case RXD_MODE_3A:
7090                     DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n",
7091                                                 dev->name);
7092                     break;
7093         }
7094
7095         if (napi)
7096                 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
7097         switch(sp->intr_type) {
7098                 case INTA:
7099                     DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7100                     break;
7101                 case MSI:
7102                     DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name);
7103                     break;
7104                 case MSI_X:
7105                     DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7106                     break;
7107         }
7108         if (sp->lro)
7109                 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
7110                           dev->name);
7111         if (ufo)
7112                 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
7113                                         " enabled\n", dev->name);
7114         /* Initialize device name */
7115         sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7116
7117         /* Initialize bimodal Interrupts */
7118         sp->config.bimodal = bimodal;
7119         if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
7120                 sp->config.bimodal = 0;
7121                 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
7122                         dev->name);
7123         }
7124
7125         /*
7126          * Make Link state as off at this point, when the Link change
7127          * interrupt comes the state will be automatically changed to
7128          * the right state.
7129          */
7130         netif_carrier_off(dev);
7131
7132         return 0;
7133
7134       register_failed:
7135       set_swap_failed:
7136         iounmap(sp->bar1);
7137       bar1_remap_failed:
7138         iounmap(sp->bar0);
7139       bar0_remap_failed:
7140       mem_alloc_failed:
7141         free_shared_mem(sp);
7142         pci_disable_device(pdev);
7143         if (dev_intr_type != MSI_X)
7144                 pci_release_regions(pdev);
7145         else {
7146                 release_mem_region(pci_resource_start(pdev, 0),
7147                         pci_resource_len(pdev, 0));
7148                 release_mem_region(pci_resource_start(pdev, 2),
7149                         pci_resource_len(pdev, 2));
7150         }
7151         pci_set_drvdata(pdev, NULL);
7152         free_netdev(dev);
7153
7154         return ret;
7155 }
7156
7157 /**
7158  * s2io_rem_nic - Free the PCI device
7159  * @pdev: structure containing the PCI related information of the device.
7160  * Description: This function is called by the Pci subsystem to release a
7161  * PCI device and free up all resource held up by the device. This could
7162  * be in response to a Hot plug event or when the driver is to be removed
7163  * from memory.
7164  */
7165
7166 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7167 {
7168         struct net_device *dev =
7169             (struct net_device *) pci_get_drvdata(pdev);
7170         struct s2io_nic *sp;
7171
7172         if (dev == NULL) {
7173                 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
7174                 return;
7175         }
7176
7177         sp = dev->priv;
7178         unregister_netdev(dev);
7179
7180         free_shared_mem(sp);
7181         iounmap(sp->bar0);
7182         iounmap(sp->bar1);
7183         if (sp->intr_type != MSI_X)
7184                 pci_release_regions(pdev);
7185         else {
7186                 release_mem_region(pci_resource_start(pdev, 0),
7187                         pci_resource_len(pdev, 0));
7188                 release_mem_region(pci_resource_start(pdev, 2),
7189                         pci_resource_len(pdev, 2));
7190         }
7191         pci_set_drvdata(pdev, NULL);
7192         free_netdev(dev);
7193         pci_disable_device(pdev);
7194 }
7195
7196 /**
7197  * s2io_starter - Entry point for the driver
7198  * Description: This function is the entry point for the driver. It verifies
7199  * the module loadable parameters and initializes PCI configuration space.
7200  */
7201
int __init s2io_starter(void)
{
	/* Register the PCI driver; probing of individual adapters happens
	 * via the driver's probe callback. */
	return pci_register_driver(&s2io_driver);
}
7206
7207 /**
7208  * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
7210  */
7211
static __exit void s2io_closer(void)
{
	/* Unregister the PCI driver; the PCI core invokes the remove
	 * callback for every bound device. */
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}
7217
7218 module_init(s2io_starter);
7219 module_exit(s2io_closer);
7220
7221 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7222                 struct tcphdr **tcp, struct RxD_t *rxdp)
7223 {
7224         int ip_off;
7225         u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
7226
7227         if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7228                 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7229                           __FUNCTION__);
7230                 return -1;
7231         }
7232
7233         /* TODO:
7234          * By default the VLAN field in the MAC is stripped by the card, if this
7235          * feature is turned off in rx_pa_cfg register, then the ip_off field
7236          * has to be shifted by a further 2 bytes
7237          */
7238         switch (l2_type) {
7239                 case 0: /* DIX type */
7240                 case 4: /* DIX type with VLAN */
7241                         ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7242                         break;
7243                 /* LLC, SNAP etc are considered non-mergeable */
7244                 default:
7245                         return -1;
7246         }
7247
7248         *ip = (struct iphdr *)((u8 *)buffer + ip_off);
7249         ip_len = (u8)((*ip)->ihl);
7250         ip_len <<= 2;
7251         *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
7252
7253         return 0;
7254 }
7255
7256 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7257                                   struct tcphdr *tcp)
7258 {
7259         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7260         if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7261            (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7262                 return -1;
7263         return 0;
7264 }
7265
/* TCP payload length = IP total length - IP header - TCP header
 * (ihl and doff are in 32-bit words, hence the << 2). */
static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
{
	return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
}
7270
7271 static void initiate_new_session(struct lro *lro, u8 *l2h,
7272                      struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7273 {
7274         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7275         lro->l2h = l2h;
7276         lro->iph = ip;
7277         lro->tcph = tcp;
7278         lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
7279         lro->tcp_ack = ntohl(tcp->ack_seq);
7280         lro->sg_num = 1;
7281         lro->total_len = ntohs(ip->tot_len);
7282         lro->frags_len = 0;
7283         /*
7284          * check if we saw TCP timestamp. Other consistency checks have
7285          * already been done.
7286          */
7287         if (tcp->doff == 8) {
7288                 u32 *ptr;
7289                 ptr = (u32 *)(tcp+1);
7290                 lro->saw_ts = 1;
7291                 lro->cur_tsval = *(ptr+1);
7292                 lro->cur_tsecr = *(ptr+2);
7293         }
7294         lro->in_use = 1;
7295 }
7296
7297 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7298 {
7299         struct iphdr *ip = lro->iph;
7300         struct tcphdr *tcp = lro->tcph;
7301         __sum16 nchk;
7302         struct stat_block *statinfo = sp->mac_control.stats_info;
7303         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7304
7305         /* Update L3 header */
7306         ip->tot_len = htons(lro->total_len);
7307         ip->check = 0;
7308         nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
7309         ip->check = nchk;
7310
7311         /* Update L4 header */
7312         tcp->ack_seq = lro->tcp_ack;
7313         tcp->window = lro->window;
7314
7315         /* Update tsecr field if this session has timestamps enabled */
7316         if (lro->saw_ts) {
7317                 u32 *ptr = (u32 *)(tcp + 1);
7318                 *(ptr+2) = lro->cur_tsecr;
7319         }
7320
7321         /* Update counters required for calculation of
7322          * average no. of packets aggregated.
7323          */
7324         statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
7325         statinfo->sw_stat.num_aggregations++;
7326 }
7327
7328 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7329                 struct tcphdr *tcp, u32 l4_pyld)
7330 {
7331         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7332         lro->total_len += l4_pyld;
7333         lro->frags_len += l4_pyld;
7334         lro->tcp_next_seq += l4_pyld;
7335         lro->sg_num++;
7336
7337         /* Update ack seq no. and window ad(from this pkt) in LRO object */
7338         lro->tcp_ack = tcp->ack_seq;
7339         lro->window = tcp->window;
7340
7341         if (lro->saw_ts) {
7342                 u32 *ptr;
7343                 /* Update tsecr and tsval from this packet */
7344                 ptr = (u32 *) (tcp + 1);
7345                 lro->cur_tsval = *(ptr + 1);
7346                 lro->cur_tsecr = *(ptr + 2);
7347         }
7348 }
7349
7350 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7351                                     struct tcphdr *tcp, u32 tcp_pyld_len)
7352 {
7353         u8 *ptr;
7354
7355         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7356
7357         if (!tcp_pyld_len) {
7358                 /* Runt frame or a pure ack */
7359                 return -1;
7360         }
7361
7362         if (ip->ihl != 5) /* IP has options */
7363                 return -1;
7364
7365         /* If we see CE codepoint in IP header, packet is not mergeable */
7366         if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
7367                 return -1;
7368
7369         /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7370         if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
7371                                     tcp->ece || tcp->cwr || !tcp->ack) {
7372                 /*
7373                  * Currently recognize only the ack control word and
7374                  * any other control field being set would result in
7375                  * flushing the LRO session
7376                  */
7377                 return -1;
7378         }
7379
7380         /*
7381          * Allow only one TCP timestamp option. Don't aggregate if
7382          * any other options are detected.
7383          */
7384         if (tcp->doff != 5 && tcp->doff != 8)
7385                 return -1;
7386
7387         if (tcp->doff == 8) {
7388                 ptr = (u8 *)(tcp + 1);
7389                 while (*ptr == TCPOPT_NOP)
7390                         ptr++;
7391                 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
7392                         return -1;
7393
7394                 /* Ensure timestamp value increases monotonically */
7395                 if (l_lro)
7396                         if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
7397                                 return -1;
7398
7399                 /* timestamp echo reply should be non-zero */
7400                 if (*((u32 *)(ptr+6)) == 0)
7401                         return -1;
7402         }
7403
7404         return 0;
7405 }
7406
/*
 * s2io_club_tcp_session - LRO dispatch for one received frame.
 * @buffer: start of the frame's L2 header
 * @tcp: out: pointer to the frame's TCP header
 * @tcp_len: out: TCP payload length of the frame
 * @lro: out: session the frame matched or began (NULL if none)
 * @rxdp: receive descriptor for the frame
 * @sp: adapter private data
 *
 * Classifies the frame against the adapter's LRO session table and
 * performs the resulting aggregate/initiate/flush bookkeeping.
 *
 * Return codes consumed by the rx path:
 *   -1 : frame is not LRO-capable at L2 (propagated from
 *        check_L2_lro_capable); send it up unmodified
 *    0 : frame is mergeable but every session slot is busy; *lro == NULL
 *    1 : frame was aggregated into *lro
 *    2 : flush *lro (out-of-order sequence number, or the frame failed
 *        the L3/L4 merge checks); headers already rewritten here
 *    3 : a new session was begun in *lro with this frame as parent
 *    4 : frame was aggregated and the session hit lro_max_aggr_per_sess;
 *        headers rewritten, caller must flush
 *    5 : frame is not L3/L4 mergeable and no session exists; send it up
 */
static int
s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
		      struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;

	/* Locate the IP/TCP headers; bail out if the frame isn't TCP/IPv4. */
	if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
					 rxdp))) {
		DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
			  ip->saddr, ip->daddr);
	} else {
		return ret;
	}

	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* First pass: look for an in-use session matching this socket pair. */
	for (i=0; i<MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &sp->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			/* An out-of-order segment forces a flush (ret 2). */
			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
					  "0x%x, actual 0x%x\n", __FUNCTION__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				sp->mac_control.stats_info->
				   sw_stat.outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
			return 5;
		}

		/* Second pass: claim a free slot for a new session. */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &sp->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
			  __FUNCTION__);
		*lro = NULL;
		return ret;
	}

	/* Perform the action decided above. */
	switch (ret) {
		case 3:
			initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
			break;
		case 2:
			/* Rewrite headers now; caller flushes the session. */
			update_L3L4_header(sp, *lro);
			break;
		case 1:
			aggregate_new_rx(*lro, ip, tcph, *tcp_len);
			/* Hit the per-session aggregation limit: flush. */
			if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
				update_L3L4_header(sp, *lro);
				ret = 4; /* Flush the LRO */
			}
			break;
		default:
			DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
				__FUNCTION__);
			break;
	}

	return ret;
}
7502
7503 static void clear_lro_session(struct lro *lro)
7504 {
7505         static u16 lro_struct_size = sizeof(struct lro);
7506
7507         memset(lro, 0, lro_struct_size);
7508 }
7509
7510 static void queue_rx_frame(struct sk_buff *skb)
7511 {
7512         struct net_device *dev = skb->dev;
7513
7514         skb->protocol = eth_type_trans(skb, dev);
7515         if (napi)
7516                 netif_receive_skb(skb);
7517         else
7518                 netif_rx(skb);
7519 }
7520
7521 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
7522                            struct sk_buff *skb,
7523                            u32 tcp_len)
7524 {
7525         struct sk_buff *first = lro->parent;
7526
7527         first->len += tcp_len;
7528         first->data_len = lro->frags_len;
7529         skb_pull(skb, (skb->len - tcp_len));
7530         if (skb_shinfo(first)->frag_list)
7531                 lro->last_frag->next = skb;
7532         else
7533                 skb_shinfo(first)->frag_list = skb;
7534         first->truesize += skb->truesize;
7535         lro->last_frag = skb;
7536         sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
7537         return;
7538 }