1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2005 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watchdog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all the code parts that were
22  *                        deprecated and also for style-related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables.
29  * rx_ring_num : This can be used to program the number of receive rings used
30  * in the driver.
31  * rx_ring_sz: This defines the number of descriptors each ring can have. This
32  * is also an array of size 8.
33  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34  * tx_fifo_len: This too is an array of 8. Each element defines the number of
35  * Tx descriptors that can be associated with each corresponding FIFO.
36  ************************************************************************/
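/*
 * Illustrative sketch (not part of the original file): the knobs described
 * above are normally exposed to insmod/modprobe through module_param() and
 * module_param_array() declarations further down in the driver, along the
 * lines of:
 *
 *	module_param(tx_fifo_num, int, 0);
 *	module_param_array(tx_fifo_len, uint, NULL, 0);
 *	module_param(rx_ring_num, int, 0);
 *	module_param_array(rx_ring_sz, uint, NULL, 0);
 *
 * The exact declarations used by this driver may differ; treat the above
 * only as an example of the mechanism.
 */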
37
38 #include <linux/config.h>
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/errno.h>
42 #include <linux/ioport.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
58 #include <linux/if_vlan.h>
59
60 #include <asm/system.h>
61 #include <asm/uaccess.h>
62 #include <asm/io.h>
63
64 /* local include */
65 #include "s2io.h"
66 #include "s2io-regs.h"
67
68 /* S2io Driver name & version. */
69 static char s2io_driver_name[] = "Neterion";
70 static char s2io_driver_version[] = "Version 2.0.8.1";
71
72 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
73 {
74         int ret;
75
76         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
77                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
78
79         return ret;
80 }
81
82 /*
83  * Cards with the following subsystem_ids have a link state indication
84  * problem: 600B, 600C, 600D, 640B, 640C and 640D.
85  * The macro below identifies these cards given the subsystem_id.
86  */
87 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
88         (dev_type == XFRAME_I_DEVICE) ?                 \
89                 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
90                  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
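/*
 * Example (illustrative): for an Xframe-I card with subsystem id 0x600C,
 * CARDS_WITH_FAULTY_LINK_INDICATORS(XFRAME_I_DEVICE, 0x600C) evaluates to 1;
 * any Xframe-II device, or a subid outside the 0x600B-0x600D and
 * 0x640B-0x640D ranges, evaluates to 0.
 */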
91
92 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
93                                       ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
94 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
95 #define PANIC   1
96 #define LOW     2
97 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
98 {
99         int level = 0;
100         mac_info_t *mac_control;
101
102         mac_control = &sp->mac_control;
103         if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
104                 level = LOW;
105                 if (rxb_size <= MAX_RXDS_PER_BLOCK) {
106                         level = PANIC;
107                 }
108         }
109
110         return level;
111 }
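/*
 * Illustrative usage sketch (not copied from this driver; the helper names
 * below are hypothetical): callers in the Rx path typically react to the
 * returned level roughly like:
 *
 *	switch (rx_buffer_level(sp, rxb_size, ring)) {
 *	case PANIC:
 *		replenish_ring_now(sp, ring);       (hypothetical helper)
 *		break;
 *	case LOW:
 *		schedule_ring_replenish(sp, ring);  (hypothetical helper)
 *		break;
 *	}
 */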
112
113 /* Ethtool related variables and Macros. */
114 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
115         "Register test\t(offline)",
116         "Eeprom test\t(offline)",
117         "Link test\t(online)",
118         "RLDRAM test\t(offline)",
119         "BIST Test\t(offline)"
120 };
121
122 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
123         {"tmac_frms"},
124         {"tmac_data_octets"},
125         {"tmac_drop_frms"},
126         {"tmac_mcst_frms"},
127         {"tmac_bcst_frms"},
128         {"tmac_pause_ctrl_frms"},
129         {"tmac_any_err_frms"},
130         {"tmac_vld_ip_octets"},
131         {"tmac_vld_ip"},
132         {"tmac_drop_ip"},
133         {"tmac_icmp"},
134         {"tmac_rst_tcp"},
135         {"tmac_tcp"},
136         {"tmac_udp"},
137         {"rmac_vld_frms"},
138         {"rmac_data_octets"},
139         {"rmac_fcs_err_frms"},
140         {"rmac_drop_frms"},
141         {"rmac_vld_mcst_frms"},
142         {"rmac_vld_bcst_frms"},
143         {"rmac_in_rng_len_err_frms"},
144         {"rmac_long_frms"},
145         {"rmac_pause_ctrl_frms"},
146         {"rmac_discarded_frms"},
147         {"rmac_usized_frms"},
148         {"rmac_osized_frms"},
149         {"rmac_frag_frms"},
150         {"rmac_jabber_frms"},
151         {"rmac_ip"},
152         {"rmac_ip_octets"},
153         {"rmac_hdr_err_ip"},
154         {"rmac_drop_ip"},
155         {"rmac_icmp"},
156         {"rmac_tcp"},
157         {"rmac_udp"},
158         {"rmac_err_drp_udp"},
159         {"rmac_pause_cnt"},
160         {"rmac_accepted_ip"},
161         {"rmac_err_tcp"},
162         {"\n DRIVER STATISTICS"},
163         {"single_bit_ecc_errs"},
164         {"double_bit_ecc_errs"},
165 };
166
167 #define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
168 #define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
169
170 #define S2IO_TEST_LEN   sizeof(s2io_gstrings) / ETH_GSTRING_LEN
171 #define S2IO_STRINGS_LEN        S2IO_TEST_LEN * ETH_GSTRING_LEN
172
173 #define S2IO_TIMER_CONF(timer, handle, arg, exp)                \
174                         init_timer(&timer);                     \
175                         timer.function = handle;                \
176                         timer.data = (unsigned long) arg;       \
177                         mod_timer(&timer, (jiffies + exp))      \
178
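/*
 * Illustrative usage (the timer field, handler and expiry value below are
 * examples only): arming a timer to run a handler after half a second with
 * this macro looks roughly like:
 *
 *	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ / 2));
 *
 * i.e. the timer is initialized, given its callback and callback argument,
 * and started, all in one statement.
 */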
179 /* Add the vlan */
180 static void s2io_vlan_rx_register(struct net_device *dev,
181                                         struct vlan_group *grp)
182 {
183         nic_t *nic = dev->priv;
184         unsigned long flags;
185
186         spin_lock_irqsave(&nic->tx_lock, flags);
187         nic->vlgrp = grp;
188         spin_unlock_irqrestore(&nic->tx_lock, flags);
189 }
190
191 /* Unregister the vlan */
192 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
193 {
194         nic_t *nic = dev->priv;
195         unsigned long flags;
196
197         spin_lock_irqsave(&nic->tx_lock, flags);
198         if (nic->vlgrp)
199                 nic->vlgrp->vlan_devices[vid] = NULL;
200         spin_unlock_irqrestore(&nic->tx_lock, flags);
201 }
202
203 /*
204  * Constants to be programmed into the Xena's registers, to configure
205  * the XAUI.
206  */
207
208 #define SWITCH_SIGN     0xA5A5A5A5A5A5A5A5ULL
209 #define END_SIGN        0x0
210
211 static u64 herc_act_dtx_cfg[] = {
212         /* Set address */
213         0x8000051536750000ULL, 0x80000515367500E0ULL,
214         /* Write data */
215         0x8000051536750004ULL, 0x80000515367500E4ULL,
216         /* Set address */
217         0x80010515003F0000ULL, 0x80010515003F00E0ULL,
218         /* Write data */
219         0x80010515003F0004ULL, 0x80010515003F00E4ULL,
220         /* Set address */
221         0x801205150D440000ULL, 0x801205150D4400E0ULL,
222         /* Write data */
223         0x801205150D440004ULL, 0x801205150D4400E4ULL,
224         /* Set address */
225         0x80020515F2100000ULL, 0x80020515F21000E0ULL,
226         /* Write data */
227         0x80020515F2100004ULL, 0x80020515F21000E4ULL,
228         /* Done */
229         END_SIGN
230 };
231
232 static u64 xena_mdio_cfg[] = {
233         /* Reset PMA PLL */
234         0xC001010000000000ULL, 0xC0010100000000E0ULL,
235         0xC0010100008000E4ULL,
236         /* Remove Reset from PMA PLL */
237         0xC001010000000000ULL, 0xC0010100000000E0ULL,
238         0xC0010100000000E4ULL,
239         END_SIGN
240 };
241
242 static u64 xena_dtx_cfg[] = {
243         0x8000051500000000ULL, 0x80000515000000E0ULL,
244         0x80000515D93500E4ULL, 0x8001051500000000ULL,
245         0x80010515000000E0ULL, 0x80010515001E00E4ULL,
246         0x8002051500000000ULL, 0x80020515000000E0ULL,
247         0x80020515F21000E4ULL,
248         /* Set PADLOOPBACKN */
249         0x8002051500000000ULL, 0x80020515000000E0ULL,
250         0x80020515B20000E4ULL, 0x8003051500000000ULL,
251         0x80030515000000E0ULL, 0x80030515B20000E4ULL,
252         0x8004051500000000ULL, 0x80040515000000E0ULL,
253         0x80040515B20000E4ULL, 0x8005051500000000ULL,
254         0x80050515000000E0ULL, 0x80050515B20000E4ULL,
255         SWITCH_SIGN,
256         /* Remove PADLOOPBACKN */
257         0x8002051500000000ULL, 0x80020515000000E0ULL,
258         0x80020515F20000E4ULL, 0x8003051500000000ULL,
259         0x80030515000000E0ULL, 0x80030515F20000E4ULL,
260         0x8004051500000000ULL, 0x80040515000000E0ULL,
261         0x80040515F20000E4ULL, 0x8005051500000000ULL,
262         0x80050515000000E0ULL, 0x80050515F20000E4ULL,
263         END_SIGN
264 };
265
266 /*
267  * Constants for fixing the MAC address problem seen mostly on
268  * Alpha machines.
269  */
270 static u64 fix_mac[] = {
271         0x0060000000000000ULL, 0x0060600000000000ULL,
272         0x0040600000000000ULL, 0x0000600000000000ULL,
273         0x0020600000000000ULL, 0x0060600000000000ULL,
274         0x0020600000000000ULL, 0x0060600000000000ULL,
275         0x0020600000000000ULL, 0x0060600000000000ULL,
276         0x0020600000000000ULL, 0x0060600000000000ULL,
277         0x0020600000000000ULL, 0x0060600000000000ULL,
278         0x0020600000000000ULL, 0x0060600000000000ULL,
279         0x0020600000000000ULL, 0x0060600000000000ULL,
280         0x0020600000000000ULL, 0x0060600000000000ULL,
281         0x0020600000000000ULL, 0x0060600000000000ULL,
282         0x0020600000000000ULL, 0x0060600000000000ULL,
283         0x0020600000000000ULL, 0x0000600000000000ULL,
284         0x0040600000000000ULL, 0x0060600000000000ULL,
285         END_SIGN
286 };
287
288 /* Module Loadable parameters. */
289 static unsigned int tx_fifo_num = 1;
290 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
291     {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
292 static unsigned int rx_ring_num = 1;
293 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
294     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
295 static unsigned int rts_frm_len[MAX_RX_RINGS] =
296     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
297 static unsigned int use_continuous_tx_intrs = 1;
298 static unsigned int rmac_pause_time = 65535;
299 static unsigned int mc_pause_threshold_q0q3 = 187;
300 static unsigned int mc_pause_threshold_q4q7 = 187;
301 static unsigned int shared_splits;
302 static unsigned int tmac_util_period = 5;
303 static unsigned int rmac_util_period = 5;
304 static unsigned int bimodal = 0;
305 #ifndef CONFIG_S2IO_NAPI
306 static unsigned int indicate_max_pkts;
307 #endif
308 /* Frequency of Rx desc syncs expressed as power of 2 */
309 static unsigned int rxsync_frequency = 3;
310
311 /*
312  * S2IO device table.
313  * This table lists all the devices that this driver supports.
314  */
315 static struct pci_device_id s2io_tbl[] __devinitdata = {
316         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
317          PCI_ANY_ID, PCI_ANY_ID},
318         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
319          PCI_ANY_ID, PCI_ANY_ID},
320         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
321          PCI_ANY_ID, PCI_ANY_ID},
322         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
323          PCI_ANY_ID, PCI_ANY_ID},
324         {0,}
325 };
326
327 MODULE_DEVICE_TABLE(pci, s2io_tbl);
328
329 static struct pci_driver s2io_driver = {
330       .name = "S2IO",
331       .id_table = s2io_tbl,
332       .probe = s2io_init_nic,
333       .remove = __devexit_p(s2io_rem_nic),
334 };
335
336 /* A helper macro used by both the init and free shared-mem functions. */
337 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
338
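/*
 * Worked example (illustrative): TXD_MEM_PAGE_CNT() is simply a ceiling
 * division. If lst_per_page works out to 16 TxD lists per page, a FIFO of
 * 500 descriptors needs TXD_MEM_PAGE_CNT(500, 16) = (500 + 16 - 1) / 16 = 32
 * pages, with the last page only partially used.
 */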
339 /**
340  * init_shared_mem - Allocation and Initialization of Memory
341  * @nic: Device private variable.
342  * Description: The function allocates all the memory areas shared
343  * between the NIC and the driver. This includes Tx descriptors,
344  * Rx descriptors and the statistics block.
345  */
346
347 static int init_shared_mem(struct s2io_nic *nic)
348 {
349         u32 size;
350         void *tmp_v_addr, *tmp_v_addr_next;
351         dma_addr_t tmp_p_addr, tmp_p_addr_next;
352         RxD_block_t *pre_rxd_blk = NULL;
353         int i, j, blk_cnt, rx_sz, tx_sz;
354         int lst_size, lst_per_page;
355         struct net_device *dev = nic->dev;
356 #ifdef CONFIG_2BUFF_MODE
357         unsigned long tmp;
358         buffAdd_t *ba;
359 #endif
360
361         mac_info_t *mac_control;
362         struct config_param *config;
363
364         mac_control = &nic->mac_control;
365         config = &nic->config;
366
367
368         /* Allocation and initialization of TXDLs in FIFOs */
369         size = 0;
370         for (i = 0; i < config->tx_fifo_num; i++) {
371                 size += config->tx_cfg[i].fifo_len;
372         }
373         if (size > MAX_AVAILABLE_TXDS) {
374                 DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
375                           __FUNCTION__);
376                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
377                 return FAILURE;
378         }
379
380         lst_size = (sizeof(TxD_t) * config->max_txds);
381         tx_sz = lst_size * size;
382         lst_per_page = PAGE_SIZE / lst_size;
383
384         for (i = 0; i < config->tx_fifo_num; i++) {
385                 int fifo_len = config->tx_cfg[i].fifo_len;
386                 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
387                 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
388                                                           GFP_KERNEL);
389                 if (!mac_control->fifos[i].list_info) {
390                         DBG_PRINT(ERR_DBG,
391                                   "Malloc failed for list_info\n");
392                         return -ENOMEM;
393                 }
394                 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
395         }
396         for (i = 0; i < config->tx_fifo_num; i++) {
397                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
398                                                 lst_per_page);
399                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
400                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
401                     config->tx_cfg[i].fifo_len - 1;
402                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
403                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
404                     config->tx_cfg[i].fifo_len - 1;
405                 mac_control->fifos[i].fifo_no = i;
406                 mac_control->fifos[i].nic = nic;
407                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 1;
408
409                 for (j = 0; j < page_num; j++) {
410                         int k = 0;
411                         dma_addr_t tmp_p;
412                         void *tmp_v;
413                         tmp_v = pci_alloc_consistent(nic->pdev,
414                                                      PAGE_SIZE, &tmp_p);
415                         if (!tmp_v) {
416                                 DBG_PRINT(ERR_DBG,
417                                           "pci_alloc_consistent ");
418                                 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
419                                 return -ENOMEM;
420                         }
421                         /* If we got a zero DMA address (can happen on
422                          * certain platforms like PPC), reallocate.
423                          * Store the virtual address of the page we don't
424                          * want, so it can be freed later.
425                          */
426                         if (!tmp_p) {
427                                 mac_control->zerodma_virt_addr = tmp_v;
428                                 DBG_PRINT(INIT_DBG, 
429                                 "%s: Zero DMA address for TxDL. ", dev->name);
430                                 DBG_PRINT(INIT_DBG, 
431                                 "Virtual address %p\n", tmp_v);
432                                 tmp_v = pci_alloc_consistent(nic->pdev,
433                                                      PAGE_SIZE, &tmp_p);
434                                 if (!tmp_v) {
435                                         DBG_PRINT(ERR_DBG,
436                                           "pci_alloc_consistent ");
437                                         DBG_PRINT(ERR_DBG, "failed for TxDL\n");
438                                         return -ENOMEM;
439                                 }
440                         }
441                         while (k < lst_per_page) {
442                                 int l = (j * lst_per_page) + k;
443                                 if (l == config->tx_cfg[i].fifo_len)
444                                         break;
445                                 mac_control->fifos[i].list_info[l].list_virt_addr =
446                                     tmp_v + (k * lst_size);
447                                 mac_control->fifos[i].list_info[l].list_phy_addr =
448                                     tmp_p + (k * lst_size);
449                                 k++;
450                         }
451                 }
452         }
453
454         /* Allocation and initialization of RXDs in Rings */
455         size = 0;
456         for (i = 0; i < config->rx_ring_num; i++) {
457                 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
458                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
459                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
460                                   i);
461                         DBG_PRINT(ERR_DBG, "RxDs per Block");
462                         return FAILURE;
463                 }
464                 size += config->rx_cfg[i].num_rxd;
465                 mac_control->rings[i].block_count =
466                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
467                 mac_control->rings[i].pkt_cnt =
468                     config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
469         }
470         size = (size * (sizeof(RxD_t)));
471         rx_sz = size;
472
473         for (i = 0; i < config->rx_ring_num; i++) {
474                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
475                 mac_control->rings[i].rx_curr_get_info.offset = 0;
476                 mac_control->rings[i].rx_curr_get_info.ring_len =
477                     config->rx_cfg[i].num_rxd - 1;
478                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
479                 mac_control->rings[i].rx_curr_put_info.offset = 0;
480                 mac_control->rings[i].rx_curr_put_info.ring_len =
481                     config->rx_cfg[i].num_rxd - 1;
482                 mac_control->rings[i].nic = nic;
483                 mac_control->rings[i].ring_no = i;
484
485                 blk_cnt =
486                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
487                 /*  Allocating all the Rx blocks */
488                 for (j = 0; j < blk_cnt; j++) {
489 #ifndef CONFIG_2BUFF_MODE
490                         size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
491 #else
492                         size = SIZE_OF_BLOCK;
493 #endif
494                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
495                                                           &tmp_p_addr);
496                         if (tmp_v_addr == NULL) {
497                                 /*
498                                  * In case of failure, free_shared_mem()
499                                  * is called, which should free any
500                                  * memory that was alloced till the
501                                  * failure happened.
502                                  */
503                                 mac_control->rings[i].rx_blocks[j].block_virt_addr =
504                                     tmp_v_addr;
505                                 return -ENOMEM;
506                         }
507                         memset(tmp_v_addr, 0, size);
508                         mac_control->rings[i].rx_blocks[j].block_virt_addr =
509                                 tmp_v_addr;
510                         mac_control->rings[i].rx_blocks[j].block_dma_addr =
511                                 tmp_p_addr;
512                 }
513                 /* Interlinking all Rx Blocks */
514                 for (j = 0; j < blk_cnt; j++) {
515                         tmp_v_addr =
516                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
517                         tmp_v_addr_next =
518                                 mac_control->rings[i].rx_blocks[(j + 1) %
519                                               blk_cnt].block_virt_addr;
520                         tmp_p_addr =
521                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
522                         tmp_p_addr_next =
523                                 mac_control->rings[i].rx_blocks[(j + 1) %
524                                               blk_cnt].block_dma_addr;
525
526                         pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
527                         pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
528                                                                  * marker.
529                                                                  */
530 #ifndef CONFIG_2BUFF_MODE
531                         pre_rxd_blk->reserved_2_pNext_RxD_block =
532                             (unsigned long) tmp_v_addr_next;
533 #endif
534                         pre_rxd_blk->pNext_RxD_Blk_physical =
535                             (u64) tmp_p_addr_next;
536                 }
537         }
538
539 #ifdef CONFIG_2BUFF_MODE
540         /*
541          * Allocation of Storages for buffer addresses in 2BUFF mode
542          * and the buffers as well.
543          */
544         for (i = 0; i < config->rx_ring_num; i++) {
545                 blk_cnt =
546                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
547                 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
548                                      GFP_KERNEL);
549                 if (!mac_control->rings[i].ba)
550                         return -ENOMEM;
551                 for (j = 0; j < blk_cnt; j++) {
552                         int k = 0;
553                         mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
554                                                  (MAX_RXDS_PER_BLOCK + 1)),
555                                                 GFP_KERNEL);
556                         if (!mac_control->rings[i].ba[j])
557                                 return -ENOMEM;
558                         while (k != MAX_RXDS_PER_BLOCK) {
559                                 ba = &mac_control->rings[i].ba[j][k];
560
561                                 ba->ba_0_org = (void *) kmalloc
562                                     (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
563                                 if (!ba->ba_0_org)
564                                         return -ENOMEM;
565                                 tmp = (unsigned long) ba->ba_0_org;
566                                 tmp += ALIGN_SIZE;
567                                 tmp &= ~((unsigned long) ALIGN_SIZE);
568                                 ba->ba_0 = (void *) tmp;
569
570                                 ba->ba_1_org = (void *) kmalloc
571                                     (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
572                                 if (!ba->ba_1_org)
573                                         return -ENOMEM;
574                                 tmp = (unsigned long) ba->ba_1_org;
575                                 tmp += ALIGN_SIZE;
576                                 tmp &= ~((unsigned long) ALIGN_SIZE);
577                                 ba->ba_1 = (void *) tmp;
578                                 k++;
579                         }
580                 }
581         }
582 #endif
583
584         /* Allocation and initialization of Statistics block */
585         size = sizeof(StatInfo_t);
586         mac_control->stats_mem = pci_alloc_consistent
587             (nic->pdev, size, &mac_control->stats_mem_phy);
588
589         if (!mac_control->stats_mem) {
590                 /*
591                  * In case of failure, free_shared_mem() is called, which
592                  * should free any memory that was alloced till the
593                  * failure happened.
594                  */
595                 return -ENOMEM;
596         }
597         mac_control->stats_mem_sz = size;
598
599         tmp_v_addr = mac_control->stats_mem;
600         mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
601         memset(tmp_v_addr, 0, size);
602         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
603                   (unsigned long long) tmp_p_addr);
604
605         return SUCCESS;
606 }
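/*
 * Note on the Rx block chaining above (illustrative): because the
 * "next block" pointers are taken modulo blk_cnt, the blocks of a ring form
 * a circular list.  With blk_cnt == 3, for example, block 0 links to block 1,
 * block 1 to block 2, and block 2 back to block 0, so the hardware can keep
 * walking RxDs without running off the end of a block.
 */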
607
608 /**
609  * free_shared_mem - Free the allocated Memory
610  * @nic:  Device private variable.
611  * Description: This function frees all memory allocated by the
612  * init_shared_mem() function and returns it to the kernel.
613  */
614
615 static void free_shared_mem(struct s2io_nic *nic)
616 {
617         int i, j, blk_cnt, size;
618         void *tmp_v_addr;
619         dma_addr_t tmp_p_addr;
620         mac_info_t *mac_control;
621         struct config_param *config;
622         int lst_size, lst_per_page;
623         struct net_device *dev = nic->dev;
624
625         if (!nic)
626                 return;
627
628         mac_control = &nic->mac_control;
629         config = &nic->config;
630
631         lst_size = (sizeof(TxD_t) * config->max_txds);
632         lst_per_page = PAGE_SIZE / lst_size;
633
634         for (i = 0; i < config->tx_fifo_num; i++) {
635                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
636                                                 lst_per_page);
637                 for (j = 0; j < page_num; j++) {
638                         int mem_blks = (j * lst_per_page);
639                         if (!mac_control->fifos[i].list_info)
640                                 return; 
641                         if (!mac_control->fifos[i].list_info[mem_blks].
642                                  list_virt_addr)
643                                 break;
644                         pci_free_consistent(nic->pdev, PAGE_SIZE,
645                                             mac_control->fifos[i].
646                                             list_info[mem_blks].
647                                             list_virt_addr,
648                                             mac_control->fifos[i].
649                                             list_info[mem_blks].
650                                             list_phy_addr);
651                 }
652                 /* If we got a zero DMA address during allocation,
653                  * free the page now
654                  */
655                 if (mac_control->zerodma_virt_addr) {
656                         pci_free_consistent(nic->pdev, PAGE_SIZE,
657                                             mac_control->zerodma_virt_addr,
658                                             (dma_addr_t)0);
659                         DBG_PRINT(INIT_DBG, 
660                                 "%s: Freeing TxDL with zero DMA addr. ",
661                                 dev->name);
662                         DBG_PRINT(INIT_DBG, "Virtual address %p\n",
663                                 mac_control->zerodma_virt_addr);
664                 }
665                 kfree(mac_control->fifos[i].list_info);
666         }
667
668 #ifndef CONFIG_2BUFF_MODE
669         size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
670 #else
671         size = SIZE_OF_BLOCK;
672 #endif
673         for (i = 0; i < config->rx_ring_num; i++) {
674                 blk_cnt = mac_control->rings[i].block_count;
675                 for (j = 0; j < blk_cnt; j++) {
676                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
677                                 block_virt_addr;
678                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
679                                 block_dma_addr;
680                         if (tmp_v_addr == NULL)
681                                 break;
682                         pci_free_consistent(nic->pdev, size,
683                                             tmp_v_addr, tmp_p_addr);
684                 }
685         }
686
687 #ifdef CONFIG_2BUFF_MODE
688         /* Freeing buffer storage addresses in 2BUFF mode. */
689         for (i = 0; i < config->rx_ring_num; i++) {
690                 blk_cnt =
691                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
692                 for (j = 0; j < blk_cnt; j++) {
693                         int k = 0;
694                         if (!mac_control->rings[i].ba[j])
695                                 continue;
696                         while (k != MAX_RXDS_PER_BLOCK) {
697                                 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
698                                 kfree(ba->ba_0_org);
699                                 kfree(ba->ba_1_org);
700                                 k++;
701                         }
702                         kfree(mac_control->rings[i].ba[j]);
703                 }
704                 if (mac_control->rings[i].ba)
705                         kfree(mac_control->rings[i].ba);
706         }
707 #endif
708
709         if (mac_control->stats_mem) {
710                 pci_free_consistent(nic->pdev,
711                                     mac_control->stats_mem_sz,
712                                     mac_control->stats_mem,
713                                     mac_control->stats_mem_phy);
714         }
715 }
716
717 /**
718  * s2io_verify_pci_mode - Determine the PCI/PCI-X mode the adapter is in.
719  */
720
721 static int s2io_verify_pci_mode(nic_t *nic)
722 {
723         XENA_dev_config_t __iomem *bar0 = nic->bar0;
724         register u64 val64 = 0;
725         int     mode;
726
727         val64 = readq(&bar0->pci_mode);
728         mode = (u8)GET_PCI_MODE(val64);
729
730         if ( val64 & PCI_MODE_UNKNOWN_MODE)
731                 return -1;      /* Unknown PCI mode */
732         return mode;
733 }
734
735
736 /**
737  * s2io_print_pci_mode - Print the PCI/PCI-X bus mode and record the bus speed.
738  */
739 static int s2io_print_pci_mode(nic_t *nic)
740 {
741         XENA_dev_config_t __iomem *bar0 = nic->bar0;
742         register u64 val64 = 0;
743         int     mode;
744         struct config_param *config = &nic->config;
745
746         val64 = readq(&bar0->pci_mode);
747         mode = (u8)GET_PCI_MODE(val64);
748
749         if ( val64 & PCI_MODE_UNKNOWN_MODE)
750                 return -1;      /* Unknown PCI mode */
751
752         if (val64 & PCI_MODE_32_BITS) {
753                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
754         } else {
755                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
756         }
757
758         switch(mode) {
759                 case PCI_MODE_PCI_33:
760                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
761                         config->bus_speed = 33;
762                         break;
763                 case PCI_MODE_PCI_66:
764                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
765                         config->bus_speed = 133;
766                         break;
767                 case PCI_MODE_PCIX_M1_66:
768                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
769                         config->bus_speed = 133; /* Herc doubles the clock rate */
770                         break;
771                 case PCI_MODE_PCIX_M1_100:
772                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
773                         config->bus_speed = 200;
774                         break;
775                 case PCI_MODE_PCIX_M1_133:
776                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
777                         config->bus_speed = 266;
778                         break;
779                 case PCI_MODE_PCIX_M2_66:
780                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
781                         config->bus_speed = 133;
782                         break;
783                 case PCI_MODE_PCIX_M2_100:
784                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
785                         config->bus_speed = 200;
786                         break;
787                 case PCI_MODE_PCIX_M2_133:
788                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
789                         config->bus_speed = 266;
790                         break;
791                 default:
792                         return -1;      /* Unsupported bus speed */
793         }
794
795         return mode;
796 }
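/*
 * Example (illustrative): on an Xframe-II card operating in PCI-X M2 133
 * mode, the function above prints "266MHz PCIX(M2) bus" and records
 * config->bus_speed = 266.  init_nic() later derives its default Tx
 * interrupt timer count from this value, roughly (266 * 125) / 2 = 16625.
 */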
797
798 /**
799  *  init_nic - Initialization of hardware
800  *  @nic: device private variable
801  *  Description: The function sequentially configures every block
802  *  of the H/W from their reset values.
803  *  Return Value:  SUCCESS on success and
804  *  '-1' on failure (endian settings incorrect).
805  */
806
807 static int init_nic(struct s2io_nic *nic)
808 {
809         XENA_dev_config_t __iomem *bar0 = nic->bar0;
810         struct net_device *dev = nic->dev;
811         register u64 val64 = 0;
812         void __iomem *add;
813         u32 time;
814         int i, j;
815         mac_info_t *mac_control;
816         struct config_param *config;
817         int mdio_cnt = 0, dtx_cnt = 0;
818         unsigned long long mem_share;
819         int mem_size;
820
821         mac_control = &nic->mac_control;
822         config = &nic->config;
823
824         /* Set the swapper control on the card */
825         if(s2io_set_swapper(nic)) {
826                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
827                 return -1;
828         }
829
830         /*
831          * Herc requires EOI to be removed from reset before XGXS, so do that first.
832          */
833         if (nic->device_type & XFRAME_II_DEVICE) {
834                 val64 = 0xA500000000ULL;
835                 writeq(val64, &bar0->sw_reset);
836                 msleep(500);
837                 val64 = readq(&bar0->sw_reset);
838         }
839
840         /* Remove XGXS from reset state */
841         val64 = 0;
842         writeq(val64, &bar0->sw_reset);
843         msleep(500);
844         val64 = readq(&bar0->sw_reset);
845
846         /*  Enable Receiving broadcasts */
847         add = &bar0->mac_cfg;
848         val64 = readq(&bar0->mac_cfg);
849         val64 |= MAC_RMAC_BCAST_ENABLE;
850         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
851         writel((u32) val64, add);
852         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
853         writel((u32) (val64 >> 32), (add + 4));
854
855         /* Read registers in all blocks */
856         val64 = readq(&bar0->mac_int_mask);
857         val64 = readq(&bar0->mc_int_mask);
858         val64 = readq(&bar0->xgxs_int_mask);
859
860         /*  Set MTU */
861         val64 = dev->mtu;
862         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
863
864         /*
865          * Configuring the XAUI Interface of Xena.
866          * ***************************************
867          * To Configure the Xena's XAUI, one has to write a series
868          * of 64 bit values into two registers in a particular
869          * sequence. Hence a macro 'SWITCH_SIGN' has been defined
870          * which will be defined in the array of configuration values
871          * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
872  * to switch writing from one register to another. We continue
873          * writing these values until we encounter the 'END_SIGN' macro.
874  * For example, after making a series of 21 writes into the
875  * dtx_control register, the 'SWITCH_SIGN' appears and hence we
876          * start writing into mdio_control until we encounter END_SIGN.
877          */
878         if (nic->device_type & XFRAME_II_DEVICE) {
879                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
880                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
881                                           &bar0->dtx_control, UF);
882                         if (dtx_cnt & 0x1)
883                                 msleep(1); /* Necessary!! */
884                         dtx_cnt++;
885                 }
886         } else {
887                 while (1) {
888                       dtx_cfg:
889                         while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
890                                 if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
891                                         dtx_cnt++;
892                                         goto mdio_cfg;
893                                 }
894                                 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
895                                                   &bar0->dtx_control, UF);
896                                 val64 = readq(&bar0->dtx_control);
897                                 dtx_cnt++;
898                         }
899                       mdio_cfg:
900                         while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
901                                 if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
902                                         mdio_cnt++;
903                                         goto dtx_cfg;
904                                 }
905                                 SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
906                                                   &bar0->mdio_control, UF);
907                                 val64 = readq(&bar0->mdio_control);
908                                 mdio_cnt++;
909                         }
910                         if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
911                             (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
912                                 break;
913                         } else {
914                                 goto dtx_cfg;
915                         }
916                 }
917         }
918
919         /*  Tx DMA Initialization */
920         val64 = 0;
921         writeq(val64, &bar0->tx_fifo_partition_0);
922         writeq(val64, &bar0->tx_fifo_partition_1);
923         writeq(val64, &bar0->tx_fifo_partition_2);
924         writeq(val64, &bar0->tx_fifo_partition_3);
925
926
927         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
928                 val64 |=
929                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
930                          13) | vBIT(config->tx_cfg[i].fifo_priority,
931                                     ((i * 32) + 5), 3);
932
933                 if (i == (config->tx_fifo_num - 1)) {
934                         if (i % 2 == 0)
935                                 i++;
936                 }
937
938                 switch (i) {
939                 case 1:
940                         writeq(val64, &bar0->tx_fifo_partition_0);
941                         val64 = 0;
942                         break;
943                 case 3:
944                         writeq(val64, &bar0->tx_fifo_partition_1);
945                         val64 = 0;
946                         break;
947                 case 5:
948                         writeq(val64, &bar0->tx_fifo_partition_2);
949                         val64 = 0;
950                         break;
951                 case 7:
952                         writeq(val64, &bar0->tx_fifo_partition_3);
953                         break;
954                 }
955         }
956
957         /* Enable Tx FIFO partition 0. */
958         val64 = readq(&bar0->tx_fifo_partition_0);
959         val64 |= BIT(0);        /* To enable the FIFO partition. */
960         writeq(val64, &bar0->tx_fifo_partition_0);
961
962         /*
963          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
964          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
965          */
966         if ((nic->device_type == XFRAME_I_DEVICE) &&
967                 (get_xena_rev_id(nic->pdev) < 4))
968                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
969
970         val64 = readq(&bar0->tx_fifo_partition_0);
971         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
972                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
973
974         /*
975          * Initialization of Tx_PA_CONFIG register to ignore packet
976          * integrity checking.
977          */
978         val64 = readq(&bar0->tx_pa_cfg);
979         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
980             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
981         writeq(val64, &bar0->tx_pa_cfg);
982
983         /* Rx DMA initialization. */
984         val64 = 0;
985         for (i = 0; i < config->rx_ring_num; i++) {
986                 val64 |=
987                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
988                          3);
989         }
990         writeq(val64, &bar0->rx_queue_priority);
991
992         /*
993          * Allocating equal share of memory to all the
994          * configured Rings.
995          */
996         val64 = 0;
997         if (nic->device_type & XFRAME_II_DEVICE)
998                 mem_size = 32;
999         else
1000                 mem_size = 64;
1001
1002         for (i = 0; i < config->rx_ring_num; i++) {
1003                 switch (i) {
1004                 case 0:
1005                         mem_share = (mem_size / config->rx_ring_num +
1006                                      mem_size % config->rx_ring_num);
1007                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1008                         continue;
1009                 case 1:
1010                         mem_share = (mem_size / config->rx_ring_num);
1011                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1012                         continue;
1013                 case 2:
1014                         mem_share = (mem_size / config->rx_ring_num);
1015                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1016                         continue;
1017                 case 3:
1018                         mem_share = (mem_size / config->rx_ring_num);
1019                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1020                         continue;
1021                 case 4:
1022                         mem_share = (mem_size / config->rx_ring_num);
1023                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1024                         continue;
1025                 case 5:
1026                         mem_share = (mem_size / config->rx_ring_num);
1027                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1028                         continue;
1029                 case 6:
1030                         mem_share = (mem_size / config->rx_ring_num);
1031                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1032                         continue;
1033                 case 7:
1034                         mem_share = (mem_size / config->rx_ring_num);
1035                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1036                         continue;
1037                 }
1038         }
1039         writeq(val64, &bar0->rx_queue_cfg);
1040
1041         /*
1042          * Filling Tx round robin registers
1043          * as per the number of FIFOs
1044          */
1045         switch (config->tx_fifo_num) {
1046         case 1:
1047                 val64 = 0x0000000000000000ULL;
1048                 writeq(val64, &bar0->tx_w_round_robin_0);
1049                 writeq(val64, &bar0->tx_w_round_robin_1);
1050                 writeq(val64, &bar0->tx_w_round_robin_2);
1051                 writeq(val64, &bar0->tx_w_round_robin_3);
1052                 writeq(val64, &bar0->tx_w_round_robin_4);
1053                 break;
1054         case 2:
1055                 val64 = 0x0000010000010000ULL;
1056                 writeq(val64, &bar0->tx_w_round_robin_0);
1057                 val64 = 0x0100000100000100ULL;
1058                 writeq(val64, &bar0->tx_w_round_robin_1);
1059                 val64 = 0x0001000001000001ULL;
1060                 writeq(val64, &bar0->tx_w_round_robin_2);
1061                 val64 = 0x0000010000010000ULL;
1062                 writeq(val64, &bar0->tx_w_round_robin_3);
1063                 val64 = 0x0100000000000000ULL;
1064                 writeq(val64, &bar0->tx_w_round_robin_4);
1065                 break;
1066         case 3:
1067                 val64 = 0x0001000102000001ULL;
1068                 writeq(val64, &bar0->tx_w_round_robin_0);
1069                 val64 = 0x0001020000010001ULL;
1070                 writeq(val64, &bar0->tx_w_round_robin_1);
1071                 val64 = 0x0200000100010200ULL;
1072                 writeq(val64, &bar0->tx_w_round_robin_2);
1073                 val64 = 0x0001000102000001ULL;
1074                 writeq(val64, &bar0->tx_w_round_robin_3);
1075                 val64 = 0x0001020000000000ULL;
1076                 writeq(val64, &bar0->tx_w_round_robin_4);
1077                 break;
1078         case 4:
1079                 val64 = 0x0001020300010200ULL;
1080                 writeq(val64, &bar0->tx_w_round_robin_0);
1081                 val64 = 0x0100000102030001ULL;
1082                 writeq(val64, &bar0->tx_w_round_robin_1);
1083                 val64 = 0x0200010000010203ULL;
1084                 writeq(val64, &bar0->tx_w_round_robin_2);
1085                 val64 = 0x0001020001000001ULL;
1086                 writeq(val64, &bar0->tx_w_round_robin_3);
1087                 val64 = 0x0203000100000000ULL;
1088                 writeq(val64, &bar0->tx_w_round_robin_4);
1089                 break;
1090         case 5:
1091                 val64 = 0x0001000203000102ULL;
1092                 writeq(val64, &bar0->tx_w_round_robin_0);
1093                 val64 = 0x0001020001030004ULL;
1094                 writeq(val64, &bar0->tx_w_round_robin_1);
1095                 val64 = 0x0001000203000102ULL;
1096                 writeq(val64, &bar0->tx_w_round_robin_2);
1097                 val64 = 0x0001020001030004ULL;
1098                 writeq(val64, &bar0->tx_w_round_robin_3);
1099                 val64 = 0x0001000000000000ULL;
1100                 writeq(val64, &bar0->tx_w_round_robin_4);
1101                 break;
1102         case 6:
1103                 val64 = 0x0001020304000102ULL;
1104                 writeq(val64, &bar0->tx_w_round_robin_0);
1105                 val64 = 0x0304050001020001ULL;
1106                 writeq(val64, &bar0->tx_w_round_robin_1);
1107                 val64 = 0x0203000100000102ULL;
1108                 writeq(val64, &bar0->tx_w_round_robin_2);
1109                 val64 = 0x0304000102030405ULL;
1110                 writeq(val64, &bar0->tx_w_round_robin_3);
1111                 val64 = 0x0001000200000000ULL;
1112                 writeq(val64, &bar0->tx_w_round_robin_4);
1113                 break;
1114         case 7:
1115                 val64 = 0x0001020001020300ULL;
1116                 writeq(val64, &bar0->tx_w_round_robin_0);
1117                 val64 = 0x0102030400010203ULL;
1118                 writeq(val64, &bar0->tx_w_round_robin_1);
1119                 val64 = 0x0405060001020001ULL;
1120                 writeq(val64, &bar0->tx_w_round_robin_2);
1121                 val64 = 0x0304050000010200ULL;
1122                 writeq(val64, &bar0->tx_w_round_robin_3);
1123                 val64 = 0x0102030000000000ULL;
1124                 writeq(val64, &bar0->tx_w_round_robin_4);
1125                 break;
1126         case 8:
1127                 val64 = 0x0001020300040105ULL;
1128                 writeq(val64, &bar0->tx_w_round_robin_0);
1129                 val64 = 0x0200030106000204ULL;
1130                 writeq(val64, &bar0->tx_w_round_robin_1);
1131                 val64 = 0x0103000502010007ULL;
1132                 writeq(val64, &bar0->tx_w_round_robin_2);
1133                 val64 = 0x0304010002060500ULL;
1134                 writeq(val64, &bar0->tx_w_round_robin_3);
1135                 val64 = 0x0103020400000000ULL;
1136                 writeq(val64, &bar0->tx_w_round_robin_4);
1137                 break;
1138         }
1139
1140         /* Filling the Rx round robin registers as per the
1141          * number of Rings and steering based on QoS.
1142          */
1143         switch (config->rx_ring_num) {
1144         case 1:
1145                 val64 = 0x8080808080808080ULL;
1146                 writeq(val64, &bar0->rts_qos_steering);
1147                 break;
1148         case 2:
1149                 val64 = 0x0000010000010000ULL;
1150                 writeq(val64, &bar0->rx_w_round_robin_0);
1151                 val64 = 0x0100000100000100ULL;
1152                 writeq(val64, &bar0->rx_w_round_robin_1);
1153                 val64 = 0x0001000001000001ULL;
1154                 writeq(val64, &bar0->rx_w_round_robin_2);
1155                 val64 = 0x0000010000010000ULL;
1156                 writeq(val64, &bar0->rx_w_round_robin_3);
1157                 val64 = 0x0100000000000000ULL;
1158                 writeq(val64, &bar0->rx_w_round_robin_4);
1159
1160                 val64 = 0x8080808040404040ULL;
1161                 writeq(val64, &bar0->rts_qos_steering);
1162                 break;
1163         case 3:
1164                 val64 = 0x0001000102000001ULL;
1165                 writeq(val64, &bar0->rx_w_round_robin_0);
1166                 val64 = 0x0001020000010001ULL;
1167                 writeq(val64, &bar0->rx_w_round_robin_1);
1168                 val64 = 0x0200000100010200ULL;
1169                 writeq(val64, &bar0->rx_w_round_robin_2);
1170                 val64 = 0x0001000102000001ULL;
1171                 writeq(val64, &bar0->rx_w_round_robin_3);
1172                 val64 = 0x0001020000000000ULL;
1173                 writeq(val64, &bar0->rx_w_round_robin_4);
1174
1175                 val64 = 0x8080804040402020ULL;
1176                 writeq(val64, &bar0->rts_qos_steering);
1177                 break;
1178         case 4:
1179                 val64 = 0x0001020300010200ULL;
1180                 writeq(val64, &bar0->rx_w_round_robin_0);
1181                 val64 = 0x0100000102030001ULL;
1182                 writeq(val64, &bar0->rx_w_round_robin_1);
1183                 val64 = 0x0200010000010203ULL;
1184                 writeq(val64, &bar0->rx_w_round_robin_2);
1185                 val64 = 0x0001020001000001ULL;  
1186                 writeq(val64, &bar0->rx_w_round_robin_3);
1187                 val64 = 0x0203000100000000ULL;
1188                 writeq(val64, &bar0->rx_w_round_robin_4);
1189
1190                 val64 = 0x8080404020201010ULL;
1191                 writeq(val64, &bar0->rts_qos_steering);
1192                 break;
1193         case 5:
1194                 val64 = 0x0001000203000102ULL;
1195                 writeq(val64, &bar0->rx_w_round_robin_0);
1196                 val64 = 0x0001020001030004ULL;
1197                 writeq(val64, &bar0->rx_w_round_robin_1);
1198                 val64 = 0x0001000203000102ULL;
1199                 writeq(val64, &bar0->rx_w_round_robin_2);
1200                 val64 = 0x0001020001030004ULL;
1201                 writeq(val64, &bar0->rx_w_round_robin_3);
1202                 val64 = 0x0001000000000000ULL;
1203                 writeq(val64, &bar0->rx_w_round_robin_4);
1204
1205                 val64 = 0x8080404020201008ULL;
1206                 writeq(val64, &bar0->rts_qos_steering);
1207                 break;
1208         case 6:
1209                 val64 = 0x0001020304000102ULL;
1210                 writeq(val64, &bar0->rx_w_round_robin_0);
1211                 val64 = 0x0304050001020001ULL;
1212                 writeq(val64, &bar0->rx_w_round_robin_1);
1213                 val64 = 0x0203000100000102ULL;
1214                 writeq(val64, &bar0->rx_w_round_robin_2);
1215                 val64 = 0x0304000102030405ULL;
1216                 writeq(val64, &bar0->rx_w_round_robin_3);
1217                 val64 = 0x0001000200000000ULL;
1218                 writeq(val64, &bar0->rx_w_round_robin_4);
1219
1220                 val64 = 0x8080404020100804ULL;
1221                 writeq(val64, &bar0->rts_qos_steering);
1222                 break;
1223         case 7:
1224                 val64 = 0x0001020001020300ULL;
1225                 writeq(val64, &bar0->rx_w_round_robin_0);
1226                 val64 = 0x0102030400010203ULL;
1227                 writeq(val64, &bar0->rx_w_round_robin_1);
1228                 val64 = 0x0405060001020001ULL;
1229                 writeq(val64, &bar0->rx_w_round_robin_2);
1230                 val64 = 0x0304050000010200ULL;
1231                 writeq(val64, &bar0->rx_w_round_robin_3);
1232                 val64 = 0x0102030000000000ULL;
1233                 writeq(val64, &bar0->rx_w_round_robin_4);
1234
1235                 val64 = 0x8080402010080402ULL;
1236                 writeq(val64, &bar0->rts_qos_steering);
1237                 break;
1238         case 8:
1239                 val64 = 0x0001020300040105ULL;
1240                 writeq(val64, &bar0->rx_w_round_robin_0);
1241                 val64 = 0x0200030106000204ULL;
1242                 writeq(val64, &bar0->rx_w_round_robin_1);
1243                 val64 = 0x0103000502010007ULL;
1244                 writeq(val64, &bar0->rx_w_round_robin_2);
1245                 val64 = 0x0304010002060500ULL;
1246                 writeq(val64, &bar0->rx_w_round_robin_3);
1247                 val64 = 0x0103020400000000ULL;
1248                 writeq(val64, &bar0->rx_w_round_robin_4);
1249
1250                 val64 = 0x8040201008040201ULL;
1251                 writeq(val64, &bar0->rts_qos_steering);
1252                 break;
1253         }
1254
1255         /* UDP Fix */
1256         val64 = 0;
1257         for (i = 0; i < 8; i++)
1258                 writeq(val64, &bar0->rts_frm_len_n[i]);
1259
1260         /* Set the default rts frame length for the rings configured */
1261         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1262         for (i = 0 ; i < config->rx_ring_num ; i++)
1263                 writeq(val64, &bar0->rts_frm_len_n[i]);
1264
1265         /* Set the frame length for the configured rings
1266          * desired by the user
1267          */
1268         for (i = 0; i < config->rx_ring_num; i++) {
1269                 /* If rts_frm_len[i] == 0 then it is assumed that the user has
1270                  * not specified frame length steering.
1271                  * If the user provides the frame length then program
1272                  * the rts_frm_len register for those values or else
1273                  * leave it as it is.
1274                  */
1275                 if (rts_frm_len[i] != 0) {
1276                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1277                                 &bar0->rts_frm_len_n[i]);
1278                 }
1279         }
1280
1281         /* Program statistics memory */
1282         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1283
1284         if (nic->device_type == XFRAME_II_DEVICE) {
1285                 val64 = STAT_BC(0x320);
1286                 writeq(val64, &bar0->stat_byte_cnt);
1287         }
1288
1289         /*
1290          * Initializing the sampling rate for the device to calculate the
1291          * bandwidth utilization.
1292          */
1293         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1294             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1295         writeq(val64, &bar0->mac_link_util);
1296
1297
1298         /*
1299          * Initializing the Transmit and Receive Traffic Interrupt
1300          * Scheme.
1301          */
1302         /*
1303          * TTI Initialization. Default Tx timer gets us about
1304          * 250 interrupts per sec. Continuous interrupts are enabled
1305          * by default.
1306          */
1307         if (nic->device_type == XFRAME_II_DEVICE) {
1308                 int count = (nic->config.bus_speed * 125)/2;
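                /*
                 * For example, assuming bus_speed is in MHz, a 133 MHz
                 * PCI-X bus gives count = 133 * 125 / 2 = 8312.
                 */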
1309                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1310         } else {
1311
1312                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1313         }
1314         val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1315             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1316             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1317         if (use_continuous_tx_intrs)
1318                 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1319         writeq(val64, &bar0->tti_data1_mem);
1320
1321         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1322             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1323             TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1324         writeq(val64, &bar0->tti_data2_mem);
1325
1326         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1327         writeq(val64, &bar0->tti_command_mem);
1328
1329         /*
1330          * Once the operation completes, the Strobe bit of the command
1331          * register will be reset. We poll for this particular condition.
1332          * We wait for a maximum of 500ms for the operation to complete;
1333          * if it is not complete by then we return an error.
1334          */
1335         time = 0;
1336         while (TRUE) {
1337                 val64 = readq(&bar0->tti_command_mem);
1338                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1339                         break;
1340                 }
1341                 if (time > 10) {
1342                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1343                                   dev->name);
1344                         return -1;
1345                 }
1346                 msleep(50);
1347                 time++;
1348         }
1349
1350         if (nic->config.bimodal) {
1351                 int k = 0;
1352                 for (k = 0; k < config->rx_ring_num; k++) {
1353                         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1354                         val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1355                         writeq(val64, &bar0->tti_command_mem);
1356
1357                         /*
1358                          * Once the operation completes, the Strobe bit of the
1359                          * command register will be reset. We poll for this
1360                          * condition for a maximum of 500ms; if it is not
1361                          * complete by then we return an error.
1362                          */
1363                         time = 0;
1364                         while (TRUE) {
1365                                 val64 = readq(&bar0->tti_command_mem);
1366                                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1367                                         break;
1368                                 }
1369                                 if (time > 10) {
1370                                         DBG_PRINT(ERR_DBG,
1371                                                 "%s: TTI init Failed\n",
1372                                         dev->name);
1373                                         return -1;
1374                                 }
1375                                 time++;
1376                                 msleep(50);
1377                         }
1378                 }
1379         } else {
1380
1381                 /* RTI Initialization */
1382                 if (nic->device_type == XFRAME_II_DEVICE) {
1383                         /*
1384                          * Programmed to generate approximately 500 interrupts
1385                          * per second
1386                          */
1387                         int count = (nic->config.bus_speed * 125)/4;
1388                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1389                 } else {
1390                         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1391                 }
1392                 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1393                     RTI_DATA1_MEM_RX_URNG_B(0x10) |
1394                     RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1395
1396                 writeq(val64, &bar0->rti_data1_mem);
1397
1398                 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1399                     RTI_DATA2_MEM_RX_UFC_B(0x2) |
1400                     RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1401                 writeq(val64, &bar0->rti_data2_mem);
1402
1403                 for (i = 0; i < config->rx_ring_num; i++) {
1404                         val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1405                                         | RTI_CMD_MEM_OFFSET(i);
1406                         writeq(val64, &bar0->rti_command_mem);
1407
1408                         /*
1409                          * Once the operation completes, the Strobe bit of the
1410                          * command register will be reset. We poll for this
1411                          * particular condition. We wait for a maximum of 500ms
1412                          * for the operation to complete; if it is not complete
1413                          * by then we return an error.
1414                          */
1415                         time = 0;
1416                         while (TRUE) {
1417                                 val64 = readq(&bar0->rti_command_mem);
1418                                 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1419                                         break;
1420                                 }
1421                                 if (time > 10) {
1422                                         DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1423                                                   dev->name);
1424                                         return -1;
1425                                 }
1426                                 time++;
1427                                 msleep(50);
1428                         }
1429                 }
1430         }
1431
1432         /*
1433          * Initializing default Pause threshold values for all
1434          * the 8 Queues on the Rx side.
1435          */
1436         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1437         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1438
1439         /* Disable RMAC PAD STRIPPING */
1440         add = &bar0->mac_cfg;
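        /*
         * mac_cfg appears to be a key-protected register: each 32-bit
         * half-write below is preceded by writing RMAC_CFG_KEY to
         * rmac_cfg_key.
         */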
1441         val64 = readq(&bar0->mac_cfg);
1442         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1443         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1444         writel((u32) (val64), add);
1445         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1446         writel((u32) (val64 >> 32), (add + 4));
1447         val64 = readq(&bar0->mac_cfg);
1448
1449         /*
1450          * Set the time value to be inserted in the pause frame
1451          * generated by xena.
1452          */
1453         val64 = readq(&bar0->rmac_pause_cfg);
1454         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1455         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1456         writeq(val64, &bar0->rmac_pause_cfg);
1457
1458         /*
1459          * Set the Threshold Limit for Generating the pause frame.
1460          * If the amount of data in any Queue exceeds the ratio
1461          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256 of the
1462          * queue size, a pause frame is generated.
1463          */
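        /*
         * Each threshold register below holds four 16-bit per-queue fields;
         * the loops replicate (0xFF00 | threshold) into all four of them.
         */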
1464         val64 = 0;
1465         for (i = 0; i < 4; i++) {
1466                 val64 |=
1467                     (((u64) 0xFF00 | nic->mac_control.
1468                       mc_pause_threshold_q0q3)
1469                      << (i * 2 * 8));
1470         }
1471         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1472
1473         val64 = 0;
1474         for (i = 0; i < 4; i++) {
1475                 val64 |=
1476                     (((u64) 0xFF00 | nic->mac_control.
1477                       mc_pause_threshold_q4q7)
1478                      << (i * 2 * 8));
1479         }
1480         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1481
1482         /*
1483          * TxDMA will stop issuing Read requests if the number of read
1484          * splits exceeds the limit set by shared_splits.
1485          */
1486         val64 = readq(&bar0->pic_control);
1487         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1488         writeq(val64, &bar0->pic_control);
1489
1490         /*
1491          * Programming the Herc to split every write transaction
1492          * that does not start on an ADB to reduce disconnects.
1493          */
1494         if (nic->device_type == XFRAME_II_DEVICE) {
1495                 val64 = WREQ_SPLIT_MASK_SET_MASK(255);
1496                 writeq(val64, &bar0->wreq_split_mask);
1497         }
1498
1499         /* Setting Link stability period to 64 ms */ 
1500         if (nic->device_type == XFRAME_II_DEVICE) {
1501                 val64 = MISC_LINK_STABILITY_PRD(3);
1502                 writeq(val64, &bar0->misc_control);
1503         }
1504
1505         return SUCCESS;
1506 }
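/*
 * The TTI/RTI programming above repeats the same "issue command, then poll
 * until the strobe bit clears, giving up after ~500ms" sequence several
 * times.  Below is a minimal sketch of a helper that could factor this out;
 * it is illustrative only (s2io_poll_strobe is not part of the driver) and
 * is kept out of the build with #if 0.
 */
#if 0
static int s2io_poll_strobe(void __iomem *cmd_reg, u64 strobe_bit)
{
        int time = 0;

        /* Poll every 50ms, up to 10 times (~500ms total). */
        while (readq(cmd_reg) & strobe_bit) {
                if (time > 10)
                        return -1;      /* command did not complete in time */
                msleep(50);
                time++;
        }
        return 0;                       /* strobe cleared, command completed */
}
#endif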
1507 #define LINK_UP_DOWN_INTERRUPT          1
1508 #define MAC_RMAC_ERR_TIMER              2
1509
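/*
 * Selects how a link fault is indicated to the driver: with MSI/MSI-X the
 * RMAC error timer is always used; with INTA, Xframe II adapters use the
 * link up/down GPIO interrupt while Xframe I adapters use the timer.
 */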
1510 #if defined(CONFIG_MSI_MODE) || defined(CONFIG_MSIX_MODE)
1511 #define s2io_link_fault_indication(x) MAC_RMAC_ERR_TIMER
1512 #else
1513 int s2io_link_fault_indication(nic_t *nic)
1514 {
1515         if (nic->device_type == XFRAME_II_DEVICE)
1516                 return LINK_UP_DOWN_INTERRUPT;
1517         else
1518                 return MAC_RMAC_ERR_TIMER;
1519 }
1520 #endif
1521
1522 /**
1523  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1524  *  @nic: device private variable,
1525  *  @mask: A mask indicating which Intr block must be modified,
1526  *  @flag: A flag indicating whether to enable or disable the Intrs.
1527  *  Description: This function will either disable or enable the interrupts
1528  *  depending on the flag argument. The mask argument can be used to
1529  *  enable/disable any Intr block.
1530  *  Return Value: NONE.
1531  */
1532
1533 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1534 {
1535         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1536         register u64 val64 = 0, temp64 = 0;
1537
1538         /*  Top level interrupt classification */
1539         /*  PIC Interrupts */
1540         if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1541                 /*  Enable PIC Intrs in the general intr mask register */
1542                 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1543                 if (flag == ENABLE_INTRS) {
1544                         temp64 = readq(&bar0->general_int_mask);
1545                         temp64 &= ~((u64) val64);
1546                         writeq(temp64, &bar0->general_int_mask);
1547                         /*
1548                          * If this is a Hercules adapter, enable the GPIO
1549                          * link interrupts; otherwise keep all PCIX, Flash,
1550                          * MDIO, IIC and GPIO interrupts disabled for now.
1551                          * TODO
1552                          */
1553                         if (s2io_link_fault_indication(nic) ==
1554                                         LINK_UP_DOWN_INTERRUPT ) {
1555                                 temp64 = readq(&bar0->pic_int_mask);
1556                                 temp64 &= ~((u64) PIC_INT_GPIO);
1557                                 writeq(temp64, &bar0->pic_int_mask);
1558                                 temp64 = readq(&bar0->gpio_int_mask);
1559                                 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1560                                 writeq(temp64, &bar0->gpio_int_mask);
1561                         } else {
1562                                 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1563                         }
1564                         /*
1565                          * No MSI Support is available presently, so TTI and
1566                          * RTI interrupts are also disabled.
1567                          */
1568                 } else if (flag == DISABLE_INTRS) {
1569                         /*
1570                          * Disable PIC Intrs in the general
1571                          * intr mask register
1572                          */
1573                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1574                         temp64 = readq(&bar0->general_int_mask);
1575                         val64 |= temp64;
1576                         writeq(val64, &bar0->general_int_mask);
1577                 }
1578         }
1579
1580         /*  DMA Interrupts */
1581         /*  Enabling/Disabling Tx DMA interrupts */
1582         if (mask & TX_DMA_INTR) {
1583                 /* Enable TxDMA Intrs in the general intr mask register */
1584                 val64 = TXDMA_INT_M;
1585                 if (flag == ENABLE_INTRS) {
1586                         temp64 = readq(&bar0->general_int_mask);
1587                         temp64 &= ~((u64) val64);
1588                         writeq(temp64, &bar0->general_int_mask);
1589                         /*
1590                          * Keep all interrupts other than PFC interrupt
1591                          * and PCC interrupt disabled in DMA level.
1592                          */
1593                         val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1594                                                       TXDMA_PCC_INT_M);
1595                         writeq(val64, &bar0->txdma_int_mask);
1596                         /*
1597                          * Enable only the MISC error 1 interrupt in PFC block
1598                          */
1599                         val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1600                         writeq(val64, &bar0->pfc_err_mask);
1601                         /*
1602                          * Enable only the FB_ECC error interrupt in PCC block
1603                          */
1604                         val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1605                         writeq(val64, &bar0->pcc_err_mask);
1606                 } else if (flag == DISABLE_INTRS) {
1607                         /*
1608                          * Disable TxDMA Intrs in the general intr mask
1609                          * register
1610                          */
1611                         writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1612                         writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1613                         temp64 = readq(&bar0->general_int_mask);
1614                         val64 |= temp64;
1615                         writeq(val64, &bar0->general_int_mask);
1616                 }
1617         }
1618
1619         /*  Enabling/Disabling Rx DMA interrupts */
1620         if (mask & RX_DMA_INTR) {
1621                 /*  Enable RxDMA Intrs in the general intr mask register */
1622                 val64 = RXDMA_INT_M;
1623                 if (flag == ENABLE_INTRS) {
1624                         temp64 = readq(&bar0->general_int_mask);
1625                         temp64 &= ~((u64) val64);
1626                         writeq(temp64, &bar0->general_int_mask);
1627                         /*
1628                          * All RxDMA block interrupts are disabled for now
1629                          * TODO
1630                          */
1631                         writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1632                 } else if (flag == DISABLE_INTRS) {
1633                         /*
1634                          * Disable RxDMA Intrs in the general intr mask
1635                          * register
1636                          */
1637                         writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1638                         temp64 = readq(&bar0->general_int_mask);
1639                         val64 |= temp64;
1640                         writeq(val64, &bar0->general_int_mask);
1641                 }
1642         }
1643
1644         /*  MAC Interrupts */
1645         /*  Enabling/Disabling MAC interrupts */
1646         if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1647                 val64 = TXMAC_INT_M | RXMAC_INT_M;
1648                 if (flag == ENABLE_INTRS) {
1649                         temp64 = readq(&bar0->general_int_mask);
1650                         temp64 &= ~((u64) val64);
1651                         writeq(temp64, &bar0->general_int_mask);
1652                         /*
1653                          * All MAC block error interrupts are disabled for now
1654                          * TODO
1655                          */
1656                 } else if (flag == DISABLE_INTRS) {
1657                         /*
1658                          * Disable MAC Intrs in the general intr mask register
1659                          */
1660                         writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1661                         writeq(DISABLE_ALL_INTRS,
1662                                &bar0->mac_rmac_err_mask);
1663
1664                         temp64 = readq(&bar0->general_int_mask);
1665                         val64 |= temp64;
1666                         writeq(val64, &bar0->general_int_mask);
1667                 }
1668         }
1669
1670         /*  XGXS Interrupts */
1671         if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1672                 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1673                 if (flag == ENABLE_INTRS) {
1674                         temp64 = readq(&bar0->general_int_mask);
1675                         temp64 &= ~((u64) val64);
1676                         writeq(temp64, &bar0->general_int_mask);
1677                         /*
1678                          * All XGXS block error interrupts are disabled for now
1679                          * TODO
1680                          */
1681                         writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1682                 } else if (flag == DISABLE_INTRS) {
1683                         /*
1684                          * Disable XGXS Intrs in the general intr mask register
1685                          */
1686                         writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1687                         temp64 = readq(&bar0->general_int_mask);
1688                         val64 |= temp64;
1689                         writeq(val64, &bar0->general_int_mask);
1690                 }
1691         }
1692
1693         /*  Memory Controller(MC) interrupts */
1694         if (mask & MC_INTR) {
1695                 val64 = MC_INT_M;
1696                 if (flag == ENABLE_INTRS) {
1697                         temp64 = readq(&bar0->general_int_mask);
1698                         temp64 &= ~((u64) val64);
1699                         writeq(temp64, &bar0->general_int_mask);
1700                         /*
1701                          * Enable all MC Intrs.
1702                          */
1703                         writeq(0x0, &bar0->mc_int_mask);
1704                         writeq(0x0, &bar0->mc_err_mask);
1705                 } else if (flag == DISABLE_INTRS) {
1706                         /*
1707                          * Disable MC Intrs in the general intr mask register
1708                          */
1709                         writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1710                         temp64 = readq(&bar0->general_int_mask);
1711                         val64 |= temp64;
1712                         writeq(val64, &bar0->general_int_mask);
1713                 }
1714         }
1715
1716
1717         /*  Tx traffic interrupts */
1718         if (mask & TX_TRAFFIC_INTR) {
1719                 val64 = TXTRAFFIC_INT_M;
1720                 if (flag == ENABLE_INTRS) {
1721                         temp64 = readq(&bar0->general_int_mask);
1722                         temp64 &= ~((u64) val64);
1723                         writeq(temp64, &bar0->general_int_mask);
1724                         /*
1725                          * Enable all the Tx side interrupts
1726                          * writing 0 Enables all 64 TX interrupt levels
1727                          */
1728                         writeq(0x0, &bar0->tx_traffic_mask);
1729                 } else if (flag == DISABLE_INTRS) {
1730                         /*
1731                          * Disable Tx Traffic Intrs in the general intr mask
1732                          * register.
1733                          */
1734                         writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1735                         temp64 = readq(&bar0->general_int_mask);
1736                         val64 |= temp64;
1737                         writeq(val64, &bar0->general_int_mask);
1738                 }
1739         }
1740
1741         /*  Rx traffic interrupts */
1742         if (mask & RX_TRAFFIC_INTR) {
1743                 val64 = RXTRAFFIC_INT_M;
1744                 if (flag == ENABLE_INTRS) {
1745                         temp64 = readq(&bar0->general_int_mask);
1746                         temp64 &= ~((u64) val64);
1747                         writeq(temp64, &bar0->general_int_mask);
1748                         /* writing 0 Enables all 8 RX interrupt levels */
1749                         writeq(0x0, &bar0->rx_traffic_mask);
1750                 } else if (flag == DISABLE_INTRS) {
1751                         /*
1752                          * Disable Rx Traffic Intrs in the general intr mask
1753                          * register.
1754                          */
1755                         writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1756                         temp64 = readq(&bar0->general_int_mask);
1757                         val64 |= temp64;
1758                         writeq(val64, &bar0->general_int_mask);
1759                 }
1760         }
1761 }
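/*
 * Typical usage, as in start_nic()/stop_nic() below: callers OR together
 * the interrupt-block bits of interest and pass ENABLE_INTRS or
 * DISABLE_INTRS.  Illustrative only:
 *
 *      u16 intr_mask = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR |
 *                      TX_PIC_INTR | RX_PIC_INTR;
 *      en_dis_able_nic_intrs(nic, intr_mask, ENABLE_INTRS);
 */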
1762
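/*
 * check_prc_pcc_state - helper for verify_xena_quiescence().
 * Checks the RMAC PCC idle and RC PRC quiescent bits of the adapter status.
 * @flag indicates whether the adapter enable bit has been written before;
 * @rev_id and @herc select which PCC idle field applies (Xena revisions
 * >= 4 and all Herc adapters use RMAC_PCC_IDLE, older Xena revisions use
 * RMAC_PCC_FOUR_IDLE).
 */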
1763 static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1764 {
1765         int ret = 0;
1766
1767         if (flag == FALSE) {
1768                 if ((!herc && (rev_id >= 4)) || herc) {
1769                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1770                             ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1771                              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1772                                 ret = 1;
1773                         }
1774                 } else {
1775                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1776                             ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1777                              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1778                                 ret = 1;
1779                         }
1780                 }
1781         } else {
1782                 if ((!herc && (rev_id >= 4)) || herc) {
1783                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1784                              ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1785                             (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1786                              ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1787                               ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1788                                 ret = 1;
1789                         }
1790                 } else {
1791                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1792                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1793                             (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1794                              ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1795                               ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1796                                 ret = 1;
1797                         }
1798                 }
1799         }
1800
1801         return ret;
1802 }
1803 /**
1804  *  verify_xena_quiescence - Checks whether the H/W is ready
1805  *  @sp : device private variable.
1806  *  @val64 : Value read from the adapter status register.
1807  *  @flag : indicates if the adapter enable bit was ever written once before.
1808  *  Description: Returns whether the H/W is ready to go or not. Depending
1809  *  on whether the adapter enable bit was written or not, the comparison
1810  *  differs and the calling function passes the input argument flag to
1811  *  indicate this.
1812  *  Return: 1 if Xena is quiescent
1813  *          0 if Xena is not quiescent
1814  */
1815
1816 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1817 {
1818         int ret = 0, herc;
1819         u64 tmp64 = ~((u64) val64);
1820         int rev_id = get_xena_rev_id(sp->pdev);
1821
1822         herc = (sp->device_type == XFRAME_II_DEVICE);
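        /*
         * tmp64 is the bitwise complement of the status word, so the check
         * below succeeds only when every listed READY, QUIESCENT and
         * PLL_LOCK bit is set in val64.
         */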
1823         if (!
1824             (tmp64 &
1825              (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1826               ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1827               ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1828               ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1829               ADAPTER_STATUS_P_PLL_LOCK))) {
1830                 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1831         }
1832
1833         return ret;
1834 }
1835
1836 /**
1837  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
1838  * @sp: Pointer to device specific structure
1839  * Description :
1840  * New procedure to clear mac address reading problems on Alpha platforms
1841  *
1842  */
1843
1844 void fix_mac_address(nic_t * sp)
1845 {
1846         XENA_dev_config_t __iomem *bar0 = sp->bar0;
1847         u64 val64;
1848         int i = 0;
1849
1850         while (fix_mac[i] != END_SIGN) {
1851                 writeq(fix_mac[i++], &bar0->gpio_control);
1852                 udelay(10);
1853                 val64 = readq(&bar0->gpio_control);
1854         }
1855 }
1856
1857 /**
1858  *  start_nic - Turns the device on
1859  *  @nic : device private variable.
1860  *  Description:
1861  *  This function actually turns the device on. Before this function is
1862  *  called, all registers are configured from their reset states
1863  *  and shared memory is allocated but the NIC is still quiescent. On
1864  *  calling this function, the device interrupts are cleared and the NIC is
1865  *  literally switched on by writing into the adapter control register.
1866  *  Return Value:
1867  *  SUCCESS on success and -1 on failure.
1868  */
1869
1870 static int start_nic(struct s2io_nic *nic)
1871 {
1872         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1873         struct net_device *dev = nic->dev;
1874         register u64 val64 = 0;
1875         u16 interruptible;
1876         u16 subid, i;
1877         mac_info_t *mac_control;
1878         struct config_param *config;
1879
1880         mac_control = &nic->mac_control;
1881         config = &nic->config;
1882
1883         /*  PRC Initialization and configuration */
1884         for (i = 0; i < config->rx_ring_num; i++) {
1885                 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1886                        &bar0->prc_rxd0_n[i]);
1887
1888                 val64 = readq(&bar0->prc_ctrl_n[i]);
1889                 if (nic->config.bimodal)
1890                         val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
1891 #ifndef CONFIG_2BUFF_MODE
1892                 val64 |= PRC_CTRL_RC_ENABLED;
1893 #else
1894                 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1895 #endif
1896                 writeq(val64, &bar0->prc_ctrl_n[i]);
1897         }
1898
1899 #ifdef CONFIG_2BUFF_MODE
1900         /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1901         val64 = readq(&bar0->rx_pa_cfg);
1902         val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1903         writeq(val64, &bar0->rx_pa_cfg);
1904 #endif
1905
1906         /*
1907          * Enabling MC-RLDRAM. After enabling the device, we delay for
1908          * around 100ms, which is approximately the time required
1909          * for the device to be ready for operation.
1910          */
1911         val64 = readq(&bar0->mc_rldram_mrs);
1912         val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1913         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1914         val64 = readq(&bar0->mc_rldram_mrs);
1915
1916         msleep(100);    /* Delay by around 100 ms. */
1917
1918         /* Enabling ECC Protection. */
1919         val64 = readq(&bar0->adapter_control);
1920         val64 &= ~ADAPTER_ECC_EN;
1921         writeq(val64, &bar0->adapter_control);
1922
1923         /*
1924          * Clearing any possible Link state change interrupts that
1925          * could have popped up just before Enabling the card.
1926          */
1927         val64 = readq(&bar0->mac_rmac_err_reg);
1928         if (val64)
1929                 writeq(val64, &bar0->mac_rmac_err_reg);
1930
1931         /*
1932          * Verify if the device is ready to be enabled, if so enable
1933          * it.
1934          */
1935         val64 = readq(&bar0->adapter_status);
1936         if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1937                 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1938                 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1939                           (unsigned long long) val64);
1940                 return FAILURE;
1941         }
1942
1943         /*  Enable select interrupts */
1944         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
1945         interruptible |= TX_PIC_INTR | RX_PIC_INTR;
1946         interruptible |= TX_MAC_INTR | RX_MAC_INTR;
1947
1948         en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1949
1950         /*
1951          * With some switches, link might be already up at this point.
1952          * Because of this weird behavior, when we enable laser,
1953          * we may not get link. We need to handle this. We cannot
1954          * figure out which switch is misbehaving. So we are forced to
1955          * make a global change.
1956          */
1957
1958         /* Enabling Laser. */
1959         val64 = readq(&bar0->adapter_control);
1960         val64 |= ADAPTER_EOI_TX_ON;
1961         writeq(val64, &bar0->adapter_control);
1962
1963         /* SXE-002: Initialize link and activity LED */
1964         subid = nic->pdev->subsystem_device;
1965         if (((subid & 0xFF) >= 0x07) &&
1966             (nic->device_type == XFRAME_I_DEVICE)) {
1967                 val64 = readq(&bar0->gpio_control);
1968                 val64 |= 0x0000800000000000ULL;
1969                 writeq(val64, &bar0->gpio_control);
1970                 val64 = 0x0411040400000000ULL;
1971                 writeq(val64, (void __iomem *)bar0 + 0x2700);
1972         }
1973
1974         /*
1975          * Don't see link state interrupts on certain switches, so
1976          * directly scheduling a link state task from here.
1977          */
1978         schedule_work(&nic->set_link_task);
1979
1980         return SUCCESS;
1981 }
1982
1983 /**
1984  *  free_tx_buffers - Free all queued Tx buffers
1985  *  @nic : device private variable.
1986  *  Description:
1987  *  Free all queued Tx buffers.
1988  *  Return Value: void
1989  */
1990
1991 static void free_tx_buffers(struct s2io_nic *nic)
1992 {
1993         struct net_device *dev = nic->dev;
1994         struct sk_buff *skb;
1995         TxD_t *txdp;
1996         int i, j, k;
1997         mac_info_t *mac_control;
1998         struct config_param *config;
1999         int cnt = 0, frg_cnt;
2000
2001         mac_control = &nic->mac_control;
2002         config = &nic->config;
2003
2004         for (i = 0; i < config->tx_fifo_num; i++) {
2005                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2006                         txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
2007                             list_virt_addr;
2008                         skb =
2009                             (struct sk_buff *) ((unsigned long) txdp->
2010                                                 Host_Control);
2011                         if (skb == NULL) {
2012                                 memset(txdp, 0, sizeof(TxD_t) *
2013                                        config->max_txds);
2014                                 continue;
2015                         }
2016                         frg_cnt = skb_shinfo(skb)->nr_frags;
2017                         pci_unmap_single(nic->pdev, (dma_addr_t)
2018                                          txdp->Buffer_Pointer,
2019                                          skb->len - skb->data_len,
2020                                          PCI_DMA_TODEVICE);
2021                         if (frg_cnt) {
2022                                 TxD_t *temp;
2023                                 temp = txdp;
2024                                 txdp++;
2025                                 for (k = 0; k < frg_cnt; k++, txdp++) {
2026                                         skb_frag_t *frag =
2027                                             &skb_shinfo(skb)->frags[k];
2028                                         pci_unmap_page(nic->pdev,
2029                                                        (dma_addr_t)
2030                                                        txdp->
2031                                                        Buffer_Pointer,
2032                                                        frag->size,
2033                                                        PCI_DMA_TODEVICE);
2034                                 }
2035                                 txdp = temp;
2036                         }
2037                         dev_kfree_skb(skb);
2038                         memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
2039                         cnt++;
2040                 }
2041                 DBG_PRINT(INTR_DBG,
2042                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2043                           dev->name, cnt, i);
2044                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2045                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2046         }
2047 }
2048
2049 /**
2050  *   stop_nic - To stop the nic
2051  *   @nic : device private variable.
2052  *   Description:
2053  *   This function does exactly the opposite of what the start_nic()
2054  *   function does. This function is called to stop the device.
2055  *   Return Value:
2056  *   void.
2057  */
2058
2059 static void stop_nic(struct s2io_nic *nic)
2060 {
2061         XENA_dev_config_t __iomem *bar0 = nic->bar0;
2062         register u64 val64 = 0;
2063         u16 interruptible, i;
2064         mac_info_t *mac_control;
2065         struct config_param *config;
2066
2067         mac_control = &nic->mac_control;
2068         config = &nic->config;
2069
2070         /*  Disable all interrupts */
2071         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2072         interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2073         interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2074         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2075
2076         /*  Disable PRCs */
2077         for (i = 0; i < config->rx_ring_num; i++) {
2078                 val64 = readq(&bar0->prc_ctrl_n[i]);
2079                 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
2080                 writeq(val64, &bar0->prc_ctrl_n[i]);
2081         }
2082 }
2083
2084 /**
2085  *  fill_rx_buffers - Allocates the Rx side skbs
2086  *  @nic:  device private variable
2087  *  @ring_no: ring number
2088  *  Description:
2089  *  The function allocates Rx side skbs and puts the physical
2090  *  address of these buffers into the RxD buffer pointers, so that the NIC
2091  *  can DMA the received frame into these locations.
2092  *  The NIC supports 3 receive modes, viz
2093  *  1. single buffer,
2094  *  2. three buffer and
2095  *  3. five buffer modes.
2096  *  Each mode defines how many fragments the received frame will be split
2097  *  up into by the NIC. In three buffer mode the frame is split into L3
2098  *  header, L4 header and L4 payload; in five buffer mode the L4 payload
2099  *  itself is further split into 3 fragments. As of now only single buffer
2100  *  mode is supported.
2101  *   Return Value:
2102  *  SUCCESS on success or an appropriate -ve value on failure.
2103  */
2104
2105 int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2106 {
2107         struct net_device *dev = nic->dev;
2108         struct sk_buff *skb;
2109         RxD_t *rxdp;
2110         int off, off1, size, block_no, block_no1;
2111         int offset, offset1;
2112         u32 alloc_tab = 0;
2113         u32 alloc_cnt;
2114         mac_info_t *mac_control;
2115         struct config_param *config;
2116 #ifdef CONFIG_2BUFF_MODE
2117         RxD_t *rxdpnext;
2118         int nextblk;
2119         u64 tmp;
2120         buffAdd_t *ba;
2121         dma_addr_t rxdpphys;
2122 #endif
2123 #ifndef CONFIG_S2IO_NAPI
2124         unsigned long flags;
2125 #endif
2126         RxD_t *first_rxdp = NULL;
2127
2128         mac_control = &nic->mac_control;
2129         config = &nic->config;
2130         alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2131             atomic_read(&nic->rx_bufs_left[ring_no]);
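        /*
         * The Rx buffer size is the MTU plus the largest possible L2
         * encapsulation (Ethernet II/802.3, 802.2 LLC and SNAP headers).
         */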
2132         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2133             HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2134
2135         while (alloc_tab < alloc_cnt) {
2136                 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2137                     block_index;
2138                 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
2139                     block_index;
2140                 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2141                 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2142 #ifndef CONFIG_2BUFF_MODE
2143                 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
2144                 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
2145 #else
2146                 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
2147                 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
2148 #endif
2149
2150                 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
2151                     block_virt_addr + off;
2152                 if ((offset == offset1) && (rxdp->Host_Control)) {
2153                         DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
2154                         DBG_PRINT(INTR_DBG, " info equated\n");
2155                         goto end;
2156                 }
2157 #ifndef CONFIG_2BUFF_MODE
2158                 if (rxdp->Control_1 == END_OF_BLOCK) {
2159                         mac_control->rings[ring_no].rx_curr_put_info.
2160                             block_index++;
2161                         mac_control->rings[ring_no].rx_curr_put_info.
2162                             block_index %= mac_control->rings[ring_no].block_count;
2163                         block_no = mac_control->rings[ring_no].rx_curr_put_info.
2164                                 block_index;
2165                         off++;
2166                         off %= (MAX_RXDS_PER_BLOCK + 1);
2167                         mac_control->rings[ring_no].rx_curr_put_info.offset =
2168                             off;
2169                         rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
2170                         DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2171                                   dev->name, rxdp);
2172                 }
2173 #ifndef CONFIG_S2IO_NAPI
2174                 spin_lock_irqsave(&nic->put_lock, flags);
2175                 mac_control->rings[ring_no].put_pos =
2176                     (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
2177                 spin_unlock_irqrestore(&nic->put_lock, flags);
2178 #endif
2179 #else
2180                 if (rxdp->Host_Control == END_OF_BLOCK) {
2181                         mac_control->rings[ring_no].rx_curr_put_info.
2182                             block_index++;
2183                         mac_control->rings[ring_no].rx_curr_put_info.block_index
2184                             %= mac_control->rings[ring_no].block_count;
2185                         block_no = mac_control->rings[ring_no].rx_curr_put_info
2186                             .block_index;
2187                         off = 0;
2188                         DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
2189                                   dev->name, block_no,
2190                                   (unsigned long long) rxdp->Control_1);
2191                         mac_control->rings[ring_no].rx_curr_put_info.offset =
2192                             off;
2193                         rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
2194                             block_virt_addr;
2195                 }
2196 #ifndef CONFIG_S2IO_NAPI
2197                 spin_lock_irqsave(&nic->put_lock, flags);
2198                 mac_control->rings[ring_no].put_pos = (block_no *
2199                                          (MAX_RXDS_PER_BLOCK + 1)) + off;
2200                 spin_unlock_irqrestore(&nic->put_lock, flags);
2201 #endif
2202 #endif
2203
2204 #ifndef CONFIG_2BUFF_MODE
2205                 if (rxdp->Control_1 & RXD_OWN_XENA)
2206 #else
2207                 if (rxdp->Control_2 & BIT(0))
2208 #endif
2209                 {
2210                         mac_control->rings[ring_no].rx_curr_put_info.
2211                             offset = off;
2212                         goto end;
2213                 }
2214 #ifdef  CONFIG_2BUFF_MODE
2215                 /*
2216                  * RxDs Spanning cache lines will be replenished only
2217                  * if the succeeding RxD is also owned by Host. It
2218                  * will always be the ((8*i)+3) and ((8*i)+6)
2219                  * descriptors for the 48 byte descriptor. The offending
2220                  * descriptor is, of course, the 3rd descriptor.
2221                  */
2222                 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
2223                     block_dma_addr + (off * sizeof(RxD_t));
2224                 if (((u64) (rxdpphys)) % 128 > 80) {
2225                         rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
2226                             block_virt_addr + (off + 1);
2227                         if (rxdpnext->Host_Control == END_OF_BLOCK) {
2228                                 nextblk = (block_no + 1) %
2229                                     (mac_control->rings[ring_no].block_count);
2230                                 rxdpnext = mac_control->rings[ring_no].rx_blocks
2231                                     [nextblk].block_virt_addr;
2232                         }
2233                         if (rxdpnext->Control_2 & BIT(0))
2234                                 goto end;
2235                 }
2236 #endif
2237
2238 #ifndef CONFIG_2BUFF_MODE
2239                 skb = dev_alloc_skb(size + NET_IP_ALIGN);
2240 #else
2241                 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
2242 #endif
2243                 if (!skb) {
2244                         DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2245                         DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2246                         if (first_rxdp) {
2247                                 wmb();
2248                                 first_rxdp->Control_1 |= RXD_OWN_XENA;
2249                         }
2250                         return -ENOMEM;
2251                 }
2252 #ifndef CONFIG_2BUFF_MODE
2253                 skb_reserve(skb, NET_IP_ALIGN);
2254                 memset(rxdp, 0, sizeof(RxD_t));
2255                 rxdp->Buffer0_ptr = pci_map_single
2256                     (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
2257                 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
2258                 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
2259                 rxdp->Host_Control = (unsigned long) (skb);
2260                 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2261                         rxdp->Control_1 |= RXD_OWN_XENA;
2262                 off++;
2263                 off %= (MAX_RXDS_PER_BLOCK + 1);
2264                 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2265 #else
2266                 ba = &mac_control->rings[ring_no].ba[block_no][off];
2267                 skb_reserve(skb, BUF0_LEN);
2268                 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
2269                 if (tmp)
2270                         skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
2271
2272                 memset(rxdp, 0, sizeof(RxD_t));
2273                 rxdp->Buffer2_ptr = pci_map_single
2274                     (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
2275                      PCI_DMA_FROMDEVICE);
2276                 rxdp->Buffer0_ptr =
2277                     pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2278                                    PCI_DMA_FROMDEVICE);
2279                 rxdp->Buffer1_ptr =
2280                     pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2281                                    PCI_DMA_FROMDEVICE);
2282
2283                 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
2284                 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
2285                 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
2286                 rxdp->Control_2 |= BIT(0);      /* Set Buffer_Empty bit. */
2287                 rxdp->Host_Control = (u64) ((unsigned long) (skb));
2288                 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2289                         rxdp->Control_1 |= RXD_OWN_XENA;
2290                 off++;
2291                 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2292 #endif
2293                 rxdp->Control_2 |= SET_RXD_MARKER;
2294
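                /*
                 * Ownership of the first descriptor in each batch of
                 * (1 << rxsync_frequency) is deferred: it is handed to the
                 * adapter only after the rest of the batch is built, with a
                 * wmb() so the adapter sees the other fields first.
                 */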
2295                 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2296                         if (first_rxdp) {
2297                                 wmb();
2298                                 first_rxdp->Control_1 |= RXD_OWN_XENA;
2299                         }
2300                         first_rxdp = rxdp;
2301                 }
2302                 atomic_inc(&nic->rx_bufs_left[ring_no]);
2303                 alloc_tab++;
2304         }
2305
2306       end:
2307         /* Transfer ownership of first descriptor to adapter just before
2308          * exiting. Before that, use memory barrier so that ownership
2309          * and other fields are seen by adapter correctly.
2310          */
2311         if (first_rxdp) {
2312                 wmb();
2313                 first_rxdp->Control_1 |= RXD_OWN_XENA;
2314         }
2315
2316         return SUCCESS;
2317 }
2318
2319 /**
2320  *  free_rx_buffers - Frees all Rx buffers
2321  *  @sp: device private variable.
2322  *  Description:
2323  *  This function will free all Rx buffers allocated by host.
2324  *  Return Value:
2325  *  NONE.
2326  */
2327
2328 static void free_rx_buffers(struct s2io_nic *sp)
2329 {
2330         struct net_device *dev = sp->dev;
2331         int i, j, blk = 0, off, buf_cnt = 0;
2332         RxD_t *rxdp;
2333         struct sk_buff *skb;
2334         mac_info_t *mac_control;
2335         struct config_param *config;
2336 #ifdef CONFIG_2BUFF_MODE
2337         buffAdd_t *ba;
2338 #endif
2339
2340         mac_control = &sp->mac_control;
2341         config = &sp->config;
2342
2343         for (i = 0; i < config->rx_ring_num; i++) {
2344                 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
2345                         off = j % (MAX_RXDS_PER_BLOCK + 1);
2346                         rxdp = mac_control->rings[i].rx_blocks[blk].
2347                                 block_virt_addr + off;
2348
2349 #ifndef CONFIG_2BUFF_MODE
2350                         if (rxdp->Control_1 == END_OF_BLOCK) {
2351                                 rxdp =
2352                                     (RxD_t *) ((unsigned long) rxdp->
2353                                                Control_2);
2354                                 j++;
2355                                 blk++;
2356                         }
2357 #else
2358                         if (rxdp->Host_Control == END_OF_BLOCK) {
2359                                 blk++;
2360                                 continue;
2361                         }
2362 #endif
2363
2364                         if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2365                                 memset(rxdp, 0, sizeof(RxD_t));
2366                                 continue;
2367                         }
2368
2369                         skb =
2370                             (struct sk_buff *) ((unsigned long) rxdp->
2371                                                 Host_Control);
2372                         if (skb) {
2373 #ifndef CONFIG_2BUFF_MODE
2374                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2375                                                  rxdp->Buffer0_ptr,
2376                                                  dev->mtu +
2377                                                  HEADER_ETHERNET_II_802_3_SIZE
2378                                                  + HEADER_802_2_SIZE +
2379                                                  HEADER_SNAP_SIZE,
2380                                                  PCI_DMA_FROMDEVICE);
2381 #else
2382                                 ba = &mac_control->rings[i].ba[blk][off];
2383                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2384                                                  rxdp->Buffer0_ptr,
2385                                                  BUF0_LEN,
2386                                                  PCI_DMA_FROMDEVICE);
2387                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2388                                                  rxdp->Buffer1_ptr,
2389                                                  BUF1_LEN,
2390                                                  PCI_DMA_FROMDEVICE);
2391                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2392                                                  rxdp->Buffer2_ptr,
2393                                                  dev->mtu + BUF0_LEN + 4,
2394                                                  PCI_DMA_FROMDEVICE);
2395 #endif
2396                                 dev_kfree_skb(skb);
2397                                 atomic_dec(&sp->rx_bufs_left[i]);
2398                                 buf_cnt++;
2399                         }
2400                         memset(rxdp, 0, sizeof(RxD_t));
2401                 }
2402                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2403                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2404                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2405                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2406                 atomic_set(&sp->rx_bufs_left[i], 0);
2407                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2408                           dev->name, buf_cnt, i);
2409         }
2410 }
2411
2412 /**
2413  * s2io_poll - Rx interrupt handler for NAPI support
2414  * @dev : pointer to the device structure.
2415  * @budget : The number of packets that were budgeted to be processed
2416  * during one pass through the 'Poll' function.
2417  * Description:
2418  * Comes into picture only if NAPI support has been incorporated. It does
2419  * the same thing that rx_intr_handler does, but not in an interrupt
2420  * context; also, it will process only a given number of packets.
2421  * Return value:
2422  * 0 on success and 1 if there are No Rx packets to be processed.
2423  */
2424
2425 #if defined(CONFIG_S2IO_NAPI)
2426 static int s2io_poll(struct net_device *dev, int *budget)
2427 {
2428         nic_t *nic = dev->priv;
2429         int pkt_cnt = 0, org_pkts_to_process;
2430         mac_info_t *mac_control;
2431         struct config_param *config;
2432         XENA_dev_config_t __iomem *bar0 = nic->bar0;
2433         u64 val64;
2434         int i;
2435
2436         atomic_inc(&nic->isr_cnt);
2437         mac_control = &nic->mac_control;
2438         config = &nic->config;
2439
2440         nic->pkts_to_process = *budget;
2441         if (nic->pkts_to_process > dev->quota)
2442                 nic->pkts_to_process = dev->quota;
2443         org_pkts_to_process = nic->pkts_to_process;
2444
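        /*
         * Read the pending Rx traffic interrupt bits and write them back
         * to acknowledge (clear) them before processing the rings.
         */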
2445         val64 = readq(&bar0->rx_traffic_int);
2446         writeq(val64, &bar0->rx_traffic_int);
2447
2448         for (i = 0; i < config->rx_ring_num; i++) {
2449                 rx_intr_handler(&mac_control->rings[i]);
2450                 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2451                 if (!nic->pkts_to_process) {
2452                         /* Quota for the current iteration has been met */
2453                         goto no_rx;
2454                 }
2455         }
2456         if (!pkt_cnt)
2457                 pkt_cnt = 1;
2458
2459         dev->quota -= pkt_cnt;
2460         *budget -= pkt_cnt;
2461         netif_rx_complete(dev);
2462
2463         for (i = 0; i < config->rx_ring_num; i++) {
2464                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2465                         DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2466                         DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2467                         break;
2468                 }
2469         }
2470         /* Re enable the Rx interrupts. */
2471         en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2472         atomic_dec(&nic->isr_cnt);
2473         return 0;
2474
2475 no_rx:
2476         dev->quota -= pkt_cnt;
2477         *budget -= pkt_cnt;
2478
2479         for (i = 0; i < config->rx_ring_num; i++) {
2480                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2481                         DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2482                         DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2483                         break;
2484                 }
2485         }
2486         atomic_dec(&nic->isr_cnt);
2487         return 1;
2488 }
2489 #endif
2490
2491 /**
2492  *  rx_intr_handler - Rx interrupt handler
2493  *  @ring_data: per-ring private data structure.
2494  *  Description:
2495  *  If the interrupt is because of a received frame or if the
2496  *  receive ring contains fresh, as yet un-processed frames, this function
2497  *  is called. It picks out the RxD at which place the last Rx processing had
2498  *  stopped and sends the skb to the OSM's Rx handler and then increments
2499  *  the offset.
2500  *  Return Value:
2501  *  NONE.
2502  */
2503 static void rx_intr_handler(ring_info_t *ring_data)
2504 {
2505         nic_t *nic = ring_data->nic;
2506         struct net_device *dev = (struct net_device *) nic->dev;
2507         int get_block, get_offset, put_block, put_offset, ring_bufs;
2508         rx_curr_get_info_t get_info, put_info;
2509         RxD_t *rxdp;
2510         struct sk_buff *skb;
2511 #ifndef CONFIG_S2IO_NAPI
2512         int pkt_cnt = 0;
2513 #endif
2514         spin_lock(&nic->rx_lock);
2515         if (atomic_read(&nic->card_state) == CARD_DOWN) {
2516                 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2517                           __FUNCTION__, dev->name);
2518                 spin_unlock(&nic->rx_lock);
2519                 return;
2520         }
2521
2522         get_info = ring_data->rx_curr_get_info;
2523         get_block = get_info.block_index;
2524         put_info = ring_data->rx_curr_put_info;
2525         put_block = put_info.block_index;
2526         ring_bufs = get_info.ring_len+1;
2527         rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2528                     get_info.offset;
2529         get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2530                 get_info.offset;
2531 #ifndef CONFIG_S2IO_NAPI
2532         spin_lock(&nic->put_lock);
2533         put_offset = ring_data->put_pos;
2534         spin_unlock(&nic->put_lock);
2535 #else
2536         put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
2537                 put_info.offset;
2538 #endif
2539         while (RXD_IS_UP2DT(rxdp) &&
2540                (((get_offset + 1) % ring_bufs) != put_offset)) {
2541                 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2542                 if (skb == NULL) {
2543                         DBG_PRINT(ERR_DBG, "%s: The skb is ",
2544                                   dev->name);
2545                         DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2546                         spin_unlock(&nic->rx_lock);
2547                         return;
2548                 }
2549 #ifndef CONFIG_2BUFF_MODE
2550                 pci_unmap_single(nic->pdev, (dma_addr_t)
2551                                  rxdp->Buffer0_ptr,
2552                                  dev->mtu +
2553                                  HEADER_ETHERNET_II_802_3_SIZE +
2554                                  HEADER_802_2_SIZE +
2555                                  HEADER_SNAP_SIZE,
2556                                  PCI_DMA_FROMDEVICE);
2557 #else
2558                 pci_unmap_single(nic->pdev, (dma_addr_t)
2559                                  rxdp->Buffer0_ptr,
2560                                  BUF0_LEN, PCI_DMA_FROMDEVICE);
2561                 pci_unmap_single(nic->pdev, (dma_addr_t)
2562                                  rxdp->Buffer1_ptr,
2563                                  BUF1_LEN, PCI_DMA_FROMDEVICE);
2564                 pci_unmap_single(nic->pdev, (dma_addr_t)
2565                                  rxdp->Buffer2_ptr,
2566                                  dev->mtu + BUF0_LEN + 4,
2567                                  PCI_DMA_FROMDEVICE);
2568 #endif
2569                 rx_osm_handler(ring_data, rxdp);
2570                 get_info.offset++;
2571                 ring_data->rx_curr_get_info.offset =
2572                     get_info.offset;
2573                 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2574                     get_info.offset;
2575                 if (get_info.offset &&
2576                     (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2577                         get_info.offset = 0;
2578                         ring_data->rx_curr_get_info.offset
2579                             = get_info.offset;
2580                         get_block++;
2581                         get_block %= ring_data->block_count;
2582                         ring_data->rx_curr_get_info.block_index
2583                             = get_block;
2584                         rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2585                 }
2586
2587                 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2588                             get_info.offset;
2589 #ifdef CONFIG_S2IO_NAPI
2590                 nic->pkts_to_process -= 1;
2591                 if (!nic->pkts_to_process)
2592                         break;
2593 #else
2594                 pkt_cnt++;
2595                 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2596                         break;
2597 #endif
2598         }
2599         spin_unlock(&nic->rx_lock);
2600 }
2601
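/*
 * Illustrative sketch (not part of the driver): the Rx handler above
 * tracks its position as a (block, offset) pair and compares linear
 * offsets computed as below, where each block holds MAX_RXDS_PER_BLOCK
 * descriptors plus one link descriptor:
 *
 *	linear_offset = block * (MAX_RXDS_PER_BLOCK + 1) + offset;
 *
 * Processing stops when advancing the get offset by one, modulo the
 * ring size, would land on the put offset, i.e. when no fresh
 * descriptors remain to be handed to the OSM.
 */
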
2602 /**
2603  *  tx_intr_handler - Transmit interrupt handler
2604  *  @fifo_data : per-FIFO control block of the device
2605  *  Description:
2606  *  Called when an interrupt was raised to indicate DMA completion of
2607  *  Tx packets. It identifies the last TxD whose buffer was freed and
2608  *  frees all skbs whose data have already been DMA'ed into the NIC's
2609  *  internal memory.
2610  *  Return Value:
2611  *  NONE
2612  */
2613
2614 static void tx_intr_handler(fifo_info_t *fifo_data)
2615 {
2616         nic_t *nic = fifo_data->nic;
2617         struct net_device *dev = (struct net_device *) nic->dev;
2618         tx_curr_get_info_t get_info, put_info;
2619         struct sk_buff *skb;
2620         TxD_t *txdlp;
2621         u16 j, frg_cnt;
2622
2623         get_info = fifo_data->tx_curr_get_info;
2624         put_info = fifo_data->tx_curr_put_info;
2625         txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2626             list_virt_addr;
2627         while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2628                (get_info.offset != put_info.offset) &&
2629                (txdlp->Host_Control)) {
2630                 /* Check for TxD errors */
2631                 if (txdlp->Control_1 & TXD_T_CODE) {
2632                         unsigned long long err;
2633                         err = txdlp->Control_1 & TXD_T_CODE;
2634                         if ((err >> 48) == 0xA) {
2635                                 DBG_PRINT(TX_DBG,
2636                                           "TxD returned due to loss of link\n");
2637                         } else {
2638                                 DBG_PRINT(ERR_DBG,
2639                                           "***TxD error %llx\n",
2640                                           err);
2641                         }
2642                 }
2643
2644                 skb = (struct sk_buff *) ((unsigned long)
2645                                 txdlp->Host_Control);
2646                 if (skb == NULL) {
2647                         DBG_PRINT(ERR_DBG, "%s: Null skb ",
2648                         __FUNCTION__);
2649                         DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2650                         return;
2651                 }
2652
2653                 frg_cnt = skb_shinfo(skb)->nr_frags;
2654                 nic->tx_pkt_count++;
2655
2656                 pci_unmap_single(nic->pdev, (dma_addr_t)
2657                                  txdlp->Buffer_Pointer,
2658                                  skb->len - skb->data_len,
2659                                  PCI_DMA_TODEVICE);
2660                 if (frg_cnt) {
2661                         TxD_t *temp;
2662                         temp = txdlp;
2663                         txdlp++;
2664                         for (j = 0; j < frg_cnt; j++, txdlp++) {
2665                                 skb_frag_t *frag =
2666                                     &skb_shinfo(skb)->frags[j];
2667                                 if (!txdlp->Buffer_Pointer)
2668                                         break;
2669                                 pci_unmap_page(nic->pdev,
2670                                                (dma_addr_t)
2671                                                txdlp->
2672                                                Buffer_Pointer,
2673                                                frag->size,
2674                                                PCI_DMA_TODEVICE);
2675                         }
2676                         txdlp = temp;
2677                 }
2678                 memset(txdlp, 0,
2679                        (sizeof(TxD_t) * fifo_data->max_txds));
2680
2681                 /* Updating the statistics block */
2682                 nic->stats.tx_bytes += skb->len;
2683                 dev_kfree_skb_irq(skb);
2684
2685                 get_info.offset++;
2686                 get_info.offset %= get_info.fifo_len + 1;
2687                 txdlp = (TxD_t *) fifo_data->list_info
2688                     [get_info.offset].list_virt_addr;
2689                 fifo_data->tx_curr_get_info.offset =
2690                     get_info.offset;
2691         }
2692
2693         spin_lock(&nic->tx_lock);
2694         if (netif_queue_stopped(dev))
2695                 netif_wake_queue(dev);
2696         spin_unlock(&nic->tx_lock);
2697 }
2698
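/*
 * Illustrative sketch (not part of the driver): a descriptor list is
 * reclaimed by the Tx handler above only once the NIC has released it,
 * which amounts to a check along the lines of:
 *
 *	if (!(txdlp->Control_1 & TXD_LIST_OWN_XENA) && txdlp->Host_Control)
 *		the skb stored in Host_Control can be freed;
 *
 * Host_Control carries the skb pointer that s2io_xmit() stashes there
 * when the list is handed to the hardware.
 */
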
2699 /**
2700  *  alarm_intr_handler - Alarm Interrupt handler
2701  *  @nic: device private variable
2702  *  Description: Called if the interrupt was raised neither for an Rx
2703  *  packet nor for a Tx completion. If the interrupt indicates a loss
2704  *  of link, the OSM link status handler is invoked; for any other
2705  *  alarm interrupt the block that raised the interrupt is reported
2706  *  and a H/W reset is issued.
2707  *  Return Value:
2708  *  NONE
2709  */
2710
2711 static void alarm_intr_handler(struct s2io_nic *nic)
2712 {
2713         struct net_device *dev = (struct net_device *) nic->dev;
2714         XENA_dev_config_t __iomem *bar0 = nic->bar0;
2715         register u64 val64 = 0, err_reg = 0;
2716
2717         /* Handling link status change error Intr */
2718         if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2719                 err_reg = readq(&bar0->mac_rmac_err_reg);
2720                 writeq(err_reg, &bar0->mac_rmac_err_reg);
2721                 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2722                         schedule_work(&nic->set_link_task);
2723                 }
2724         }
2725
2726         /* Handling Ecc errors */
2727         val64 = readq(&bar0->mc_err_reg);
2728         writeq(val64, &bar0->mc_err_reg);
2729         if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2730                 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
2731                         nic->mac_control.stats_info->sw_stat.
2732                                 double_ecc_errs++;
2733                         DBG_PRINT(INIT_DBG, "%s: Device indicates ",
2734                                   dev->name);
2735                         DBG_PRINT(INIT_DBG, "double ECC error!!\n");
2736                         if (nic->device_type != XFRAME_II_DEVICE) {
2737                                 /* Reset XframeI only if critical error */
2738                                 if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
2739                                              MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
2740                                         netif_stop_queue(dev);
2741                                         schedule_work(&nic->rst_timer_task);
2742                                 }
2743                         }
2744                 } else {
2745                         nic->mac_control.stats_info->sw_stat.
2746                                 single_ecc_errs++;
2747                 }
2748         }
2749
2750         /* In case of a serious error, the device will be Reset. */
2751         val64 = readq(&bar0->serr_source);
2752         if (val64 & SERR_SOURCE_ANY) {
2753                 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2754                 DBG_PRINT(ERR_DBG, "serious error %llx!!\n", 
2755                           (unsigned long long)val64);
2756                 netif_stop_queue(dev);
2757                 schedule_work(&nic->rst_timer_task);
2758         }
2759
2760         /*
2761          * Also, as mentioned in the latest errata sheets, if a PCC_FB_ECC
2762          * error occurs the adapter will be recycled by disabling the
2763          * adapter enable bit and enabling it again after the device
2764          * becomes quiescent.
2765          */
2766         val64 = readq(&bar0->pcc_err_reg);
2767         writeq(val64, &bar0->pcc_err_reg);
2768         if (val64 & PCC_FB_ECC_DB_ERR) {
2769                 u64 ac = readq(&bar0->adapter_control);
2770                 ac &= ~(ADAPTER_CNTL_EN);
2771                 writeq(ac, &bar0->adapter_control);
2772                 ac = readq(&bar0->adapter_control);
2773                 schedule_work(&nic->set_link_task);
2774         }
2775
2776         /* Other type of interrupts are not being handled now,  TODO */
2777 }
2778
2779 /**
2780  *  wait_for_cmd_complete - waits for a command to complete.
2781  *  @sp : private member of the device structure, which is a pointer to the
2782  *  s2io_nic structure.
2783  *  Description: Waits for a command written into the RMAC ADDR/DATA
2784  *  registers to complete and returns either success or failure
2785  *  depending on whether the command completed in time or not.
2786  *  Return value:
2787  *   SUCCESS on success and FAILURE on failure.
2788  */
2789
2790 int wait_for_cmd_complete(nic_t * sp)
2791 {
2792         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2793         int ret = FAILURE, cnt = 0;
2794         u64 val64;
2795
2796         while (TRUE) {
2797                 val64 = readq(&bar0->rmac_addr_cmd_mem);
2798                 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2799                         ret = SUCCESS;
2800                         break;
2801                 }
2802                 msleep(50);
2803                 if (cnt++ > 10)
2804                         break;
2805         }
2806
2807         return ret;
2808 }
2809
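/*
 * Illustrative sketch (not part of the driver): callers issue an RMAC
 * command by writing the command memory and then polling for
 * completion, e.g.
 *
 *	writeq(val64, &bar0->rmac_addr_cmd_mem);
 *	if (wait_for_cmd_complete(sp))
 *		DBG_PRINT(ERR_DBG, "RMAC command did not complete\n");
 *
 * With msleep(50) and at most 10 retries, the worst case wait is about
 * half a second before FAILURE is returned.
 */
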
2810 /**
2811  *  s2io_reset - Resets the card.
2812  *  @sp : private member of the device structure.
2813  *  Description: Function to Reset the card. This function then also
2814  *  restores the previously saved PCI configuration space registers as
2815  *  the card reset also resets the configuration space.
2816  *  Return value:
2817  *  void.
2818  */
2819
2820 void s2io_reset(nic_t * sp)
2821 {
2822         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2823         u64 val64;
2824         u16 subid, pci_cmd;
2825
2826         /* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
2827         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
2828
2829         val64 = SW_RESET_ALL;
2830         writeq(val64, &bar0->sw_reset);
2831
2832         /*
2833          * At this stage, if the PCI write is indeed completed, the
2834          * card is reset and so is the PCI Config space of the device.
2835          * So a read cannot be issued at this stage on any of the
2836          * registers to ensure the write into "sw_reset" register
2837          * has gone through.
2838          * Question: Is there any system call that will explicitly force
2839          * all the write commands still pending on the bus to be pushed
2840          * through?
2841          * As of now I am just giving a 250ms delay and hoping that the
2842          * PCI write to sw_reset register is done by this time.
2843          */
2844         msleep(250);
2845
2846         /* Restore the PCI state saved during initialization. */
2847         pci_restore_state(sp->pdev);
2848         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
2849                                      pci_cmd);
2850         s2io_init_pci(sp);
2851
2852         msleep(250);
2853
2854         /* Set swapper to enable I/O register access */
2855         s2io_set_swapper(sp);
2856
2857         /* Clear certain PCI/PCI-X fields after reset */
2858         if (sp->device_type == XFRAME_II_DEVICE) {
2859                 /* Clear parity err detect bit */
2860                 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
2861
2862                 /* Clearing PCI-X ECC status register */
2863                 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
2864
2865                 /* Clearing PCI_STATUS error reflected here */
2866                 writeq(BIT(62), &bar0->txpic_int_reg);
2867         }
2868
2869         /* Reset device statistics maintained by OS */
2870         memset(&sp->stats, 0, sizeof (struct net_device_stats));
2871
2872         /* SXE-002: Configure link and activity LED to turn it off */
2873         subid = sp->pdev->subsystem_device;
2874         if (((subid & 0xFF) >= 0x07) &&
2875             (sp->device_type == XFRAME_I_DEVICE)) {
2876                 val64 = readq(&bar0->gpio_control);
2877                 val64 |= 0x0000800000000000ULL;
2878                 writeq(val64, &bar0->gpio_control);
2879                 val64 = 0x0411040400000000ULL;
2880                 writeq(val64, (void __iomem *)bar0 + 0x2700);
2881         }
2882
2883         /*
2884          * Clear spurious ECC interrupts that would have occurred on
2885          * XFRAME II cards after reset.
2886          */
2887         if (sp->device_type == XFRAME_II_DEVICE) {
2888                 val64 = readq(&bar0->pcc_err_reg);
2889                 writeq(val64, &bar0->pcc_err_reg);
2890         }
2891
2892         sp->device_enabled_once = FALSE;
2893 }
2894
2895 /**
2896  *  s2io_set_swapper - to set the swapper control on the card
2897  *  @sp : private member of the device structure,
2898  *  pointer to the s2io_nic structure.
2899  *  Description: Function to set the swapper control on the card
2900  *  correctly depending on the 'endianness' of the system.
2901  *  Return value:
2902  *  SUCCESS on success and FAILURE on failure.
2903  */
2904
2905 int s2io_set_swapper(nic_t * sp)
2906 {
2907         struct net_device *dev = sp->dev;
2908         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2909         u64 val64, valt, valr;
2910
2911         /*
2912          * Set proper endian settings and verify the same by reading
2913          * the PIF Feed-back register.
2914          */
2915
2916         val64 = readq(&bar0->pif_rd_swapper_fb);
2917         if (val64 != 0x0123456789ABCDEFULL) {
2918                 int i = 0;
2919                 u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
2920                                 0x8100008181000081ULL,  /* FE=1, SE=0 */
2921                                 0x4200004242000042ULL,  /* FE=0, SE=1 */
2922                                 0};                     /* FE=0, SE=0 */
2923
2924                 while(i<4) {
2925                         writeq(value[i], &bar0->swapper_ctrl);
2926                         val64 = readq(&bar0->pif_rd_swapper_fb);
2927                         if (val64 == 0x0123456789ABCDEFULL)
2928                                 break;
2929                         i++;
2930                 }
2931                 if (i == 4) {
2932                         DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2933                                 dev->name);
2934                         DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2935                                 (unsigned long long) val64);
2936                         return FAILURE;
2937                 }
2938                 valr = value[i];
2939         } else {
2940                 valr = readq(&bar0->swapper_ctrl);
2941         }
2942
2943         valt = 0x0123456789ABCDEFULL;
2944         writeq(valt, &bar0->xmsi_address);
2945         val64 = readq(&bar0->xmsi_address);
2946
2947         if(val64 != valt) {
2948                 int i = 0;
2949                 u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
2950                                 0x0081810000818100ULL,  /* FE=1, SE=0 */
2951                                 0x0042420000424200ULL,  /* FE=0, SE=1 */
2952                                 0};                     /* FE=0, SE=0 */
2953
2954                 while(i<4) {
2955                         writeq((value[i] | valr), &bar0->swapper_ctrl);
2956                         writeq(valt, &bar0->xmsi_address);
2957                         val64 = readq(&bar0->xmsi_address);
2958                         if(val64 == valt)
2959                                 break;
2960                         i++;
2961                 }
2962                 if(i == 4) {
2963                         unsigned long long x = val64;
2964                         DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2965                         DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
2966                         return FAILURE;
2967                 }
2968         }
2969         val64 = readq(&bar0->swapper_ctrl);
2970         val64 &= 0xFFFF000000000000ULL;
2971
2972 #ifdef  __BIG_ENDIAN
2973         /*
2974          * The device is set to a big endian format by default, so a
2975          * big endian driver need not set anything.
2976          */
2977         val64 |= (SWAPPER_CTRL_TXP_FE |
2978                  SWAPPER_CTRL_TXP_SE |
2979                  SWAPPER_CTRL_TXD_R_FE |
2980                  SWAPPER_CTRL_TXD_W_FE |
2981                  SWAPPER_CTRL_TXF_R_FE |
2982                  SWAPPER_CTRL_RXD_R_FE |
2983                  SWAPPER_CTRL_RXD_W_FE |
2984                  SWAPPER_CTRL_RXF_W_FE |
2985                  SWAPPER_CTRL_XMSI_FE |
2986                  SWAPPER_CTRL_XMSI_SE |
2987                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2988         writeq(val64, &bar0->swapper_ctrl);
2989 #else
2990         /*
2991          * Initially we enable all bits to make it accessible by the
2992          * driver, then we selectively enable only those bits that
2993          * we want to set.
2994          */
2995         val64 |= (SWAPPER_CTRL_TXP_FE |
2996                  SWAPPER_CTRL_TXP_SE |
2997                  SWAPPER_CTRL_TXD_R_FE |
2998                  SWAPPER_CTRL_TXD_R_SE |
2999                  SWAPPER_CTRL_TXD_W_FE |
3000                  SWAPPER_CTRL_TXD_W_SE |
3001                  SWAPPER_CTRL_TXF_R_FE |
3002                  SWAPPER_CTRL_RXD_R_FE |
3003                  SWAPPER_CTRL_RXD_R_SE |
3004                  SWAPPER_CTRL_RXD_W_FE |
3005                  SWAPPER_CTRL_RXD_W_SE |
3006                  SWAPPER_CTRL_RXF_W_FE |
3007                  SWAPPER_CTRL_XMSI_FE |
3008                  SWAPPER_CTRL_XMSI_SE |
3009                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3010         writeq(val64, &bar0->swapper_ctrl);
3011 #endif
3012         val64 = readq(&bar0->swapper_ctrl);
3013
3014         /*
3015          * Verifying if endian settings are accurate by reading a
3016          * feedback register.
3017          */
3018         val64 = readq(&bar0->pif_rd_swapper_fb);
3019         if (val64 != 0x0123456789ABCDEFULL) {
3020                 /* Endian settings are incorrect, calls for another dekko. */
3021                 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3022                           dev->name);
3023                 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3024                           (unsigned long long) val64);
3025                 return FAILURE;
3026         }
3027
3028         return SUCCESS;
3029 }
3030
3031 /* ********************************************************* *
3032  * Functions defined below concern the OS part of the driver *
3033  * ********************************************************* */
3034
3035 /**
3036  *  s2io_open - open entry point of the driver
3037  *  @dev : pointer to the device structure.
3038  *  Description:
3039  *  This function is the open entry point of the driver. It mainly calls a
3040  *  function to allocate Rx buffers and inserts them into the buffer
3041  *  descriptors and then enables the Rx part of the NIC.
3042  *  Return value:
3043  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3044  *   file on failure.
3045  */
3046
3047 int s2io_open(struct net_device *dev)
3048 {
3049         nic_t *sp = dev->priv;
3050         int err = 0;
3051
3052         /*
3053          * Make sure you have link off by default every time
3054          * Nic is initialized
3055          */
3056         netif_carrier_off(dev);
3057         sp->last_link_state = 0;
3058
3059         /* Initialize H/W and enable interrupts */
3060         if (s2io_card_up(sp)) {
3061                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3062                           dev->name);
3063                 err = -ENODEV;
3064                 goto hw_init_failed;
3065         }
3066
3067         /* After proper initialization of H/W, register ISR */
3068         err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
3069                           sp->name, dev);
3070         if (err) {
3071                 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
3072                           dev->name);
3073                 goto isr_registration_failed;
3074         }
3075
3076         if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3077                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3078                 err = -ENODEV;
3079                 goto setting_mac_address_failed;
3080         }
3081
3082         netif_start_queue(dev);
3083         return 0;
3084
3085 setting_mac_address_failed:
3086         free_irq(sp->pdev->irq, dev);
3087 isr_registration_failed:
3088         del_timer_sync(&sp->alarm_timer);
3089         s2io_reset(sp);
3090 hw_init_failed:
3091         return err;
3092 }
3093
3094 /**
3095  *  s2io_close - close entry point of the driver
3096  *  @dev : device pointer.
3097  *  Description:
3098  *  This is the stop entry point of the driver. It needs to undo exactly
3099  *  whatever was done by the open entry point,thus it's usually referred to
3100  *  whatever was done by the open entry point, thus it's usually referred to
3101  *  as the close function. Among other things this function mainly stops the
3102  *  Return value:
3103  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3104  *  file on failure.
3105  */
3106
3107 int s2io_close(struct net_device *dev)
3108 {
3109         nic_t *sp = dev->priv;
3110         flush_scheduled_work();
3111         netif_stop_queue(dev);
3112         /* Reset card, kill tasklet and free Tx and Rx buffers. */
3113         s2io_card_down(sp);
3114
3115         free_irq(sp->pdev->irq, dev);
3116         sp->device_close_flag = TRUE;   /* Device is shut down. */
3117         return 0;
3118 }
3119
3120 /**
3121  *  s2io_xmit - Tx entry point of the driver
3122  *  @skb : the socket buffer containing the Tx data.
3123  *  @dev : device pointer.
3124  *  Description :
3125  *  This function is the Tx entry point of the driver. S2IO NIC supports
3126  *  certain protocol assist features on Tx side, namely CSO, S/G, LSO.
3127  *  NOTE: when the device can't queue the packet, the trans_start variable
3128  *  will not be updated.
3129  *  Return value:
3130  *  0 on success & 1 on failure.
3131  */
3132
3133 int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3134 {
3135         nic_t *sp = dev->priv;
3136         u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3137         register u64 val64;
3138         TxD_t *txdp;
3139         TxFIFO_element_t __iomem *tx_fifo;
3140         unsigned long flags;
3141 #ifdef NETIF_F_TSO
3142         int mss;
3143 #endif
3144         u16 vlan_tag = 0;
3145         int vlan_priority = 0;
3146         mac_info_t *mac_control;
3147         struct config_param *config;
3148
3149         mac_control = &sp->mac_control;
3150         config = &sp->config;
3151
3152         DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
3153         spin_lock_irqsave(&sp->tx_lock, flags);
3154         if (atomic_read(&sp->card_state) == CARD_DOWN) {
3155                 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
3156                           dev->name);
3157                 spin_unlock_irqrestore(&sp->tx_lock, flags);
3158                 dev_kfree_skb(skb);
3159                 return 0;
3160         }
3161
3162         queue = 0;
3163
3164         /* Get Fifo number to Transmit based on vlan priority */
3165         if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3166                 vlan_tag = vlan_tx_tag_get(skb);
3167                 vlan_priority = vlan_tag >> 13;
3168                 queue = config->fifo_mapping[vlan_priority];
3169         }
3170
3171         put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3172         get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3173         txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
3174                 list_virt_addr;
3175
3176         queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3177         /* Avoid "put" pointer going beyond "get" pointer */
3178         if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
3179                 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
3180                 netif_stop_queue(dev);
3181                 dev_kfree_skb(skb);
3182                 spin_unlock_irqrestore(&sp->tx_lock, flags);
3183                 return 0;
3184         }
3185
3186         /* A buffer with no data will be dropped */
3187         if (!skb->len) {
3188                 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3189                 dev_kfree_skb(skb);
3190                 spin_unlock_irqrestore(&sp->tx_lock, flags);
3191                 return 0;
3192         }
3193
3194 #ifdef NETIF_F_TSO
3195         mss = skb_shinfo(skb)->tso_size;
3196         if (mss) {
3197                 txdp->Control_1 |= TXD_TCP_LSO_EN;
3198                 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
3199         }
3200 #endif
3201
3202         frg_cnt = skb_shinfo(skb)->nr_frags;
3203         frg_len = skb->len - skb->data_len;
3204
3205         txdp->Buffer_Pointer = pci_map_single
3206             (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3207         txdp->Host_Control = (unsigned long) skb;
3208         if (skb->ip_summed == CHECKSUM_HW) {
3209                 txdp->Control_2 |=
3210                     (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3211                      TXD_TX_CKO_UDP_EN);
3212         }
3213
3214         txdp->Control_2 |= config->tx_intr_type;
3215
3216         if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3217                 txdp->Control_2 |= TXD_VLAN_ENABLE;
3218                 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3219         }
3220
3221         txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
3222                             TXD_GATHER_CODE_FIRST);
3223         txdp->Control_1 |= TXD_LIST_OWN_XENA;
3224
3225         /* For fragmented SKB. */
3226         for (i = 0; i < frg_cnt; i++) {
3227                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3228                 /* A '0' length fragment will be ignored */
3229                 if (!frag->size)
3230                         continue;
3231                 txdp++;
3232                 txdp->Buffer_Pointer = (u64) pci_map_page
3233                     (sp->pdev, frag->page, frag->page_offset,
3234                      frag->size, PCI_DMA_TODEVICE);
3235                 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
3236         }
3237         txdp->Control_1 |= TXD_GATHER_CODE_LAST;
3238
3239         tx_fifo = mac_control->tx_FIFO_start[queue];
3240         val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
3241         writeq(val64, &tx_fifo->TxDL_Pointer);
3242
3243         val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3244                  TX_FIFO_LAST_LIST);
3245
3246 #ifdef NETIF_F_TSO
3247         if (mss)
3248                 val64 |= TX_FIFO_SPECIAL_FUNC;
3249 #endif
3250         writeq(val64, &tx_fifo->List_Control);
3251
3252         mmiowb();
3253
3254         put_off++;
3255         put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3256         mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
3257
3258         /* Avoid "put" pointer going beyond "get" pointer */
3259         if (((put_off + 1) % queue_len) == get_off) {
3260                 DBG_PRINT(TX_DBG,
3261                           "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
3262                           put_off, get_off);
3263                 netif_stop_queue(dev);
3264         }
3265
3266         dev->trans_start = jiffies;
3267         spin_unlock_irqrestore(&sp->tx_lock, flags);
3268
3269         return 0;
3270 }
3271
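/*
 * Illustrative sketch (not part of the driver): the "no free TxDs"
 * tests in s2io_xmit() treat each FIFO as a circular buffer of
 * queue_len descriptor lists, i.e. the queue is considered full when
 *
 *	((put_off + 1) % queue_len) == get_off
 *
 * so that one slot always remains empty to distinguish a full queue
 * from an empty one.
 */
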
3272 static void
3273 s2io_alarm_handle(unsigned long data)
3274 {
3275         nic_t *sp = (nic_t *)data;
3276
3277         alarm_intr_handler(sp);
3278         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
3279 }
3280
3281 static void s2io_txpic_intr_handle(nic_t *sp)
3282 {
3283         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3284         u64 val64;
3285
3286         val64 = readq(&bar0->pic_int_status);
3287         if (val64 & PIC_INT_GPIO) {
3288                 val64 = readq(&bar0->gpio_int_reg);
3289                 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
3290                     (val64 & GPIO_INT_REG_LINK_UP)) {
3291                         val64 |=  GPIO_INT_REG_LINK_DOWN;
3292                         val64 |= GPIO_INT_REG_LINK_UP;
3293                         writeq(val64, &bar0->gpio_int_reg);
3294                         goto masking;
3295                 }
3296
3297                 if (((sp->last_link_state == LINK_UP) &&
3298                         (val64 & GPIO_INT_REG_LINK_DOWN)) ||
3299                 ((sp->last_link_state == LINK_DOWN) &&
3300                 (val64 & GPIO_INT_REG_LINK_UP))) {
3301                         val64 = readq(&bar0->gpio_int_mask);
3302                         val64 |=  GPIO_INT_MASK_LINK_DOWN;
3303                         val64 |= GPIO_INT_MASK_LINK_UP;
3304                         writeq(val64, &bar0->gpio_int_mask);
3305                         s2io_set_link((unsigned long)sp);
3306                 }
3307 masking:
3308                 if (sp->last_link_state == LINK_UP) {
3309                         /*enable down interrupt */
3310                         val64 = readq(&bar0->gpio_int_mask);
3311                         /* unmasks link down intr */
3312                         val64 &=  ~GPIO_INT_MASK_LINK_DOWN;
3313                         /* masks link up intr */
3314                         val64 |= GPIO_INT_MASK_LINK_UP;
3315                         writeq(val64, &bar0->gpio_int_mask);
3316                 } else {
3317                         /*enable UP Interrupt */
3318                         val64 = readq(&bar0->gpio_int_mask);
3319                         /* unmasks link up interrupt */
3320                         val64 &= ~GPIO_INT_MASK_LINK_UP;
3321                         /* masks link down interrupt */
3322                         val64 |=  GPIO_INT_MASK_LINK_DOWN;
3323                         writeq(val64, &bar0->gpio_int_mask);
3324                 }
3325         }
3326 }
3327
3328 /**
3329  *  s2io_isr - ISR handler of the device .
3330  *  @irq: the irq of the device.
3331  *  @dev_id: a void pointer to the dev structure of the NIC.
3332  *  @pt_regs: pointer to the registers pushed on the stack.
3333  *  Description:  This function is the ISR handler of the device. It
3334  *  identifies the reason for the interrupt and calls the relevant
3335  *  service routines. As a contingency measure, this ISR allocates the
3336  *  recv buffers, if their numbers are below the panic value which is
3337  *  presently set to 25% of the original number of rcv buffers allocated.
3338  *  Return value:
3339  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
3340  *   IRQ_NONE: will be returned if interrupt is not from our device
3341  */
3342 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
3343 {
3344         struct net_device *dev = (struct net_device *) dev_id;
3345         nic_t *sp = dev->priv;
3346         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3347         int i;
3348         u64 reason = 0, val64;
3349         mac_info_t *mac_control;
3350         struct config_param *config;
3351
3352         atomic_inc(&sp->isr_cnt);
3353         mac_control = &sp->mac_control;
3354         config = &sp->config;
3355
3356         /*
3357          * Identify the cause for interrupt and call the appropriate
3358          * interrupt handler. Causes for the interrupt could be;
3359          * 1. Rx of packet.
3360          * 2. Tx complete.
3361          * 3. Link down.
3362          * 4. Error in any functional blocks of the NIC.
3363          */
3364         reason = readq(&bar0->general_int_status);
3365
3366         if (!reason) {
3367                 /* The interrupt was not raised by Xena. */
3368                 atomic_dec(&sp->isr_cnt);
3369                 return IRQ_NONE;
3370         }
3371
3372 #ifdef CONFIG_S2IO_NAPI
3373         if (reason & GEN_INTR_RXTRAFFIC) {
3374                 if (netif_rx_schedule_prep(dev)) {
3375                         en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
3376                                               DISABLE_INTRS);
3377                         __netif_rx_schedule(dev);
3378                 }
3379         }
3380 #else
3381         /* If Intr is because of Rx Traffic */
3382         if (reason & GEN_INTR_RXTRAFFIC) {
3383                 /*
3384                  * rx_traffic_int reg is an R1 register, writing all 1's
3385                  * will ensure that the actual interrupt causing bit get's
3386                  * cleared and hence a read can be avoided.
3387                  */
3388                 val64 = 0xFFFFFFFFFFFFFFFFULL;
3389                 writeq(val64, &bar0->rx_traffic_int);
3390                 for (i = 0; i < config->rx_ring_num; i++) {
3391                         rx_intr_handler(&mac_control->rings[i]);
3392                 }
3393         }
3394 #endif
3395
3396         /* If Intr is because of Tx Traffic */
3397         if (reason & GEN_INTR_TXTRAFFIC) {
3398                 /*
3399                  * tx_traffic_int reg is an R1 register, writing all 1's
3400                  * will ensure that the actual interrupt causing bit get's
3401                  * cleared and hence a read can be avoided.
3402                  */
3403                 val64 = 0xFFFFFFFFFFFFFFFFULL;
3404                 writeq(val64, &bar0->tx_traffic_int);
3405
3406                 for (i = 0; i < config->tx_fifo_num; i++)
3407                         tx_intr_handler(&mac_control->fifos[i]);
3408         }
3409
3410         if (reason & GEN_INTR_TXPIC)
3411                 s2io_txpic_intr_handle(sp);
3412         /*
3413          * If the Rx buffer count is below the panic threshold then
3414          * reallocate the buffers from the interrupt handler itself,
3415          * else schedule a tasklet to reallocate the buffers.
3416          */
3417 #ifndef CONFIG_S2IO_NAPI
3418         for (i = 0; i < config->rx_ring_num; i++) {
3419                 int ret;
3420                 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3421                 int level = rx_buffer_level(sp, rxb_size, i);
3422
3423                 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3424                         DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
3425                         DBG_PRINT(INTR_DBG, "PANIC levels\n");
3426                         if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3427                                 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3428                                           dev->name);
3429                                 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3430                                 clear_bit(0, (&sp->tasklet_status));
3431                                 atomic_dec(&sp->isr_cnt);
3432                                 return IRQ_HANDLED;
3433                         }
3434                         clear_bit(0, (&sp->tasklet_status));
3435                 } else if (level == LOW) {
3436                         tasklet_schedule(&sp->task);
3437                 }
3438         }
3439 #endif
3440
3441         atomic_dec(&sp->isr_cnt);
3442         return IRQ_HANDLED;
3443 }
3444
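/*
 * Illustrative sketch (not part of the driver): the ISR above is a
 * dispatcher on general_int_status, conceptually
 *
 *	reason = readq(&bar0->general_int_status);
 *	if (!reason)
 *		return IRQ_NONE;
 *	if (reason & GEN_INTR_RXTRAFFIC)
 *		handle Rx (or schedule the NAPI poll);
 *	if (reason & GEN_INTR_TXTRAFFIC)
 *		handle Tx completions;
 *	if (reason & GEN_INTR_TXPIC)
 *		handle GPIO/link events;
 *	return IRQ_HANDLED;
 */
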
3445 /**
3446  * s2io_updt_stats - triggers a one-shot update of the NIC statistics block.
3447  */
3448 static void s2io_updt_stats(nic_t *sp)
3449 {
3450         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3451         u64 val64;
3452         int cnt = 0;
3453
3454         if (atomic_read(&sp->card_state) == CARD_UP) {
3455                 /* Approx 30us on a 133 MHz bus */
3456                 val64 = SET_UPDT_CLICKS(10) |
3457                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3458                 writeq(val64, &bar0->stat_cfg);
3459                 do {
3460                         udelay(100);
3461                         val64 = readq(&bar0->stat_cfg);
3462                         if (!(val64 & BIT(0)))
3463                                 break;
3464                         cnt++;
3465                         if (cnt == 5)
3466                                 break; /* Updt failed */
3467                 } while(1);
3468         }
3469 }
3470
3471 /**
3472  *  s2io_get_stats - Updates the device statistics structure.
3473  *  @dev : pointer to the device structure.
3474  *  Description:
3475  *  This function updates the device statistics structure in the s2io_nic
3476  *  structure and returns a pointer to the same.
3477  *  Return value:
3478  *  pointer to the updated net_device_stats structure.
3479  */
3480
3481 struct net_device_stats *s2io_get_stats(struct net_device *dev)
3482 {
3483         nic_t *sp = dev->priv;
3484         mac_info_t *mac_control;
3485         struct config_param *config;
3486
3487
3488         mac_control = &sp->mac_control;
3489         config = &sp->config;
3490
3491         /* Configure Stats for immediate updt */
3492         s2io_updt_stats(sp);
3493
3494         sp->stats.tx_packets =
3495                 le32_to_cpu(mac_control->stats_info->tmac_frms);
3496         sp->stats.tx_errors =
3497                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3498         sp->stats.rx_errors =
3499                 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3500         sp->stats.multicast =
3501                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
3502         sp->stats.rx_length_errors =
3503                 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
3504
3505         return (&sp->stats);
3506 }
3507
3508 /**
3509  *  s2io_set_multicast - entry point for multicast address enable/disable.
3510  *  @dev : pointer to the device structure
3511  *  Description:
3512  *  This function is a driver entry point which gets called by the kernel
3513  *  whenever multicast addresses must be enabled/disabled. This also gets
3514  *  called to set/reset promiscuous mode. Depending on the device flags, we
3515  *  determine whether multicast addresses must be enabled or promiscuous
3516  *  mode is to be disabled, etc.
3517  *  Return value:
3518  *  void.
3519  */
3520
3521 static void s2io_set_multicast(struct net_device *dev)
3522 {
3523         int i, j, prev_cnt;
3524         struct dev_mc_list *mclist;
3525         nic_t *sp = dev->priv;
3526         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3527         u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3528             0xfeffffffffffULL;
3529         u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3530         void __iomem *add;
3531
3532         if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3533                 /*  Enable all Multicast addresses */
3534                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3535                        &bar0->rmac_addr_data0_mem);
3536                 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3537                        &bar0->rmac_addr_data1_mem);
3538                 val64 = RMAC_ADDR_CMD_MEM_WE |
3539                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3540                     RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3541                 writeq(val64, &bar0->rmac_addr_cmd_mem);
3542                 /* Wait till command completes */
3543                 wait_for_cmd_complete(sp);
3544
3545                 sp->m_cast_flg = 1;
3546                 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
3547         } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3548                 /*  Disable all Multicast addresses */
3549                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3550                        &bar0->rmac_addr_data0_mem);
3551                 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3552                        &bar0->rmac_addr_data1_mem);
3553                 val64 = RMAC_ADDR_CMD_MEM_WE |
3554                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3555                     RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3556                 writeq(val64, &bar0->rmac_addr_cmd_mem);
3557                 /* Wait till command completes */
3558                 wait_for_cmd_complete(sp);
3559
3560                 sp->m_cast_flg = 0;
3561                 sp->all_multi_pos = 0;
3562         }
3563
3564         if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3565                 /*  Put the NIC into promiscuous mode */
3566                 add = &bar0->mac_cfg;
3567                 val64 = readq(&bar0->mac_cfg);
3568                 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
3569
3570                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3571                 writel((u32) val64, add);
3572                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3573                 writel((u32) (val64 >> 32), (add + 4));
3574
3575                 val64 = readq(&bar0->mac_cfg);
3576                 sp->promisc_flg = 1;
3577                 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
3578                           dev->name);
3579         } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3580                 /*  Remove the NIC from promiscuous mode */
3581                 add = &bar0->mac_cfg;
3582                 val64 = readq(&bar0->mac_cfg);
3583                 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3584
3585                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3586                 writel((u32) val64, add);
3587                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3588                 writel((u32) (val64 >> 32), (add + 4));
3589
3590                 val64 = readq(&bar0->mac_cfg);
3591                 sp->promisc_flg = 0;
3592                 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
3593                           dev->name);
3594         }
3595
3596         /*  Update individual M_CAST address list */
3597         if ((!sp->m_cast_flg) && dev->mc_count) {
3598                 if (dev->mc_count >
3599                     (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3600                         DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3601                                   dev->name);
3602                         DBG_PRINT(ERR_DBG, "can be added, please enable ");
3603                         DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3604                         return;
3605                 }
3606
3607                 prev_cnt = sp->mc_addr_count;
3608                 sp->mc_addr_count = dev->mc_count;
3609
3610                 /* Clear out the previous list of Mc in the H/W. */
3611                 for (i = 0; i < prev_cnt; i++) {
3612                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3613                                &bar0->rmac_addr_data0_mem);
3614                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3615                                 &bar0->rmac_addr_data1_mem);
3616                         val64 = RMAC_ADDR_CMD_MEM_WE |
3617                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3618                             RMAC_ADDR_CMD_MEM_OFFSET
3619                             (MAC_MC_ADDR_START_OFFSET + i);
3620                         writeq(val64, &bar0->rmac_addr_cmd_mem);
3621
3622                         /* Wait till command completes */
3623                         if (wait_for_cmd_complete(sp)) {
3624                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
3625                                           dev->name);
3626                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3627                                 return;
3628                         }
3629                 }
3630
3631                 /* Create the new Rx filter list and update the same in H/W. */
3632                 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3633                      i++, mclist = mclist->next) {
3634                         memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
3635                                ETH_ALEN);
3636                         for (j = 0; j < ETH_ALEN; j++) {
3637                                 mac_addr |= mclist->dmi_addr[j];
3638                                 mac_addr <<= 8;
3639                         }
3640                         mac_addr >>= 8;
3641                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3642                                &bar0->rmac_addr_data0_mem);
3643                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3644                                 &bar0->rmac_addr_data1_mem);
3645                         val64 = RMAC_ADDR_CMD_MEM_WE |
3646                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3647                             RMAC_ADDR_CMD_MEM_OFFSET
3648                             (i + MAC_MC_ADDR_START_OFFSET);
3649                         writeq(val64, &bar0->rmac_addr_cmd_mem);
3650
3651                         /* Wait till command completes */
3652                         if (wait_for_cmd_complete(sp)) {
3653                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
3654                                           dev->name);
3655                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3656                                 return;
3657                         }
3658                 }
3659         }
3660 }
3661
3662 /**
3663  *  s2io_set_mac_addr - Programs the Xframe mac address
3664  *  @dev : pointer to the device structure.
3665  *  @addr: a uchar pointer to the new mac address which is to be set.
3666  *  Description : This procedure will program the Xframe to receive
3667  *  frames with new Mac Address
3668  *  Return value: SUCCESS on success and an appropriate (-)ve integer
3669  *  as defined in errno.h file on failure.
3670  */
3671
3672 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3673 {
3674         nic_t *sp = dev->priv;
3675         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3676         register u64 val64, mac_addr = 0;
3677         int i;
3678
3679         /*
3680          * Set the new MAC address as the new unicast filter and reflect this
3681          * change on the device address registered with the OS. It will be
3682          * at offset 0.
3683          */
3684         for (i = 0; i < ETH_ALEN; i++) {
3685                 mac_addr <<= 8;
3686                 mac_addr |= addr[i];
3687         }
3688
3689         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3690                &bar0->rmac_addr_data0_mem);
3691
3692         val64 =
3693             RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3694             RMAC_ADDR_CMD_MEM_OFFSET(0);
3695         writeq(val64, &bar0->rmac_addr_cmd_mem);
3696         /* Wait till command completes */
3697         if (wait_for_cmd_complete(sp)) {
3698                 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3699                 return FAILURE;
3700         }
3701
3702         return SUCCESS;
3703 }
3704
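/*
 * Illustrative example (not from the source): s2io_set_mac_addr() above
 * packs the six address octets most significant byte first, so for a
 * hypothetical address 00:11:22:33:44:55 the packed value passed to
 * RMAC_ADDR_DATA0_MEM_ADDR() would be 0x001122334455.
 */
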
3705 /**
3706  * s2io_ethtool_sset - Sets different link parameters.
3707  * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3708  * @info: pointer to the structure with parameters given by ethtool to set
3709  * link information.
3710  * Description:
3711  * The function sets different link parameters provided by the user onto
3712  * the NIC.
3713  * Return value:
3714  * 0 on success.
3715 */
3716
3717 static int s2io_ethtool_sset(struct net_device *dev,
3718                              struct ethtool_cmd *info)
3719 {
3720         nic_t *sp = dev->priv;
3721         if ((info->autoneg == AUTONEG_ENABLE) ||
3722             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3723                 return -EINVAL;
3724         else {
3725                 s2io_close(sp->dev);
3726                 s2io_open(sp->dev);
3727         }
3728
3729         return 0;
3730 }
3731
3732 /**
3733  * s2io_ethtool_gset - Return link specific information.
3734  * @sp : private member of the device structure, pointer to the
3735  *      s2io_nic structure.
3736  * @info : pointer to the structure with parameters given by ethtool
3737  * to return link information.
3738  * Description:
3739  * Returns link specific information like speed, duplex etc.. to ethtool.
3740  * Return value :
3741  * return 0 on success.
3742  */
3743
3744 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3745 {
3746         nic_t *sp = dev->priv;
3747         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3748         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3749         info->port = PORT_FIBRE;
3750         /* info->transceiver?? TODO */
3751
3752         if (netif_carrier_ok(sp->dev)) {
3753                 info->speed = 10000;
3754                 info->duplex = DUPLEX_FULL;
3755         } else {
3756                 info->speed = -1;
3757                 info->duplex = -1;
3758         }
3759
3760         info->autoneg = AUTONEG_DISABLE;
3761         return 0;
3762 }
3763
3764 /**
3765  * s2io_ethtool_gdrvinfo - Returns driver specific information.
3766  * @sp : private member of the device structure, which is a pointer to the
3767  * s2io_nic structure.
3768  * @info : pointer to the structure with parameters given by ethtool to
3769  * return driver information.
3770  * Description:
3771  * Returns driver specific information like name, version, etc. to ethtool.
3772  * Return value:
3773  *  void
3774  */
3775
3776 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3777                                   struct ethtool_drvinfo *info)
3778 {
3779         nic_t *sp = dev->priv;
3780
3781         strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3782         strncpy(info->version, s2io_driver_version,
3783                 sizeof(s2io_driver_version));
3784         strncpy(info->fw_version, "", 32);
3785         strncpy(info->bus_info, pci_name(sp->pdev), 32);
3786         info->regdump_len = XENA_REG_SPACE;
3787         info->eedump_len = XENA_EEPROM_SPACE;
3788         info->testinfo_len = S2IO_TEST_LEN;
3789         info->n_stats = S2IO_STAT_LEN;
3790 }
3791
3792 /**
3793  *  s2io_ethtool_gregs - dumps the entire register space of the Xframe into the buffer.
3794  *  @sp: private member of the device structure, which is a pointer to the
3795  *  s2io_nic structure.
3796  *  @regs : pointer to the structure with parameters given by ethtool for
3797  *  dumping the registers.
3798  *  @space: The user buffer into which all the registers are dumped.
3799  *  Description:
3800  *  Dumps the entire register space of xFrame NIC into the user given
3801  *  buffer area.
3802  * Return value :
3803  * void .
3804 */
3805
3806 static void s2io_ethtool_gregs(struct net_device *dev,
3807                                struct ethtool_regs *regs, void *space)
3808 {
3809         int i;
3810         u64 reg;
3811         u8 *reg_space = (u8 *) space;
3812         nic_t *sp = dev->priv;
3813
3814         regs->len = XENA_REG_SPACE;
3815         regs->version = sp->pdev->subsystem_device;
3816
3817         for (i = 0; i < regs->len; i += 8) {
3818                 reg = readq(sp->bar0 + i);
3819                 memcpy((reg_space + i), &reg, 8);
3820         }
3821 }
3822
3823 /**
3824  *  s2io_phy_id  - timer function that alternates adapter LED.
3825  *  @data : address of the private member of the device structure, which
3826  *  is a pointer to the s2io_nic structure, provided as an unsigned long.
3827  * Description: This is actually the timer function that toggles the
3828  * adapter LED bit in the adapter control register on every invocation.
3829  * The timer is set for 1/2 a second, hence the NIC's LED blinks
3830  *  once every second.
3831 */
3832 static void s2io_phy_id(unsigned long data)
3833 {
3834         nic_t *sp = (nic_t *) data;
3835         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3836         u64 val64 = 0;
3837         u16 subid;
3838
3839         subid = sp->pdev->subsystem_device;
3840         if ((sp->device_type == XFRAME_II_DEVICE) ||
3841                    ((subid & 0xFF) >= 0x07)) {
3842                 val64 = readq(&bar0->gpio_control);
3843                 val64 ^= GPIO_CTRL_GPIO_0;
3844                 writeq(val64, &bar0->gpio_control);
3845         } else {
3846                 val64 = readq(&bar0->adapter_control);
3847                 val64 ^= ADAPTER_LED_ON;
3848                 writeq(val64, &bar0->adapter_control);
3849         }
3850
3851         mod_timer(&sp->id_timer, jiffies + HZ / 2);
3852 }
3853
3854 /**
3855  * s2io_ethtool_idnic - To physically identify the nic on the system.
3856  * @sp : private member of the device structure, which is a pointer to the
3857  * s2io_nic structure.
3858  * @id : pointer to the structure with identification parameters given by
3859  * ethtool.
3860  * Description: Used to physically identify the NIC on the system.
3861  * The Link LED will blink for a time specified by the user for
3862  * identification.
3863  * NOTE: The Link has to be Up to be able to blink the LED. Hence
3864  * identification is possible only if its link is up.
3865  * Return value:
3866  * int , returns 0 on success
3867  */
3868
3869 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3870 {
3871         u64 val64 = 0, last_gpio_ctrl_val;
3872         nic_t *sp = dev->priv;
3873         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3874         u16 subid;
3875
3876         subid = sp->pdev->subsystem_device;
3877         last_gpio_ctrl_val = readq(&bar0->gpio_control);
3878         if ((sp->device_type == XFRAME_I_DEVICE) &&
3879                 ((subid & 0xFF) < 0x07)) {
3880                 val64 = readq(&bar0->adapter_control);
3881                 if (!(val64 & ADAPTER_CNTL_EN)) {
3882                         printk(KERN_ERR
3883                                "Adapter Link down, cannot blink LED\n");
3884                         return -EFAULT;
3885                 }
3886         }
3887         if (sp->id_timer.function == NULL) {
3888                 init_timer(&sp->id_timer);
3889                 sp->id_timer.function = s2io_phy_id;
3890                 sp->id_timer.data = (unsigned long) sp;
3891         }
3892         mod_timer(&sp->id_timer, jiffies);
3893         if (data)
3894                 msleep_interruptible(data * HZ);
3895         else
3896                 msleep_interruptible(MAX_FLICKER_TIME);
3897         del_timer_sync(&sp->id_timer);
3898
3899         if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
3900                 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3901                 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3902         }
3903
3904         return 0;
3905 }
3906
3907 /**
3908  * s2io_ethtool_getpause_data - Pause frame generation and reception.
3909  * @sp : private member of the device structure, which is a pointer to the
3910  *      s2io_nic structure.
3911  * @ep : pointer to the structure with pause parameters given by ethtool.
3912  * Description:
3913  * Returns the Pause frame generation and reception capability of the NIC.
3914  * Return value:
3915  *  void
3916  */
3917 static void s2io_ethtool_getpause_data(struct net_device *dev,
3918                                        struct ethtool_pauseparam *ep)
3919 {
3920         u64 val64;
3921         nic_t *sp = dev->priv;
3922         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3923
3924         val64 = readq(&bar0->rmac_pause_cfg);
3925         if (val64 & RMAC_PAUSE_GEN_ENABLE)
3926                 ep->tx_pause = TRUE;
3927         if (val64 & RMAC_PAUSE_RX_ENABLE)
3928                 ep->rx_pause = TRUE;
3929         ep->autoneg = FALSE;
3930 }
3931
3932 /**
3933  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
3934  * @sp : private member of the device structure, which is a pointer to the
3935  *      s2io_nic structure.
3936  * @ep : pointer to the structure with pause parameters given by ethtool.
3937  * Description:
3938  * It can be used to set or reset Pause frame generation or reception
3939  * support of the NIC.
3940  * Return value:
3941  * int, returns 0 on Success
3942  */
3943
3944 static int s2io_ethtool_setpause_data(struct net_device *dev,
3945                                struct ethtool_pauseparam *ep)
3946 {
3947         u64 val64;
3948         nic_t *sp = dev->priv;
3949         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3950
3951         val64 = readq(&bar0->rmac_pause_cfg);
3952         if (ep->tx_pause)
3953                 val64 |= RMAC_PAUSE_GEN_ENABLE;
3954         else
3955                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3956         if (ep->rx_pause)
3957                 val64 |= RMAC_PAUSE_RX_ENABLE;
3958         else
3959                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3960         writeq(val64, &bar0->rmac_pause_cfg);
3961         return 0;
3962 }
3963
3964 /**
3965  * read_eeprom - reads 4 bytes of data from user given offset.
3966  * @sp : private member of the device structure, which is a pointer to the
3967  *      s2io_nic structure.
3968  * @off : offset from which the data is to be read
3969  * @data : It's an output parameter where the data read at the given
3970  *      offset is stored.
3971  * Description:
3972  * Will read 4 bytes of data from the user given offset and return the
3973  * read data.
3974  * NOTE: Only the part of the EEPROM visible through the
3975  *   I2C bus can be read.
3976  * Return value:
3977  *  -1 on failure and 0 on success.
3978  */
3979
3980 #define S2IO_DEV_ID             5
3981 static int read_eeprom(nic_t * sp, int off, u32 * data)
3982 {
3983         int ret = -1;
3984         u32 exit_cnt = 0;
3985         u64 val64;
3986         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3987
3988         val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3989             I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3990             I2C_CONTROL_CNTL_START;
3991         SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3992
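        /* Poll for up to ~250ms (5 x 50ms) for the I2C read to complete. */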
3993         while (exit_cnt < 5) {
3994                 val64 = readq(&bar0->i2c_control);
3995                 if (I2C_CONTROL_CNTL_END(val64)) {
3996                         *data = I2C_CONTROL_GET_DATA(val64);
3997                         ret = 0;
3998                         break;
3999                 }
4000                 msleep(50);
4001                 exit_cnt++;
4002         }
4003
4004         return ret;
4005 }
4006
4007 /**
4008  *  write_eeprom - actually writes the relevant part of the data value.
4009  *  @sp : private member of the device structure, which is a pointer to the
4010  *       s2io_nic structure.
4011  *  @off : offset at which the data must be written
4012  *  @data : The data that is to be written
4013  *  @cnt : Number of bytes of the data that are actually to be written into
4014  *  the Eeprom. (max of 3)
4015  * Description:
4016  *  Actually writes the relevant part of the data value into the Eeprom
4017  *  through the I2C bus.
4018  * Return value:
4019  *  0 on success, -1 on failure.
4020  */
4021
4022 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
4023 {
4024         int exit_cnt = 0, ret = -1;
4025         u64 val64;
4026         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4027
4028         val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4029             I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
4030             I2C_CONTROL_CNTL_START;
4031         SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4032
4033         while (exit_cnt < 5) {
4034                 val64 = readq(&bar0->i2c_control);
4035                 if (I2C_CONTROL_CNTL_END(val64)) {
4036                         if (!(val64 & I2C_CONTROL_NACK))
4037                                 ret = 0;
4038                         break;
4039                 }
4040                 msleep(50);
4041                 exit_cnt++;
4042         }
4043
4044         return ret;
4045 }
4046
4047 /**
4048  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
4049  *  @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
4050  *  @eeprom : pointer to the user level structure provided by ethtool,
4051  *  containing all relevant information.
4052  *  @data_buf : user defined value to be written into Eeprom.
4053  *  Description: Reads the values stored in the Eeprom at given offset
4054  *  for a given length. Stores these values int the input argument data
4055  *  buffer 'data_buf' and returns these to the caller (ethtool.)
4056  *  Return value:
4057  *  int  0 on success
4058  */
4059
4060 static int s2io_ethtool_geeprom(struct net_device *dev,
4061                          struct ethtool_eeprom *eeprom, u8 * data_buf)
4062 {
4063         u32 data, i, valid;
4064         nic_t *sp = dev->priv;
4065
4066         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
4067
4068         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
4069                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
4070
4071         for (i = 0; i < eeprom->len; i += 4) {
4072                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
4073                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
4074                         return -EFAULT;
4075                 }
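                /*
                 * INV() reverses the byte order of the word read from the
                 * EEPROM before it is copied into the user buffer.
                 */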
4076                 valid = INV(data);
4077                 memcpy((data_buf + i), &valid, 4);
4078         }
4079         return 0;
4080 }
4081
4082 /**
4083  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
4084  *  @sp : private member of the device structure, which is a pointer to the
4085  *  s2io_nic structure.
4086  *  @eeprom : pointer to the user level structure provided by ethtool,
4087  *  containing all relevant information.
4088  *  @data_buf : user defined value to be written into Eeprom.
4089  *  Description:
4090  *  Tries to write the user provided value in the Eeprom, at the offset
4091  *  given by the user.
4092  *  Return value:
4093  *  0 on success, -EFAULT on failure.
4094  */
4095
4096 static int s2io_ethtool_seeprom(struct net_device *dev,
4097                                 struct ethtool_eeprom *eeprom,
4098                                 u8 * data_buf)
4099 {
4100         int len = eeprom->len, cnt = 0;
4101         u32 valid = 0, data;
4102         nic_t *sp = dev->priv;
4103
4104         if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
4105                 DBG_PRINT(ERR_DBG,
4106                           "ETHTOOL_WRITE_EEPROM Err: Magic value ");
4107                 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
4108                           eeprom->magic);
4109                 return -EFAULT;
4110         }
4111
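        /*
         * Write the user buffer one byte at a time; a non-zero byte is
         * shifted into the most significant byte of the 32-bit word
         * handed to write_eeprom().
         */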
4112         while (len) {
4113                 data = (u32) data_buf[cnt] & 0x000000FF;
4114                 if (data) {
4115                         valid = (u32) (data << 24);
4116                 } else
4117                         valid = data;
4118
4119                 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
4120                         DBG_PRINT(ERR_DBG,
4121                                   "ETHTOOL_WRITE_EEPROM Err: Cannot ");
4122                         DBG_PRINT(ERR_DBG,
4123                                   "write into the specified offset\n");
4124                         return -EFAULT;
4125                 }
4126                 cnt++;
4127                 len--;
4128         }
4129
4130         return 0;
4131 }
4132
4133 /**
4134  * s2io_register_test - reads and writes into all clock domains.
4135  * @sp : private member of the device structure, which is a pointer to the
4136  * s2io_nic structure.
4137  * @data : variable that returns the result of each of the tests conducted
4138  * by the driver.
4139  * Description:
4140  * Read and write into all clock domains. The NIC has 3 clock domains;
4141  * the test verifies that registers in all three regions are accessible.
4142  * Return value:
4143  * 0 on success.
4144  */
4145
4146 static int s2io_register_test(nic_t * sp, uint64_t * data)
4147 {
4148         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4149         u64 val64 = 0;
4150         int fail = 0;
4151
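        /*
         * Read back a handful of registers and compare them against their
         * expected values, then do a couple of write/read-back checks.
         */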
4152         val64 = readq(&bar0->pif_rd_swapper_fb);
4153         if (val64 != 0x123456789abcdefULL) {
4154                 fail = 1;
4155                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
4156         }
4157
4158         val64 = readq(&bar0->rmac_pause_cfg);
4159         if (val64 != 0xc000ffff00000000ULL) {
4160                 fail = 1;
4161                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
4162         }
4163
4164         val64 = readq(&bar0->rx_queue_cfg);
4165         if (val64 != 0x0808080808080808ULL) {
4166                 fail = 1;
4167                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
4168         }
4169
4170         val64 = readq(&bar0->xgxs_efifo_cfg);
4171         if (val64 != 0x000000001923141EULL) {
4172                 fail = 1;
4173                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
4174         }
4175
4176         val64 = 0x5A5A5A5A5A5A5A5AULL;
4177         writeq(val64, &bar0->xmsi_data);
4178         val64 = readq(&bar0->xmsi_data);
4179         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
4180                 fail = 1;
4181                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
4182         }
4183
4184         val64 = 0xA5A5A5A5A5A5A5A5ULL;
4185         writeq(val64, &bar0->xmsi_data);
4186         val64 = readq(&bar0->xmsi_data);
4187         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
4188                 fail = 1;
4189                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
4190         }
4191
4192         *data = fail;
4193         return 0;
4194 }
4195
4196 /**
4197  * s2io_eeprom_test - to verify that the EEPROM in the Xena can be programmed.
4198  * @sp : private member of the device structure, which is a pointer to the
4199  * s2io_nic structure.
4200  * @data : variable that returns the result of each of the tests conducted by
4201  * the driver.
4202  * Description:
4203  * Verify that the EEPROM in the Xena can be programmed using the I2C_CONTROL
4204  * register.
4205  * Return value:
4206  * 0 on success.
4207  */
4208
4209 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
4210 {
4211         int fail = 0;
4212         u32 ret_data;
4213
4214         /* Test Write Error at offset 0 */
4215         if (!write_eeprom(sp, 0, 0, 3))
4216                 fail = 1;
4217
4218         /* Test Write at offset 4f0 */
4219         if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
4220                 fail = 1;
4221         if (read_eeprom(sp, 0x4F0, &ret_data))
4222                 fail = 1;
4223
4224         if (ret_data != 0x01234567)
4225                 fail = 1;
4226
4227         /* Reset the EEPROM data to 0xFFFFFFFF */
4228         write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
4229
4230         /* Test Write Request Error at offset 0x7c */
4231         if (!write_eeprom(sp, 0x07C, 0, 3))
4232                 fail = 1;
4233
4234         /* Test Write Request at offset 0x7fc */
4235         if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
4236                 fail = 1;
4237         if (read_eeprom(sp, 0x7FC, &ret_data))
4238                 fail = 1;
4239
4240         if (ret_data != 0x01234567)
4241                 fail = 1;
4242
4243         /* Reset the EEPROM data to 0xFFFFFFFF */
4244         write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
4245
4246         /* Test Write Error at offset 0x80 */
4247         if (!write_eeprom(sp, 0x080, 0, 3))
4248                 fail = 1;
4249
4250         /* Test Write Error at offset 0xfc */
4251         if (!write_eeprom(sp, 0x0FC, 0, 3))
4252                 fail = 1;
4253
4254         /* Test Write Error at offset 0x100 */
4255         if (!write_eeprom(sp, 0x100, 0, 3))
4256                 fail = 1;
4257
4258         /* Test Write Error at offset 4ec */
4259         if (!write_eeprom(sp, 0x4EC, 0, 3))
4260                 fail = 1;
4261
4262         *data = fail;
4263         return 0;
4264 }
4265
4266 /**
4267  * s2io_bist_test - invokes the MemBist test of the card.
4268  * @sp : private member of the device structure, which is a pointer to the
4269  * s2io_nic structure.
4270  * @data : variable that returns the result of each of the tests conducted by
4271  * the driver.
4272  * Description:
4273  * This invokes the MemBist test of the card. We give around
4274  * 2 seconds for the test to complete. If it's still not complete
4275  * within this period, we consider that the test failed.
4276  * Return value:
4277  * 0 on success and -1 on failure.
4278  */
4279
4280 static int s2io_bist_test(nic_t * sp, uint64_t * data)
4281 {
4282         u8 bist = 0;
4283         int cnt = 0, ret = -1;
4284
4285         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4286         bist |= PCI_BIST_START;
4287         pci_write_config_byte(sp->pdev, PCI_BIST, bist);
4288
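        /* Poll for BIST completion for up to ~2 seconds (20 x 100ms). */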
4289         while (cnt < 20) {
4290                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4291                 if (!(bist & PCI_BIST_START)) {
4292                         *data = (bist & PCI_BIST_CODE_MASK);
4293                         ret = 0;
4294                         break;
4295                 }
4296                 msleep(100);
4297                 cnt++;
4298         }
4299
4300         return ret;
4301 }
4302
4303 /**
4304  * s2io_link_test - verifies the link state of the nic
4305  * @sp : private member of the device structure, which is a pointer to the
4306  * s2io_nic structure.
4307  * @data: variable that returns the result of each of the tests conducted by
4308  * the driver.
4309  * Description:
4310  * The function verifies the link state of the NIC and updates the input
4311  * argument 'data' appropriately.
4312  * Return value:
4313  * 0 on success.
4314  */
4315
4316 static int s2io_link_test(nic_t * sp, uint64_t * data)
4317 {
4318         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4319         u64 val64;
4320
4321         val64 = readq(&bar0->adapter_status);
4322         if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
4323                 *data = 1;
4324
4325         return 0;
4326 }
4327
4328 /**
4329  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
4330  * @sp : private member of the device structure, which is a pointer to the
4331  * s2io_nic structure.
4332  * @data : variable that returns the result of each of the tests
4333  * conducted by the driver.
4334  * Description:
4335  *  This is one of the offline tests that checks the read and write
4336  *  access to the RldRam chip on the NIC.
4337  * Return value:
4338  *  0 on success.
4339  */
4340
4341 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
4342 {
4343         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4344         u64 val64;
4345         int cnt, iteration = 0, test_pass = 0;
4346
4347         val64 = readq(&bar0->adapter_control);
4348         val64 &= ~ADAPTER_ECC_EN;
4349         writeq(val64, &bar0->adapter_control);
4350
4351         val64 = readq(&bar0->mc_rldram_test_ctrl);
4352         val64 |= MC_RLDRAM_TEST_MODE;
4353         writeq(val64, &bar0->mc_rldram_test_ctrl);
4354
4355         val64 = readq(&bar0->mc_rldram_mrs);
4356         val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
4357         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
4358
4359         val64 |= MC_RLDRAM_MRS_ENABLE;
4360         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
4361
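        /*
         * Two passes over the RLDRAM test patterns; the second pass inverts
         * the upper 48 bits of each pattern before the write and read-back
         * checks.
         */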
4362         while (iteration < 2) {
4363                 val64 = 0x55555555aaaa0000ULL;
4364                 if (iteration == 1) {
4365                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
4366                 }
4367                 writeq(val64, &bar0->mc_rldram_test_d0);
4368
4369                 val64 = 0xaaaa5a5555550000ULL;
4370                 if (iteration == 1) {
4371                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
4372                 }
4373                 writeq(val64, &bar0->mc_rldram_test_d1);
4374
4375                 val64 = 0x55aaaaaaaa5a0000ULL;
4376                 if (iteration == 1) {
4377                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
4378                 }
4379                 writeq(val64, &bar0->mc_rldram_test_d2);
4380
4381                 val64 = (u64) (0x0000003fffff0000ULL);
4382                 writeq(val64, &bar0->mc_rldram_test_add);
4383
4384
4385                 val64 = MC_RLDRAM_TEST_MODE;
4386                 writeq(val64, &bar0->mc_rldram_test_ctrl);
4387
4388                 val64 |=
4389                     MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
4390                     MC_RLDRAM_TEST_GO;
4391                 writeq(val64, &bar0->mc_rldram_test_ctrl);
4392
4393                 for (cnt = 0; cnt < 5; cnt++) {
4394                         val64 = readq(&bar0->mc_rldram_test_ctrl);
4395                         if (val64 & MC_RLDRAM_TEST_DONE)
4396                                 break;
4397                         msleep(200);
4398                 }
4399
4400                 if (cnt == 5)
4401                         break;
4402
4403                 val64 = MC_RLDRAM_TEST_MODE;
4404                 writeq(val64, &bar0->mc_rldram_test_ctrl);
4405
4406                 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
4407                 writeq(val64, &bar0->mc_rldram_test_ctrl);
4408
4409                 for (cnt = 0; cnt < 5; cnt++) {
4410                         val64 = readq(&bar0->mc_rldram_test_ctrl);
4411                         if (val64 & MC_RLDRAM_TEST_DONE)
4412                                 break;
4413                         msleep(500);
4414                 }
4415
4416                 if (cnt == 5)
4417                         break;
4418
4419                 val64 = readq(&bar0->mc_rldram_test_ctrl);
4420                 if (val64 & MC_RLDRAM_TEST_PASS)
4421                         test_pass = 1;
4422
4423                 iteration++;
4424         }
4425
4426         if (!test_pass)
4427                 *data = 1;
4428         else
4429                 *data = 0;
4430
4431         return 0;
4432 }
4433
4434 /**
4435  *  s2io_ethtool_test - conducts 6 tests to determine the health of the card.
4436  *  @sp : private member of the device structure, which is a pointer to the
4437  *  s2io_nic structure.
4438  *  @ethtest : pointer to an ethtool command specific structure that will be
4439  *  returned to the user.
4440  *  @data : variable that returns the result of each of the tests
4441  * conducted by the driver.
4442  * Description:
4443  *  This function conducts 6 tests (4 offline and 2 online) to determine
4444  *  the health of the card.
4445  * Return value:
4446  *  void
4447  */
4448
4449 static void s2io_ethtool_test(struct net_device *dev,
4450                               struct ethtool_test *ethtest,
4451                               uint64_t * data)
4452 {
4453         nic_t *sp = dev->priv;
4454         int orig_state = netif_running(sp->dev);
4455
4456         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
4457                 /* Offline Tests. */
4458                 if (orig_state)
4459                         s2io_close(sp->dev);
4460
4461                 if (s2io_register_test(sp, &data[0]))
4462                         ethtest->flags |= ETH_TEST_FL_FAILED;
4463
4464                 s2io_reset(sp);
4465
4466                 if (s2io_rldram_test(sp, &data[3]))
4467                         ethtest->flags |= ETH_TEST_FL_FAILED;
4468
4469                 s2io_reset(sp);
4470
4471                 if (s2io_eeprom_test(sp, &data[1]))
4472                         ethtest->flags |= ETH_TEST_FL_FAILED;
4473
4474                 if (s2io_bist_test(sp, &data[4]))
4475                         ethtest->flags |= ETH_TEST_FL_FAILED;
4476
4477                 if (orig_state)
4478                         s2io_open(sp->dev);
4479
4480                 data[2] = 0;
4481         } else {
4482                 /* Online Tests. */
4483                 if (!orig_state) {
4484                         DBG_PRINT(ERR_DBG,
4485                                   "%s: is not up, cannot run test\n",
4486                                   dev->name);
4487                         data[0] = -1;
4488                         data[1] = -1;
4489                         data[2] = -1;
4490                         data[3] = -1;
4491                         data[4] = -1;
4492                 }
4493
4494                 if (s2io_link_test(sp, &data[2]))
4495                         ethtest->flags |= ETH_TEST_FL_FAILED;
4496
4497                 data[0] = 0;
4498                 data[1] = 0;
4499                 data[3] = 0;
4500                 data[4] = 0;
4501         }
4502 }
4503
4504 static void s2io_get_ethtool_stats(struct net_device *dev,
4505                                    struct ethtool_stats *estats,
4506                                    u64 * tmp_stats)
4507 {
4508         int i = 0;
4509         nic_t *sp = dev->priv;
4510         StatInfo_t *stat_info = sp->mac_control.stats_info;
4511
4512         s2io_updt_stats(sp);
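        /*
         * Several hardware counters are kept as a 32-bit value plus a
         * 32-bit overflow counter; stitch each pair back into a single
         * 64-bit statistic.
         */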
4513         tmp_stats[i++] =
4514                 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
4515                 le32_to_cpu(stat_info->tmac_frms);
4516         tmp_stats[i++] =
4517                 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
4518                 le32_to_cpu(stat_info->tmac_data_octets);
4519         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
4520         tmp_stats[i++] =
4521                 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
4522                 le32_to_cpu(stat_info->tmac_mcst_frms);
4523         tmp_stats[i++] =
4524                 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
4525                 le32_to_cpu(stat_info->tmac_bcst_frms);
4526         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
4527         tmp_stats[i++] =
4528                 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
4529                 le32_to_cpu(stat_info->tmac_any_err_frms);
4530         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
4531         tmp_stats[i++] =
4532                 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
4533                 le32_to_cpu(stat_info->tmac_vld_ip);
4534         tmp_stats[i++] =
4535                 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
4536                 le32_to_cpu(stat_info->tmac_drop_ip);
4537         tmp_stats[i++] =
4538                 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
4539                 le32_to_cpu(stat_info->tmac_icmp);
4540         tmp_stats[i++] =
4541                 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
4542                 le32_to_cpu(stat_info->tmac_rst_tcp);
4543         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
4544         tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
4545                 le32_to_cpu(stat_info->tmac_udp);
4546         tmp_stats[i++] =
4547                 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
4548                 le32_to_cpu(stat_info->rmac_vld_frms);
4549         tmp_stats[i++] =
4550                 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
4551                 le32_to_cpu(stat_info->rmac_data_octets);
4552         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
4553         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
4554         tmp_stats[i++] =
4555                 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
4556                 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4557         tmp_stats[i++] =
4558                 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
4559                 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
4560         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
4561         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
4562         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
4563         tmp_stats[i++] =
4564                 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
4565                 le32_to_cpu(stat_info->rmac_discarded_frms);
4566         tmp_stats[i++] =
4567                 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
4568                 le32_to_cpu(stat_info->rmac_usized_frms);
4569         tmp_stats[i++] =
4570                 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
4571                 le32_to_cpu(stat_info->rmac_osized_frms);
4572         tmp_stats[i++] =
4573                 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
4574                 le32_to_cpu(stat_info->rmac_frag_frms);
4575         tmp_stats[i++] =
4576                 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
4577                 le32_to_cpu(stat_info->rmac_jabber_frms);
4578         tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
4579                 le32_to_cpu(stat_info->rmac_ip);
4580         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
4581         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
4582         tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
4583                 le32_to_cpu(stat_info->rmac_drop_ip);
4584         tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
4585                 le32_to_cpu(stat_info->rmac_icmp);
4586         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
4587         tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
4588                 le32_to_cpu(stat_info->rmac_udp);
4589         tmp_stats[i++] =
4590                 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
4591                 le32_to_cpu(stat_info->rmac_err_drp_udp);
4592         tmp_stats[i++] =
4593                 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
4594                 le32_to_cpu(stat_info->rmac_pause_cnt);
4595         tmp_stats[i++] =
4596                 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
4597                 le32_to_cpu(stat_info->rmac_accepted_ip);
4598         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
4599         tmp_stats[i++] = 0;
4600         tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
4601         tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
4602 }
4603
4604 int s2io_ethtool_get_regs_len(struct net_device *dev)
4605 {
4606         return (XENA_REG_SPACE);
4607 }
4608
4609
4610 u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
4611 {
4612         nic_t *sp = dev->priv;
4613
4614         return (sp->rx_csum);
4615 }
4616 int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
4617 {
4618         nic_t *sp = dev->priv;
4619
4620         if (data)
4621                 sp->rx_csum = 1;
4622         else
4623                 sp->rx_csum = 0;
4624
4625         return 0;
4626 }
4627 int s2io_get_eeprom_len(struct net_device *dev)
4628 {
4629         return (XENA_EEPROM_SPACE);
4630 }
4631
4632 int s2io_ethtool_self_test_count(struct net_device *dev)
4633 {
4634         return (S2IO_TEST_LEN);
4635 }
4636 void s2io_ethtool_get_strings(struct net_device *dev,
4637                               u32 stringset, u8 * data)
4638 {
4639         switch (stringset) {
4640         case ETH_SS_TEST:
4641                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
4642                 break;
4643         case ETH_SS_STATS:
4644                 memcpy(data, &ethtool_stats_keys,
4645                        sizeof(ethtool_stats_keys));
4646         }
4647 }
4648 static int s2io_ethtool_get_stats_count(struct net_device *dev)
4649 {
4650         return (S2IO_STAT_LEN);
4651 }
4652
4653 int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4654 {
4655         if (data)
4656                 dev->features |= NETIF_F_IP_CSUM;
4657         else
4658                 dev->features &= ~NETIF_F_IP_CSUM;
4659
4660         return 0;
4661 }
4662
4663
4664 static struct ethtool_ops netdev_ethtool_ops = {
4665         .get_settings = s2io_ethtool_gset,
4666         .set_settings = s2io_ethtool_sset,
4667         .get_drvinfo = s2io_ethtool_gdrvinfo,
4668         .get_regs_len = s2io_ethtool_get_regs_len,
4669         .get_regs = s2io_ethtool_gregs,
4670         .get_link = ethtool_op_get_link,
4671         .get_eeprom_len = s2io_get_eeprom_len,
4672         .get_eeprom = s2io_ethtool_geeprom,
4673         .set_eeprom = s2io_ethtool_seeprom,
4674         .get_pauseparam = s2io_ethtool_getpause_data,
4675         .set_pauseparam = s2io_ethtool_setpause_data,
4676         .get_rx_csum = s2io_ethtool_get_rx_csum,
4677         .set_rx_csum = s2io_ethtool_set_rx_csum,
4678         .get_tx_csum = ethtool_op_get_tx_csum,
4679         .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4680         .get_sg = ethtool_op_get_sg,
4681         .set_sg = ethtool_op_set_sg,
4682 #ifdef NETIF_F_TSO
4683         .get_tso = ethtool_op_get_tso,
4684         .set_tso = ethtool_op_set_tso,
4685 #endif
4686         .self_test_count = s2io_ethtool_self_test_count,
4687         .self_test = s2io_ethtool_test,
4688         .get_strings = s2io_ethtool_get_strings,
4689         .phys_id = s2io_ethtool_idnic,
4690         .get_stats_count = s2io_ethtool_get_stats_count,
4691         .get_ethtool_stats = s2io_get_ethtool_stats
4692 };
4693
4694 /**
4695  *  s2io_ioctl - Entry point for the Ioctl
4696  *  @dev :  Device pointer.
4697  *  @ifr :  An IOCTL specific structure, that can contain a pointer to
4698  *  a proprietary structure used to pass information to the driver.
4699  *  @cmd :  This is used to distinguish between the different commands that
4700  *  can be passed to the IOCTL functions.
4701  *  Description:
4702  *  Currently no special functionality is supported in IOCTL, hence the
4703  *  function always returns -EOPNOTSUPP.
4704  */
4705
4706 int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4707 {
4708         return -EOPNOTSUPP;
4709 }
4710
4711 /**
4712  *  s2io_change_mtu - entry point to change MTU size for the device.
4713  *   @dev : device pointer.
4714  *   @new_mtu : the new MTU size for the device.
4715  *   Description: A driver entry point to change the MTU size for the device.
4716  *   If the device is running, it is restarted with the new MTU.
4717  *  Return value:
4718  *   0 on success and an appropriate (-)ve integer as defined in errno.h
4719  *   file on failure.
4720  */
4721
4722 int s2io_change_mtu(struct net_device *dev, int new_mtu)
4723 {
4724         nic_t *sp = dev->priv;
4725
4726         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4727                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
4728                           dev->name);
4729                 return -EPERM;
4730         }
4731
4732         dev->mtu = new_mtu;
4733         if (netif_running(dev)) {
4734                 s2io_card_down(sp);
4735                 netif_stop_queue(dev);
4736                 if (s2io_card_up(sp)) {
4737                         DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4738                                   __FUNCTION__);
4739                 }
4740                 if (netif_queue_stopped(dev))
4741                         netif_wake_queue(dev);
4742         } else { /* Device is down */
4743                 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4744                 u64 val64 = new_mtu;
4745
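                /* Program the new maximum payload length into the RMAC. */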
4746                 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4747         }
4748
4749         return 0;
4750 }
4751
4752 /**
4753  *  s2io_tasklet - Bottom half of the ISR.
4754  *  @dev_addr : address of the net_device structure, cast to an unsigned long.
4755  *  Description:
4756  *  This is the tasklet or the bottom half of the ISR. This is
4757  *  an extension of the ISR which is scheduled by the scheduler to be run
4758  *  when the load on the CPU is low. All low priority tasks of the ISR can
4759  *  be pushed into the tasklet. For now the tasklet is used only to
4760  *  replenish the Rx buffers in the Rx buffer descriptors.
4761  *  Return value:
4762  *  void.
4763  */
4764
4765 static void s2io_tasklet(unsigned long dev_addr)
4766 {
4767         struct net_device *dev = (struct net_device *) dev_addr;
4768         nic_t *sp = dev->priv;
4769         int i, ret;
4770         mac_info_t *mac_control;
4771         struct config_param *config;
4772
4773         mac_control = &sp->mac_control;
4774         config = &sp->config;
4775
4776         if (!TASKLET_IN_USE) {
4777                 for (i = 0; i < config->rx_ring_num; i++) {
4778                         ret = fill_rx_buffers(sp, i);
4779                         if (ret == -ENOMEM) {
4780                                 DBG_PRINT(ERR_DBG, "%s: Out of ",
4781                                           dev->name);
4782                                 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
4783                                 break;
4784                         } else if (ret == -EFILL) {
4785                                 DBG_PRINT(ERR_DBG,
4786                                           "%s: Rx Ring %d is full\n",
4787                                           dev->name, i);
4788                                 break;
4789                         }
4790                 }
4791                 clear_bit(0, (&sp->tasklet_status));
4792         }
4793 }
4794
4795 /**
4796  * s2io_set_link - Set the Link status
4797  * @data: long pointer to the device private structure
4798  * Description: Sets the link status for the adapter
4799  */
4800
4801 static void s2io_set_link(unsigned long data)
4802 {
4803         nic_t *nic = (nic_t *) data;
4804         struct net_device *dev = nic->dev;
4805         XENA_dev_config_t __iomem *bar0 = nic->bar0;
4806         register u64 val64;
4807         u16 subid;
4808
4809         if (test_and_set_bit(0, &(nic->link_state))) {
4810                 /* The card is being reset, no point doing anything */
4811                 return;
4812         }
4813
4814         subid = nic->pdev->subsystem_device;
4815         if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
4816                 /*
4817                  * Allow a small delay for the NIC's self-initiated
4818                  * cleanup to complete.
4819                  */
4820                 msleep(100);
4821         }
4822
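        /* Act on the link change only once the adapter is quiescent. */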
4823         val64 = readq(&bar0->adapter_status);
4824         if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
4825                 if (LINK_IS_UP(val64)) {
4826                         val64 = readq(&bar0->adapter_control);
4827                         val64 |= ADAPTER_CNTL_EN;
4828                         writeq(val64, &bar0->adapter_control);
4829                         if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4830                                                              subid)) {
4831                                 val64 = readq(&bar0->gpio_control);
4832                                 val64 |= GPIO_CTRL_GPIO_0;
4833                                 writeq(val64, &bar0->gpio_control);
4834                                 val64 = readq(&bar0->gpio_control);
4835                         } else {
4836                                 val64 |= ADAPTER_LED_ON;
4837                                 writeq(val64, &bar0->adapter_control);
4838                         }
4839                         if (s2io_link_fault_indication(nic) ==
4840                                                 MAC_RMAC_ERR_TIMER) {
4841                                 val64 = readq(&bar0->adapter_status);
4842                                 if (!LINK_IS_UP(val64)) {
4843                                         DBG_PRINT(ERR_DBG, "%s:", dev->name);
4844                                         DBG_PRINT(ERR_DBG, " Link down ");
4845                                         DBG_PRINT(ERR_DBG, "after ");
4846                                         DBG_PRINT(ERR_DBG, "enabling ");
4847                                         DBG_PRINT(ERR_DBG, "device \n");
4848                                 }
4849                         }
4850                         if (nic->device_enabled_once == FALSE) {
4851                                 nic->device_enabled_once = TRUE;
4852                         }
4853                         s2io_link(nic, LINK_UP);
4854                 } else {
4855                         if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4856                                                               subid)) {
4857                                 val64 = readq(&bar0->gpio_control);
4858                                 val64 &= ~GPIO_CTRL_GPIO_0;
4859                                 writeq(val64, &bar0->gpio_control);
4860                                 val64 = readq(&bar0->gpio_control);
4861                         }
4862                         s2io_link(nic, LINK_DOWN);
4863                 }
4864         } else {                /* NIC is not Quiescent. */
4865                 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4866                 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4867                 netif_stop_queue(dev);
4868         }
4869         clear_bit(0, &(nic->link_state));
4870 }
4871
4872 static void s2io_card_down(nic_t * sp)
4873 {
4874         int cnt = 0;
4875         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4876         unsigned long flags;
4877         register u64 val64 = 0;
4878
4879         del_timer_sync(&sp->alarm_timer);
4880         /* If s2io_set_link task is executing, wait till it completes. */
4881         while (test_and_set_bit(0, &(sp->link_state))) {
4882                 msleep(50);
4883         }
4884         atomic_set(&sp->card_state, CARD_DOWN);
4885
4886         /* disable Tx and Rx traffic on the NIC */
4887         stop_nic(sp);
4888
4889         /* Kill tasklet. */
4890         tasklet_kill(&sp->task);
4891
4892         /* Check if the device is Quiescent and then Reset the NIC */
4893         do {
4894                 val64 = readq(&bar0->adapter_status);
4895                 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
4896                         break;
4897                 }
4898
4899                 msleep(50);
4900                 cnt++;
4901                 if (cnt == 10) {
4902                         DBG_PRINT(ERR_DBG,
4903                                   "s2io_close: Device not Quiescent ");
4904                         DBG_PRINT(ERR_DBG, "adapter status reads 0x%llx\n",
4905                                   (unsigned long long) val64);
4906                         break;
4907                 }
4908         } while (1);
4909         s2io_reset(sp);
4910
4911         /* Waiting till all Interrupt handlers are complete */
4912         cnt = 0;
4913         do {
4914                 msleep(10);
4915                 if (!atomic_read(&sp->isr_cnt))
4916                         break;
4917                 cnt++;
4918         } while(cnt < 5);
4919
4920         spin_lock_irqsave(&sp->tx_lock, flags);
4921         /* Free all Tx buffers */
4922         free_tx_buffers(sp);
4923         spin_unlock_irqrestore(&sp->tx_lock, flags);
4924
4925         /* Free all Rx buffers */
4926         spin_lock_irqsave(&sp->rx_lock, flags);
4927         free_rx_buffers(sp);
4928         spin_unlock_irqrestore(&sp->rx_lock, flags);
4929
4930         clear_bit(0, &(sp->link_state));
4931 }
4932
4933 static int s2io_card_up(nic_t * sp)
4934 {
4935         int i, ret;
4936         mac_info_t *mac_control;
4937         struct config_param *config;
4938         struct net_device *dev = (struct net_device *) sp->dev;
4939
4940         /* Initialize the H/W I/O registers */
4941         if (init_nic(sp) != 0) {
4942                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4943                           dev->name);
4944                 return -ENODEV;
4945         }
4946
4947         /*
4948          * Initializing the Rx buffers. Buffers are allocated and filled
4949          * for each of the configured Rx rings.
4950          */
4951         mac_control = &sp->mac_control;
4952         config = &sp->config;
4953
4954         for (i = 0; i < config->rx_ring_num; i++) {
4955                 if ((ret = fill_rx_buffers(sp, i))) {
4956                         DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
4957                                   dev->name);
4958                         s2io_reset(sp);
4959                         free_rx_buffers(sp);
4960                         return -ENOMEM;
4961                 }
4962                 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4963                           atomic_read(&sp->rx_bufs_left[i]));
4964         }
4965
4966         /* Setting its receive mode */
4967         s2io_set_multicast(dev);
4968
4969         /* Enable tasklet for the device */
4970         tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4971
4972         /* Enable Rx Traffic and interrupts on the NIC */
4973         if (start_nic(sp)) {
4974                 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4975                 tasklet_kill(&sp->task);
4976                 s2io_reset(sp);
4977                 free_irq(dev->irq, dev);
4978                 free_rx_buffers(sp);
4979                 return -ENODEV;
4980         }
4981
4982         S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
4983
4984         atomic_set(&sp->card_state, CARD_UP);
4985         return 0;
4986 }
4987
4988 /**
4989  * s2io_restart_nic - Resets the NIC.
4990  * @data : long pointer to the device private structure
4991  * Description:
4992  * This function is scheduled to be run by the s2io_tx_watchdog
4993  * function after 0.5 secs to reset the NIC. The idea is to reduce
4994  * the run time of the watch dog routine which is run holding a
4995  * spin lock.
4996  */
4997
4998 static void s2io_restart_nic(unsigned long data)
4999 {
5000         struct net_device *dev = (struct net_device *) data;
5001         nic_t *sp = dev->priv;
5002
5003         s2io_card_down(sp);
5004         if (s2io_card_up(sp)) {
5005                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
5006                           dev->name);
5007         }
5008         netif_wake_queue(dev);
5009         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
5010                   dev->name);
5011
5012 }
5013
5014 /**
5015  *  s2io_tx_watchdog - Watchdog for transmit side.
5016  *  @dev : Pointer to net device structure
5017  *  Description:
5018  *  This function is triggered if the Tx Queue is stopped
5019  *  for a pre-defined amount of time when the Interface is still up.
5020  *  If the Interface is jammed in such a situation, the hardware is
5021  *  reset and restarted (via the s2io_restart_nic work function) to
5022  *  overcome any problem that might have been caused in the hardware.
5023  *  Return value:
5024  *  void
5025  */
5026
5027 static void s2io_tx_watchdog(struct net_device *dev)
5028 {
5029         nic_t *sp = dev->priv;
5030
5031         if (netif_carrier_ok(dev)) {
5032                 schedule_work(&sp->rst_timer_task);
5033         }
5034 }
5035
5036 /**
5037  *   rx_osm_handler - To perform some OS related operations on SKB.
5038  *   @sp: private member of the device structure, pointer to the s2io_nic structure.
5039  *   @skb : the socket buffer pointer.
5040  *   @len : length of the packet
5041  *   @cksum : FCS checksum of the frame.
5042  *   @ring_no : the ring from which this RxD was extracted.
5043  *   Description:
5044  *   This function is called by the Rx interrupt service routine to perform
5045  *   some OS related operations on the SKB before passing it to the upper
5046  *   layers. It mainly checks if the checksum is OK, if so adds it to the
5047  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
5048  *   to the upper layer. If the checksum is wrong, it increments the Rx
5049  *   packet error count, frees the SKB and returns error.
5050  *   Return value:
5051  *   SUCCESS on success and -1 on failure.
5052  */
5053 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
5054 {
5055         nic_t *sp = ring_data->nic;
5056         struct net_device *dev = (struct net_device *) sp->dev;
5057         struct sk_buff *skb = (struct sk_buff *)
5058                 ((unsigned long) rxdp->Host_Control);
5059         int ring_no = ring_data->ring_no;
5060         u16 l3_csum, l4_csum;
5061 #ifdef CONFIG_2BUFF_MODE
5062         int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
5063         int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
5064         int get_block = ring_data->rx_curr_get_info.block_index;
5065         int get_off = ring_data->rx_curr_get_info.offset;
5066         buffAdd_t *ba = &ring_data->ba[get_block][get_off];
5067         unsigned char *buff;
5068 #else
5069         u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);
5070 #endif
5071         skb->dev = dev;
5072         if (rxdp->Control_1 & RXD_T_CODE) {
5073                 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
5074                 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
5075                           dev->name, err);
5076                 dev_kfree_skb(skb);
5077                 sp->stats.rx_crc_errors++;
5078                 atomic_dec(&sp->rx_bufs_left[ring_no]);
5079                 rxdp->Host_Control = 0;
5080                 return 0;
5081         }
5082
5083         /* Updating statistics */
5084         rxdp->Host_Control = 0;
5085         sp->rx_pkt_count++;
5086         sp->stats.rx_packets++;
5087 #ifndef CONFIG_2BUFF_MODE
5088         sp->stats.rx_bytes += len;
5089 #else
5090         sp->stats.rx_bytes += buf0_len + buf2_len;
5091 #endif
5092
5093 #ifndef CONFIG_2BUFF_MODE
5094         skb_put(skb, len);
5095 #else
5096         buff = skb_push(skb, buf0_len);
5097         memcpy(buff, ba->ba_0, buf0_len);
5098         skb_put(skb, buf2_len);
5099 #endif
5100
5101         if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
5102             (sp->rx_csum)) {
5103                 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
5104                 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
5105                 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
5106                         /*
5107                          * NIC verifies if the Checksum of the received
5108                          * frame is Ok or not and accordingly returns
5109                          * a flag in the RxD.
5110                          */
5111                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5112                 } else {
5113                         /*
5114                          * Packet with erroneous checksum, let the
5115                          * upper layers deal with it.
5116                          */
5117                         skb->ip_summed = CHECKSUM_NONE;
5118                 }
5119         } else {
5120                 skb->ip_summed = CHECKSUM_NONE;
5121         }
5122
5123         skb->protocol = eth_type_trans(skb, dev);
5124 #ifdef CONFIG_S2IO_NAPI
5125         if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5126                 /* Queueing the vlan frame to the upper layer */
5127                 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
5128                         RXD_GET_VLAN_TAG(rxdp->Control_2));
5129         } else {
5130                 netif_receive_skb(skb);
5131         }
5132 #else
5133         if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
5134                 /* Queueing the vlan frame to the upper layer */
5135                 vlan_hwaccel_rx(skb, sp->vlgrp,
5136                         RXD_GET_VLAN_TAG(rxdp->Control_2));
5137         } else {
5138                 netif_rx(skb);
5139         }
5140 #endif
5141         dev->last_rx = jiffies;
5142         atomic_dec(&sp->rx_bufs_left[ring_no]);
5143         return SUCCESS;
5144 }
5145
5146 /**
5147  *  s2io_link - stops/starts the Tx queue.
5148  *  @sp : private member of the device structure, which is a pointer to the
5149  *  s2io_nic structure.
5150  *  @link : indicates whether link is UP/DOWN.
5151  *  Description:
5152  *  This function stops/starts the Tx queue depending on whether the link
5153  *  status of the NIC is down or up. This is called by the Alarm
5154  *  interrupt handler whenever a link change interrupt comes up.
5155  *  Return value:
5156  *  void.
5157  */
5158
5159 void s2io_link(nic_t * sp, int link)
5160 {
5161         struct net_device *dev = (struct net_device *) sp->dev;
5162
5163         if (link != sp->last_link_state) {
5164                 if (link == LINK_DOWN) {
5165                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
5166                         netif_carrier_off(dev);
5167                 } else {
5168                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
5169                         netif_carrier_on(dev);
5170                 }
5171         }
5172         sp->last_link_state = link;
5173 }
5174
5175 /**
5176  *  get_xena_rev_id - to identify the revision ID of the Xena.
5177  *  @pdev : PCI Dev structure
5178  *  Description:
5179  *  Function to identify the revision ID of the Xena.
5180  *  Return value:
5181  *  returns the revision ID of the device.
5182  */
5183
5184 int get_xena_rev_id(struct pci_dev *pdev)
5185 {
5186         u8 id = 0;
5187         int ret;
5188         ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
5189         return id;
5190 }
5191
5192 /**
5193  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
5194  *  @sp : private member of the device structure, which is a pointer to the
5195  *  s2io_nic structure.
5196  *  Description:
5197  *  This function initializes a few of the PCI and PCI-X configuration registers
5198  *  with recommended values.
5199  *  Return value:
5200  *  void
5201  */
5202
5203 static void s2io_init_pci(nic_t * sp)
5204 {
5205         u16 pci_cmd = 0, pcix_cmd = 0;
5206
5207         /* Enable Data Parity Error Recovery in PCI-X command register. */
5208         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5209                              &(pcix_cmd));
5210         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5211                               (pcix_cmd | 1));
5212         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5213                              &(pcix_cmd));
5214
5215         /* Set the PErr Response bit in PCI command register. */
5216         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5217         pci_write_config_word(sp->pdev, PCI_COMMAND,
5218                               (pci_cmd | PCI_COMMAND_PARITY));
5219         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5220
5221         /* Forcibly disabling relaxed ordering capability of the card. */
5222         pcix_cmd &= 0xfffd;
5223         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5224                               pcix_cmd);
5225         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5226                              &(pcix_cmd));
5227 }
5228
5229 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
5230 MODULE_LICENSE("GPL");
5231 module_param(tx_fifo_num, int, 0);
5232 module_param(rx_ring_num, int, 0);
5233 module_param_array(tx_fifo_len, uint, NULL, 0);
5234 module_param_array(rx_ring_sz, uint, NULL, 0);
5235 module_param_array(rts_frm_len, uint, NULL, 0);
5236 module_param(use_continuous_tx_intrs, int, 1);
5237 module_param(rmac_pause_time, int, 0);
5238 module_param(mc_pause_threshold_q0q3, int, 0);
5239 module_param(mc_pause_threshold_q4q7, int, 0);
5240 module_param(shared_splits, int, 0);
5241 module_param(tmac_util_period, int, 0);
5242 module_param(rmac_util_period, int, 0);
5243 module_param(bimodal, bool, 0);
5244 #ifndef CONFIG_S2IO_NAPI
5245 module_param(indicate_max_pkts, int, 0);
5246 #endif
5247 module_param(rxsync_frequency, int, 0);
5248
5249 /**
5250  *  s2io_init_nic - Initialization of the adapter .
5251  *  @pdev : structure containing the PCI related information of the device.
5252  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
5253  *  Description:
5254  *  The function initializes an adapter identified by the pci_dev structure.
5255  *  All OS related initialization including memory and device structure and
5256  *  initialization of the device private variable is done. Also the swapper
5257  *  control register is initialized to enable read and write into the I/O
5258  *  registers of the device.
5259  *  Return value:
5260  *  returns 0 on success and negative on failure.
5261  */
5262
5263 static int __devinit
5264 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5265 {
5266         nic_t *sp;
5267         struct net_device *dev;
5268         int i, j, ret;
5269         int dma_flag = FALSE;
5270         u32 mac_up, mac_down;
5271         u64 val64 = 0, tmp64 = 0;
5272         XENA_dev_config_t __iomem *bar0 = NULL;
5273         u16 subid;
5274         mac_info_t *mac_control;
5275         struct config_param *config;
5276         int mode;
5277
5278 #ifdef CONFIG_S2IO_NAPI
5279         DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
5280 #endif
5281
5282         if ((ret = pci_enable_device(pdev))) {
5283                 DBG_PRINT(ERR_DBG,
5284                           "s2io_init_nic: pci_enable_device failed\n");
5285                 return ret;
5286         }
5287
5288         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
5289                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
5290                 dma_flag = TRUE;
5291                 if (pci_set_consistent_dma_mask
5292                     (pdev, DMA_64BIT_MASK)) {
5293                         DBG_PRINT(ERR_DBG,
5294                                   "Unable to obtain 64bit DMA for "
5295                                   "consistent allocations\n");
5296                         pci_disable_device(pdev);
5297                         return -ENOMEM;
5298                 }
5299         } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
5300                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
5301         } else {
5302                 pci_disable_device(pdev);
5303                 return -ENOMEM;
5304         }
5305
5306         if (pci_request_regions(pdev, s2io_driver_name)) {
5307                 DBG_PRINT(ERR_DBG, "Request Regions failed\n");
5308                 pci_disable_device(pdev);
5309                 return -ENODEV;
5310         }
5311
5312         dev = alloc_etherdev(sizeof(nic_t));
5313         if (dev == NULL) {
5314                 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
5315                 pci_disable_device(pdev);
5316                 pci_release_regions(pdev);
5317                 return -ENODEV;
5318         }
5319
5320         pci_set_master(pdev);
5321         pci_set_drvdata(pdev, dev);
5322         SET_MODULE_OWNER(dev);
5323         SET_NETDEV_DEV(dev, &pdev->dev);
5324
5325         /*  Private member variable initialized to s2io NIC structure */
5326         sp = dev->priv;
5327         memset(sp, 0, sizeof(nic_t));
5328         sp->dev = dev;
5329         sp->pdev = pdev;
5330         sp->high_dma_flag = dma_flag;
5331         sp->device_enabled_once = FALSE;
5332
5333         if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
5334                 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
5335                 sp->device_type = XFRAME_II_DEVICE;
5336         else
5337                 sp->device_type = XFRAME_I_DEVICE;
5338
5339         /* Initialize some PCI/PCI-X fields of the NIC. */
5340         s2io_init_pci(sp);
5341
5342         /*
5343          * Setting the device configuration parameters.
5344          * Most of these parameters can be specified by the user during
5345          * module insertion as they are module loadable parameters. If
5346          * these parameters are not specified at load time, they
5347          * are initialized with default values.
5348          */
5349         mac_control = &sp->mac_control;
5350         config = &sp->config;
5351
5352         /* Tx side parameters. */
5353         if (tx_fifo_len[0] == 0)
5354                 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
5355         config->tx_fifo_num = tx_fifo_num;
5356         for (i = 0; i < MAX_TX_FIFOS; i++) {
5357                 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
5358                 config->tx_cfg[i].fifo_priority = i;
5359         }
5360
5361         /* mapping the QoS priority to the configured fifos */
5362         for (i = 0; i < MAX_TX_FIFOS; i++)
5363                 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
5364
5365         config->tx_intr_type = TXD_INT_TYPE_UTILZ;
5366         for (i = 0; i < config->tx_fifo_num; i++) {
5367                 config->tx_cfg[i].f_no_snoop =
5368                     (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
5369                 if (config->tx_cfg[i].fifo_len < 65) {
5370                         config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
5371                         break;
5372                 }
5373         }
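        /* One TxD for the linear part of an skb plus one per page fragment. */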
5374         config->max_txds = MAX_SKB_FRAGS + 1;
5375
5376         /* Rx side parameters. */
5377         if (rx_ring_sz[0] == 0)
5378                 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
5379         config->rx_ring_num = rx_ring_num;
5380         for (i = 0; i < MAX_RX_RINGS; i++) {
5381                 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
5382                     (MAX_RXDS_PER_BLOCK + 1);
5383                 config->rx_cfg[i].ring_priority = i;
5384         }
5385
5386         for (i = 0; i < rx_ring_num; i++) {
5387                 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
5388                 config->rx_cfg[i].f_no_snoop =
5389                     (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
5390         }
5391
5392         /*  Setting Mac Control parameters */
5393         mac_control->rmac_pause_time = rmac_pause_time;
5394         mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
5395         mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
5396
5397
5398         /* Initialize Ring buffer parameters. */
5399         for (i = 0; i < config->rx_ring_num; i++)
5400                 atomic_set(&sp->rx_bufs_left[i], 0);
5401
5402         /* Initialize the number of ISRs currently running */
5403         atomic_set(&sp->isr_cnt, 0);
5404
5405         /*  initialize the shared memory used by the NIC and the host */
5406         if (init_shared_mem(sp)) {
5407                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
5408                           __FUNCTION__);
5409                 ret = -ENOMEM;
5410                 goto mem_alloc_failed;
5411         }
5412
5413         sp->bar0 = ioremap(pci_resource_start(pdev, 0),
5414                                      pci_resource_len(pdev, 0));
5415         if (!sp->bar0) {
5416                 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
5417                           dev->name);
5418                 ret = -ENOMEM;
5419                 goto bar0_remap_failed;
5420         }
5421
5422         sp->bar1 = ioremap(pci_resource_start(pdev, 2),
5423                                      pci_resource_len(pdev, 2));
5424         if (!sp->bar1) {
5425                 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
5426                           dev->name);
5427                 ret = -ENOMEM;
5428                 goto bar1_remap_failed;
5429         }
5430
5431         dev->irq = pdev->irq;
5432         dev->base_addr = (unsigned long) sp->bar0;
5433
5434         /* Initialize the BAR1 address of each Tx FIFO's pointer registers. */
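        /* Each FIFO's pointer registers are spaced 0x20000 bytes (128 KB) apart in BAR1. */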
5435         for (j = 0; j < MAX_TX_FIFOS; j++) {
5436                 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
5437                     (sp->bar1 + (j * 0x00020000));
5438         }
5439
5440         /*  Driver entry points */
5441         dev->open = &s2io_open;
5442         dev->stop = &s2io_close;
5443         dev->hard_start_xmit = &s2io_xmit;
5444         dev->get_stats = &s2io_get_stats;
5445         dev->set_multicast_list = &s2io_set_multicast;
5446         dev->do_ioctl = &s2io_ioctl;
5447         dev->change_mtu = &s2io_change_mtu;
5448         SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
5449         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5450         dev->vlan_rx_register = s2io_vlan_rx_register;
5451         dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
5452
5453         /*
5454          * will use eth_mac_addr() for dev->set_mac_address;
5455          * mac address will be set every time dev->open() is called
5456          */
5457 #if defined(CONFIG_S2IO_NAPI)
5458         dev->poll = s2io_poll;
5459         dev->weight = 32;
5460 #endif
5461
5462         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
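        /* Advertise highmem DMA only when the 64-bit DMA mask was accepted above. */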
5463         if (sp->high_dma_flag == TRUE)
5464                 dev->features |= NETIF_F_HIGHDMA;
5465 #ifdef NETIF_F_TSO
5466         dev->features |= NETIF_F_TSO;
5467 #endif
5468
5469         dev->tx_timeout = &s2io_tx_watchdog;
5470         dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
5471         INIT_WORK(&sp->rst_timer_task,
5472                   (void (*)(void *)) s2io_restart_nic, dev);
5473         INIT_WORK(&sp->set_link_task,
5474                   (void (*)(void *)) s2io_set_link, sp);
5475
5476         pci_save_state(sp->pdev);
5477
5478         /* Setting swapper control on the NIC, for proper reset operation */
5479         if (s2io_set_swapper(sp)) {
5480                 DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
5481                           dev->name);
5482                 ret = -EAGAIN;
5483                 goto set_swap_failed;
5484         }
5485
5486         /* Verify whether the Herc works in the slot it's placed into */
5487         if (sp->device_type & XFRAME_II_DEVICE) {
5488                 mode = s2io_verify_pci_mode(sp);
5489                 if (mode < 0) {
5490                         DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
5491                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
5492                         ret = -EBADSLT;
5493                         goto set_swap_failed;
5494                 }
5495         }
5496
5497         /* Not needed for Herc */
5498         if (sp->device_type & XFRAME_I_DEVICE) {
5499                 /*
5500                  * Fix for all "FFs" MAC address problems observed on
5501                  * Alpha platforms
5502                  */
5503                 fix_mac_address(sp);
5504                 s2io_reset(sp);
5505         }
5506
5507         /*
5508          * MAC address initialization.
5509          * For now only one mac address will be read and used.
5510          */
5511         bar0 = sp->bar0;
5512         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5513             RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
5514         writeq(val64, &bar0->rmac_addr_cmd_mem);
5515         wait_for_cmd_complete(sp);
5516
5517         tmp64 = readq(&bar0->rmac_addr_data0_mem);
5518         mac_down = (u32) tmp64;
5519         mac_up = (u32) (tmp64 >> 32);
5520
5521         memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
5522
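        /*
         * The 6-byte MAC address occupies the most significant 48 bits of
         * rmac_addr_data0_mem: bytes 0-3 sit in the upper 32-bit word (MSB
         * first) and bytes 4-5 in the top 16 bits of the lower word.
         */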
5523         sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
5524         sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
5525         sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
5526         sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
5527         sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
5528         sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
5529
5530         /*  Set the factory defined MAC address initially   */
5531         dev->addr_len = ETH_ALEN;
5532         memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
5533
5534         /*
5535          * Initialize the tasklet status and link state flags
5536          * and the card state parameter
5537          */
5538         atomic_set(&(sp->card_state), 0);
5539         sp->tasklet_status = 0;
5540         sp->link_state = 0;
5541
5542         /* Initialize spinlocks */
5543         spin_lock_init(&sp->tx_lock);
5544 #ifndef CONFIG_S2IO_NAPI
5545         spin_lock_init(&sp->put_lock);
5546 #endif
5547         spin_lock_init(&sp->rx_lock);
5548
5549         /*
5550          * SXE-002: Configure link and activity LED to init state
5551          * on driver load.
5552          */
5553         subid = sp->pdev->subsystem_device;
5554         if ((subid & 0xFF) >= 0x07) {
5555                 val64 = readq(&bar0->gpio_control);
5556                 val64 |= 0x0000800000000000ULL;
5557                 writeq(val64, &bar0->gpio_control);
5558                 val64 = 0x0411040400000000ULL;
5559                 writeq(val64, (void __iomem *) bar0 + 0x2700);
5560                 val64 = readq(&bar0->gpio_control);
5561         }
5562
5563         sp->rx_csum = 1;        /* Rx chksum verify enabled by default */
5564
5565         if (register_netdev(dev)) {
5566                 DBG_PRINT(ERR_DBG, "Device registration failed\n");
5567                 ret = -ENODEV;
5568                 goto register_failed;
5569         }
5570
5571         if (sp->device_type & XFRAME_II_DEVICE) {
5572                 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
5573                           dev->name);
5574                 DBG_PRINT(ERR_DBG, "(rev %d), %s",
5575                                 get_xena_rev_id(sp->pdev),
5576                                 s2io_driver_version);
5577 #ifdef CONFIG_2BUFF_MODE
5578                 DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
5579 #endif
5580
5581                 DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
5582                 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5583                           sp->def_mac_addr[0].mac_addr[0],
5584                           sp->def_mac_addr[0].mac_addr[1],
5585                           sp->def_mac_addr[0].mac_addr[2],
5586                           sp->def_mac_addr[0].mac_addr[3],
5587                           sp->def_mac_addr[0].mac_addr[4],
5588                           sp->def_mac_addr[0].mac_addr[5]);
5589                 mode = s2io_print_pci_mode(sp);
5590                 if (mode < 0) {
5591                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode ");
5592                         ret = -EBADSLT;
5593                         goto set_swap_failed;
5594                 }
5595         } else {
5596                 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
5597                           dev->name);
5598                 DBG_PRINT(ERR_DBG, "(rev %d), %s",
5599                                         get_xena_rev_id(sp->pdev),
5600                                         s2io_driver_version);
5601 #ifdef CONFIG_2BUFF_MODE
5602                 DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
5603 #endif
5604                 DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
5605                 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5606                           sp->def_mac_addr[0].mac_addr[0],
5607                           sp->def_mac_addr[0].mac_addr[1],
5608                           sp->def_mac_addr[0].mac_addr[2],
5609                           sp->def_mac_addr[0].mac_addr[3],
5610                           sp->def_mac_addr[0].mac_addr[4],
5611                           sp->def_mac_addr[0].mac_addr[5]);
5612         }
5613
5614         /* Initialize device name */
5615         strcpy(sp->name, dev->name);
5616         if (sp->device_type & XFRAME_II_DEVICE)
5617                 strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
5618         else
5619                 strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
5620
5621         /* Initialize bimodal Interrupts */
5622         sp->config.bimodal = bimodal;
5623         if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
5624                 sp->config.bimodal = 0;
5625                 DBG_PRINT(ERR_DBG, "%s: Bimodal intr not supported by Xframe I\n",
5626                         dev->name);
5627         }
5628
5629         /*
5630          * Make the link state off at this point. When the link change
5631          * interrupt arrives, the state will be updated to reflect the
5632          * actual link status.
5633          */
5634         netif_carrier_off(dev);
5635
5636         return 0;
5637
5638       register_failed:
5639       set_swap_failed:
5640         iounmap(sp->bar1);
5641       bar1_remap_failed:
5642         iounmap(sp->bar0);
5643       bar0_remap_failed:
5644       mem_alloc_failed:
5645         free_shared_mem(sp);
5646         pci_disable_device(pdev);
5647         pci_release_regions(pdev);
5648         pci_set_drvdata(pdev, NULL);
5649         free_netdev(dev);
5650
5651         return ret;
5652 }
5653
5654 /**
5655  * s2io_rem_nic - Free the PCI device
5656  * @pdev: structure containing the PCI related information of the device.
5657  * Description: This function is called by the PCI subsystem to release a
5658  * PCI device and free up all resources held by the device. This could
5659  * be in response to a Hot plug event or when the driver is to be removed
5660  * from memory.
5661  */
5662
5663 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
5664 {
5665         struct net_device *dev =
5666             (struct net_device *) pci_get_drvdata(pdev);
5667         nic_t *sp;
5668
5669         if (dev == NULL) {
5670                 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
5671                 return;
5672         }
5673
5674         sp = dev->priv;
5675         unregister_netdev(dev);
5676
5677         free_shared_mem(sp);
5678         iounmap(sp->bar0);
5679         iounmap(sp->bar1);
5680         pci_disable_device(pdev);
5681         pci_release_regions(pdev);
5682         pci_set_drvdata(pdev, NULL);
5683         free_netdev(dev);
5684 }
5685
5686 /**
5687  * s2io_starter - Entry point for the driver
5688  * Description: This function is the entry point for the driver. It registers
5689  * the driver with the PCI subsystem, which then probes all supported adapters.
5690  */
5691
5692 int __init s2io_starter(void)
5693 {
5694         return pci_module_init(&s2io_driver);
5695 }
5696
5697 /**
5698  * s2io_closer - Cleanup routine for the driver
5699  * Description: This function is the cleanup routine for the driver. It unregisters the driver.
5700  */
5701
5702 void s2io_closer(void)
5703 {
5704         pci_unregister_driver(&s2io_driver);
5705         DBG_PRINT(INIT_DBG, "cleanup done\n");
5706 }
5707
5708 module_init(s2io_starter);
5709 module_exit(s2io_closer);