/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-config.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
 *                Virtualized Server Adapter.
 * Copyright(c) 2002-2009 Neterion Inc.
 ******************************************************************************/
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>

#include "vxge-traffic.h"
#include "vxge-config.h"

/*
 * __vxge_hw_channel_allocate - Allocate memory for channel
 * This function allocates required memory for the channel and various arrays
 * in the channel
 */
struct __vxge_hw_channel *
__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
                           enum __vxge_hw_channel_type type,
                           u32 length, u32 per_dtr_space, void *userdata)
{
        struct __vxge_hw_channel *channel;
        struct __vxge_hw_device *hldev;
        int size = 0;
        u32 vp_id;

        hldev = vph->vpath->hldev;
        vp_id = vph->vpath->vp_id;

        switch (type) {
        case VXGE_HW_CHANNEL_TYPE_FIFO:
                size = sizeof(struct __vxge_hw_fifo);
                break;
        case VXGE_HW_CHANNEL_TYPE_RING:
                size = sizeof(struct __vxge_hw_ring);
                break;
        default:
                break;
        }

        channel = kzalloc(size, GFP_KERNEL);
        if (channel == NULL)
                goto exit0;
        INIT_LIST_HEAD(&channel->item);

        channel->common_reg = hldev->common_reg;
        channel->first_vp_id = hldev->first_vp_id;
        channel->type = type;
        channel->devh = hldev;
        channel->vph = vph;
        channel->userdata = userdata;
        channel->per_dtr_space = per_dtr_space;
        channel->length = length;
        channel->vp_id = vp_id;

        channel->work_arr = kzalloc(sizeof(void *) * length, GFP_KERNEL);
        if (channel->work_arr == NULL)
                goto exit1;

        channel->free_arr = kzalloc(sizeof(void *) * length, GFP_KERNEL);
        if (channel->free_arr == NULL)
                goto exit1;
        channel->free_ptr = length;

        channel->reserve_arr = kzalloc(sizeof(void *) * length, GFP_KERNEL);
        if (channel->reserve_arr == NULL)
                goto exit1;
        channel->reserve_ptr = length;
        channel->reserve_top = 0;

        channel->orig_arr = kzalloc(sizeof(void *) * length, GFP_KERNEL);
        if (channel->orig_arr == NULL)
                goto exit1;

        return channel;
exit1:
        __vxge_hw_channel_free(channel);

exit0:
        return NULL;
}
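
/*
 * Illustrative only: a caller holding an open vpath handle could allocate
 * a ring channel for 256 descriptors as below ("vph" and "priv" are
 * hypothetical names, not defined in this file):
 *
 *	channel = __vxge_hw_channel_allocate(vph, VXGE_HW_CHANNEL_TYPE_RING,
 *					     256, 0, priv);
 */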

/*
 * __vxge_hw_channel_free - Free memory allocated for channel
 * This function deallocates memory from the channel and various arrays
 * in the channel
 */
void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
{
        kfree(channel->work_arr);
        kfree(channel->free_arr);
        kfree(channel->reserve_arr);
        kfree(channel->orig_arr);
        kfree(channel);
}

/*
 * __vxge_hw_channel_initialize - Initialize a channel
 * This function initializes a channel by properly setting the
 * various references
 */
enum vxge_hw_status
__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
{
        u32 i;
        struct __vxge_hw_virtualpath *vpath;

        vpath = channel->vph->vpath;

        if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
                for (i = 0; i < channel->length; i++)
                        channel->orig_arr[i] = channel->reserve_arr[i];
        }

        switch (channel->type) {
        case VXGE_HW_CHANNEL_TYPE_FIFO:
                vpath->fifoh = (struct __vxge_hw_fifo *)channel;
                channel->stats = &((struct __vxge_hw_fifo *)
                                channel)->stats->common_stats;
                break;
        case VXGE_HW_CHANNEL_TYPE_RING:
                vpath->ringh = (struct __vxge_hw_ring *)channel;
                channel->stats = &((struct __vxge_hw_ring *)
                                channel)->stats->common_stats;
                break;
        default:
                break;
        }

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_channel_reset - Resets a channel
 * This function resets a channel by properly setting the various references
 */
enum vxge_hw_status
__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
{
        u32 i;

        for (i = 0; i < channel->length; i++) {
                if (channel->reserve_arr != NULL)
                        channel->reserve_arr[i] = channel->orig_arr[i];
                if (channel->free_arr != NULL)
                        channel->free_arr[i] = NULL;
                if (channel->work_arr != NULL)
                        channel->work_arr[i] = NULL;
        }
        channel->free_ptr = channel->length;
        channel->reserve_ptr = channel->length;
        channel->reserve_top = 0;
        channel->post_index = 0;
        channel->compl_index = 0;

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_pci_e_init
 * Initialize certain PCI/PCI-X configuration registers
 * with recommended values. Save config space for future hw resets.
 */
void
__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
{
        u16 cmd = 0;

        /* Set the PErr Response and SERR bits in the PCI command register. */
        pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
        cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
        pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);

        pci_save_state(hldev->pdev);
}

/*
 * __vxge_hw_device_register_poll
 * Polls a register for up to the specified amount of time,
 * returning success as soon as the bits under the mask read as cleared.
 */
enum vxge_hw_status
__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
        u64 val64;
        u32 i = 0;
        enum vxge_hw_status ret = VXGE_HW_FAIL;

        udelay(10);

        do {
                val64 = readq(reg);
                if (!(val64 & mask))
                        return VXGE_HW_OK;
                udelay(100);
        } while (++i <= 9);

        i = 0;
        do {
                val64 = readq(reg);
                if (!(val64 & mask))
                        return VXGE_HW_OK;
                mdelay(1);
        } while (++i <= max_millis);

        return ret;
}

/*
 * __vxge_hw_device_vpath_reset_in_prog_check - Check if a vpath reset
 * is in progress
 * This routine polls until the vpath reset-in-progress register reads zero
 */
enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
{
        enum vxge_hw_status status;

        status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
                        VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
                        VXGE_HW_DEF_DEVICE_POLL_MILLIS);
        return status;
}

/*
 * __vxge_hw_device_toc_get
 * This routine sets the swapper and reads the toc pointer and returns the
 * memory mapped address of the toc
 */
struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0)
{
        u64 val64;
        struct vxge_hw_toc_reg __iomem *toc = NULL;
        enum vxge_hw_status status;

        struct vxge_hw_legacy_reg __iomem *legacy_reg =
                (struct vxge_hw_legacy_reg __iomem *)bar0;

        status = __vxge_hw_legacy_swapper_set(legacy_reg);
        if (status != VXGE_HW_OK)
                goto exit;

        val64 = readq(&legacy_reg->toc_first_pointer);
        toc = (struct vxge_hw_toc_reg __iomem *)(bar0 + val64);
exit:
        return toc;
}

/*
 * __vxge_hw_device_reg_addr_get
 * This routine sets the swapper and reads the toc pointer and initializes the
 * register location pointers in the device object. It waits until the ric is
 * completed initializing registers.
 */
enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
{
        u64 val64;
        u32 i;
        enum vxge_hw_status status = VXGE_HW_OK;

        hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;

        hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
        if (hldev->toc_reg == NULL) {
                status = VXGE_HW_FAIL;
                goto exit;
        }

        val64 = readq(&hldev->toc_reg->toc_common_pointer);
        hldev->common_reg =
                (struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);

        val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
        hldev->mrpcim_reg =
                (struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);

        for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
                val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
                hldev->srpcim_reg[i] =
                        (struct vxge_hw_srpcim_reg __iomem *)
                                (hldev->bar0 + val64);
        }

        for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
                val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
                hldev->vpmgmt_reg[i] =
                        (struct vxge_hw_vpmgmt_reg __iomem *)
                                (hldev->bar0 + val64);
        }

        for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
                val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
                hldev->vpath_reg[i] =
                        (struct vxge_hw_vpath_reg __iomem *)
                                (hldev->bar0 + val64);
        }

        val64 = readq(&hldev->toc_reg->toc_kdfc);

        switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
        case 0:
                hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
                        VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
                break;
        case 2:
                hldev->kdfc = (u8 __iomem *)(hldev->bar1 +
                        VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
                break;
        case 4:
                hldev->kdfc = (u8 __iomem *)(hldev->bar2 +
                        VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
                break;
        default:
                break;
        }

        status = __vxge_hw_device_vpath_reset_in_prog_check(
                        (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
exit:
        return status;
}
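
/*
 * For instance, per the switch above, a KDFC initial BIR value of 2 read
 * from toc_kdfc places the doorbell FIFO at the decoded initial offset
 * within BAR1, while a value of 0 keeps it behind BAR0.
 */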

/*
 * __vxge_hw_device_id_get
 * This routine reads the device id and revision numbers and stores them
 * in the device structure
 */
void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
{
        u64 val64;

        val64 = readq(&hldev->common_reg->titan_asic_id);
        hldev->device_id =
                (u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);

        hldev->major_revision =
                (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);

        hldev->minor_revision =
                (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
}

/*
 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
 * This routine returns the Access Rights of the driver
 */
static u32
__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
{
        u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;

        switch (host_type) {
        case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
                if (func_id == 0) {
                        access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
                                        VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                }
                break;
        case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
                access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
                                VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                break;
        case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
                access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
                                VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                break;
        case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
        case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
        case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
                break;
        case VXGE_HW_SR_VH_FUNCTION0:
        case VXGE_HW_VH_NORMAL_FUNCTION:
                access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                break;
        }

        return access_rights;
}
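
/*
 * For example, per the switch above, function 0 of a
 * VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION host ends up with VPATH, MRPCIM and
 * SRPCIM rights, while its non-zero functions keep only the default
 * VPATH right.
 */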

/*
 * __vxge_hw_device_host_info_get
 * This routine reads the host type assignments and, for the first vpath
 * assigned to this function, derives the function id and access rights
 */
void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
{
        u64 val64;
        u32 i;

        val64 = readq(&hldev->common_reg->host_type_assignments);

        hldev->host_type =
           (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

        hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpath_assignments & vxge_mBIT(i)))
                        continue;

                hldev->func_id =
                        __vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]);

                hldev->access_rights = __vxge_hw_device_access_rights_get(
                        hldev->host_type, hldev->func_id);

                hldev->first_vp_id = i;
                break;
        }
}

/*
 * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
 * link width and signalling rate.
 */
static enum vxge_hw_status
__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
{
        int exp_cap;
        u16 lnk;

        /* Get the negotiated link width and speed from PCI config space */
        exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
        pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);

        if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
                return VXGE_HW_ERR_INVALID_PCI_INFO;

        switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
        case PCIE_LNK_WIDTH_RESRV:
        case PCIE_LNK_X1:
        case PCIE_LNK_X2:
        case PCIE_LNK_X4:
        case PCIE_LNK_X8:
                break;
        default:
                return VXGE_HW_ERR_INVALID_PCI_INFO;
        }

        return VXGE_HW_OK;
}

static enum vxge_hw_status
__vxge_hw_device_is_privilaged(struct __vxge_hw_device *hldev)
{
        if ((hldev->host_type == VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION ||
             hldev->host_type == VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION ||
             hldev->host_type == VXGE_HW_NO_MR_SR_VH0_FUNCTION0) &&
            (hldev->func_id == 0))
                return VXGE_HW_OK;
        else
                return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
}

/*
 * vxge_hw_wrr_rebalance - Rebalance the RX_WRR and KDFC_WRR calendars.
 * Rebalance the RX_WRR and KDFC_WRR calendars.
 */
static enum vxge_hw_status
vxge_hw_wrr_rebalance(struct __vxge_hw_device *hldev)
{
        u64 val64;
        u32 wrr_states[VXGE_HW_WEIGHTED_RR_SERVICE_STATES];
        u32 i, j, how_often = 1;
        enum vxge_hw_status status = VXGE_HW_OK;

        status = __vxge_hw_device_is_privilaged(hldev);
        if (status != VXGE_HW_OK)
                goto exit;

        /* Reset the priorities assigned to the WRR arbitration
         * phases for the receive traffic */
        for (i = 0; i < VXGE_HW_WRR_RING_COUNT; i++)
                writeq(0, ((&hldev->mrpcim_reg->rx_w_round_robin_0) + i));

        /* Reset the transmit FIFO servicing calendar for FIFOs */
        for (i = 0; i < VXGE_HW_WRR_FIFO_COUNT; i++) {
                writeq(0, ((&hldev->mrpcim_reg->kdfc_w_round_robin_0) + i));
                writeq(0, ((&hldev->mrpcim_reg->kdfc_w_round_robin_20) + i));
        }

        /* Assign WRR priority 0 for all FIFOs */
        for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
                writeq(VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(0),
                                ((&hldev->mrpcim_reg->kdfc_fifo_0_ctrl) + i));

                writeq(VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(0),
                        ((&hldev->mrpcim_reg->kdfc_fifo_17_ctrl) + i));
        }

        /* Reset to service non-offload doorbells */
        writeq(0, &hldev->mrpcim_reg->kdfc_entry_type_sel_0);
        writeq(0, &hldev->mrpcim_reg->kdfc_entry_type_sel_1);

        /* Set priority 0 to all receive queues */
        writeq(0, &hldev->mrpcim_reg->rx_queue_priority_0);
        writeq(0, &hldev->mrpcim_reg->rx_queue_priority_1);
        writeq(0, &hldev->mrpcim_reg->rx_queue_priority_2);

        /* Initialize all the slots as unused */
        for (i = 0; i < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; i++)
                wrr_states[i] = -1;

        /* Prepare the FIFO service states */
        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!hldev->config.vp_config[i].min_bandwidth)
                        continue;

                how_often = VXGE_HW_VPATH_BANDWIDTH_MAX /
                                hldev->config.vp_config[i].min_bandwidth;
                if (how_often) {

                        for (j = 0; j < VXGE_HW_WRR_FIFO_SERVICE_STATES;) {
                                if (wrr_states[j] == -1) {
                                        wrr_states[j] = i;
                                        /* Make sure each fifo is serviced
                                         * at least once */
                                        if (i == j)
                                                j += VXGE_HW_MAX_VIRTUAL_PATHS;
                                        else
                                                j += how_often;
                                } else
                                        j++;
                        }
                }
        }

        /* Fill the unused slots with 0 */
        for (j = 0; j < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; j++) {
                if (wrr_states[j] == -1)
                        wrr_states[j] = 0;
        }

        /* Assign WRR priority number for FIFOs */
        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
                writeq(VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(i),
                                ((&hldev->mrpcim_reg->kdfc_fifo_0_ctrl) + i));

                writeq(VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(i),
                        ((&hldev->mrpcim_reg->kdfc_fifo_17_ctrl) + i));
        }

        /* Modify the servicing algorithm applied to the 3 types of doorbells,
         * i.e. non-offload, message and offload */
        writeq(VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_0(0) |
                                VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_1(0) |
                                VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_2(0) |
                                VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_3(0) |
                                VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_4(1) |
                                VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_5(0) |
                                VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_6(0) |
                                VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_7(0),
                                &hldev->mrpcim_reg->kdfc_entry_type_sel_0);

        writeq(VXGE_HW_KDFC_ENTRY_TYPE_SEL_1_NUMBER_8(1),
                                &hldev->mrpcim_reg->kdfc_entry_type_sel_1);

        for (i = 0, j = 0; i < VXGE_HW_WRR_FIFO_COUNT; i++) {

                val64 = VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_0(wrr_states[j++]);
                val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_1(wrr_states[j++]);
                val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_2(wrr_states[j++]);
                val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_3(wrr_states[j++]);
                val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_4(wrr_states[j++]);
                val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_5(wrr_states[j++]);
                val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_6(wrr_states[j++]);
                val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_7(wrr_states[j++]);

                writeq(val64, (&hldev->mrpcim_reg->kdfc_w_round_robin_0 + i));
                writeq(val64, (&hldev->mrpcim_reg->kdfc_w_round_robin_20 + i));
        }

        /* Set up the priorities assigned to receive queues */
        writeq(VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_0(0) |
                        VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_1(1) |
                        VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_2(2) |
                        VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_3(3) |
                        VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_4(4) |
                        VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_5(5) |
                        VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_6(6) |
                        VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_7(7),
                        &hldev->mrpcim_reg->rx_queue_priority_0);

        writeq(VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_8(8) |
                        VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_9(9) |
                        VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_10(10) |
                        VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_11(11) |
                        VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_12(12) |
                        VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_13(13) |
                        VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_14(14) |
                        VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_15(15),
                        &hldev->mrpcim_reg->rx_queue_priority_1);

        writeq(VXGE_HW_RX_QUEUE_PRIORITY_2_RX_Q_NUMBER_16(16),
                                &hldev->mrpcim_reg->rx_queue_priority_2);

        /* Initialize all the slots as unused */
        for (i = 0; i < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; i++)
                wrr_states[i] = -1;

        /* Prepare the Ring service states */
        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!hldev->config.vp_config[i].min_bandwidth)
                        continue;

                how_often = VXGE_HW_VPATH_BANDWIDTH_MAX /
                                hldev->config.vp_config[i].min_bandwidth;

                if (how_often) {
                        for (j = 0; j < VXGE_HW_WRR_RING_SERVICE_STATES;) {
                                if (wrr_states[j] == -1) {
                                        wrr_states[j] = i;
                                        /* Make sure each ring is
                                         * serviced at least once */
                                        if (i == j)
                                                j += VXGE_HW_MAX_VIRTUAL_PATHS;
                                        else
                                                j += how_often;
                                } else
                                        j++;
                        }
                }
        }

        /* Fill the unused slots with 0 */
        for (j = 0; j < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; j++) {
                if (wrr_states[j] == -1)
                        wrr_states[j] = 0;
        }

        for (i = 0, j = 0; i < VXGE_HW_WRR_RING_COUNT; i++) {
                val64 = VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_0(
                                wrr_states[j++]);
                val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_1(
                                wrr_states[j++]);
                val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_2(
                                wrr_states[j++]);
                val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_3(
                                wrr_states[j++]);
                val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_4(
                                wrr_states[j++]);
                val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_5(
                                wrr_states[j++]);
                val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_6(
                                wrr_states[j++]);
                val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_7(
                                wrr_states[j++]);

                writeq(val64, ((&hldev->mrpcim_reg->rx_w_round_robin_0) + i));
        }
exit:
        return status;
}
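
/*
 * A worked example of the slot math above: a vpath whose min_bandwidth is
 * half of VXGE_HW_VPATH_BANDWIDTH_MAX gets how_often = 2, so it claims
 * every second free slot of the calendar, while a fully provisioned vpath
 * (how_often = 1) claims every free slot it encounters.
 */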

/*
 * __vxge_hw_device_initialize
 * Initialize Titan-V hardware.
 */
enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
{
        enum vxge_hw_status status = VXGE_HW_OK;

        /* Validate the pci-e link width and speed */
        status = __vxge_hw_verify_pci_e_info(hldev);
        if (status != VXGE_HW_OK)
                goto exit;

        vxge_hw_wrr_rebalance(hldev);
exit:
        return status;
}

/**
 * vxge_hw_device_hw_info_get - Get the hw information
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver, FW version information and the first mac address for
 * each vpath
 */
enum vxge_hw_status __devinit
vxge_hw_device_hw_info_get(void __iomem *bar0,
                           struct vxge_hw_device_hw_info *hw_info)
{
        u32 i;
        u64 val64;
        struct vxge_hw_toc_reg __iomem *toc;
        struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
        struct vxge_hw_common_reg __iomem *common_reg;
        struct vxge_hw_vpath_reg __iomem *vpath_reg;
        struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
        enum vxge_hw_status status;

        memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));

        toc = __vxge_hw_device_toc_get(bar0);
        if (toc == NULL) {
                status = VXGE_HW_ERR_CRITICAL;
                goto exit;
        }

        val64 = readq(&toc->toc_common_pointer);
        common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);

        status = __vxge_hw_device_vpath_reset_in_prog_check(
                (u64 __iomem *)&common_reg->vpath_rst_in_prog);
        if (status != VXGE_HW_OK)
                goto exit;

        hw_info->vpath_mask = readq(&common_reg->vpath_assignments);

        val64 = readq(&common_reg->host_type_assignments);

        hw_info->host_type =
           (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
                        continue;

                val64 = readq(&toc->toc_vpmgmt_pointer[i]);

                vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
                                (bar0 + val64);

                hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg);
                if (__vxge_hw_device_access_rights_get(hw_info->host_type,
                        hw_info->func_id) &
                        VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {

                        val64 = readq(&toc->toc_mrpcim_pointer);

                        mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
                                        (bar0 + val64);

                        writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
                        wmb();
                }

                val64 = readq(&toc->toc_vpath_pointer[i]);

                vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

                hw_info->function_mode =
                        __vxge_hw_vpath_pci_func_mode_get(i, vpath_reg);

                status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info);
                if (status != VXGE_HW_OK)
                        goto exit;

                status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info);
                if (status != VXGE_HW_OK)
                        goto exit;

                break;
        }

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
                        continue;

                val64 = readq(&toc->toc_vpath_pointer[i]);
                vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

                status = __vxge_hw_vpath_addr_get(i, vpath_reg,
                                hw_info->mac_addrs[i],
                                hw_info->mac_addr_masks[i]);
                if (status != VXGE_HW_OK)
                        goto exit;
        }
exit:
        return status;
}
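
/*
 * Illustrative probe-time usage (a sketch; "bar0" stands for the caller's
 * ioremapped BAR0 and is not defined here):
 *
 *	struct vxge_hw_device_hw_info hw_info;
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_device_hw_info_get(bar0, &hw_info);
 *
 * On VXGE_HW_OK, hw_info.vpath_mask carries one set bit per vpath owned
 * by this PCI function, along with the firmware version and per-vpath MAC
 * addresses.
 */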

/*
 * vxge_hw_device_initialize - Initialize Titan device.
 * Initialize Titan device. Note that all the arguments of this public API
 * are 'IN', including @hldev. The driver cooperates with the OS to find the
 * new Titan device and locate its PCI and memory spaces.
 *
 * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW
 * to enable the latter to perform Titan hardware initialization.
 */
enum vxge_hw_status __devinit
vxge_hw_device_initialize(
        struct __vxge_hw_device **devh,
        struct vxge_hw_device_attr *attr,
        struct vxge_hw_device_config *device_config)
{
        u32 i;
        u32 nblocks = 0;
        struct __vxge_hw_device *hldev = NULL;
        enum vxge_hw_status status = VXGE_HW_OK;

        status = __vxge_hw_device_config_check(device_config);
        if (status != VXGE_HW_OK)
                goto exit;

        hldev = vmalloc(sizeof(struct __vxge_hw_device));
        if (hldev == NULL) {
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto exit;
        }

        memset(hldev, 0, sizeof(struct __vxge_hw_device));
        hldev->magic = VXGE_HW_DEVICE_MAGIC;

        vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);

        /* apply config */
        memcpy(&hldev->config, device_config,
                sizeof(struct vxge_hw_device_config));

        hldev->bar0 = attr->bar0;
        hldev->bar1 = attr->bar1;
        hldev->bar2 = attr->bar2;
        hldev->pdev = attr->pdev;

        hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
        hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
        hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;

        __vxge_hw_device_pci_e_init(hldev);

        status = __vxge_hw_device_reg_addr_get(hldev);
        if (status != VXGE_HW_OK)
                goto exit;
        __vxge_hw_device_id_get(hldev);

        __vxge_hw_device_host_info_get(hldev);

        /* Incrementing for stats blocks */
        nblocks++;

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpath_assignments & vxge_mBIT(i)))
                        continue;

                if (device_config->vp_config[i].ring.enable ==
                        VXGE_HW_RING_ENABLE)
                        nblocks += device_config->vp_config[i].ring.ring_blocks;

                if (device_config->vp_config[i].fifo.enable ==
                        VXGE_HW_FIFO_ENABLE)
                        nblocks += device_config->vp_config[i].fifo.fifo_blocks;
                nblocks++;
        }

        if (__vxge_hw_blockpool_create(hldev,
                &hldev->block_pool,
                device_config->dma_blockpool_initial + nblocks,
                device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {

                vxge_hw_device_terminate(hldev);
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto exit;
        }

        status = __vxge_hw_device_initialize(hldev);

        if (status != VXGE_HW_OK) {
                vxge_hw_device_terminate(hldev);
                goto exit;
        }

        *devh = hldev;
exit:
        return status;
}
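
/*
 * Minimal bring-up sketch (illustrative; "attr" and "device_config" are
 * assumed to have been filled in by the caller beforehand):
 *
 *	struct __vxge_hw_device *hldev;
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_device_initialize(&hldev, &attr, &device_config);
 *	if (status != VXGE_HW_OK)
 *		return status;
 *
 * A matching vxge_hw_device_terminate(hldev) releases everything this
 * call set up.
 */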

/*
 * vxge_hw_device_terminate - Terminate Titan device.
 * Terminate HW device.
 */
void
vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
{
        vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);

        hldev->magic = VXGE_HW_DEVICE_DEAD;
        __vxge_hw_blockpool_destroy(&hldev->block_pool);
        vfree(hldev);
}

/*
 * vxge_hw_device_stats_get - Get the device hw statistics.
 * Returns the vpath h/w stats for the device.
 */
enum vxge_hw_status
vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
                        struct vxge_hw_device_stats_hw_info *hw_stats)
{
        u32 i;
        enum vxge_hw_status status = VXGE_HW_OK;

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
                        (hldev->virtual_paths[i].vp_open ==
                                VXGE_HW_VP_NOT_OPEN))
                        continue;

                memcpy(hldev->virtual_paths[i].hw_stats_sav,
                                hldev->virtual_paths[i].hw_stats,
                                sizeof(struct vxge_hw_vpath_stats_hw_info));

                status = __vxge_hw_vpath_stats_get(
                        &hldev->virtual_paths[i],
                        hldev->virtual_paths[i].hw_stats);
        }

        memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
                        sizeof(struct vxge_hw_device_stats_hw_info));

        return status;
}

/*
 * vxge_hw_driver_stats_get - Get the device sw statistics.
 * Returns the vpath s/w stats for the device.
 */
enum vxge_hw_status vxge_hw_driver_stats_get(
                        struct __vxge_hw_device *hldev,
                        struct vxge_hw_device_stats_sw_info *sw_stats)
{
        enum vxge_hw_status status = VXGE_HW_OK;

        memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
                sizeof(struct vxge_hw_device_stats_sw_info));

        return status;
}

/*
 * vxge_hw_mrpcim_stats_access - Access the statistics from the given location
 *                               and offset and perform an operation
 * Get the statistics from the given location and offset.
 */
enum vxge_hw_status
vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
                            u32 operation, u32 location, u32 offset, u64 *stat)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;

        status = __vxge_hw_device_is_privilaged(hldev);
        if (status != VXGE_HW_OK)
                goto exit;

        val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
                VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
                VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
                VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);

        status = __vxge_hw_pio_mem_write64(val64,
                                &hldev->mrpcim_reg->xmac_stats_sys_cmd,
                                VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
                                hldev->config.device_poll_millis);

        if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
                *stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
        else
                *stat = 0;
exit:
        return status;
}
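
/*
 * Illustrative read of a single 64-bit statistic ("stat_offset" is a
 * hypothetical byte offset within the aggregate-stats block, converted to
 * the 8-byte units the command register expects):
 *
 *	u64 stat;
 *
 *	status = vxge_hw_mrpcim_stats_access(hldev, VXGE_HW_STATS_OP_READ,
 *			VXGE_HW_STATS_LOC_AGGR, stat_offset >> 3, &stat);
 */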

/*
 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
 * Get the Statistics on aggregate port
 */
enum vxge_hw_status
vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
                                   struct vxge_hw_xmac_aggr_stats *aggr_stats)
{
        u64 *val64;
        int i;
        u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
        enum vxge_hw_status status = VXGE_HW_OK;

        val64 = (u64 *)aggr_stats;

        status = __vxge_hw_device_is_privilaged(hldev);
        if (status != VXGE_HW_OK)
                goto exit;

        for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
                status = vxge_hw_mrpcim_stats_access(hldev,
                                        VXGE_HW_STATS_OP_READ,
                                        VXGE_HW_STATS_LOC_AGGR,
                                        ((offset + (104 * port)) >> 3), val64);
                if (status != VXGE_HW_OK)
                        goto exit;

                offset += 8;
                val64++;
        }
exit:
        return status;
}

/*
 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
 * Get the Statistics on port
 */
enum vxge_hw_status
vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
                                   struct vxge_hw_xmac_port_stats *port_stats)
{
        u64 *val64;
        enum vxge_hw_status status = VXGE_HW_OK;
        int i;
        u32 offset = 0x0;

        val64 = (u64 *)port_stats;

        status = __vxge_hw_device_is_privilaged(hldev);
        if (status != VXGE_HW_OK)
                goto exit;

        for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
                status = vxge_hw_mrpcim_stats_access(hldev,
                                        VXGE_HW_STATS_OP_READ,
                                        VXGE_HW_STATS_LOC_AGGR,
                                        ((offset + (608 * port)) >> 3), val64);
                if (status != VXGE_HW_OK)
                        goto exit;

                offset += 8;
                val64++;
        }

exit:
        return status;
}

/*
 * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
 * Get the XMAC Statistics
 */
enum vxge_hw_status
vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
                              struct vxge_hw_xmac_stats *xmac_stats)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        u32 i;

        status = vxge_hw_device_xmac_aggr_stats_get(hldev,
                                        0, &xmac_stats->aggr_stats[0]);
        if (status != VXGE_HW_OK)
                goto exit;

        status = vxge_hw_device_xmac_aggr_stats_get(hldev,
                                1, &xmac_stats->aggr_stats[1]);
        if (status != VXGE_HW_OK)
                goto exit;

        for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {

                status = vxge_hw_device_xmac_port_stats_get(hldev,
                                        i, &xmac_stats->port_stats[i]);
                if (status != VXGE_HW_OK)
                        goto exit;
        }

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                        continue;

                status = __vxge_hw_vpath_xmac_tx_stats_get(
                                        &hldev->virtual_paths[i],
                                        &xmac_stats->vpath_tx_stats[i]);
                if (status != VXGE_HW_OK)
                        goto exit;

                status = __vxge_hw_vpath_xmac_rx_stats_get(
                                        &hldev->virtual_paths[i],
                                        &xmac_stats->vpath_rx_stats[i]);
                if (status != VXGE_HW_OK)
                        goto exit;
        }
exit:
        return status;
}

/*
 * vxge_hw_device_debug_set - Set the debug module, level and timestamp
 * This routine is used to dynamically change the debug output
 */
void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
                              enum vxge_debug_level level, u32 mask)
{
        if (hldev == NULL)
                return;

#if defined(VXGE_DEBUG_TRACE_MASK) || \
        defined(VXGE_DEBUG_ERR_MASK)
        hldev->debug_module_mask = mask;
        hldev->debug_level = level;
#endif

#if defined(VXGE_DEBUG_ERR_MASK)
        hldev->level_err = level & VXGE_ERR;
#endif

#if defined(VXGE_DEBUG_TRACE_MASK)
        hldev->level_trace = level & VXGE_TRACE;
#endif
}
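
/*
 * For example, to restrict output to error-level messages from all
 * components at runtime:
 *
 *	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
 */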

/*
 * vxge_hw_device_error_level_get - Get the error level
 * This routine returns the current error level set
 */
u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_ERR_MASK)
        if (hldev == NULL)
                return VXGE_ERR;
        else
                return hldev->level_err;
#else
        return 0;
#endif
}

/*
 * vxge_hw_device_trace_level_get - Get the trace level
 * This routine returns the current trace level set
 */
u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_TRACE_MASK)
        if (hldev == NULL)
                return VXGE_TRACE;
        else
                return hldev->level_trace;
#else
        return 0;
#endif
}

/*
 * vxge_hw_device_debug_mask_get - Get the debug mask
 * This routine returns the current debug mask set
 */
u32 vxge_hw_device_debug_mask_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_TRACE_MASK) || defined(VXGE_DEBUG_ERR_MASK)
        if (hldev == NULL)
                return 0;
        return hldev->debug_module_mask;
#else
        return 0;
#endif
}

/*
 * vxge_hw_device_getpause_data - Pause frame generation and reception.
 * Returns the Pause frame generation and reception capability of the NIC.
 */
enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
                                                 u32 port, u32 *tx, u32 *rx)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;

        if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
                status = VXGE_HW_ERR_INVALID_DEVICE;
                goto exit;
        }

        if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
                status = VXGE_HW_ERR_INVALID_PORT;
                goto exit;
        }

        if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
                status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
                goto exit;
        }

        val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
        if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
                *tx = 1;
        if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
                *rx = 1;
exit:
        return status;
}

/*
 * vxge_hw_device_setpause_data - set/reset pause frame generation.
 * It can be used to set or reset Pause frame generation or reception
 * support of the NIC.
 */
enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
                                                 u32 port, u32 tx, u32 rx)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;

        if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
                status = VXGE_HW_ERR_INVALID_DEVICE;
                goto exit;
        }

        if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
                status = VXGE_HW_ERR_INVALID_PORT;
                goto exit;
        }

        status = __vxge_hw_device_is_privilaged(hldev);
        if (status != VXGE_HW_OK)
                goto exit;

        val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
        if (tx)
                val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
        else
                val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
        if (rx)
                val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
        else
                val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;

        writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
exit:
        return status;
}
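
/*
 * For example, to enable both generation and reception of pause frames
 * on port 0:
 *
 *	status = vxge_hw_device_setpause_data(hldev, 0, 1, 1);
 */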

u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
{
        int link_width, exp_cap;
        u16 lnk;

        exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
        pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
        link_width = (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
        return link_width;
}

/*
 * __vxge_hw_ring_block_memblock_idx - Return the memblock index
 * This function returns the index of memory block
 */
static inline u32
__vxge_hw_ring_block_memblock_idx(u8 *block)
{
        return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
}

/*
 * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
 * This function sets index to a memory block
 */
static inline void
__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
{
        *((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
}

/*
 * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
 * in RxD block
 * Sets the next block pointer in RxD block
 */
static inline void
__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
{
        *((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
}

/*
 * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
 *             first block
 * Returns the dma address of the first RxD block
 */
u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
{
        struct vxge_hw_mempool_dma *dma_object;

        dma_object = ring->mempool->memblocks_dma_arr;
        vxge_assert(dma_object != NULL);

        return dma_object->addr;
}

/*
 * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
 * This function returns the dma address of a given item
 */
static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
                                               void *item)
{
        u32 memblock_idx;
        void *memblock;
        struct vxge_hw_mempool_dma *memblock_dma_object;
        ptrdiff_t dma_item_offset;

        /* get owner memblock index */
        memblock_idx = __vxge_hw_ring_block_memblock_idx(item);

        /* get owner memblock by memblock index */
        memblock = mempoolh->memblocks_arr[memblock_idx];

        /* get memblock DMA object by memblock index */
        memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;

        /* calculate offset in the memblock of this item */
        dma_item_offset = (u8 *)item - (u8 *)memblock;

        return memblock_dma_object->addr + dma_item_offset;
}
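
/*
 * A worked example of the address math above: if an item sits 512 bytes
 * into memblock 1, the function returns memblocks_dma_arr[1].addr + 512,
 * i.e. the bus address corresponding to the item's CPU address.
 */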

/*
 * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
 * This function links the "from" RxD block to the "to" RxD block
 */
static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
                                         struct __vxge_hw_ring *ring, u32 from,
                                         u32 to)
{
        u8 *to_item, *from_item;
        dma_addr_t to_dma;

        /* get "from" RxD block */
        from_item = mempoolh->items_arr[from];
        vxge_assert(from_item);

        /* get "to" RxD block */
        to_item = mempoolh->items_arr[to];
        vxge_assert(to_item);

        /* return address of the beginning of previous RxD block */
        to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);

        /* set next pointer for this RxD block to point on
         * previous item's DMA start address */
        __vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
}

/*
 * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD
 * block callback
 * This function is callback passed to __vxge_hw_mempool_create to create memory
 * pool for RxD block
 */
static void
__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
                                  u32 memblock_index,
                                  struct vxge_hw_mempool_dma *dma_object,
                                  u32 index, u32 is_last)
{
        u32 i;
        void *item = mempoolh->items_arr[index];
        struct __vxge_hw_ring *ring =
                (struct __vxge_hw_ring *)mempoolh->userdata;

        /* format rxds array */
        for (i = 0; i < ring->rxds_per_block; i++) {
                void *rxdblock_priv;
                void *uld_priv;
                struct vxge_hw_ring_rxd_1 *rxdp;

                u32 reserve_index = ring->channel.reserve_ptr -
                                (index * ring->rxds_per_block + i + 1);
                u32 memblock_item_idx;

                ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
                                                i * ring->rxd_size;

                /* Note: memblock_item_idx is index of the item within
                 *       the memblock. For instance, in case of three RxD-blocks
                 *       per memblock this value can be 0, 1 or 2. */
                rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
                                        memblock_index, item,
                                        &memblock_item_idx);

                rxdp = (struct vxge_hw_ring_rxd_1 *)
                                ring->channel.reserve_arr[reserve_index];

                uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);

                /* pre-format Host_Control */
                rxdp->host_control = (u64)(size_t)uld_priv;
        }

        __vxge_hw_ring_block_memblock_idx_set(item, memblock_index);

        if (is_last) {
                /* link last one with first one */
                __vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
        }

        if (index > 0) {
                /* link this RxD block with previous one */
                __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
        }
}

/*
 * vxge_hw_ring_replenish - Replenish the RxDs
 * This function replenishes the RxDs from reserve array to work array
 */
enum vxge_hw_status
vxge_hw_ring_replenish(struct __vxge_hw_ring *ring, u16 min_flag)
{
        void *rxd;
        int i = 0;
        struct __vxge_hw_channel *channel;
        enum vxge_hw_status status = VXGE_HW_OK;

        channel = &ring->channel;

        while (vxge_hw_channel_dtr_count(channel) > 0) {

                status = vxge_hw_ring_rxd_reserve(ring, &rxd);

                vxge_assert(status == VXGE_HW_OK);

                if (ring->rxd_init) {
                        status = ring->rxd_init(rxd, channel->userdata);
                        if (status != VXGE_HW_OK) {
                                vxge_hw_ring_rxd_free(ring, rxd);
                                goto exit;
                        }
                }

                vxge_hw_ring_rxd_post(ring, rxd);
                if (min_flag) {
                        i++;
                        if (i == VXGE_HW_RING_MIN_BUFF_ALLOCATION)
                                break;
                }
        }
        status = VXGE_HW_OK;
exit:
        return status;
}
1462
1463 /*
1464  * __vxge_hw_ring_create - Create a Ring
1465  * This function creates Ring and initializes it.
1466  *
1467  */
1468 enum vxge_hw_status
1469 __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
1470                       struct vxge_hw_ring_attr *attr)
1471 {
1472         enum vxge_hw_status status = VXGE_HW_OK;
1473         struct __vxge_hw_ring *ring;
1474         u32 ring_length;
1475         struct vxge_hw_ring_config *config;
1476         struct __vxge_hw_device *hldev;
1477         u32 vp_id;
1478         struct vxge_hw_mempool_cbs ring_mp_callback;
1479
1480         if ((vp == NULL) || (attr == NULL)) {
1481                 status = VXGE_HW_FAIL;
1482                 goto exit;
1483         }
1484
1485         hldev = vp->vpath->hldev;
1486         vp_id = vp->vpath->vp_id;
1487
1488         config = &hldev->config.vp_config[vp_id].ring;
1489
1490         ring_length = config->ring_blocks *
1491                         vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
1492
1493         ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
1494                                                 VXGE_HW_CHANNEL_TYPE_RING,
1495                                                 ring_length,
1496                                                 attr->per_rxd_space,
1497                                                 attr->userdata);
1498
1499         if (ring == NULL) {
1500                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1501                 goto exit;
1502         }
1503
1504         vp->vpath->ringh = ring;
1505         ring->vp_id = vp_id;
1506         ring->vp_reg = vp->vpath->vp_reg;
1507         ring->common_reg = hldev->common_reg;
1508         ring->stats = &vp->vpath->sw_stats->ring_stats;
1509         ring->config = config;
1510         ring->callback = attr->callback;
1511         ring->rxd_init = attr->rxd_init;
1512         ring->rxd_term = attr->rxd_term;
1513         ring->buffer_mode = config->buffer_mode;
1514         ring->rxds_limit = config->rxds_limit;
1515
1516         ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
1517         ring->rxd_priv_size =
1518                 sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
1519         ring->per_rxd_space = attr->per_rxd_space;
1520
1521         ring->rxd_priv_size =
1522                 ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
1523                 VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
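
        /* The expression above is the usual round-up-to-multiple idiom:
         * with a (hypothetical) VXGE_CACHE_LINE_SIZE of 64, a 72-byte
         * private area becomes ((72 + 63) / 64) * 64 == 128 bytes, so
         * each RxD's private data starts on its own cache line. */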
1524
1525         /* how many RxDs can fit into one block. Depends on configured
1526          * buffer_mode. */
1527         ring->rxds_per_block =
1528                 vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
1529
1530         /* calculate actual RxD block private size */
1531         ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
1532         ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
1533         ring->mempool = __vxge_hw_mempool_create(hldev,
1534                                 VXGE_HW_BLOCK_SIZE,
1535                                 VXGE_HW_BLOCK_SIZE,
1536                                 ring->rxdblock_priv_size,
1537                                 ring->config->ring_blocks,
1538                                 ring->config->ring_blocks,
1539                                 &ring_mp_callback,
1540                                 ring);
1541
1542         if (ring->mempool == NULL) {
1543                 __vxge_hw_ring_delete(vp);
1544                 return VXGE_HW_ERR_OUT_OF_MEMORY;
1545         }
1546
1547         status = __vxge_hw_channel_initialize(&ring->channel);
1548         if (status != VXGE_HW_OK) {
1549                 __vxge_hw_ring_delete(vp);
1550                 goto exit;
1551         }
1552
1553         /* Note:
1554  * Specifying an rxd_init callback means two things:
1555  * 1) RxDs need to be initialized by the driver at channel-open time;
1556  * 2) RxDs need to be posted at channel-open time
1557  *    (that's what the vxge_hw_ring_replenish() call below does).
1558  * Currently we don't have a case where 1) is done without 2).
1559          */
1560         if (ring->rxd_init) {
1561                 status = vxge_hw_ring_replenish(ring, 1);
1562                 if (status != VXGE_HW_OK) {
1563                         __vxge_hw_ring_delete(vp);
1564                         goto exit;
1565                 }
1566         }
1567
1568         /* The initial replenish increments the usage counter in its
1569          * post() routine, so reset it here */
1570         ring->stats->common_stats.usage_cnt = 0;
1571 exit:
1572         return status;
1573 }
1574
1575 /*
1576  * __vxge_hw_ring_abort - Returns the RxDs
1577  * This function terminates the RxDs of the ring
1578  */
1579 enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
1580 {
1581         void *rxdh;
1582         struct __vxge_hw_channel *channel;
1583
1584         channel = &ring->channel;
1585
1586         for (;;) {
1587                 vxge_hw_channel_dtr_try_complete(channel, &rxdh);
1588
1589                 if (rxdh == NULL)
1590                         break;
1591
1592                 vxge_hw_channel_dtr_complete(channel);
1593
1594                 if (ring->rxd_term)
1595                         ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
1596                                 channel->userdata);
1597
1598                 vxge_hw_channel_dtr_free(channel, rxdh);
1599         }
1600
1601         return VXGE_HW_OK;
1602 }
1603
1604 /*
1605  * __vxge_hw_ring_reset - Resets the ring
1606  * This function resets the ring during vpath reset operation
1607  */
1608 enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
1609 {
1610         enum vxge_hw_status status = VXGE_HW_OK;
1611         struct __vxge_hw_channel *channel;
1612
1613         channel = &ring->channel;
1614
1615         __vxge_hw_ring_abort(ring);
1616
1617         status = __vxge_hw_channel_reset(channel);
1618
1619         if (status != VXGE_HW_OK)
1620                 goto exit;
1621
1622         if (ring->rxd_init) {
1623                 status = vxge_hw_ring_replenish(ring, 1);
1624                 if (status != VXGE_HW_OK)
1625                         goto exit;
1626         }
1627 exit:
1628         return status;
1629 }
1630
1631 /*
1632  * __vxge_hw_ring_delete - Removes the ring
1633  * This function frees up the memory pool and removes the ring
1634  */
1635 enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
1636 {
1637         struct __vxge_hw_ring *ring = vp->vpath->ringh;
1638
1639         __vxge_hw_ring_abort(ring);
1640
1641         if (ring->mempool)
1642                 __vxge_hw_mempool_destroy(ring->mempool);
1643
1644         vp->vpath->ringh = NULL;
1645         __vxge_hw_channel_free(&ring->channel);
1646
1647         return VXGE_HW_OK;
1648 }
1649
1650 /*
1651  * __vxge_hw_mempool_grow
1652  * Grows the mempool by up to %num_allocate memblocks.
1653  */
1654 enum vxge_hw_status
1655 __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
1656                        u32 *num_allocated)
1657 {
1658         u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
1659         u32 n_items = mempool->items_per_memblock;
1660         u32 start_block_idx = mempool->memblocks_allocated;
1661         u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
1662         enum vxge_hw_status status = VXGE_HW_OK;
1663
1664         *num_allocated = 0;
1665
1666         if (end_block_idx > mempool->memblocks_max) {
1667                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1668                 goto exit;
1669         }
1670
1671         for (i = start_block_idx; i < end_block_idx; i++) {
1672                 u32 j;
1673                 u32 is_last = ((end_block_idx - 1) == i);
1674                 struct vxge_hw_mempool_dma *dma_object =
1675                         mempool->memblocks_dma_arr + i;
1676                 void *the_memblock;
1677
1678                 /* allocate memblock's private part. Each DMA memblock
1679                  * has a space allocated for item's private usage upon
1680                  * mempool's user request. Each time mempool grows, it will
1681                  * allocate new memblock and its private part at once.
1682                  * This helps to minimize memory usage a lot. */
1683                 mempool->memblocks_priv_arr[i] =
1684                                 vmalloc(mempool->items_priv_size * n_items);
1685                 if (mempool->memblocks_priv_arr[i] == NULL) {
1686                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
1687                         goto exit;
1688                 }
1689
1690                 memset(mempool->memblocks_priv_arr[i], 0,
1691                              mempool->items_priv_size * n_items);
1692
1693                 /* allocate DMA-capable memblock */
1694                 mempool->memblocks_arr[i] =
1695                         __vxge_hw_blockpool_malloc(mempool->devh,
1696                                 mempool->memblock_size, dma_object);
1697                 if (mempool->memblocks_arr[i] == NULL) {
1698                         vfree(mempool->memblocks_priv_arr[i]);
1699                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
1700                         goto exit;
1701                 }
1702
1703                 (*num_allocated)++;
1704                 mempool->memblocks_allocated++;
1705
1706                 memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
1707
1708                 the_memblock = mempool->memblocks_arr[i];
1709
1710                 /* fill the items hash array */
1711                 for (j = 0; j < n_items; j++) {
1712                         u32 index = i * n_items + j;
1713
1714                         if (first_time && index >= mempool->items_initial)
1715                                 break;
1716
1717                         mempool->items_arr[index] =
1718                                 ((char *)the_memblock + j*mempool->item_size);
1719
1720                         /* let caller to do more job on each item */
1721                         if (mempool->item_func_alloc != NULL)
1722                                 mempool->item_func_alloc(mempool, i,
1723                                         dma_object, index, is_last);
1724
1725                         mempool->items_current = index + 1;
1726                 }
1727
1728                 if (first_time && mempool->items_current ==
1729                                         mempool->items_initial)
1730                         break;
1731         }
1732 exit:
1733         return status;
1734 }
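
/*
 * Worked example (illustrative): with items_per_memblock == 4 and
 * memblocks_allocated == 2, the call
 *
 *	u32 done;
 *	status = __vxge_hw_mempool_grow(mempool, 3, &done);
 *
 * attempts to allocate memblocks 2, 3 and 4; on success done == 3 and
 * memblocks_allocated == 5. If end_block_idx would exceed memblocks_max,
 * the call fails up front with VXGE_HW_ERR_OUT_OF_MEMORY and allocates
 * nothing.
 */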
1735
1736 /*
1737  * __vxge_hw_mempool_create
1738  * This function creates a memory pool object. The pool may grow but will
1739  * never shrink. It consists of a number of dynamically allocated blocks,
1740  * each large enough to hold %items_initial items. The memory is DMA-able
1741  * but the client must map/unmap it before interoperating with the device.
1742  */
1743 struct vxge_hw_mempool*
1744 __vxge_hw_mempool_create(
1745         struct __vxge_hw_device *devh,
1746         u32 memblock_size,
1747         u32 item_size,
1748         u32 items_priv_size,
1749         u32 items_initial,
1750         u32 items_max,
1751         struct vxge_hw_mempool_cbs *mp_callback,
1752         void *userdata)
1753 {
1754         enum vxge_hw_status status = VXGE_HW_OK;
1755         u32 memblocks_to_allocate;
1756         struct vxge_hw_mempool *mempool = NULL;
1757         u32 allocated;
1758
1759         if (memblock_size < item_size) {
1760                 status = VXGE_HW_FAIL;
1761                 goto exit;
1762         }
1763
1764         mempool = (struct vxge_hw_mempool *)
1765                         vmalloc(sizeof(struct vxge_hw_mempool));
1766         if (mempool == NULL) {
1767                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1768                 goto exit;
1769         }
1770         memset(mempool, 0, sizeof(struct vxge_hw_mempool));
1771
1772         mempool->devh                   = devh;
1773         mempool->memblock_size          = memblock_size;
1774         mempool->items_max              = items_max;
1775         mempool->items_initial          = items_initial;
1776         mempool->item_size              = item_size;
1777         mempool->items_priv_size        = items_priv_size;
1778         mempool->item_func_alloc        = mp_callback->item_func_alloc;
1779         mempool->userdata               = userdata;
1780
1781         mempool->memblocks_allocated = 0;
1782
1783         mempool->items_per_memblock = memblock_size / item_size;
1784
1785         mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
1786                                         mempool->items_per_memblock;
1787
1788         /* allocate array of memblocks */
1789         mempool->memblocks_arr =
1790                 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
1791         if (mempool->memblocks_arr == NULL) {
1792                 __vxge_hw_mempool_destroy(mempool);
1793                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1794                 mempool = NULL;
1795                 goto exit;
1796         }
1797         memset(mempool->memblocks_arr, 0,
1798                 sizeof(void *) * mempool->memblocks_max);
1799
1800         /* allocate array of private parts of items per memblocks */
1801         mempool->memblocks_priv_arr =
1802                 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
1803         if (mempool->memblocks_priv_arr == NULL) {
1804                 __vxge_hw_mempool_destroy(mempool);
1805                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1806                 mempool = NULL;
1807                 goto exit;
1808         }
1809         memset(mempool->memblocks_priv_arr, 0,
1810                     sizeof(void *) * mempool->memblocks_max);
1811
1812         /* allocate array of memblocks DMA objects */
1813         mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *)
1814                 vmalloc(sizeof(struct vxge_hw_mempool_dma) *
1815                         mempool->memblocks_max);
1816
1817         if (mempool->memblocks_dma_arr == NULL) {
1818                 __vxge_hw_mempool_destroy(mempool);
1819                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1820                 mempool = NULL;
1821                 goto exit;
1822         }
1823         memset(mempool->memblocks_dma_arr, 0,
1824                         sizeof(struct vxge_hw_mempool_dma) *
1825                         mempool->memblocks_max);
1826
1827         /* allocate hash array of items */
1828         mempool->items_arr =
1829                 (void **) vmalloc(sizeof(void *) * mempool->items_max);
1830         if (mempool->items_arr == NULL) {
1831                 __vxge_hw_mempool_destroy(mempool);
1832                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1833                 mempool = NULL;
1834                 goto exit;
1835         }
1836         memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max);
1837
1838         /* calculate initial number of memblocks */
1839         memblocks_to_allocate = (mempool->items_initial +
1840                                  mempool->items_per_memblock - 1) /
1841                                                 mempool->items_per_memblock;
1842
1843         /* pre-allocate the mempool */
1844         status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
1845                                         &allocated);
1846         if (status != VXGE_HW_OK) {
1847                 __vxge_hw_mempool_destroy(mempool);
1848                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1849                 mempool = NULL;
1850                 goto exit;
1851         }
1852
1853 exit:
1854         return mempool;
1855 }
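
/*
 * Usage sketch (illustrative only): the ring code above creates its RxD
 * block pool along these lines; priv_size and n_blocks are placeholders.
 *
 *	struct vxge_hw_mempool_cbs cbs;
 *	struct vxge_hw_mempool *mp;
 *
 *	cbs.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
 *	mp = __vxge_hw_mempool_create(hldev, VXGE_HW_BLOCK_SIZE,
 *				VXGE_HW_BLOCK_SIZE, priv_size,
 *				n_blocks, n_blocks, &cbs, ring);
 *	if (mp == NULL)
 *		return VXGE_HW_ERR_OUT_OF_MEMORY;
 *	...
 *	__vxge_hw_mempool_destroy(mp);
 */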
1856
1857 /*
1858  * vxge_hw_mempool_destroy
1859  */
1860 void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
1861 {
1862         u32 i;
1863         struct __vxge_hw_device *devh = mempool->devh;
1864
1865         for (i = 0; i < mempool->memblocks_allocated; i++) {
1866                 struct vxge_hw_mempool_dma *dma_object;
1867
1868                 vxge_assert(mempool->memblocks_arr[i]);
1869                 vxge_assert(mempool->memblocks_dma_arr + i);
1870
1871                 dma_object = mempool->memblocks_dma_arr + i;
1872
1873                 /* No per-item cleanup is needed here: items carry no
1874                  * separately allocated state, so each memblock and its
1875                  * private area are freed wholesale below. */
1880
1881                 vfree(mempool->memblocks_priv_arr[i]);
1882
1883                 __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
1884                                 mempool->memblock_size, dma_object);
1885         }
1886
1887         if (mempool->items_arr)
1888                 vfree(mempool->items_arr);
1889
1890         if (mempool->memblocks_dma_arr)
1891                 vfree(mempool->memblocks_dma_arr);
1892
1893         if (mempool->memblocks_priv_arr)
1894                 vfree(mempool->memblocks_priv_arr);
1895
1896         if (mempool->memblocks_arr)
1897                 vfree(mempool->memblocks_arr);
1898
1899         vfree(mempool);
1900 }
1901
1902 /*
1903  * __vxge_hw_device_fifo_config_check - Check fifo configuration.
1904  * Check the fifo configuration
1905  */
1906 enum vxge_hw_status
1907 __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
1908 {
1909         if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
1910              (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
1911                 return VXGE_HW_BADCFG_FIFO_BLOCKS;
1912
1913         return VXGE_HW_OK;
1914 }
1915
1916 /*
1917  * __vxge_hw_device_vpath_config_check - Check vpath configuration.
1918  * Check the vpath configuration
1919  */
1920 enum vxge_hw_status
1921 __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
1922 {
1923         enum vxge_hw_status status;
1924
1925         if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
1926                 (vp_config->min_bandwidth >
1927                                         VXGE_HW_VPATH_BANDWIDTH_MAX))
1928                 return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
1929
1930         status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
1931         if (status != VXGE_HW_OK)
1932                 return status;
1933
1934         if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
1935                 ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
1936                 (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
1937                 return VXGE_HW_BADCFG_VPATH_MTU;
1938
1939         if ((vp_config->rpa_strip_vlan_tag !=
1940                 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
1941                 (vp_config->rpa_strip_vlan_tag !=
1942                 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
1943                 (vp_config->rpa_strip_vlan_tag !=
1944                 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
1945                 return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
1946
1947         return VXGE_HW_OK;
1948 }
1949
1950 /*
1951  * __vxge_hw_device_config_check - Check device configuration.
1952  * Check the device configuration
1953  */
1954 enum vxge_hw_status
1955 __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
1956 {
1957         u32 i;
1958         enum vxge_hw_status status;
1959
1960         if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
1961            (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
1962            (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
1963            (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
1964                 return VXGE_HW_BADCFG_INTR_MODE;
1965
1966         if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
1967            (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
1968                 return VXGE_HW_BADCFG_RTS_MAC_EN;
1969
1970         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1971                 status = __vxge_hw_device_vpath_config_check(
1972                                 &new_config->vp_config[i]);
1973                 if (status != VXGE_HW_OK)
1974                         return status;
1975         }
1976
1977         return VXGE_HW_OK;
1978 }
1979
1980 /*
1981  * vxge_hw_device_config_default_get - Initialize device config with defaults.
1982  * Initialize Titan device config with default values.
1983  */
1984 enum vxge_hw_status __devinit
1985 vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
1986 {
1987         u32 i;
1988
1989         device_config->dma_blockpool_initial =
1990                                         VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
1991         device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
1992         device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
1993         device_config->rth_en = VXGE_HW_RTH_DEFAULT;
1994         device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
1995         device_config->device_poll_millis =  VXGE_HW_DEF_DEVICE_POLL_MILLIS;
1996         device_config->rts_mac_en =  VXGE_HW_RTS_MAC_DEFAULT;
1997
1998         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1999
2000                 device_config->vp_config[i].vp_id = i;
2001
2002                 device_config->vp_config[i].min_bandwidth =
2003                                 VXGE_HW_VPATH_BANDWIDTH_DEFAULT;
2004
2005                 device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
2006
2007                 device_config->vp_config[i].ring.ring_blocks =
2008                                 VXGE_HW_DEF_RING_BLOCKS;
2009
2010                 device_config->vp_config[i].ring.buffer_mode =
2011                                 VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
2012
2013                 device_config->vp_config[i].ring.scatter_mode =
2014                                 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
2015
2016                 device_config->vp_config[i].ring.rxds_limit =
2017                                 VXGE_HW_DEF_RING_RXDS_LIMIT;
2018
2019                 device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
2020
2021                 device_config->vp_config[i].fifo.fifo_blocks =
2022                                 VXGE_HW_MIN_FIFO_BLOCKS;
2023
2024                 device_config->vp_config[i].fifo.max_frags =
2025                                 VXGE_HW_MAX_FIFO_FRAGS;
2026
2027                 device_config->vp_config[i].fifo.memblock_size =
2028                                 VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
2029
2030                 device_config->vp_config[i].fifo.alignment_size =
2031                                 VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
2032
2033                 device_config->vp_config[i].fifo.intr =
2034                                 VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
2035
2036                 device_config->vp_config[i].fifo.no_snoop_bits =
2037                                 VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
2038                 device_config->vp_config[i].tti.intr_enable =
2039                                 VXGE_HW_TIM_INTR_DEFAULT;
2040
2041                 device_config->vp_config[i].tti.btimer_val =
2042                                 VXGE_HW_USE_FLASH_DEFAULT;
2043
2044                 device_config->vp_config[i].tti.timer_ac_en =
2045                                 VXGE_HW_USE_FLASH_DEFAULT;
2046
2047                 device_config->vp_config[i].tti.timer_ci_en =
2048                                 VXGE_HW_USE_FLASH_DEFAULT;
2049
2050                 device_config->vp_config[i].tti.timer_ri_en =
2051                                 VXGE_HW_USE_FLASH_DEFAULT;
2052
2053                 device_config->vp_config[i].tti.rtimer_val =
2054                                 VXGE_HW_USE_FLASH_DEFAULT;
2055
2056                 device_config->vp_config[i].tti.util_sel =
2057                                 VXGE_HW_USE_FLASH_DEFAULT;
2058
2059                 device_config->vp_config[i].tti.ltimer_val =
2060                                 VXGE_HW_USE_FLASH_DEFAULT;
2061
2062                 device_config->vp_config[i].tti.urange_a =
2063                                 VXGE_HW_USE_FLASH_DEFAULT;
2064
2065                 device_config->vp_config[i].tti.uec_a =
2066                                 VXGE_HW_USE_FLASH_DEFAULT;
2067
2068                 device_config->vp_config[i].tti.urange_b =
2069                                 VXGE_HW_USE_FLASH_DEFAULT;
2070
2071                 device_config->vp_config[i].tti.uec_b =
2072                                 VXGE_HW_USE_FLASH_DEFAULT;
2073
2074                 device_config->vp_config[i].tti.urange_c =
2075                                 VXGE_HW_USE_FLASH_DEFAULT;
2076
2077                 device_config->vp_config[i].tti.uec_c =
2078                                 VXGE_HW_USE_FLASH_DEFAULT;
2079
2080                 device_config->vp_config[i].tti.uec_d =
2081                                 VXGE_HW_USE_FLASH_DEFAULT;
2082
2083                 device_config->vp_config[i].rti.intr_enable =
2084                                 VXGE_HW_TIM_INTR_DEFAULT;
2085
2086                 device_config->vp_config[i].rti.btimer_val =
2087                                 VXGE_HW_USE_FLASH_DEFAULT;
2088
2089                 device_config->vp_config[i].rti.timer_ac_en =
2090                                 VXGE_HW_USE_FLASH_DEFAULT;
2091
2092                 device_config->vp_config[i].rti.timer_ci_en =
2093                                 VXGE_HW_USE_FLASH_DEFAULT;
2094
2095                 device_config->vp_config[i].rti.timer_ri_en =
2096                                 VXGE_HW_USE_FLASH_DEFAULT;
2097
2098                 device_config->vp_config[i].rti.rtimer_val =
2099                                 VXGE_HW_USE_FLASH_DEFAULT;
2100
2101                 device_config->vp_config[i].rti.util_sel =
2102                                 VXGE_HW_USE_FLASH_DEFAULT;
2103
2104                 device_config->vp_config[i].rti.ltimer_val =
2105                                 VXGE_HW_USE_FLASH_DEFAULT;
2106
2107                 device_config->vp_config[i].rti.urange_a =
2108                                 VXGE_HW_USE_FLASH_DEFAULT;
2109
2110                 device_config->vp_config[i].rti.uec_a =
2111                                 VXGE_HW_USE_FLASH_DEFAULT;
2112
2113                 device_config->vp_config[i].rti.urange_b =
2114                                 VXGE_HW_USE_FLASH_DEFAULT;
2115
2116                 device_config->vp_config[i].rti.uec_b =
2117                                 VXGE_HW_USE_FLASH_DEFAULT;
2118
2119                 device_config->vp_config[i].rti.urange_c =
2120                                 VXGE_HW_USE_FLASH_DEFAULT;
2121
2122                 device_config->vp_config[i].rti.uec_c =
2123                                 VXGE_HW_USE_FLASH_DEFAULT;
2124
2125                 device_config->vp_config[i].rti.uec_d =
2126                                 VXGE_HW_USE_FLASH_DEFAULT;
2127
2128                 device_config->vp_config[i].mtu =
2129                                 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
2130
2131                 device_config->vp_config[i].rpa_strip_vlan_tag =
2132                         VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
2133         }
2134
2135         return VXGE_HW_OK;
2136 }
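
/*
 * Typical call sequence (illustrative only): obtain the defaults,
 * override selected fields, then validate before initializing the
 * device. The overrides shown here are hypothetical.
 *
 *	struct vxge_hw_device_config cfg;
 *	enum vxge_hw_status status;
 *
 *	vxge_hw_device_config_default_get(&cfg);
 *	cfg.intr_mode = VXGE_HW_INTR_MODE_MSIX;
 *	status = __vxge_hw_device_config_check(&cfg);
 *	if (status != VXGE_HW_OK)
 *		return status;
 */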
2137
2138 /*
2139  * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
2140  * Set the swapper bits appropriately for the legacy section.
2141  */
2142 enum vxge_hw_status
2143 __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
2144 {
2145         u64 val64;
2146         enum vxge_hw_status status = VXGE_HW_OK;
2147
2148         val64 = readq(&legacy_reg->toc_swapper_fb);
2149
2150         wmb();
2151
2152         switch (val64) {
2153
2154         case VXGE_HW_SWAPPER_INITIAL_VALUE:
2155                 return status;
2156
2157         case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
2158                 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
2159                         &legacy_reg->pifm_rd_swap_en);
2160                 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
2161                         &legacy_reg->pifm_rd_flip_en);
2162                 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
2163                         &legacy_reg->pifm_wr_swap_en);
2164                 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
2165                         &legacy_reg->pifm_wr_flip_en);
2166                 break;
2167
2168         case VXGE_HW_SWAPPER_BYTE_SWAPPED:
2169                 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
2170                         &legacy_reg->pifm_rd_swap_en);
2171                 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
2172                         &legacy_reg->pifm_wr_swap_en);
2173                 break;
2174
2175         case VXGE_HW_SWAPPER_BIT_FLIPPED:
2176                 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
2177                         &legacy_reg->pifm_rd_flip_en);
2178                 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
2179                         &legacy_reg->pifm_wr_flip_en);
2180                 break;
2181         }
2182
2183         wmb();
2184
2185         val64 = readq(&legacy_reg->toc_swapper_fb);
2186
2187         if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
2188                 status = VXGE_HW_ERR_SWAPPER_CTRL;
2189
2190         return status;
2191 }
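
/*
 * Informal note on the detection above: toc_swapper_fb holds a known
 * 64-bit pattern. If the host reads it back byte-swapped and/or
 * bit-flipped, the matching PIFM read/write swap controls are enabled
 * so that later accesses arrive in host order; the pattern is then
 * re-read to verify that the correction took effect.
 */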
2192
2193 /*
2194  * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
2195  * Set the swapper bits appropriately for the vpath.
2196  */
2197 enum vxge_hw_status
2198 __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
2199 {
2200 #ifndef __BIG_ENDIAN
2201         u64 val64;
2202
2203         val64 = readq(&vpath_reg->vpath_general_cfg1);
2204         wmb();
2205         val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
2206         writeq(val64, &vpath_reg->vpath_general_cfg1);
2207         wmb();
2208 #endif
2209         return VXGE_HW_OK;
2210 }
2211
2212 /*
2213  * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
2214  * Set the swapper bits appropriately for the kdfc.
2215  */
2216 enum vxge_hw_status
2217 __vxge_hw_kdfc_swapper_set(
2218         struct vxge_hw_legacy_reg __iomem *legacy_reg,
2219         struct vxge_hw_vpath_reg __iomem *vpath_reg)
2220 {
2221         u64 val64;
2222
2223         val64 = readq(&legacy_reg->pifm_wr_swap_en);
2224
2225         if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
2226                 val64 = readq(&vpath_reg->kdfcctl_cfg0);
2227                 wmb();
2228
2229                 val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
2230                         VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1  |
2231                         VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
2232
2233                 writeq(val64, &vpath_reg->kdfcctl_cfg0);
2234                 wmb();
2235         }
2236
2237         return VXGE_HW_OK;
2238 }
2239
2240 /*
2241  * vxge_hw_mgmt_device_config - Retrieve device configuration.
2242  * Get device configuration. Permits the caller to retrieve, at run time,
2243  * the configuration values that were used to initialize the device.
2244  */
2245 enum vxge_hw_status
2246 vxge_hw_mgmt_device_config(struct __vxge_hw_device *hldev,
2247                            struct vxge_hw_device_config *dev_config, int size)
2248 {
2249
2250         if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC))
2251                 return VXGE_HW_ERR_INVALID_DEVICE;
2252
2253         if (size != sizeof(struct vxge_hw_device_config))
2254                 return VXGE_HW_ERR_VERSION_CONFLICT;
2255
2256         memcpy(dev_config, &hldev->config,
2257                 sizeof(struct vxge_hw_device_config));
2258
2259         return VXGE_HW_OK;
2260 }
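
/*
 * Usage sketch (illustrative only): the size argument doubles as a
 * version check, so callers pass the size of their own view of the
 * structure.
 *
 *	struct vxge_hw_device_config cfg;
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_mgmt_device_config(hldev, &cfg, sizeof(cfg));
 *	if (status != VXGE_HW_OK)
 *		return status;
 */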
2261
2262 /*
2263  * vxge_hw_mgmt_reg_read - Read Titan register.
2264  */
2265 enum vxge_hw_status
2266 vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
2267                       enum vxge_hw_mgmt_reg_type type,
2268                       u32 index, u32 offset, u64 *value)
2269 {
2270         enum vxge_hw_status status = VXGE_HW_OK;
2271
2272         if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
2273                 status = VXGE_HW_ERR_INVALID_DEVICE;
2274                 goto exit;
2275         }
2276
2277         switch (type) {
2278         case vxge_hw_mgmt_reg_type_legacy:
2279                 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
2280                         status = VXGE_HW_ERR_INVALID_OFFSET;
2281                         break;
2282                 }
2283                 *value = readq((void __iomem *)hldev->legacy_reg + offset);
2284                 break;
2285         case vxge_hw_mgmt_reg_type_toc:
2286                 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
2287                         status = VXGE_HW_ERR_INVALID_OFFSET;
2288                         break;
2289                 }
2290                 *value = readq((void __iomem *)hldev->toc_reg + offset);
2291                 break;
2292         case vxge_hw_mgmt_reg_type_common:
2293                 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
2294                         status = VXGE_HW_ERR_INVALID_OFFSET;
2295                         break;
2296                 }
2297                 *value = readq((void __iomem *)hldev->common_reg + offset);
2298                 break;
2299         case vxge_hw_mgmt_reg_type_mrpcim:
2300                 if (!(hldev->access_rights &
2301                         VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
2302                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2303                         break;
2304                 }
2305                 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
2306                         status = VXGE_HW_ERR_INVALID_OFFSET;
2307                         break;
2308                 }
2309                 *value = readq((void __iomem *)hldev->mrpcim_reg + offset);
2310                 break;
2311         case vxge_hw_mgmt_reg_type_srpcim:
2312                 if (!(hldev->access_rights &
2313                         VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
2314                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2315                         break;
2316                 }
2317                 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
2318                         status = VXGE_HW_ERR_INVALID_INDEX;
2319                         break;
2320                 }
2321                 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
2322                         status = VXGE_HW_ERR_INVALID_OFFSET;
2323                         break;
2324                 }
2325                 *value = readq((void __iomem *)hldev->srpcim_reg[index] +
2326                                 offset);
2327                 break;
2328         case vxge_hw_mgmt_reg_type_vpmgmt:
2329                 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
2330                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2331                         status = VXGE_HW_ERR_INVALID_INDEX;
2332                         break;
2333                 }
2334                 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
2335                         status = VXGE_HW_ERR_INVALID_OFFSET;
2336                         break;
2337                 }
2338                 *value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
2339                                 offset);
2340                 break;
2341         case vxge_hw_mgmt_reg_type_vpath:
2342                 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
2343                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2344                         status = VXGE_HW_ERR_INVALID_INDEX;
2345                         break;
2346                 }
2351                 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
2352                         status = VXGE_HW_ERR_INVALID_OFFSET;
2353                         break;
2354                 }
2355                 *value = readq((void __iomem *)hldev->vpath_reg[index] +
2356                                 offset);
2357                 break;
2358         default:
2359                 status = VXGE_HW_ERR_INVALID_TYPE;
2360                 break;
2361         }
2362
2363 exit:
2364         return status;
2365 }
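
/*
 * Usage sketch (illustrative only): read the first 64-bit word of
 * vpath 0's register space through the managed accessor rather than a
 * raw readq; the offset is a byte offset bounds-checked against the
 * register block.
 *
 *	u64 val;
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_vpath,
 *				0, 0, &val);
 */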
2366
2367 /*
2368  * vxge_hw_mgmt_reg_write - Write Titan register.
2369  */
2370 enum vxge_hw_status
2371 vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
2372                       enum vxge_hw_mgmt_reg_type type,
2373                       u32 index, u32 offset, u64 value)
2374 {
2375         enum vxge_hw_status status = VXGE_HW_OK;
2376
2377         if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
2378                 status = VXGE_HW_ERR_INVALID_DEVICE;
2379                 goto exit;
2380         }
2381
2382         switch (type) {
2383         case vxge_hw_mgmt_reg_type_legacy:
2384                 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
2385                         status = VXGE_HW_ERR_INVALID_OFFSET;
2386                         break;
2387                 }
2388                 writeq(value, (void __iomem *)hldev->legacy_reg + offset);
2389                 break;
2390         case vxge_hw_mgmt_reg_type_toc:
2391                 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
2392                         status = VXGE_HW_ERR_INVALID_OFFSET;
2393                         break;
2394                 }
2395                 writeq(value, (void __iomem *)hldev->toc_reg + offset);
2396                 break;
2397         case vxge_hw_mgmt_reg_type_common:
2398                 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
2399                         status = VXGE_HW_ERR_INVALID_OFFSET;
2400                         break;
2401                 }
2402                 writeq(value, (void __iomem *)hldev->common_reg + offset);
2403                 break;
2404         case vxge_hw_mgmt_reg_type_mrpcim:
2405                 if (!(hldev->access_rights &
2406                         VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
2407                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2408                         break;
2409                 }
2410                 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
2411                         status = VXGE_HW_ERR_INVALID_OFFSET;
2412                         break;
2413                 }
2414                 writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
2415                 break;
2416         case vxge_hw_mgmt_reg_type_srpcim:
2417                 if (!(hldev->access_rights &
2418                         VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
2419                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2420                         break;
2421                 }
2422                 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
2423                         status = VXGE_HW_ERR_INVALID_INDEX;
2424                         break;
2425                 }
2426                 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
2427                         status = VXGE_HW_ERR_INVALID_OFFSET;
2428                         break;
2429                 }
2430                 writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
2431                         offset);
2432
2433                 break;
2434         case vxge_hw_mgmt_reg_type_vpmgmt:
2435                 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
2436                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2437                         status = VXGE_HW_ERR_INVALID_INDEX;
2438                         break;
2439                 }
2440                 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
2441                         status = VXGE_HW_ERR_INVALID_OFFSET;
2442                         break;
2443                 }
2444                 writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
2445                         offset);
2446                 break;
2447         case vxge_hw_mgmt_reg_type_vpath:
2448                 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) ||
2449                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2450                         status = VXGE_HW_ERR_INVALID_INDEX;
2451                         break;
2452                 }
2453                 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
2454                         status = VXGE_HW_ERR_INVALID_OFFSET;
2455                         break;
2456                 }
2457                 writeq(value, (void __iomem *)hldev->vpath_reg[index] +
2458                         offset);
2459                 break;
2460         default:
2461                 status = VXGE_HW_ERR_INVALID_TYPE;
2462                 break;
2463         }
2464 exit:
2465         return status;
2466 }
2467
2468 /*
2469  * __vxge_hw_fifo_mempool_item_alloc - Allocate list blocks for the TxD
2470  * list callback
2471  * This function is the callback passed to __vxge_hw_mempool_create to
2472  * create the memory pool for the TxD list
2473  */
2474 static void
2475 __vxge_hw_fifo_mempool_item_alloc(
2476         struct vxge_hw_mempool *mempoolh,
2477         u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
2478         u32 index, u32 is_last)
2479 {
2480         u32 memblock_item_idx;
2481         struct __vxge_hw_fifo_txdl_priv *txdl_priv;
2482         struct vxge_hw_fifo_txd *txdp =
2483                 (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
2484         struct __vxge_hw_fifo *fifo =
2485                         (struct __vxge_hw_fifo *)mempoolh->userdata;
2486         void *memblock = mempoolh->memblocks_arr[memblock_index];
2487
2488         vxge_assert(txdp);
2489
2490         txdp->host_control = (u64) (size_t)
2491                 __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
2492                                             &memblock_item_idx);
2493
2494         txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
2495
2496         vxge_assert(txdl_priv);
2497
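        /* Fill reserve_arr from the back: item 0 lands in the last slot,
         * which is the first entry the channel's reserve logic hands out. */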
2498         fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;
2499
2500         /* pre-format HW's TxDL's private */
2501         txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
2502         txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
2503         txdl_priv->dma_handle = dma_object->handle;
2504         txdl_priv->memblock   = memblock;
2505         txdl_priv->first_txdp = txdp;
2506         txdl_priv->next_txdl_priv = NULL;
2507         txdl_priv->alloc_frags = 0;
2510 }
2511
2512 /*
2513  * __vxge_hw_fifo_create - Create a FIFO
2514  * This function creates FIFO and initializes it.
2515  */
2516 enum vxge_hw_status
2517 __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
2518                       struct vxge_hw_fifo_attr *attr)
2519 {
2520         enum vxge_hw_status status = VXGE_HW_OK;
2521         struct __vxge_hw_fifo *fifo;
2522         struct vxge_hw_fifo_config *config;
2523         u32 txdl_size, txdl_per_memblock;
2524         struct vxge_hw_mempool_cbs fifo_mp_callback;
2525         struct __vxge_hw_virtualpath *vpath;
2526
2527         if ((vp == NULL) || (attr == NULL)) {
2528                 status = VXGE_HW_ERR_INVALID_HANDLE;
2529                 goto exit;
2530         }
2531         vpath = vp->vpath;
2532         config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;
2533
2534         txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);
2535
2536         txdl_per_memblock = config->memblock_size / txdl_size;
2537
2538         fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
2539                                         VXGE_HW_CHANNEL_TYPE_FIFO,
2540                                         config->fifo_blocks * txdl_per_memblock,
2541                                         attr->per_txdl_space, attr->userdata);
2542
2543         if (fifo == NULL) {
2544                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2545                 goto exit;
2546         }
2547
2548         vpath->fifoh = fifo;
2549         fifo->nofl_db = vpath->nofl_db;
2550
2551         fifo->vp_id = vpath->vp_id;
2552         fifo->vp_reg = vpath->vp_reg;
2553         fifo->stats = &vpath->sw_stats->fifo_stats;
2554
2555         fifo->config = config;
2556
2557         /* apply "interrupts per txdl" attribute */
2558         fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
2559
2560         if (fifo->config->intr)
2561                 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
2562
2563         fifo->no_snoop_bits = config->no_snoop_bits;
2564
2565         /*
2566          * FIFO memory management strategy:
2567          *
2568          * TxDL split into three independent parts:
2569          *      - set of TxD's
2570          *      - TxD HW private part
2571          *      - driver private part
2572          *
2573          * Adaptive memory allocation is used, i.e. memory is allocated
2574          * on demand in chunks that fit into one memory block; one memory
2575          * block may contain more than one TxDL.
2576          *
2577          * During "reserve" operations more memory can be allocated on
2578          * demand, for example due to a FIFO-full condition.
2579          *
2580          * The pool of memblocks never shrinks except in __vxge_hw_fifo_delete,
2581          * which essentially stops the channel and frees its resources.
2582          */
2583
2584         /* TxDL common private size == TxDL private  +  driver private */
2585         fifo->priv_size =
2586                 sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
2587         fifo->priv_size = ((fifo->priv_size  +  VXGE_CACHE_LINE_SIZE - 1) /
2588                         VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
2589
2590         fifo->per_txdl_space = attr->per_txdl_space;
2591
2592         /* record TxDL size and the number of TxDLs per memblock */
2593         fifo->txdl_size = txdl_size;
2594         fifo->txdl_per_memblock = txdl_per_memblock;
2595
2596         fifo->txdl_term = attr->txdl_term;
2597         fifo->callback = attr->callback;
2598
2599         if (fifo->txdl_per_memblock == 0) {
2600                 __vxge_hw_fifo_delete(vp);
2601                 status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
2602                 goto exit;
2603         }
2604
2605         fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
2606
2607         fifo->mempool =
2608                 __vxge_hw_mempool_create(vpath->hldev,
2609                         fifo->config->memblock_size,
2610                         fifo->txdl_size,
2611                         fifo->priv_size,
2612                         (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
2613                         (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
2614                         &fifo_mp_callback,
2615                         fifo);
2616
2617         if (fifo->mempool == NULL) {
2618                 __vxge_hw_fifo_delete(vp);
2619                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2620                 goto exit;
2621         }
2622
2623         status = __vxge_hw_channel_initialize(&fifo->channel);
2624         if (status != VXGE_HW_OK) {
2625                 __vxge_hw_fifo_delete(vp);
2626                 goto exit;
2627         }
2628
2629         vxge_assert(fifo->channel.reserve_ptr);
2630 exit:
2631         return status;
2632 }
2633
2634 /*
2635  * __vxge_hw_fifo_abort - Returns the TxDs
2636  * This function terminates the TxDs of the fifo
2637  */
2638 enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
2639 {
2640         void *txdlh;
2641
2642         for (;;) {
2643                 vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
2644
2645                 if (txdlh == NULL)
2646                         break;
2647
2648                 vxge_hw_channel_dtr_complete(&fifo->channel);
2649
2650                 if (fifo->txdl_term) {
2651                         fifo->txdl_term(txdlh,
2652                         VXGE_HW_TXDL_STATE_POSTED,
2653                         fifo->channel.userdata);
2654                 }
2655
2656                 vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
2657         }
2658
2659         return VXGE_HW_OK;
2660 }
2661
2662 /*
2663  * __vxge_hw_fifo_reset - Resets the fifo
2664  * This function resets the fifo during vpath reset operation
2665  */
2666 enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
2667 {
2668         enum vxge_hw_status status = VXGE_HW_OK;
2669
2670         __vxge_hw_fifo_abort(fifo);
2671         status = __vxge_hw_channel_reset(&fifo->channel);
2672
2673         return status;
2674 }
2675
2676 /*
2677  * __vxge_hw_fifo_delete - Removes the FIFO
2678  * This function frees up the memory pool and removes the FIFO
2679  */
2680 enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
2681 {
2682         struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
2683
2684         __vxge_hw_fifo_abort(fifo);
2685
2686         if (fifo->mempool)
2687                 __vxge_hw_mempool_destroy(fifo->mempool);
2688
2689         vp->vpath->fifoh = NULL;
2690
2691         __vxge_hw_channel_free(&fifo->channel);
2692
2693         return VXGE_HW_OK;
2694 }
2695
2696 /*
2697  * __vxge_hw_vpath_pci_read - Read the content of a given address
2698  *                          in PCI config space.
2699  * Read from the vpath PCI config space.
2700  */
2701 enum vxge_hw_status
2702 __vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
2703                          u32 phy_func_0, u32 offset, u32 *val)
2704 {
2705         u64 val64;
2706         enum vxge_hw_status status = VXGE_HW_OK;
2707         struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2708
2709         val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
2710
2711         if (phy_func_0)
2712                 val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
2713
2714         writeq(val64, &vp_reg->pci_config_access_cfg1);
2715         wmb();
2716         writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
2717                         &vp_reg->pci_config_access_cfg2);
2718         wmb();
2719
2720         status = __vxge_hw_device_register_poll(
2721                         &vp_reg->pci_config_access_cfg2,
2722                         VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2723
2724         if (status != VXGE_HW_OK)
2725                 goto exit;
2726
2727         val64 = readq(&vp_reg->pci_config_access_status);
2728
2729         if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
2730                 status = VXGE_HW_FAIL;
2731                 *val = 0;
2732         } else
2733                 *val = (u32)vxge_bVALn(val64, 32, 32);
2734 exit:
2735         return status;
2736 }
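
/*
 * Usage sketch (illustrative only; the offset is hypothetical): read a
 * 32-bit word from physical function 0's config space via the vpath
 * window.
 *
 *	u32 val;
 *	enum vxge_hw_status status;
 *
 *	status = __vxge_hw_vpath_pci_read(vpath, 1, 0x10, &val);
 */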
2737
2738 /*
2739  * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
2740  * Returns the function number of the vpath.
2741  */
2742 u32
2743 __vxge_hw_vpath_func_id_get(u32 vp_id,
2744         struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
2745 {
2746         u64 val64;
2747
2748         val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
2749
2750         return
2751          (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
2752 }
2753
2754 /*
2755  * __vxge_hw_read_rts_ds - Program RTS steering criteria
2756  */
2757 static inline void
2758 __vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
2759                       u64 dta_struct_sel)
2760 {
2761         writeq(0, &vpath_reg->rts_access_steer_ctrl);
2762         wmb();
2763         writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
2764         writeq(0, &vpath_reg->rts_access_steer_data1);
2765         wmb();
2767 }
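
/*
 * Informal summary: the card-info and firmware-version helpers below
 * share one pattern: select a memo item with __vxge_hw_read_rts_ds(),
 * start a read action through rts_access_steer_ctrl with the STROBE
 * bit set, poll until the strobe clears, then check RMACJ_STATUS
 * before trusting data0/data1.
 */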
2768
2769
2770 /*
2771  * __vxge_hw_vpath_card_info_get - Get the serial numbers,
2772  * part number and product description.
2773  */
2774 enum vxge_hw_status
2775 __vxge_hw_vpath_card_info_get(
2776         u32 vp_id,
2777         struct vxge_hw_vpath_reg __iomem *vpath_reg,
2778         struct vxge_hw_device_hw_info *hw_info)
2779 {
2780         u32 i, j;
2781         u64 val64;
2782         u64 data1 = 0ULL;
2783         u64 data2 = 0ULL;
2784         enum vxge_hw_status status = VXGE_HW_OK;
2785         u8 *serial_number = hw_info->serial_number;
2786         u8 *part_number = hw_info->part_number;
2787         u8 *product_desc = hw_info->product_desc;
2788
2789         __vxge_hw_read_rts_ds(vpath_reg,
2790                 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);
2791
2792         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2793                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2794                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2795                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2796                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2797                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2798
2799         status = __vxge_hw_pio_mem_write64(val64,
2800                                 &vpath_reg->rts_access_steer_ctrl,
2801                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2802                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2803
2804         if (status != VXGE_HW_OK)
2805                 return status;
2806
2807         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2808
2809         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2810                 data1 = readq(&vpath_reg->rts_access_steer_data0);
2811                 ((u64 *)serial_number)[0] = be64_to_cpu(data1);
2812
2813                 data2 = readq(&vpath_reg->rts_access_steer_data1);
2814                 ((u64 *)serial_number)[1] = be64_to_cpu(data2);
2815                 status = VXGE_HW_OK;
2816         } else
2817                 *serial_number = 0;
2818
2819         __vxge_hw_read_rts_ds(vpath_reg,
2820                         VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);
2821
2822         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2823                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2824                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2825                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2826                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2827                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2828
2829         status = __vxge_hw_pio_mem_write64(val64,
2830                                 &vpath_reg->rts_access_steer_ctrl,
2831                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2832                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2833
2834         if (status != VXGE_HW_OK)
2835                 return status;
2836
2837         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2838
2839         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2840
2841                 data1 = readq(&vpath_reg->rts_access_steer_data0);
2842                 ((u64 *)part_number)[0] = be64_to_cpu(data1);
2843
2844                 data2 = readq(&vpath_reg->rts_access_steer_data1);
2845                 ((u64 *)part_number)[1] = be64_to_cpu(data2);
2846
2847                 status = VXGE_HW_OK;
2848
2849         } else
2850                 *part_number = 0;
2851
2852         j = 0;
2853
2854         for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
2855              i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
2856
2857                 __vxge_hw_read_rts_ds(vpath_reg, i);
2858
2859                 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2860                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2861                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2862                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2863                         VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2864                         VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2865
2866                 status = __vxge_hw_pio_mem_write64(val64,
2867                                 &vpath_reg->rts_access_steer_ctrl,
2868                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2869                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2870
2871                 if (status != VXGE_HW_OK)
2872                         return status;
2873
2874                 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2875
2876                 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2877
2878                         data1 = readq(&vpath_reg->rts_access_steer_data0);
2879                         ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
2880
2881                         data2 = readq(&vpath_reg->rts_access_steer_data1);
2882                         ((u64 *)product_desc)[j++] = be64_to_cpu(data2);
2883
2884                         status = VXGE_HW_OK;
2885                 } else
2886                         *product_desc = 0;
2887         }
2888
2889         return status;
2890 }
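
/*
 * The serial-number, part-number and product-description reads above all
 * repeat one sequence: select a memo item with __vxge_hw_read_rts_ds(),
 * fire a READ_MEMO_ENTRY action, poll the strobe, then copy data0/data1
 * out when RMACJ_STATUS is set.  A minimal sketch of that sequence
 * factored into a helper (the helper name is illustrative only and is
 * not a symbol of this driver):
 */
#if 0	/* illustrative sketch, not compiled */
static enum vxge_hw_status
__vxge_hw_rts_memo_read_sketch(struct vxge_hw_vpath_reg __iomem *vpath_reg,
			       u32 item, u64 out[2])
{
	u64 val64;
	enum vxge_hw_status status;

	__vxge_hw_read_rts_ds(vpath_reg, item);

	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);

	status = __vxge_hw_pio_mem_write64(val64,
				&vpath_reg->rts_access_steer_ctrl,
				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
				VXGE_HW_DEF_DEVICE_POLL_MILLIS);
	if (status != VXGE_HW_OK)
		return status;

	val64 = readq(&vpath_reg->rts_access_steer_ctrl);
	if (!(val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS))
		return VXGE_HW_FAIL;

	/* memo payloads arrive big-endian, as in the callers above */
	out[0] = be64_to_cpu(readq(&vpath_reg->rts_access_steer_data0));
	out[1] = be64_to_cpu(readq(&vpath_reg->rts_access_steer_data1));
	return VXGE_HW_OK;
}
#endif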
2891
2892 /*
2893  * __vxge_hw_vpath_fw_ver_get - Get the firmware version
2894  * Returns the firmware and flash versions along with their build dates
2895  */
2896 enum vxge_hw_status
2897 __vxge_hw_vpath_fw_ver_get(
2898         u32 vp_id,
2899         struct vxge_hw_vpath_reg __iomem *vpath_reg,
2900         struct vxge_hw_device_hw_info *hw_info)
2901 {
2902         u64 val64;
2903         u64 data1 = 0ULL;
2904         u64 data2 = 0ULL;
2905         struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
2906         struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
2907         struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
2908         struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
2909         enum vxge_hw_status status = VXGE_HW_OK;
2910
2911         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2912                 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
2913                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2914                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2915                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2916                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2917
2918         status = __vxge_hw_pio_mem_write64(val64,
2919                                 &vpath_reg->rts_access_steer_ctrl,
2920                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2921                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2922
2923         if (status != VXGE_HW_OK)
2924                 goto exit;
2925
2926         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2927
2928         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2929
2930                 data1 = readq(&vpath_reg->rts_access_steer_data0);
2931                 data2 = readq(&vpath_reg->rts_access_steer_data1);
2932
2933                 fw_date->day =
2934                         (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
2935                                                 data1);
2936                 fw_date->month =
2937                         (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
2938                                                 data1);
2939                 fw_date->year =
2940                         (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
2941                                                 data1);
2942
2943                 snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
2944                         fw_date->month, fw_date->day, fw_date->year);
2945
2946                 fw_version->major =
2947                     (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
2948                 fw_version->minor =
2949                     (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
2950                 fw_version->build =
2951                     (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);
2952
2953                 snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2954                     fw_version->major, fw_version->minor, fw_version->build);
2955
2956                 flash_date->day =
2957                   (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
2958                 flash_date->month =
2959                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
2960                 flash_date->year =
2961                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);
2962
2963                 snprintf(flash_date->date, VXGE_HW_FW_STRLEN,
2964                         "%2.2d/%2.2d/%4.4d",
2965                         flash_date->month, flash_date->day, flash_date->year);
2966
2967                 flash_version->major =
2968                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
2969                 flash_version->minor =
2970                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
2971                 flash_version->build =
2972                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);
2973
2974                 snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2975                         flash_version->major, flash_version->minor,
2976                         flash_version->build);
2977
2978                 status = VXGE_HW_OK;
2979
2980         } else
2981                 status = VXGE_HW_FAIL;
2982 exit:
2983         return status;
2984 }
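
/*
 * A caller that only needs a printable firmware banner can consume the
 * hw_info fields filled in above.  A minimal sketch; the printk format
 * and the surrounding probe context are illustrative, not part of this
 * file:
 */
#if 0	/* illustrative sketch, not compiled */
static void vxge_print_fw_banner_sketch(u32 vp_id,
		struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
	struct vxge_hw_device_hw_info hw_info;

	if (__vxge_hw_vpath_fw_ver_get(vp_id, vpath_reg, &hw_info) !=
			VXGE_HW_OK)
		return;

	printk(KERN_INFO "vxge: fw %s (%s), flash %s (%s)\n",
		hw_info.fw_version.version, hw_info.fw_date.date,
		hw_info.flash_version.version, hw_info.flash_date.date);
}
#endif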
2985
2986 /*
2987  * __vxge_hw_vpath_pci_func_mode_get - Get the PCI function mode
2988  * Returns the PCI function mode
2989  */
2990 u64
2991 __vxge_hw_vpath_pci_func_mode_get(
2992         u32  vp_id,
2993         struct vxge_hw_vpath_reg __iomem *vpath_reg)
2994 {
2995         u64 val64;
2996         u64 data1 = 0ULL;
2997         enum vxge_hw_status status = VXGE_HW_OK;
2998
2999         __vxge_hw_read_rts_ds(vpath_reg,
3000                 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE);
3001
3002         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
3003                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
3004                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3005                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
3006                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3007                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3008
3009         status = __vxge_hw_pio_mem_write64(val64,
3010                                 &vpath_reg->rts_access_steer_ctrl,
3011                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3012                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3013
3014         if (status != VXGE_HW_OK)
3015                 goto exit;
3016
3017         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
3018
3019         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
3020                 data1 = readq(&vpath_reg->rts_access_steer_data0);
3021                 status = VXGE_HW_OK;
3022         } else {
3023                 data1 = 0;
3024                 status = VXGE_HW_FAIL;
3025         }
3026 exit:
3027         return data1;
3028 }
3029
3030 /**
3031  * vxge_hw_device_flick_link_led - Flick (blink) link LED.
3032  * @hldev: HW device.
3033  * @on_off: TRUE to turn flickering on, FALSE to turn it off
3034  *
3035  * Flicker the link LED.
3036  */
3037 enum vxge_hw_status
3038 vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev,
3039                                u64 on_off)
3040 {
3041         u64 val64;
3042         enum vxge_hw_status status = VXGE_HW_OK;
3043         struct vxge_hw_vpath_reg __iomem *vp_reg;
3044
3045         if (hldev == NULL) {
3046                 status = VXGE_HW_ERR_INVALID_DEVICE;
3047                 goto exit;
3048         }
3049
3050         vp_reg = hldev->vpath_reg[hldev->first_vp_id];
3051
3052         writeq(0, &vp_reg->rts_access_steer_ctrl);
3053         wmb();
3054         writeq(on_off, &vp_reg->rts_access_steer_data0);
3055         writeq(0, &vp_reg->rts_access_steer_data1);
3056         wmb();
3057
3058         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
3059                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) |
3060                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3061                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
3062                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3063                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3064
3065         status = __vxge_hw_pio_mem_write64(val64,
3066                                 &vp_reg->rts_access_steer_ctrl,
3067                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3068                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3069 exit:
3070         return status;
3071 }
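
/*
 * Usage sketch for port identification: start the blink, give the
 * operator time to locate the adapter, then stop it.  The bare 1/0
 * values follow the on_off convention above; callers may wrap them in
 * named constants (none are defined in this file), and msleep() assumes
 * <linux/delay.h>.
 */
#if 0	/* illustrative sketch, not compiled */
static void vxge_identify_port_sketch(struct __vxge_hw_device *hldev)
{
	vxge_hw_device_flick_link_led(hldev, 1);	/* blink on */
	msleep(5000);					/* locate the port */
	vxge_hw_device_flick_link_led(hldev, 0);	/* blink off */
}
#endif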
3072
3073 /*
3074  * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
3075  */
3076 enum vxge_hw_status
3077 __vxge_hw_vpath_rts_table_get(
3078         struct __vxge_hw_vpath_handle *vp,
3079         u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2)
3080 {
3081         u64 val64;
3082         struct __vxge_hw_virtualpath *vpath;
3083         struct vxge_hw_vpath_reg __iomem *vp_reg;
3084
3085         enum vxge_hw_status status = VXGE_HW_OK;
3086
3087         if (vp == NULL) {
3088                 status = VXGE_HW_ERR_INVALID_HANDLE;
3089                 goto exit;
3090         }
3091
3092         vpath = vp->vpath;
3093         vp_reg = vpath->vp_reg;
3094
3095         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
3096                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
3097                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3098                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
3099
3100         if ((rts_table ==
3101                 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
3102             (rts_table ==
3103                 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
3104             (rts_table ==
3105                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
3106             (rts_table ==
3107                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
3108                 val64 = val64 | VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
3109         }
3110
3111         status = __vxge_hw_pio_mem_write64(val64,
3112                                 &vp_reg->rts_access_steer_ctrl,
3113                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3114                                 vpath->hldev->config.device_poll_millis);
3115
3116         if (status != VXGE_HW_OK)
3117                 goto exit;
3118
3119         val64 = readq(&vp_reg->rts_access_steer_ctrl);
3120
3121         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
3122
3123                 *data1 = readq(&vp_reg->rts_access_steer_data0);
3124
3125                 if ((rts_table ==
3126                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3127                 (rts_table ==
3128                 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
3129                         *data2 = readq(&vp_reg->rts_access_steer_data1);
3130                 }
3131                 status = VXGE_HW_OK;
3132         } else
3133                 status = VXGE_HW_FAIL;
3134 exit:
3135         return status;
3136 }
3137
3138 /*
3139  * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
3140  */
3141 enum vxge_hw_status
3142 __vxge_hw_vpath_rts_table_set(
3143         struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table,
3144         u32 offset, u64 data1, u64 data2)
3145 {
3146         u64 val64;
3147         struct __vxge_hw_virtualpath *vpath;
3148         enum vxge_hw_status status = VXGE_HW_OK;
3149         struct vxge_hw_vpath_reg __iomem *vp_reg;
3150
3151         if (vp == NULL) {
3152                 status = VXGE_HW_ERR_INVALID_HANDLE;
3153                 goto exit;
3154         }
3155
3156         vpath = vp->vpath;
3157         vp_reg = vpath->vp_reg;
3158
3159         writeq(data1, &vp_reg->rts_access_steer_data0);
3160         wmb();
3161
3162         if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3163             (rts_table ==
3164                 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
3165                 writeq(data2, &vp_reg->rts_access_steer_data1);
3166                 wmb();
3167         }
3168
3169         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
3170                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
3171                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3172                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
3173
3174         status = __vxge_hw_pio_mem_write64(val64,
3175                                 &vp_reg->rts_access_steer_ctrl,
3176                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3177                                 vpath->hldev->config.device_poll_millis);
3178
3179         if (status != VXGE_HW_OK)
3180                 goto exit;
3181
3182         val64 = readq(&vp_reg->rts_access_steer_ctrl);
3183
3184         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
3185                 status = VXGE_HW_OK;
3186         else
3187                 status = VXGE_HW_FAIL;
3188 exit:
3189         return status;
3190 }
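
/*
 * The get/set pair above is normally used read-modify-write style:
 * read an entry, adjust some bits, write it back with WRITE_ENTRY.
 * vxge_hw_vpath_rts_rth_set() below does exactly this for RTH_GEN_CFG;
 * a minimal generic sketch of the same pattern:
 */
#if 0	/* illustrative sketch, not compiled */
static enum vxge_hw_status
vxge_rts_entry_rmw_sketch(struct __vxge_hw_vpath_handle *vp,
			  u32 rts_table, u32 offset, u64 set_bits)
{
	u64 data1 = 0, data2 = 0;
	enum vxge_hw_status status;

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
			rts_table, offset, &data1, &data2);
	if (status != VXGE_HW_OK)
		return status;

	data1 |= set_bits;

	return __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
			rts_table, offset, data1, data2);
}
#endif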
3191
3192 /*
3193  * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
3194  *               from the MAC address table.
3195  */
3196 enum vxge_hw_status
3197 __vxge_hw_vpath_addr_get(
3198         u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
3199         u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
3200 {
3201         u32 i;
3202         u64 val64;
3203         u64 data1 = 0ULL;
3204         u64 data2 = 0ULL;
3205         enum vxge_hw_status status = VXGE_HW_OK;
3206
3207         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
3208                 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) |
3209                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3210                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
3211                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3212                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3213
3214         status = __vxge_hw_pio_mem_write64(val64,
3215                                 &vpath_reg->rts_access_steer_ctrl,
3216                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3217                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3218
3219         if (status != VXGE_HW_OK)
3220                 goto exit;
3221
3222         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
3223
3224         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
3225
3226                 data1 = readq(&vpath_reg->rts_access_steer_data0);
3227                 data2 = readq(&vpath_reg->rts_access_steer_data1);
3228
3229                 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
3230                 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
3231                                                         data2);
3232
3233                 for (i = ETH_ALEN; i > 0; i--) {
3234                         macaddr[i-1] = (u8)(data1 & 0xFF);
3235                         data1 >>= 8;
3236
3237                         macaddr_mask[i-1] = (u8)(data2 & 0xFF);
3238                         data2 >>= 8;
3239                 }
3240                 status = VXGE_HW_OK;
3241         } else
3242                 status = VXGE_HW_FAIL;
3243 exit:
3244         return status;
3245 }
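
/*
 * Usage sketch: read the first DA table entry for a vpath and log it.
 * The %pM printk extension is assumed to be available in this kernel.
 */
#if 0	/* illustrative sketch, not compiled */
static void vxge_log_vpath_mac_sketch(u32 vp_id,
		struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
	u8 mac[ETH_ALEN], mac_mask[ETH_ALEN];

	if (__vxge_hw_vpath_addr_get(vp_id, vpath_reg, mac, mac_mask) ==
			VXGE_HW_OK)
		printk(KERN_INFO "vxge: vpath %u mac %pM\n", vp_id, mac);
}
#endif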
3246
3247 /*
3248  * vxge_hw_vpath_rts_rth_set - Set/configure RTH (receive traffic hashing).
3249  */
3250 enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
3251                         struct __vxge_hw_vpath_handle *vp,
3252                         enum vxge_hw_rth_algoritms algorithm,
3253                         struct vxge_hw_rth_hash_types *hash_type,
3254                         u16 bucket_size)
3255 {
3256         u64 data0, data1;
3257         enum vxge_hw_status status = VXGE_HW_OK;
3258
3259         if (vp == NULL) {
3260                 status = VXGE_HW_ERR_INVALID_HANDLE;
3261                 goto exit;
3262         }
3263
3264         status = __vxge_hw_vpath_rts_table_get(vp,
3265                      VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
3266                      VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3267                         0, &data0, &data1);
3268
3269         data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
3270                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
3271
3272         data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
3273         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
3274         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);
3275
3276         if (hash_type->hash_type_tcpipv4_en)
3277                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;
3278
3279         if (hash_type->hash_type_ipv4_en)
3280                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;
3281
3282         if (hash_type->hash_type_tcpipv6_en)
3283                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;
3284
3285         if (hash_type->hash_type_ipv6_en)
3286                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;
3287
3288         if (hash_type->hash_type_tcpipv6ex_en)
3289                 data0 |=
3290                 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;
3291
3292         if (hash_type->hash_type_ipv6ex_en)
3293                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;
3294
3295         if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
3296                 data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3297         else
3298                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3299
3300         status = __vxge_hw_vpath_rts_table_set(vp,
3301                 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
3302                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3303                 0, data0, 0);
3304 exit:
3305         return status;
3306 }
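
/*
 * Usage sketch enabling RTH for IPv4 and TCP/IPv4 flows with the
 * Jenkins hash.  RTH_ALG_JENKINS is assumed to come from
 * vxge-traffic.h, and bucket_size is assumed here to be the log2
 * width of the bucket space (the field above is masked to 4 bits);
 * treat both as assumptions, not guarantees.
 */
#if 0	/* illustrative sketch, not compiled */
static enum vxge_hw_status
vxge_enable_rth_sketch(struct __vxge_hw_vpath_handle *vp)
{
	struct vxge_hw_rth_hash_types hash = { 0 };

	hash.hash_type_ipv4_en = 1;
	hash.hash_type_tcpipv4_en = 1;

	return vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS, &hash, 8);
}
#endif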
3307
3308 static void
3309 vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
3310                                 u16 flag, u8 *itable)
3311 {
3312         switch (flag) {
3313         case 1:
3314                 *data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
3315                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
3316                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
3317                         itable[j]);
                break;
3318         case 2:
3319                 *data0 |=
3320                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
3321                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
3322                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
3323                         itable[j]);
                break;
3324         case 3:
3325                 *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
3326                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
3327                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
3328                         itable[j]);
                break;
3329         case 4:
3330                 *data1 |=
3331                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
3332                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
3333                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
3334                         itable[j]);
                break;
3335         default:
3336                 return;
3337         }
3338 }
3339 /*
3340  * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
3341  */
3342 enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
3343                         struct __vxge_hw_vpath_handle **vpath_handles,
3344                         u32 vpath_count,
3345                         u8 *mtable,
3346                         u8 *itable,
3347                         u32 itable_size)
3348 {
3349         u32 i, j, action, rts_table;
3350         u64 data0;
3351         u64 data1;
3352         u32 max_entries;
3353         enum vxge_hw_status status = VXGE_HW_OK;
3354         struct __vxge_hw_vpath_handle *vp = vpath_handles[0];
3355
3356         if (vp == NULL) {
3357                 status = VXGE_HW_ERR_INVALID_HANDLE;
3358                 goto exit;
3359         }
3360
3361         max_entries = (((u32)1) << itable_size);
3362
3363         if (vp->vpath->hldev->config.rth_it_type
3364                                 == VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
3365                 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3366                 rts_table =
3367                         VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;
3368
3369                 for (j = 0; j < max_entries; j++) {
3370
3371                         data1 = 0;
3372
3373                         data0 =
3374                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3375                                 itable[j]);
3376
3377                         status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
3378                                 action, rts_table, j, data0, data1);
3379
3380                         if (status != VXGE_HW_OK)
3381                                 goto exit;
3382                 }
3383
3384                 for (j = 0; j < max_entries; j++) {
3385
3386                         data1 = 0;
3387
3388                         data0 =
3389                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
3390                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3391                                 itable[j]);
3392
3393                         status = __vxge_hw_vpath_rts_table_set(
3394                                 vpath_handles[mtable[itable[j]]], action,
3395                                 rts_table, j, data0, data1);
3396
3397                         if (status != VXGE_HW_OK)
3398                                 goto exit;
3399                 }
3400         } else {
3401                 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3402                 rts_table =
3403                         VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
3404                 for (i = 0; i < vpath_count; i++) {
3405
3406                         for (j = 0; j < max_entries;) {
3407
3408                                 data0 = 0;
3409                                 data1 = 0;
3410
3411                                 while (j < max_entries) {
3412                                         if (mtable[itable[j]] != i) {
3413                                                 j++;
3414                                                 continue;
3415                                         }
3416                                         vxge_hw_rts_rth_data0_data1_get(j,
3417                                                 &data0, &data1, 1, itable);
3418                                         j++;
3419                                         break;
3420                                 }
3421
3422                                 while (j < max_entries) {
3423                                         if (mtable[itable[j]] != i) {
3424                                                 j++;
3425                                                 continue;
3426                                         }
3427                                         vxge_hw_rts_rth_data0_data1_get(j,
3428                                                 &data0, &data1, 2, itable);
3429                                         j++;
3430                                         break;
3431                                 }
3432
3433                                 while (j < max_entries) {
3434                                         if (mtable[itable[j]] != i) {
3435                                                 j++;
3436                                                 continue;
3437                                         }
3438                                         vxge_hw_rts_rth_data0_data1_get(j,
3439                                                 &data0, &data1, 3, itable);
3440                                         j++;
3441                                         break;
3442                                 }
3443
3444                                 while (j < max_entries) {
3445                                         if (mtable[itable[j]] != i) {
3446                                                 j++;
3447                                                 continue;
3448                                         }
3449                                         vxge_hw_rts_rth_data0_data1_get(j,
3450                                                 &data0, &data1, 4, itable);
3451                                         j++;
3452                                         break;
3453                                 }
3454
3455                                 if (data0 != 0) {
3456                                         status = __vxge_hw_vpath_rts_table_set(
3457                                                         vpath_handles[i],
3458                                                         action, rts_table,
3459                                                         0, data0, data1);
3460
3461                                         if (status != VXGE_HW_OK)
3462                                                 goto exit;
3463                                 }
3464                         }
3465                 }
3466         }
3467 exit:
3468         return status;
3469 }
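
/*
 * Usage sketch: build an identity indirection table spread evenly
 * across the open vpaths and program it.  The function above indexes
 * mtable by bucket data (mtable[itable[j]]), so mtable maps bucket
 * values to vpath indices; the table size here is illustrative.
 */
#if 0	/* illustrative sketch, not compiled */
static enum vxge_hw_status
vxge_program_itable_sketch(struct __vxge_hw_vpath_handle **vps,
			   u32 vpath_count)
{
	static u8 mtable[256], itable[256];
	u32 itable_size = 8;		/* 1 << 8 = 256 entries */
	u32 j;

	if (vpath_count == 0)
		return VXGE_HW_FAIL;

	for (j = 0; j < (1U << itable_size); j++) {
		itable[j] = (u8)j;
		mtable[j] = (u8)(j % vpath_count);
	}

	return vxge_hw_vpath_rts_rth_itable_set(vps, vpath_count,
						mtable, itable, itable_size);
}
#endif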
3470
3471 /**
3472  * vxge_hw_vpath_check_leak - Check for memory leak
3473  * @ringh: Handle to the ring object used for receive
3474  *
3475  * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is larger or equal to
3476  * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
3477  * Returns: VXGE_HW_FAIL, if leak has occurred.
3478  *
3479  */
3480 enum vxge_hw_status
3481 vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
3482 {
3483         enum vxge_hw_status status = VXGE_HW_OK;
3484         u64 rxd_new_count, rxd_spat;
3485
3486         if (ring == NULL)
3487                 return status;
3488
3489         rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
3490         rxd_spat = readq(&ring->vp_reg->prc_cfg6);
3491         rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);
3492
3493         if (rxd_new_count >= rxd_spat)
3494                 status = VXGE_HW_FAIL;
3495
3496         return status;
3497 }
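
/*
 * Usage sketch: a slow-path watchdog can poll the leak check and flag
 * the ring for recovery when it trips; the recovery step itself is
 * left hypothetical here.
 */
#if 0	/* illustrative sketch, not compiled */
static bool vxge_ring_leaked_sketch(struct __vxge_hw_ring *ring)
{
	/* VXGE_HW_FAIL here means NEW_QW_CNT has reached RXD_SPAT */
	return vxge_hw_vpath_check_leak(ring) == VXGE_HW_FAIL;
}
#endif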
3498
3499 /*
3500  * __vxge_hw_vpath_mgmt_read
3501  * This routine reads the vpath_mgmt registers
3502  */
3503 static enum vxge_hw_status
3504 __vxge_hw_vpath_mgmt_read(
3505         struct __vxge_hw_device *hldev,
3506         struct __vxge_hw_virtualpath *vpath)
3507 {
3508         u32 i, mtu = 0, max_pyld = 0;
3509         u64 val64;
3510         enum vxge_hw_status status = VXGE_HW_OK;
3511
3512         for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
3513
3514                 val64 = readq(&vpath->vpmgmt_reg->
3515                                 rxmac_cfg0_port_vpmgmt_clone[i]);
3516                 max_pyld =
3517                         (u32)
3518                         VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
3519                         (val64);
3520                 if (mtu < max_pyld)
3521                         mtu = max_pyld;
3522         }
3523
3524         vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
3525
3526         val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
3527
3528         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3529                 if (val64 & vxge_mBIT(i))
3530                         vpath->vsport_number = i;
3531         }
3532
3533         val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
3534
3535         if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
3536                 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
3537         else
3538                 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
3539
3540         return status;
3541 }
3542
3543 /*
3544  * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
3545  * This routine checks the vpath_rst_in_prog register to see if
3546  * adapter completed the reset process for the vpath
3547  */
3548 enum vxge_hw_status
3549 __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
3550 {
3551         enum vxge_hw_status status;
3552
3553         status = __vxge_hw_device_register_poll(
3554                         &vpath->hldev->common_reg->vpath_rst_in_prog,
3555                         VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
3556                                 1 << (16 - vpath->vp_id)),
3557                         vpath->hldev->config.device_poll_millis);
3558
3559         return status;
3560 }
3561
3562 /*
3563  * __vxge_hw_vpath_reset
3564  * This routine resets the vpath on the device
3565  */
3566 enum vxge_hw_status
3567 __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3568 {
3569         u64 val64;
3570         enum vxge_hw_status status = VXGE_HW_OK;
3571
3572         val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
3573
3574         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
3575                                 &hldev->common_reg->cmn_rsthdlr_cfg0);
3576
3577         return status;
3578 }
3579
3580 /*
3581  * __vxge_hw_vpath_sw_reset
3582  * This routine resets the vpath structures
3583  */
3584 enum vxge_hw_status
3585 __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3586 {
3587         enum vxge_hw_status status = VXGE_HW_OK;
3588         struct __vxge_hw_virtualpath *vpath;
3589
3590         vpath = &hldev->virtual_paths[vp_id];
3591
3592         if (vpath->ringh) {
3593                 status = __vxge_hw_ring_reset(vpath->ringh);
3594                 if (status != VXGE_HW_OK)
3595                         goto exit;
3596         }
3597
3598         if (vpath->fifoh)
3599                 status = __vxge_hw_fifo_reset(vpath->fifoh);
3600 exit:
3601         return status;
3602 }
3603
3604 /*
3605  * __vxge_hw_vpath_prc_configure
3606  * This routine configures the PRC registers of the virtual path using
3607  * the configuration passed
3608  */
3609 void
3610 __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3611 {
3612         u64 val64;
3613         struct __vxge_hw_virtualpath *vpath;
3614         struct vxge_hw_vp_config *vp_config;
3615         struct vxge_hw_vpath_reg __iomem *vp_reg;
3616
3617         vpath = &hldev->virtual_paths[vp_id];
3618         vp_reg = vpath->vp_reg;
3619         vp_config = vpath->vp_config;
3620
3621         if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
3622                 return;
3623
3624         val64 = readq(&vp_reg->prc_cfg1);
3625         val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
3626         writeq(val64, &vp_reg->prc_cfg1);
3627
3628         val64 = readq(&vpath->vp_reg->prc_cfg6);
3629         val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
3630         writeq(val64, &vpath->vp_reg->prc_cfg6);
3631
3632         val64 = readq(&vp_reg->prc_cfg7);
3633
3634         if (vpath->vp_config->ring.scatter_mode !=
3635                 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {
3636
3637                 val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);
3638
3639                 switch (vpath->vp_config->ring.scatter_mode) {
3640                 case VXGE_HW_RING_SCATTER_MODE_A:
3641                         val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3642                                         VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
3643                         break;
3644                 case VXGE_HW_RING_SCATTER_MODE_B:
3645                         val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3646                                         VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
3647                         break;
3648                 case VXGE_HW_RING_SCATTER_MODE_C:
3649                         val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3650                                         VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
3651                         break;
3652                 }
3653         }
3654
3655         writeq(val64, &vp_reg->prc_cfg7);
3656
3657         writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
3658                                 __vxge_hw_ring_first_block_address_get(
3659                                         vpath->ringh) >> 3), &vp_reg->prc_cfg5);
3660
3661         val64 = readq(&vp_reg->prc_cfg4);
3662         val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
3663         val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
3664
3665         val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
3666                         VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
3667
3668         if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
3669                 val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
3670         else
3671                 val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
3672
3673         writeq(val64, &vp_reg->prc_cfg4);
3675 }
3676
3677 /*
3678  * __vxge_hw_vpath_kdfc_configure
3679  * This routine configures the KDFC registers of the virtual path using
3680  * the configuration passed
3681  */
3682 enum vxge_hw_status
3683 __vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3684 {
3685         u64 val64;
3686         u64 vpath_stride;
3687         enum vxge_hw_status status = VXGE_HW_OK;
3688         struct __vxge_hw_virtualpath *vpath;
3689         struct vxge_hw_vpath_reg __iomem *vp_reg;
3690
3691         vpath = &hldev->virtual_paths[vp_id];
3692         vp_reg = vpath->vp_reg;
3693         status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
3694
3695         if (status != VXGE_HW_OK)
3696                 goto exit;
3697
3698         val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
3699
3700         vpath->max_kdfc_db =
3701                 (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
3702                         val64+1)/2;
3703
3704         if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3705
3706                 vpath->max_nofl_db = vpath->max_kdfc_db;
3707
3708                 if (vpath->max_nofl_db <
3709                         ((vpath->vp_config->fifo.memblock_size /
3710                         (vpath->vp_config->fifo.max_frags *
3711                         sizeof(struct vxge_hw_fifo_txd))) *
3712                         vpath->vp_config->fifo.fifo_blocks)) {
3713
3714                         return VXGE_HW_BADCFG_FIFO_BLOCKS;
3715                 }
3716                 val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
3717                                 (vpath->max_nofl_db*2)-1);
3718         }
3719
3720         writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
3721
3722         writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
3723                 &vp_reg->kdfc_fifo_trpl_ctrl);
3724
3725         val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
3726
3727         val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
3728                    VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
3729
3730         val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
3731                  VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
3732 #ifndef __BIG_ENDIAN
3733                  VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
3734 #endif
3735                  VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
3736
3737         writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
3738         writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
3739         wmb();
3740         vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
3741
3742         vpath->nofl_db =
3743                 (struct __vxge_hw_non_offload_db_wrapper __iomem *)
3744                 (hldev->kdfc + (vp_id *
3745                 VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
3746                                         vpath_stride)));
3747 exit:
3748         return status;
3749 }
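
/*
 * Worked example of the doorbell bound checked above, with purely
 * illustrative numbers: memblock_size = 8192, max_frags = 16 and an
 * assumed 32-byte vxge_hw_fifo_txd give 8192 / (16 * 32) = 16 TxDs per
 * memory block, so fifo_blocks = 32 needs 16 * 32 = 512 non-offload
 * doorbells; if max_nofl_db is below that, the routine returns
 * VXGE_HW_BADCFG_FIFO_BLOCKS.
 */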
3750
3751 /*
3752  * __vxge_hw_vpath_mac_configure
3753  * This routine configures the MAC of a virtual path using the config passed
3754  */
3755 enum vxge_hw_status
3756 __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3757 {
3758         u64 val64;
3759         enum vxge_hw_status status = VXGE_HW_OK;
3760         struct __vxge_hw_virtualpath *vpath;
3761         struct vxge_hw_vp_config *vp_config;
3762         struct vxge_hw_vpath_reg __iomem *vp_reg;
3763
3764         vpath = &hldev->virtual_paths[vp_id];
3765         vp_reg = vpath->vp_reg;
3766         vp_config = vpath->vp_config;
3767
3768         writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
3769                         vpath->vsport_number), &vp_reg->xmac_vsport_choice);
3770
3771         if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
3772
3773                 val64 = readq(&vp_reg->xmac_rpa_vcfg);
3774
3775                 if (vp_config->rpa_strip_vlan_tag !=
3776                         VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
3777                         if (vp_config->rpa_strip_vlan_tag)
3778                                 val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
3779                         else
3780                                 val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
3781                 }
3782
3783                 writeq(val64, &vp_reg->xmac_rpa_vcfg);
3784                 val64 = readq(&vp_reg->rxmac_vcfg0);
3785
3786                 if (vp_config->mtu !=
3787                                 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
3788                         val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
3789                         if ((vp_config->mtu  +
3790                                 VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
3791                                 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3792                                         vp_config->mtu  +
3793                                         VXGE_HW_MAC_HEADER_MAX_SIZE);
3794                         else
3795                                 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3796                                         vpath->max_mtu);
3797                 }
3798
3799                 writeq(val64, &vp_reg->rxmac_vcfg0);
3800
3801                 val64 = readq(&vp_reg->rxmac_vcfg1);
3802
3803                 val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
3804                         VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
3805
3806                 if (hldev->config.rth_it_type ==
3807                                 VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
3808                         val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
3809                                 0x2) |
3810                                 VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
3811                 }
3812
3813                 writeq(val64, &vp_reg->rxmac_vcfg1);
3814         }
3815         return status;
3816 }
3817
3818 /*
3819  * __vxge_hw_vpath_tim_configure
3820  * This routine configures the TIM registers of the virtual path using
3821  * the configuration passed
3822  */
3823 enum vxge_hw_status
3824 __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3825 {
3826         u64 val64;
3827         enum vxge_hw_status status = VXGE_HW_OK;
3828         struct __vxge_hw_virtualpath *vpath;
3829         struct vxge_hw_vpath_reg __iomem *vp_reg;
3830         struct vxge_hw_vp_config *config;
3831
3832         vpath = &hldev->virtual_paths[vp_id];
3833         vp_reg = vpath->vp_reg;
3834         config = vpath->vp_config;
3835
3836         writeq((u64)0, &vp_reg->tim_dest_addr);
3837         writeq((u64)0, &vp_reg->tim_vpath_map);
3838         writeq((u64)0, &vp_reg->tim_bitmap);
3839         writeq((u64)0, &vp_reg->tim_remap);
3840
3841         if (config->ring.enable == VXGE_HW_RING_ENABLE)
3842                 writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
3843                         (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
3844                         VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
3845
3846         val64 = readq(&vp_reg->tim_pci_cfg);
3847         val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
3848         writeq(val64, &vp_reg->tim_pci_cfg);
3849
3850         if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3851
3852                 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3853
3854                 if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3855                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3856                                 0x3ffffff);
3857                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3858                                         config->tti.btimer_val);
3859                 }
3860
3861                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
3862
3863                 if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
3864                         if (config->tti.timer_ac_en)
3865                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3866                         else
3867                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3868                 }
3869
3870                 if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
3871                         if (config->tti.timer_ci_en)
3872                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3873                         else
3874                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3875                 }
3876
3877                 if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
3878                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
3879                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
3880                                         config->tti.urange_a);
3881                 }
3882
3883                 if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
3884                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
3885                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
3886                                         config->tti.urange_b);
3887                 }
3888
3889                 if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
3890                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
3891                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
3892                                         config->tti.urange_c);
3893                 }
3894
3895                 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3896                 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
3897
3898                 if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
3899                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
3900                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
3901                                                 config->tti.uec_a);
3902                 }
3903
3904                 if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
3905                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
3906                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
3907                                                 config->tti.uec_b);
3908                 }
3909
3910                 if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
3911                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
3912                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
3913                                                 config->tti.uec_c);
3914                 }
3915
3916                 if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
3917                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
3918                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
3919                                                 config->tti.uec_d);
3920                 }
3921
3922                 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
3923                 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
3924
3925                 if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
3926                         if (config->tti.timer_ri_en)
3927                                 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3928                         else
3929                                 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3930                 }
3931
3932                 if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3933                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3934                                         0x3ffffff);
3935                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3936                                         config->tti.rtimer_val);
3937                 }
3938
3939                 if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
3940                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
3941                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
3942                                         config->tti.util_sel);
3943                 }
3944
3945                 if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3946                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3947                                         0x3ffffff);
3948                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3949                                         config->tti.ltimer_val);
3950                 }
3951
3952                 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
3953         }
3954
3955         if (config->ring.enable == VXGE_HW_RING_ENABLE) {
3956
3957                 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
3958
3959                 if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3960                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3961                                         0x3ffffff);
3962                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3963                                         config->rti.btimer_val);
3964                 }
3965
3966                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
3967
3968                 if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
3969                         if (config->rti.timer_ac_en)
3970                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3971                         else
3972                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3973                 }
3974
3975                 if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
3976                         if (config->rti.timer_ci_en)
3977                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3978                         else
3979                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3980                 }
3981
3982                 if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
3983                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
3984                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
3985                                         config->rti.urange_a);
3986                 }
3987
3988                 if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
3989                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
3990                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
3991                                         config->rti.urange_b);
3992                 }
3993
3994                 if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
3995                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
3996                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
3997                                         config->rti.urange_c);
3998                 }
3999
4000                 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
4001                 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
4002
4003                 if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
4004                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
4005                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
4006                                                 config->rti.uec_a);
4007                 }
4008
4009                 if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
4010                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
4011                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
4012                                                 config->rti.uec_b);
4013                 }
4014
4015                 if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
4016                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
4017                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
4018                                                 config->rti.uec_c);
4019                 }
4020
4021                 if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
4022                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
4023                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
4024                                                 config->rti.uec_d);
4025                 }
4026
4027                 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
4028                 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
4029
4030                 if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
4031                         if (config->rti.timer_ri_en)
4032                                 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4033                         else
4034                                 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4035                 }
4036
4037                 if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4038                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4039                                         0x3ffffff);
4040                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4041                                         config->rti.rtimer_val);
4042                 }
4043
4044                 if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
4045                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
4046                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
4047                                         config->rti.util_sel);
4048                 }
4049
4050                 if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4051                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4052                                         0x3ffffff);
4053                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4054                                         config->rti.ltimer_val);
4055                 }
4056
4057                 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
4058         }
4059
4060         val64 = 0;
4061         writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4062         writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4063         writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4064         writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4065         writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4066         writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4067
4068         return status;
4069 }
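
/*
 * Sketch of a caller shaping the RTI (receive-side interrupt
 * moderation) knobs this routine programs; any field left at
 * VXGE_HW_USE_FLASH_DEFAULT keeps the flash value.  The numbers are
 * illustrative only.
 */
#if 0	/* illustrative sketch, not compiled */
static void vxge_shape_rti_sketch(struct vxge_hw_vp_config *cfg)
{
	cfg->rti.btimer_val = 250;	/* base interrupt timer */
	cfg->rti.timer_ac_en = 1;	/* auto-cancel on new traffic */
	cfg->rti.timer_ci_en = 0;	/* no continuous interrupts */
	cfg->rti.urange_a = 5;		/* utilization bands ... */
	cfg->rti.uec_a = 1;		/* ... and their event counts */
	cfg->rti.util_sel = VXGE_HW_USE_FLASH_DEFAULT;
	cfg->rti.ltimer_val = 100;	/* latency timer */
}
#endif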
4070
4071 /*
4072  * __vxge_hw_vpath_initialize
4073  * This routine is the final phase of init which initializes the
4074  * registers of the vpath using the configuration passed.
4075  */
4076 enum vxge_hw_status
4077 __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
4078 {
4079         u64 val64;
4080         u32 val32;
4081         enum vxge_hw_status status = VXGE_HW_OK;
4082         struct __vxge_hw_virtualpath *vpath;
4083         struct vxge_hw_vpath_reg __iomem *vp_reg;
4084
4085         vpath = &hldev->virtual_paths[vp_id];
4086
4087         if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
4088                 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
4089                 goto exit;
4090         }
4091         vp_reg = vpath->vp_reg;
4092
4093         status =  __vxge_hw_vpath_swapper_set(vpath->vp_reg);
4094
4095         if (status != VXGE_HW_OK)
4096                 goto exit;
4097
4098         status =  __vxge_hw_vpath_mac_configure(hldev, vp_id);
4099
4100         if (status != VXGE_HW_OK)
4101                 goto exit;
4102
4103         status =  __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
4104
4105         if (status != VXGE_HW_OK)
4106                 goto exit;
4107
4108         status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
4109
4110         if (status != VXGE_HW_OK)
4111                 goto exit;
4112
4113         writeq(0, &vp_reg->gendma_int);
4114
4115         val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
4116
4117         /* Get MRRS value from device control */
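        /* In PCIe config space, the read-request size is the 3-bit
         * encoded field in Device Control bits 14:12 (0 = 128B ...
         * 5 = 4096B); the masked shift below extracts that encoding.
         * The 0x78 offset assumes this adapter's PCIe capability
         * layout.
         */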
4118         status  = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
4119
4120         if (status == VXGE_HW_OK) {
4121                 val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
4122                 val64 &=
4123                     ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
4124                 val64 |=
4125                     VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
4126
4127                 val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
4128         }
4129
4130         val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
4131         val64 |=
4132             VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
4133                     VXGE_HW_MAX_PAYLOAD_SIZE_512);
4134
4135         val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
4136         writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
4137
4138 exit:
4139         return status;
4140 }
4141
4142 /*
4143  * __vxge_hw_vp_initialize - Initialize Virtual Path structure
4144  * This routine is the initial phase of init which resets the vpath and
4145  * initializes the software support structures.
4146  */
4147 enum vxge_hw_status
4148 __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4149                         struct vxge_hw_vp_config *config)
4150 {
4151         struct __vxge_hw_virtualpath *vpath;
4152         enum vxge_hw_status status = VXGE_HW_OK;
4153
4154         if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
4155                 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
4156                 goto exit;
4157         }
4158
4159         vpath = &hldev->virtual_paths[vp_id];
4160
4161         vpath->vp_id = vp_id;
4162         vpath->vp_open = VXGE_HW_VP_OPEN;
4163         vpath->hldev = hldev;
4164         vpath->vp_config = config;
4165         vpath->vp_reg = hldev->vpath_reg[vp_id];
4166         vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
4167
4168         __vxge_hw_vpath_reset(hldev, vp_id);
4169
4170         status = __vxge_hw_vpath_reset_check(vpath);
4171
4172         if (status != VXGE_HW_OK) {
4173                 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4174                 goto exit;
4175         }
4176
4177         status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
4178
4179         if (status != VXGE_HW_OK) {
4180                 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4181                 goto exit;
4182         }
4183
4184         INIT_LIST_HEAD(&vpath->vpath_handles);
4185
4186         vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
4187
4188         VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
4189                 hldev->tim_int_mask1, vp_id);
4190
4191         status = __vxge_hw_vpath_initialize(hldev, vp_id);
4192
4193         if (status != VXGE_HW_OK)
4194                 __vxge_hw_vp_terminate(hldev, vp_id);
4195 exit:
4196         return status;
4197 }
4198
4199 /*
4200  * __vxge_hw_vp_terminate - Terminate Virtual Path structure
4201  * This routine closes all channels it opened and frees up memory
4202  */
4203 void
4204 __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4205 {
4206         struct __vxge_hw_virtualpath *vpath;
4207
4208         vpath = &hldev->virtual_paths[vp_id];
4209
4210         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
4211                 goto exit;
4212
4213         VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
4214                 vpath->hldev->tim_int_mask1, vpath->vp_id);
4215         hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
4216
4217         memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4218 exit:
4219         return;
4220 }
4221
4222 /*
4223  * vxge_hw_vpath_mtu_set - Set MTU.
 * Set a new MTU value. For example, to use jumbo frames:
4225  * vxge_hw_vpath_mtu_set(my_device, 9600);
4226  */
4227 enum vxge_hw_status
4228 vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
4229 {
4230         u64 val64;
4231         enum vxge_hw_status status = VXGE_HW_OK;
4232         struct __vxge_hw_virtualpath *vpath;
4233
4234         if (vp == NULL) {
4235                 status = VXGE_HW_ERR_INVALID_HANDLE;
4236                 goto exit;
4237         }
4238         vpath = vp->vpath;
4239
4240         new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
4241
        /* Reject out-of-range MTUs before touching the hardware */
        if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) {
                status = VXGE_HW_ERR_INVALID_MTU_SIZE;
                goto exit;
        }
4244
4245         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
4246
4247         val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
4248         val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
4249
4250         writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
4251
4252         vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
4253
4254 exit:
4255         return status;
4256 }
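/*
 * Illustrative usage (a sketch, not driver code): a caller holding a handle
 * "vp" from vxge_hw_vpath_open() could switch to jumbo frames as below; the
 * 9000-byte value is hypothetical. Note that new_mtu is the MTU excluding
 * the MAC header; the function adds VXGE_HW_MAC_HEADER_MAX_SIZE itself
 * before range-checking against [VXGE_HW_MIN_MTU, vpath->max_mtu].
 *
 *      if (vxge_hw_vpath_mtu_set(vp, 9000) != VXGE_HW_OK)
 *              ... reject the requested MTU ...
 */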
4257
4258 /*
4259  * vxge_hw_vpath_open - Open a virtual path on a given adapter
 * This function is used to open access to a virtual path of an
 * adapter for offload and GRO operations. This function returns
4262  * synchronously.
4263  */
4264 enum vxge_hw_status
4265 vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
4266                    struct vxge_hw_vpath_attr *attr,
4267                    struct __vxge_hw_vpath_handle **vpath_handle)
4268 {
4269         struct __vxge_hw_virtualpath *vpath;
4270         struct __vxge_hw_vpath_handle *vp;
4271         enum vxge_hw_status status;
4272
4273         vpath = &hldev->virtual_paths[attr->vp_id];
4274
4275         if (vpath->vp_open == VXGE_HW_VP_OPEN) {
4276                 status = VXGE_HW_ERR_INVALID_STATE;
4277                 goto vpath_open_exit1;
4278         }
4279
4280         status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
4281                         &hldev->config.vp_config[attr->vp_id]);
4282
4283         if (status != VXGE_HW_OK)
4284                 goto vpath_open_exit1;
4285
        vp = vmalloc(sizeof(struct __vxge_hw_vpath_handle));
4288         if (vp == NULL) {
4289                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4290                 goto vpath_open_exit2;
4291         }
4292
4293         memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle));
4294
4295         vp->vpath = vpath;
4296
4297         if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4298                 status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
4299                 if (status != VXGE_HW_OK)
4300                         goto vpath_open_exit6;
4301         }
4302
4303         if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
4304                 status = __vxge_hw_ring_create(vp, &attr->ring_attr);
4305                 if (status != VXGE_HW_OK)
4306                         goto vpath_open_exit7;
4307
4308                 __vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
4309         }
4310
4311         vpath->fifoh->tx_intr_num =
4312                 (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP)  +
4313                         VXGE_HW_VPATH_INTR_TX;
4314
4315         vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
4316                                 VXGE_HW_BLOCK_SIZE);
4317
4318         if (vpath->stats_block == NULL) {
4319                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4320                 goto vpath_open_exit8;
4321         }
4322
4323         vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)vpath->
4324                         stats_block->memblock;
4325         memset(vpath->hw_stats, 0,
4326                 sizeof(struct vxge_hw_vpath_stats_hw_info));
4327
4328         hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
4329                                                 vpath->hw_stats;
4330
4331         vpath->hw_stats_sav =
4332                 &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
4333         memset(vpath->hw_stats_sav, 0,
4334                         sizeof(struct vxge_hw_vpath_stats_hw_info));
4335
4336         writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
4337
4338         status = vxge_hw_vpath_stats_enable(vp);
4339         if (status != VXGE_HW_OK)
4340                 goto vpath_open_exit8;
4341
4342         list_add(&vp->item, &vpath->vpath_handles);
4343
4344         hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
4345
4346         *vpath_handle = vp;
4347
4348         attr->fifo_attr.userdata = vpath->fifoh;
4349         attr->ring_attr.userdata = vpath->ringh;
4350
4351         return VXGE_HW_OK;
4352
4353 vpath_open_exit8:
4354         if (vpath->ringh != NULL)
4355                 __vxge_hw_ring_delete(vp);
4356 vpath_open_exit7:
4357         if (vpath->fifoh != NULL)
4358                 __vxge_hw_fifo_delete(vp);
4359 vpath_open_exit6:
4360         vfree(vp);
4361 vpath_open_exit2:
4362         __vxge_hw_vp_terminate(hldev, attr->vp_id);
4363 vpath_open_exit1:
4364
4365         return status;
4366 }
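/*
 * Illustrative open/close sketch (hypothetical caller context; "vp_id" and
 * the attribute fill-in come from the caller's own configuration):
 *
 *      struct vxge_hw_vpath_attr attr;
 *      struct __vxge_hw_vpath_handle *vp;
 *
 *      memset(&attr, 0, sizeof(attr));
 *      attr.vp_id = vp_id;
 *      ... fill in attr.fifo_attr and attr.ring_attr ...
 *
 *      if (vxge_hw_vpath_open(hldev, &attr, &vp) != VXGE_HW_OK)
 *              ... bail out ...
 *      ... run traffic ...
 *      vxge_hw_vpath_close(vp);
 */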
4367
4368 /**
 * vxge_hw_vpath_rx_doorbell_init - Post the initial RxD doorbell
 * @vp: Handle got from previous vpath open
 *
 * This function posts to the hardware the count of RxDs initially
 * available on the ring and derives the lower bound used when
 * replenishing RxDs (ring->rxds_limit).
4375  */
4376 void
4377 vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4378 {
4379         struct __vxge_hw_virtualpath *vpath = NULL;
4380         u64 new_count, val64, val164;
4381         struct __vxge_hw_ring *ring;
4382
4383         vpath = vp->vpath;
4384         ring = vpath->ringh;
4385
4386         new_count = readq(&vpath->vp_reg->rxdmem_size);
4387         new_count &= 0x1fff;
4388         val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
4389
4390         writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
4391                 &vpath->vp_reg->prc_rxd_doorbell);
4392         readl(&vpath->vp_reg->prc_rxd_doorbell);
4393
4394         val164 /= 2;
4395         val64 = readq(&vpath->vp_reg->prc_cfg6);
4396         val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
4397         val64 &= 0x1ff;
4398
4399         /*
         * Each RxD occupies 4 qwords
4401          */
4402         new_count -= (val64 + 1);
4403         val64 = min(val164, new_count) / 4;
4404
4405         ring->rxds_limit = min(ring->rxds_limit, val64);
4406         if (ring->rxds_limit < 4)
4407                 ring->rxds_limit = 4;
4408 }
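/*
 * Worked example with hypothetical values, assuming the RXDMEM_SIZE and
 * RXD_SPAT accessors pass the register fields through unchanged: if
 * rxdmem_size reports 4096 qwords, the initial doorbell posts 4096 qwords
 * and val164 then becomes 2048; with an RXD_SPAT of 255, new_count =
 * 4096 - 256 = 3840, and since each RxD occupies 4 qwords,
 * min(2048, 3840) / 4 = 512, so ring->rxds_limit is capped at 512 (and
 * never allowed below 4).
 */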
4409
4410 /*
 * vxge_hw_vpath_close - Close the handle got from a previous vpath open
 * This function is used to close access to a virtual path opened
4413  * earlier.
4414  */
4415 enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
4416 {
4417         struct __vxge_hw_virtualpath *vpath = NULL;
4418         struct __vxge_hw_device *devh = NULL;
4419         u32 vp_id = vp->vpath->vp_id;
4420         u32 is_empty = TRUE;
4421         enum vxge_hw_status status = VXGE_HW_OK;
4422
4423         vpath = vp->vpath;
4424         devh = vpath->hldev;
4425
4426         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4427                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4428                 goto vpath_close_exit;
4429         }
4430
4431         list_del(&vp->item);
4432
4433         if (!list_empty(&vpath->vpath_handles)) {
4434                 list_add(&vp->item, &vpath->vpath_handles);
4435                 is_empty = FALSE;
4436         }
4437
4438         if (!is_empty) {
4439                 status = VXGE_HW_FAIL;
4440                 goto vpath_close_exit;
4441         }
4442
4443         devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
4444
4445         if (vpath->ringh != NULL)
4446                 __vxge_hw_ring_delete(vp);
4447
4448         if (vpath->fifoh != NULL)
4449                 __vxge_hw_fifo_delete(vp);
4450
4451         if (vpath->stats_block != NULL)
4452                 __vxge_hw_blockpool_block_free(devh, vpath->stats_block);
4453
4454         vfree(vp);
4455
4456         __vxge_hw_vp_terminate(devh, vp_id);
4457
4458         vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
4459
4460 vpath_close_exit:
4461         return status;
4462 }
4463
4464 /*
4465  * vxge_hw_vpath_reset - Resets vpath
4466  * This function is used to request a reset of vpath
 * This function is used to request a reset of the vpath
4468 enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
4469 {
4470         enum vxge_hw_status status;
4471         u32 vp_id;
4472         struct __vxge_hw_virtualpath *vpath = vp->vpath;
4473
4474         vp_id = vpath->vp_id;
4475
4476         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4477                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4478                 goto exit;
4479         }
4480
4481         status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
4482         if (status == VXGE_HW_OK)
4483                 vpath->sw_stats->soft_reset_cnt++;
4484 exit:
4485         return status;
4486 }
4487
4488 /*
4489  * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
4490  * This function poll's for the vpath reset completion and re initializes
 * This function polls for the vpath reset completion and re-initializes
4492  */
4493 enum vxge_hw_status
4494 vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
4495 {
4496         struct __vxge_hw_virtualpath *vpath = NULL;
4497         enum vxge_hw_status status;
4498         struct __vxge_hw_device *hldev;
4499         u32 vp_id;
4500
4501         vp_id = vp->vpath->vp_id;
4502         vpath = vp->vpath;
4503         hldev = vpath->hldev;
4504
4505         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4506                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4507                 goto exit;
4508         }
4509
4510         status = __vxge_hw_vpath_reset_check(vpath);
4511         if (status != VXGE_HW_OK)
4512                 goto exit;
4513
4514         status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
4515         if (status != VXGE_HW_OK)
4516                 goto exit;
4517
4518         status = __vxge_hw_vpath_initialize(hldev, vp_id);
4519         if (status != VXGE_HW_OK)
4520                 goto exit;
4521
4522         if (vpath->ringh != NULL)
4523                 __vxge_hw_vpath_prc_configure(hldev, vp_id);
4524
4525         memset(vpath->hw_stats, 0,
4526                 sizeof(struct vxge_hw_vpath_stats_hw_info));
4527
4528         memset(vpath->hw_stats_sav, 0,
4529                 sizeof(struct vxge_hw_vpath_stats_hw_info));
4530
4531         writeq(vpath->stats_block->dma_addr,
4532                 &vpath->vp_reg->stats_cfg);
4533
4534         status = vxge_hw_vpath_stats_enable(vp);
4535
4536 exit:
4537         return status;
4538 }
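/*
 * Illustrative recovery sequence (a sketch of a hypothetical error path):
 * request the reset, poll for completion and re-initialize, then clear the
 * reset so the vpath resumes forwarding frames.
 *
 *      if (vxge_hw_vpath_reset(vp) == VXGE_HW_OK &&
 *          vxge_hw_vpath_recover_from_reset(vp) == VXGE_HW_OK)
 *              vxge_hw_vpath_enable(vp);
 */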
4539
4540 /*
4541  * vxge_hw_vpath_enable - Enable vpath.
4542  * This routine clears the vpath reset thereby enabling a vpath
4543  * to start forwarding frames and generating interrupts.
4544  */
4545 void
4546 vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
4547 {
4548         struct __vxge_hw_device *hldev;
4549         u64 val64;
4550
4551         hldev = vp->vpath->hldev;
4552
4553         val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
4554                 1 << (16 - vp->vpath->vp_id));
4555
4556         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
4557                 &hldev->common_reg->cmn_rsthdlr_cfg1);
4558 }
4559
4560 /*
 * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
 * Enable the DMA vpath statistics. The function is to be called to re-enable
 * the adapter to update stats into host memory
4564  */
4565 enum vxge_hw_status
4566 vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
4567 {
4568         enum vxge_hw_status status = VXGE_HW_OK;
4569         struct __vxge_hw_virtualpath *vpath;
4570
4571         vpath = vp->vpath;
4572
4573         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4574                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4575                 goto exit;
4576         }
4577
4578         memcpy(vpath->hw_stats_sav, vpath->hw_stats,
4579                         sizeof(struct vxge_hw_vpath_stats_hw_info));
4580
4581         status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
4582 exit:
4583         return status;
4584 }
4585
4586 /*
 * __vxge_hw_vpath_stats_access - Access the statistic at the given offset
 *                           and perform the requested operation
4589  */
4590 enum vxge_hw_status
4591 __vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
4592                              u32 operation, u32 offset, u64 *stat)
4593 {
4594         u64 val64;
4595         enum vxge_hw_status status = VXGE_HW_OK;
4596         struct vxge_hw_vpath_reg __iomem *vp_reg;
4597
4598         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4599                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4600                 goto vpath_stats_access_exit;
4601         }
4602
4603         vp_reg = vpath->vp_reg;
4604
4605         val64 =  VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
4606                  VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
4607                  VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
4608
4609         status = __vxge_hw_pio_mem_write64(val64,
4610                                 &vp_reg->xmac_stats_access_cmd,
4611                                 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
4612                                 vpath->hldev->config.device_poll_millis);
4613
4614         if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
4615                 *stat = readq(&vp_reg->xmac_stats_access_data);
4616         else
4617                 *stat = 0;
4618
4619 vpath_stats_access_exit:
4620         return status;
4621 }
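/*
 * Illustrative single-counter read (a sketch; the "offset" units follow the
 * caller's convention - the TX helper below passes a qword index, while the
 * RX helper passes a byte offset shifted right by 3):
 *
 *      u64 stat;
 *
 *      status = __vxge_hw_vpath_stats_access(vpath, VXGE_HW_STATS_OP_READ,
 *                              offset, &stat);
 */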
4622
4623 /*
4624  * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
4625  */
4626 enum vxge_hw_status
4627 __vxge_hw_vpath_xmac_tx_stats_get(
4628         struct __vxge_hw_virtualpath *vpath,
4629         struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
4630 {
4631         u64 *val64;
4632         int i;
4633         u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
4634         enum vxge_hw_status status = VXGE_HW_OK;
4635
4636         val64 = (u64 *) vpath_tx_stats;
4637
4638         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4639                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4640                 goto exit;
4641         }
4642
4643         for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
4644                 status = __vxge_hw_vpath_stats_access(vpath,
4645                                         VXGE_HW_STATS_OP_READ,
4646                                         offset, val64);
4647                 if (status != VXGE_HW_OK)
4648                         goto exit;
4649                 offset++;
4650                 val64++;
4651         }
4652 exit:
4653         return status;
4654 }
4655
4656 /*
4657  * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
4658  */
4659 enum vxge_hw_status
4660 __vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
4661                         struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
4662 {
4663         u64 *val64;
4664         enum vxge_hw_status status = VXGE_HW_OK;
4665         int i;
4666         u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
4667         val64 = (u64 *) vpath_rx_stats;
4668
4669         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4670                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4671                 goto exit;
4672         }
4673         for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
4674                 status = __vxge_hw_vpath_stats_access(vpath,
4675                                         VXGE_HW_STATS_OP_READ,
4676                                         offset >> 3, val64);
4677                 if (status != VXGE_HW_OK)
4678                         goto exit;
4679
4680                 offset += 8;
4681                 val64++;
4682         }
4683 exit:
4684         return status;
4685 }
4686
4687 /*
4688  * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
4689  */
4690 enum vxge_hw_status __vxge_hw_vpath_stats_get(
4691                         struct __vxge_hw_virtualpath *vpath,
4692                         struct vxge_hw_vpath_stats_hw_info *hw_stats)
4693 {
4694         u64 val64;
4695         enum vxge_hw_status status = VXGE_HW_OK;
4696         struct vxge_hw_vpath_reg __iomem *vp_reg;
4697
4698         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4699                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4700                 goto exit;
4701         }
4702         vp_reg = vpath->vp_reg;
4703
4704         val64 = readq(&vp_reg->vpath_debug_stats0);
4705         hw_stats->ini_num_mwr_sent =
4706                 (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
4707
4708         val64 = readq(&vp_reg->vpath_debug_stats1);
4709         hw_stats->ini_num_mrd_sent =
4710                 (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
4711
4712         val64 = readq(&vp_reg->vpath_debug_stats2);
4713         hw_stats->ini_num_cpl_rcvd =
4714                 (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
4715
4716         val64 = readq(&vp_reg->vpath_debug_stats3);
4717         hw_stats->ini_num_mwr_byte_sent =
4718                 VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
4719
4720         val64 = readq(&vp_reg->vpath_debug_stats4);
4721         hw_stats->ini_num_cpl_byte_rcvd =
4722                 VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
4723
4724         val64 = readq(&vp_reg->vpath_debug_stats5);
4725         hw_stats->wrcrdtarb_xoff =
4726                 (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
4727
4728         val64 = readq(&vp_reg->vpath_debug_stats6);
4729         hw_stats->rdcrdtarb_xoff =
4730                 (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
4731
4732         val64 = readq(&vp_reg->vpath_genstats_count01);
4733         hw_stats->vpath_genstats_count0 =
4734         (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
4735                 val64);
4736
4737         val64 = readq(&vp_reg->vpath_genstats_count01);
4738         hw_stats->vpath_genstats_count1 =
4739         (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
4740                 val64);
4741
4742         val64 = readq(&vp_reg->vpath_genstats_count23);
4743         hw_stats->vpath_genstats_count2 =
4744         (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
4745                 val64);
4746
        /* genstats count3 is read from the count23 register, not count01 */
        val64 = readq(&vp_reg->vpath_genstats_count23);
4748         hw_stats->vpath_genstats_count3 =
4749         (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
4750                 val64);
4751
4752         val64 = readq(&vp_reg->vpath_genstats_count4);
4753         hw_stats->vpath_genstats_count4 =
4754         (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
4755                 val64);
4756
4757         val64 = readq(&vp_reg->vpath_genstats_count5);
4758         hw_stats->vpath_genstats_count5 =
4759         (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
4760                 val64);
4761
4762         status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
4763         if (status != VXGE_HW_OK)
4764                 goto exit;
4765
4766         status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
4767         if (status != VXGE_HW_OK)
4768                 goto exit;
4769
4770         VXGE_HW_VPATH_STATS_PIO_READ(
4771                 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
4772
4773         hw_stats->prog_event_vnum0 =
4774                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
4775
4776         hw_stats->prog_event_vnum1 =
4777                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
4778
4779         VXGE_HW_VPATH_STATS_PIO_READ(
4780                 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
4781
4782         hw_stats->prog_event_vnum2 =
4783                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
4784
4785         hw_stats->prog_event_vnum3 =
4786                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
4787
4788         val64 = readq(&vp_reg->rx_multi_cast_stats);
4789         hw_stats->rx_multi_cast_frame_discard =
4790                 (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
4791
4792         val64 = readq(&vp_reg->rx_frm_transferred);
4793         hw_stats->rx_frm_transferred =
4794                 (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
4795
4796         val64 = readq(&vp_reg->rxd_returned);
4797         hw_stats->rxd_returned =
4798                 (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
4799
4800         val64 = readq(&vp_reg->dbg_stats_rx_mpa);
4801         hw_stats->rx_mpa_len_fail_frms =
4802                 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
4803         hw_stats->rx_mpa_mrk_fail_frms =
4804                 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
4805         hw_stats->rx_mpa_crc_fail_frms =
4806                 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
4807
4808         val64 = readq(&vp_reg->dbg_stats_rx_fau);
4809         hw_stats->rx_permitted_frms =
4810                 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
4811         hw_stats->rx_vp_reset_discarded_frms =
4812         (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
4813         hw_stats->rx_wol_frms =
4814                 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
4815
4816         val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
4817         hw_stats->tx_vp_reset_discarded_frms =
4818         (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
4819                 val64);
4820 exit:
4821         return status;
4822 }
4823
4824 /*
4825  * __vxge_hw_blockpool_create - Create block pool
4826  */
4827
4828 enum vxge_hw_status
4829 __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
4830                            struct __vxge_hw_blockpool *blockpool,
4831                            u32 pool_size,
4832                            u32 pool_max)
4833 {
4834         u32 i;
4835         struct __vxge_hw_blockpool_entry *entry = NULL;
4836         void *memblock;
4837         dma_addr_t dma_addr;
4838         struct pci_dev *dma_handle;
4839         struct pci_dev *acc_handle;
4840         enum vxge_hw_status status = VXGE_HW_OK;
4841
4842         if (blockpool == NULL) {
4843                 status = VXGE_HW_FAIL;
4844                 goto blockpool_create_exit;
4845         }
4846
4847         blockpool->hldev = hldev;
4848         blockpool->block_size = VXGE_HW_BLOCK_SIZE;
4849         blockpool->pool_size = 0;
4850         blockpool->pool_max = pool_max;
4851         blockpool->req_out = 0;
4852
4853         INIT_LIST_HEAD(&blockpool->free_block_list);
4854         INIT_LIST_HEAD(&blockpool->free_entry_list);
4855
4856         for (i = 0; i < pool_size + pool_max; i++) {
4857                 entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4858                                 GFP_KERNEL);
4859                 if (entry == NULL) {
4860                         __vxge_hw_blockpool_destroy(blockpool);
4861                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4862                         goto blockpool_create_exit;
4863                 }
4864                 list_add(&entry->item, &blockpool->free_entry_list);
4865         }
4866
4867         for (i = 0; i < pool_size; i++) {
4868
4869                 memblock = vxge_os_dma_malloc(
4870                                 hldev->pdev,
4871                                 VXGE_HW_BLOCK_SIZE,
4872                                 &dma_handle,
4873                                 &acc_handle);
4874
4875                 if (memblock == NULL) {
4876                         __vxge_hw_blockpool_destroy(blockpool);
4877                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4878                         goto blockpool_create_exit;
4879                 }
4880
4881                 dma_addr = pci_map_single(hldev->pdev, memblock,
4882                                 VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
4883
4884                 if (unlikely(pci_dma_mapping_error(hldev->pdev,
4885                                 dma_addr))) {
4886
4887                         vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
4888                         __vxge_hw_blockpool_destroy(blockpool);
4889                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4890                         goto blockpool_create_exit;
4891                 }
4892
4893                 if (!list_empty(&blockpool->free_entry_list))
4894                         entry = (struct __vxge_hw_blockpool_entry *)
4895                                 list_first_entry(&blockpool->free_entry_list,
4896                                         struct __vxge_hw_blockpool_entry,
4897                                         item);
4898
4899                 if (entry == NULL)
4900                         entry =
4901                             kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4902                                         GFP_KERNEL);
4903                 if (entry != NULL) {
4904                         list_del(&entry->item);
4905                         entry->length = VXGE_HW_BLOCK_SIZE;
4906                         entry->memblock = memblock;
4907                         entry->dma_addr = dma_addr;
4908                         entry->acc_handle = acc_handle;
4909                         entry->dma_handle = dma_handle;
4910                         list_add(&entry->item,
4911                                           &blockpool->free_block_list);
4912                         blockpool->pool_size++;
4913                 } else {
4914                         __vxge_hw_blockpool_destroy(blockpool);
4915                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4916                         goto blockpool_create_exit;
4917                 }
4918         }
4919
4920 blockpool_create_exit:
4921         return status;
4922 }
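/*
 * Illustrative lifecycle sketch (pool_size/pool_max come from the device
 * configuration in the real driver; they are treated as given here):
 *
 *      if (__vxge_hw_blockpool_create(hldev, &hldev->block_pool,
 *                      pool_size, pool_max) != VXGE_HW_OK)
 *              ... fail device initialization ...
 *      ...
 *      __vxge_hw_blockpool_destroy(&hldev->block_pool);
 */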
4923
4924 /*
4925  * __vxge_hw_blockpool_destroy - Deallocates the block pool
4926  */
4927
4928 void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
{
        struct __vxge_hw_device *hldev;
        struct list_head *p, *n;

        if (blockpool == NULL)
                goto exit;
4939
4940         hldev = blockpool->hldev;
4941
4942         list_for_each_safe(p, n, &blockpool->free_block_list) {
4943
4944                 pci_unmap_single(hldev->pdev,
4945                         ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
4946                         ((struct __vxge_hw_blockpool_entry *)p)->length,
4947                         PCI_DMA_BIDIRECTIONAL);
4948
4949                 vxge_os_dma_free(hldev->pdev,
4950                         ((struct __vxge_hw_blockpool_entry *)p)->memblock,
4951                         &((struct __vxge_hw_blockpool_entry *) p)->acc_handle);
4952
4953                 list_del(
4954                         &((struct __vxge_hw_blockpool_entry *)p)->item);
4955                 kfree(p);
4956                 blockpool->pool_size--;
4957         }
4958
4959         list_for_each_safe(p, n, &blockpool->free_entry_list) {
4960                 list_del(
4961                         &((struct __vxge_hw_blockpool_entry *)p)->item);
4962                 kfree((void *)p);
4963         }
4965 exit:
4966         return;
4967 }
4968
4969 /*
4970  * __vxge_hw_blockpool_blocks_add - Request additional blocks
4971  */
4972 static
4973 void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
4974 {
4975         u32 nreq = 0, i;
4976
4977         if ((blockpool->pool_size  +  blockpool->req_out) <
4978                 VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
4979                 nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
4980                 blockpool->req_out += nreq;
4981         }
4982
4983         for (i = 0; i < nreq; i++)
4984                 vxge_os_dma_malloc_async(
4985                         ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4986                         blockpool->hldev, VXGE_HW_BLOCK_SIZE);
4987 }
4988
4989 /*
4990  * __vxge_hw_blockpool_blocks_remove - Free additional blocks
4991  */
4992 static
4993 void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
4994 {
4995         struct list_head *p, *n;
4996
4997         list_for_each_safe(p, n, &blockpool->free_block_list) {
4998
4999                 if (blockpool->pool_size < blockpool->pool_max)
5000                         break;
5001
5002                 pci_unmap_single(
5003                         ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
5004                         ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
5005                         ((struct __vxge_hw_blockpool_entry *)p)->length,
5006                         PCI_DMA_BIDIRECTIONAL);
5007
5008                 vxge_os_dma_free(
5009                         ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
5010                         ((struct __vxge_hw_blockpool_entry *)p)->memblock,
5011                         &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
5012
5013                 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
5014
5015                 list_add(p, &blockpool->free_entry_list);
5016
5017                 blockpool->pool_size--;
5018
5019         }
5020 }
5021
5022 /*
5023  * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
5024  * Adds a block to block pool
 * Adds a block to the block pool
5026 void vxge_hw_blockpool_block_add(
5027                         struct __vxge_hw_device *devh,
5028                         void *block_addr,
5029                         u32 length,
5030                         struct pci_dev *dma_h,
5031                         struct pci_dev *acc_handle)
5032 {
5033         struct __vxge_hw_blockpool  *blockpool;
5034         struct __vxge_hw_blockpool_entry  *entry = NULL;
5035         dma_addr_t dma_addr;
5036         enum vxge_hw_status status = VXGE_HW_OK;
5038
5039         blockpool = &devh->block_pool;
5040
5041         if (block_addr == NULL) {
5042                 blockpool->req_out--;
5043                 status = VXGE_HW_FAIL;
5044                 goto exit;
5045         }
5046
5047         dma_addr = pci_map_single(devh->pdev, block_addr, length,
5048                                 PCI_DMA_BIDIRECTIONAL);
5049
5050         if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
5051
5052                 vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
5053                 blockpool->req_out--;
5054                 status = VXGE_HW_FAIL;
5055                 goto exit;
5056         }
5057
5058
5059         if (!list_empty(&blockpool->free_entry_list))
5060                 entry = (struct __vxge_hw_blockpool_entry *)
5061                         list_first_entry(&blockpool->free_entry_list,
5062                                 struct __vxge_hw_blockpool_entry,
5063                                 item);
5064
        /*
         * Entries eventually land on lists that __vxge_hw_blockpool_destroy()
         * releases with kfree(), so allocate with kzalloc() rather than
         * vmalloc().
         */
        if (entry == NULL)
                entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
                                GFP_KERNEL);
5068         else
5069                 list_del(&entry->item);
5070
5071         if (entry != NULL) {
5072                 entry->length = length;
5073                 entry->memblock = block_addr;
5074                 entry->dma_addr = dma_addr;
5075                 entry->acc_handle = acc_handle;
5076                 entry->dma_handle = dma_h;
5077                 list_add(&entry->item, &blockpool->free_block_list);
5078                 blockpool->pool_size++;
5079                 status = VXGE_HW_OK;
5080         } else
5081                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5082
5083         blockpool->req_out--;
5084
5086 exit:
5087         return;
5088 }
5089
5090 /*
 * __vxge_hw_blockpool_malloc - Allocate a memory block from the pool
 * Allocates a block of memory of the given size, either from the block pool
 * or by calling vxge_os_dma_malloc()
5094  */
5095 void *
5096 __vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
5097                                 struct vxge_hw_mempool_dma *dma_object)
5098 {
5099         struct __vxge_hw_blockpool_entry *entry = NULL;
5100         struct __vxge_hw_blockpool  *blockpool;
5101         void *memblock = NULL;
5102         enum vxge_hw_status status = VXGE_HW_OK;
5103
5104         blockpool = &devh->block_pool;
5105
5106         if (size != blockpool->block_size) {
5107
5108                 memblock = vxge_os_dma_malloc(devh->pdev, size,
5109                                                 &dma_object->handle,
5110                                                 &dma_object->acc_handle);
5111
5112                 if (memblock == NULL) {
5113                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
5114                         goto exit;
5115                 }
5116
5117                 dma_object->addr = pci_map_single(devh->pdev, memblock, size,
5118                                         PCI_DMA_BIDIRECTIONAL);
5119
5120                 if (unlikely(pci_dma_mapping_error(devh->pdev,
5121                                 dma_object->addr))) {
5122                         vxge_os_dma_free(devh->pdev, memblock,
5123                                 &dma_object->acc_handle);
5124                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
5125                         goto exit;
5126                 }
5127
5128         } else {
5129
5130                 if (!list_empty(&blockpool->free_block_list))
5131                         entry = (struct __vxge_hw_blockpool_entry *)
5132                                 list_first_entry(&blockpool->free_block_list,
5133                                         struct __vxge_hw_blockpool_entry,
5134                                         item);
5135
5136                 if (entry != NULL) {
5137                         list_del(&entry->item);
5138                         dma_object->addr = entry->dma_addr;
5139                         dma_object->handle = entry->dma_handle;
5140                         dma_object->acc_handle = entry->acc_handle;
5141                         memblock = entry->memblock;
5142
5143                         list_add(&entry->item,
5144                                 &blockpool->free_entry_list);
5145                         blockpool->pool_size--;
5146                 }
5147
5148                 if (memblock != NULL)
5149                         __vxge_hw_blockpool_blocks_add(blockpool);
5150         }
5151 exit:
5152         return memblock;
5153 }
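/*
 * Illustrative malloc/free pairing (hypothetical caller): a request that
 * matches the pool's block size is served from the free list, anything
 * else is DMA-mapped on the fly; both cases are returned through
 * __vxge_hw_blockpool_free() below.
 *
 *      struct vxge_hw_mempool_dma dma_object;
 *      void *mem;
 *
 *      mem = __vxge_hw_blockpool_malloc(devh, size, &dma_object);
 *      if (mem != NULL) {
 *              ... use mem / dma_object.addr ...
 *              __vxge_hw_blockpool_free(devh, mem, size, &dma_object);
 *      }
 */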
5154
5155 /*
 * __vxge_hw_blockpool_free - Frees the memory allocated with
 *                              __vxge_hw_blockpool_malloc
5158  */
5159 void
5160 __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
5161                         void *memblock, u32 size,
5162                         struct vxge_hw_mempool_dma *dma_object)
5163 {
5164         struct __vxge_hw_blockpool_entry *entry = NULL;
5165         struct __vxge_hw_blockpool  *blockpool;
5166         enum vxge_hw_status status = VXGE_HW_OK;
5167
5168         blockpool = &devh->block_pool;
5169
5170         if (size != blockpool->block_size) {
5171                 pci_unmap_single(devh->pdev, dma_object->addr, size,
5172                         PCI_DMA_BIDIRECTIONAL);
5173                 vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
5174         } else {
5175
5176                 if (!list_empty(&blockpool->free_entry_list))
5177                         entry = (struct __vxge_hw_blockpool_entry *)
5178                                 list_first_entry(&blockpool->free_entry_list,
5179                                         struct __vxge_hw_blockpool_entry,
5180                                         item);
5181
                /*
                 * As in vxge_hw_blockpool_block_add(): these entries are
                 * kfree()d in __vxge_hw_blockpool_destroy(), so kzalloc()
                 * must be used instead of vmalloc().
                 */
                if (entry == NULL)
                        entry = kzalloc(
                                sizeof(struct __vxge_hw_blockpool_entry),
                                GFP_KERNEL);
5186                 else
5187                         list_del(&entry->item);
5188
5189                 if (entry != NULL) {
5190                         entry->length = size;
5191                         entry->memblock = memblock;
5192                         entry->dma_addr = dma_object->addr;
5193                         entry->acc_handle = dma_object->acc_handle;
5194                         entry->dma_handle = dma_object->handle;
5195                         list_add(&entry->item,
5196                                         &blockpool->free_block_list);
5197                         blockpool->pool_size++;
5198                         status = VXGE_HW_OK;
5199                 } else
5200                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
5201
5202                 if (status == VXGE_HW_OK)
5203                         __vxge_hw_blockpool_blocks_remove(blockpool);
5204         }
5205
5206         return;
5207 }
5208
5209 /*
5210  * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
 * This function allocates a block from the block pool; when a block is
 * handed out, an asynchronous request to replenish the pool is issued
5212  */
5213 struct __vxge_hw_blockpool_entry *
5214 __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
5215 {
5216         struct __vxge_hw_blockpool_entry *entry = NULL;
5217         struct __vxge_hw_blockpool  *blockpool;
5218
5219         blockpool = &devh->block_pool;
5220
5221         if (size == blockpool->block_size) {
5222
5223                 if (!list_empty(&blockpool->free_block_list))
5224                         entry = (struct __vxge_hw_blockpool_entry *)
5225                                 list_first_entry(&blockpool->free_block_list,
5226                                         struct __vxge_hw_blockpool_entry,
5227                                         item);
5228
5229                 if (entry != NULL) {
5230                         list_del(&entry->item);
5231                         blockpool->pool_size--;
5232                 }
5233         }
5234
5235         if (entry != NULL)
5236                 __vxge_hw_blockpool_blocks_add(blockpool);
5237
5238         return entry;
5239 }
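/*
 * Illustrative allocate/free pairing, mirroring how vxge_hw_vpath_open()
 * obtains the per-vpath stats block earlier in this file:
 *
 *      struct __vxge_hw_blockpool_entry *block;
 *
 *      block = __vxge_hw_blockpool_block_allocate(devh, VXGE_HW_BLOCK_SIZE);
 *      if (block != NULL) {
 *              ... use block->memblock / block->dma_addr ...
 *              __vxge_hw_blockpool_block_free(devh, block);
 *      }
 */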
5240
5241 /*
5242  * __vxge_hw_blockpool_block_free - Frees a block from block pool
5243  * @devh: Hal device
5244  * @entry: Entry of block to be freed
5245  *
5246  * This function frees a block from block pool
5247  */
5248 void
5249 __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
5250                         struct __vxge_hw_blockpool_entry *entry)
5251 {
5252         struct __vxge_hw_blockpool  *blockpool;
5253
5254         blockpool = &devh->block_pool;
5255
5256         if (entry->length == blockpool->block_size) {
5257                 list_add(&entry->item, &blockpool->free_block_list);
5258                 blockpool->pool_size++;
5259         }
5260
5261         __vxge_hw_blockpool_blocks_remove(blockpool);
5262
5263         return;
5264 }