/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  Struct definition for eHCA internal structures
 *
 *  Authors: Heiko J Schick <schickhj@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *           Joachim Fenkes <fenkes@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef __EHCA_CLASSES_H__
#define __EHCA_CLASSES_H__

#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/idr.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#include "ehca_classes_pSeries.h"
#include "ipz_pt_fn.h"
#define EHCA_EQE_CACHE_SIZE 20
#define EHCA_MAX_NUM_QUEUES 0xffff
struct ehca_eqe_cache_entry {
	struct ehca_eqe *eqe;
	struct ehca_cq *cq;
};
struct ehca_eq {
	struct ipz_queue ipz_queue;
	struct ipz_eq_handle ipz_eq_handle;
	struct work_struct work;
	struct h_galpas galpas;
	struct tasklet_struct interrupt_task;
	spinlock_t irq_spinlock;
	struct ehca_eqe_cache_entry eqe_cache[EHCA_EQE_CACHE_SIZE];
};
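
/*
 * Illustrative sketch, not part of the original header: the eqe_cache above
 * lets the interrupt path drain up to EHCA_EQE_CACHE_SIZE event queue
 * entries in one pass under irq_spinlock before processing them. The
 * fetch callback is hypothetical; the real driver pulls entries out of
 * eq->ipz_queue.
 */
static inline int ehca_eq_cache_fill(struct ehca_eq *eq,
				     struct ehca_eqe *(*fetch)(struct ehca_eq *))
{
	unsigned long flags;
	int n = 0;

	spin_lock_irqsave(&eq->irq_spinlock, flags);
	while (n < EHCA_EQE_CACHE_SIZE) {
		struct ehca_eqe *eqe = fetch(eq); /* NULL when EQ is empty */
		if (!eqe)
			break;
		eq->eqe_cache[n].eqe = eqe;
		eq->eqe_cache[n].cq = NULL; /* resolved per entry later */
		n++;
	}
	spin_unlock_irqrestore(&eq->irq_spinlock, flags);
	return n; /* number of cached entries to process */
}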
struct ehca_sma_attr {
	u16 lid, lmc, sm_sl, sm_lid;
	u16 pkey_tbl_len, pkeys[16];
};
struct ehca_sport {
	struct ib_cq *ibcq_aqp1;
	struct ib_qp *ibqp_sqp[2];
	/* lock to serialize modify_qp() calls for sqp in normal
	 * and irq path (when event PORT_ACTIVE is received the first time)
	 */
	spinlock_t mod_sqp_lock;
	enum ib_port_state port_state;
	struct ehca_sma_attr saved_attr;
};
#define HCA_CAP_MR_PGSIZE_4K  0x80000000
#define HCA_CAP_MR_PGSIZE_64K 0x40000000
#define HCA_CAP_MR_PGSIZE_1M  0x20000000
#define HCA_CAP_MR_PGSIZE_16M 0x10000000
struct ehca_shca {
	struct ib_device ib_device;
	struct of_device *ofdev;
	struct list_head shca_list;
	struct ipz_adapter_handle ipz_hca_handle;
	struct ehca_sport sport[2];
	struct ehca_mr *maxmr;
	struct h_galpas galpas;
	struct mutex modify_mutex;
	/* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */
	u32 hca_cap_mr_pgsize;
};
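
/*
 * Illustrative sketch, not part of the original header: mapping the
 * HCA_CAP_MR_PGSIZE_* bits in hca_cap_mr_pgsize to the largest MR page
 * size the HCA supports. The helper name is hypothetical.
 */
static inline u64 ehca_max_mr_pgsize(struct ehca_shca *shca)
{
	if (shca->hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_16M)
		return 16 * 1024 * 1024;
	if (shca->hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_1M)
		return 1024 * 1024;
	if (shca->hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_64K)
		return 64 * 1024;
	return 4096; /* 4K support is the baseline */
}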
struct ehca_pd {
	struct ib_pd ib_pd;
	/* small queue mgmt */
	struct list_head free[2];
	struct list_head full[2];
};
enum ehca_ext_qp_type {
	EQPT_NORMAL    = 0,
	EQPT_LLQP      = 1,
	EQPT_SRQBASE   = 2,
	EQPT_SRQ       = 3,
};
/* struct to cache modify_qp()'s parms for GSI/SMI qp */
struct ehca_mod_qp_parm {
	int mask;
	struct ib_qp_attr attr;
};

#define EHCA_MOD_QP_PARM_MAX 4
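
/*
 * Illustrative sketch, not part of the original header: while a port is
 * not yet active, modify_qp() parameters for the GSI/SMI QP can be cached
 * (up to EHCA_MOD_QP_PARM_MAX entries) and replayed once PORT_ACTIVE
 * arrives. The helper and its idx parameter are hypothetical.
 */
static inline int ehca_cache_mod_qp_parm(struct ehca_mod_qp_parm *cache,
					 int *idx, int mask,
					 const struct ib_qp_attr *attr)
{
	if (*idx >= EHCA_MOD_QP_PARM_MAX)
		return -ENOMEM; /* cache full; caller must fail the call */
	cache[*idx].mask = mask;
	cache[*idx].attr = *attr;
	(*idx)++;
	return 0;
}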
#define QMAP_IDX_MASK 0xFFFFULL

/* struct for tracking if cqes have been reported to the application */
struct ehca_qmap_entry {
	u16 app_wr_id;
	u16 reported;
};
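
/*
 * Illustrative sketch, not part of the original header: the low 16 bits of
 * a work request ID can carry the index into the qmap array, so a CQE's
 * wr_id maps back to the entry that records whether it was reported.
 */
static inline u16 ehca_qmap_index(u64 wr_id)
{
	return (u16)(wr_id & QMAP_IDX_MASK);
}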
struct ehca_qp {
	union {
		struct ib_qp ib_qp;
		struct ib_srq ib_srq;
	};
	enum ehca_ext_qp_type ext_type;
	enum ib_qp_state state;
	struct ipz_queue ipz_squeue;
	struct ehca_qmap_entry *sq_map;
	struct ipz_queue ipz_rqueue;
	struct h_galpas galpas;
	spinlock_t spinlock_s;
	spinlock_t spinlock_r;
	u32 sq_max_inline_data_size;
	struct ipz_qp_handle ipz_qp_handle;
	struct ib_qp_init_attr init_attr;
	struct ehca_cq *send_cq;
	struct ehca_cq *recv_cq;
	unsigned int sqerr_purgeflag;
	struct hlist_node list_entries;
	/* array to cache modify_qp()'s parms for GSI/SMI qp */
	struct ehca_mod_qp_parm *mod_qp_parm;
	/* mmap counter for resources mapped into user space */
	u32 mm_count_squeue;
	u32 mm_count_rqueue;
	u32 mm_count_galpa;
	/* unsolicited ack circumvention */
	int unsol_ack_circ;
	atomic_t nr_events; /* events seen */
	wait_queue_head_t wait_completion;
};
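
/*
 * Illustrative sketch, not part of the original header: nr_events and
 * wait_completion above allow the destroy path to block until every event
 * still referencing this QP has been handled.
 */
static inline void ehca_wait_for_qp_events(struct ehca_qp *qp)
{
	wait_event(qp->wait_completion, !atomic_read(&qp->nr_events));
}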
#define IS_SRQ(qp) ((qp)->ext_type == EQPT_SRQ)
#define HAS_SQ(qp) ((qp)->ext_type != EQPT_SRQ)
#define HAS_RQ(qp) ((qp)->ext_type != EQPT_SRQBASE)

/* must be power of 2 */
#define QP_HASHTAB_LEN 8
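
/*
 * Illustrative sketch, not part of the original header: because
 * QP_HASHTAB_LEN is a power of 2, a hash bucket can be selected with a
 * mask instead of a modulo when a QP number is looked up in a CQ's table.
 */
static inline unsigned int ehca_qp_hash(u32 qp_num)
{
	return qp_num & (QP_HASHTAB_LEN - 1);
}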
struct ehca_cq {
	struct ib_cq ib_cq;
	struct ipz_queue ipz_queue;
	struct h_galpas galpas;
	struct ipz_cq_handle ipz_cq_handle;
	struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
	struct list_head entry;
	u32 nr_callbacks; /* #events assigned to cpu by scaling code */
	atomic_t nr_events; /* #events seen */
	wait_queue_head_t wait_completion;
	spinlock_t task_lock;
	/* mmap counter for resources mapped into user space */
	u32 mm_count_queue;
	u32 mm_count_galpa;
};
enum ehca_mr_flag {
	EHCA_MR_FLAG_FMR = 0x80000000,   /* FMR, created with ehca_alloc_fmr */
	EHCA_MR_FLAG_MAXMR = 0x40000000, /* max-MR */
};
struct ehca_mr {
	union {
		struct ib_mr ib_mr;	/* must always be first in ehca_mr */
		struct ib_fmr ib_fmr;	/* must always be first in ehca_mr */
	} ib;
	struct ib_umem *umem;
	enum ehca_mr_flag flags;
	u32 num_kpages;		/* number of kernel pages */
	u32 num_hwpages;	/* number of hw pages to form MR */
	u64 hwpage_size;	/* hw page size used for this MR */
	int acl;		/* ACL (stored here for usage in reregister) */
	u64 *start;		/* virtual start address (stored here for
				 * usage in reregister) */
	u64 size;		/* size (stored here for usage in reregister) */
	u32 fmr_page_size;	/* page size for FMR */
	u32 fmr_max_pages;	/* max pages for FMR */
	u32 fmr_max_maps;	/* max outstanding maps for FMR */
	u32 fmr_map_cnt;	/* map counter for FMR */
	/* fw specific data */
	struct ipz_mrmw_handle ipz_mr_handle;	/* MR handle for h-calls */
	struct h_galpas galpas;
};
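
/*
 * Illustrative sketch, not part of the original header: fmr_map_cnt is
 * compared against fmr_max_maps so an FMR is not remapped more often than
 * the HCA allows before it has to be unmapped. Hypothetical helper.
 */
static inline int ehca_fmr_may_map(const struct ehca_mr *fmr)
{
	return fmr->fmr_map_cnt < fmr->fmr_max_maps;
}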
struct ehca_mw {
	struct ib_mw ib_mw;	/* gen2 mw, must always be first in ehca_mw */
	u8 never_bound;		/* indication MW was never bound */
	struct ipz_mrmw_handle ipz_mw_handle;	/* MW handle for h-calls */
	struct h_galpas galpas;
};
enum ehca_mr_pgi_type {
	EHCA_MR_PGI_PHYS = 1, /* type of ehca_reg_phys_mr,
			       * ehca_rereg_phys_mr,
			       * ehca_reg_internal_maxmr */
	EHCA_MR_PGI_USER = 2, /* type of ehca_reg_user_mr */
	EHCA_MR_PGI_FMR  = 3  /* type of ehca_map_phys_fmr */
};
struct ehca_mr_pginfo {
	enum ehca_mr_pgi_type type;
	u64 hwpage_size;	/* hw page size used for this MR */
	u64 num_hwpages;	/* number of hw pages */
	u64 hwpage_cnt;		/* counter for hw pages */
	u64 next_hwpage;	/* next hw page in buffer/chunk/listelem */

	union {
		struct { /* type EHCA_MR_PGI_PHYS section */
			struct ib_phys_buf *phys_buf_array;
		} phy;
		struct { /* type EHCA_MR_PGI_USER section */
			struct ib_umem *region;
			struct ib_umem_chunk *next_chunk;
		} usr;
		struct { /* type EHCA_MR_PGI_FMR section */
			u64 *page_list;
		} fmr;
	} u;
};
/* output parameters for MR/FMR hipz calls */
struct ehca_mr_hipzout_parms {
	struct ipz_mrmw_handle handle;
};
/* output parameters for MW hipz calls */
struct ehca_mw_hipzout_parms {
	struct ipz_mrmw_handle handle;
};
struct ehca_av {
	struct ib_ah ib_ah;
	struct ehca_ud_av av;
};
struct ehca_ucontext {
	struct ib_ucontext ib_ucontext;
};
int ehca_init_pd_cache(void);
void ehca_cleanup_pd_cache(void);
int ehca_init_cq_cache(void);
void ehca_cleanup_cq_cache(void);
int ehca_init_qp_cache(void);
void ehca_cleanup_qp_cache(void);
int ehca_init_av_cache(void);
void ehca_cleanup_av_cache(void);
int ehca_init_mrmw_cache(void);
void ehca_cleanup_mrmw_cache(void);
int ehca_init_small_qp_cache(void);
void ehca_cleanup_small_qp_cache(void);
extern rwlock_t ehca_qp_idr_lock;
extern rwlock_t ehca_cq_idr_lock;
extern struct idr ehca_qp_idr;
extern struct idr ehca_cq_idr;

extern int ehca_static_rate;
extern int ehca_port_act_time;
extern int ehca_use_hp_mr;
extern int ehca_scaling_code;
extern int ehca_lock_hcalls;
extern int ehca_nr_ports;
extern int ehca_max_cq;
extern int ehca_max_qp;
struct ipzu_queue_resp {
	u32 qe_size;      /* queue entry size */
	u32 queue_length; /* queue length allocated in bytes */
	u32 offset;       /* save offset within a page for small_qp */
};
struct ehca_create_cq_resp {
	struct ipzu_queue_resp ipz_queue;
};
struct ehca_create_qp_resp {
	/* qp_num assigned by ehca: sqp0/1 may have got different numbers */
	u32 real_qp_num;
	struct ipzu_queue_resp ipz_squeue;
	struct ipzu_queue_resp ipz_rqueue;
};
struct ehca_alloc_cq_parms {
	u32 act_nr_of_entries;
	struct ipz_eq_handle eq_handle;
};
enum ehca_service_type {
	ST_RC  = 0,
	ST_UC  = 1,
	ST_RD  = 2,
	ST_UD  = 3,
};
enum ehca_ll_comp_flags {
	LLQP_SEND_COMP = 0x20,
	LLQP_RECV_COMP = 0x40,
	LLQP_COMP_MASK = 0x60,
};
struct ehca_alloc_queue_parms {
	/* input parameters */
	int max_wr;
	int max_sge;
	int page_size;
	int is_small;

	/* output parameters */
	u16 act_nr_wqes;
	u8 act_nr_sges;
	u32 queue_size; /* bytes for small queues, pages otherwise */
};
struct ehca_alloc_qp_parms {
	struct ehca_alloc_queue_parms squeue;
	struct ehca_alloc_queue_parms rqueue;

	/* input parameters */
	enum ehca_service_type servicetype;
	enum ehca_ext_qp_type ext_type;
	enum ehca_ll_comp_flags ll_comp_flags;
	struct ipz_eq_handle eq_handle;
	struct ipz_cq_handle send_cq_handle, recv_cq_handle;
	u32 srq_qpn, srq_token, srq_limit;

	/* output parameters */
	struct ipz_qp_handle qp_handle;
	struct h_galpas galpas;
};
int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num);
struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);

#endif /* __EHCA_CLASSES_H__ */