/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: verbs.c 1349 2004-12-16 21:09:43Z roland $
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

/* Protection domains */
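
/*
 * Wrap the driver's alloc_pd method: on success the core fills in the
 * fields it owns and starts the PD's use count at zero.
 */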
struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = device->alloc_pd(device, NULL, NULL);

	if (!IS_ERR(pd)) {
		pd->device  = device;
		pd->uobject = NULL;
		atomic_set(&pd->usecnt, 0);
	}

	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);
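
/*
 * A PD can only be freed once every object created within it has been
 * destroyed.
 */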
int ib_dealloc_pd(struct ib_pd *pd)
{
	if (atomic_read(&pd->usecnt))
		return -EBUSY;

	return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */
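
/*
 * Each address handle holds a reference on its PD for its lifetime.
 */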
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);
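
/*
 * Build an AH that reverses the path of a received datagram, so the
 * consumer can reply to the sender described by @wc and @grh.
 */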
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	u32 flow_class;
	u16 gid_index;
	int ret;

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid = wc->slid;
	ah_attr.sl = wc->sl;
	ah_attr.src_path_bits = wc->dlid_path_bits;
	ah_attr.port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr.ah_flags = IB_AH_GRH;
		/*
		 * The reply's destination is the requester's source GID;
		 * the GID the request was sent to is one of ours, so look
		 * it up in the cache to find the local sgid_index.
		 */
		ah_attr.grh.dgid = grh->sgid;

		ret = ib_find_cached_gid(pd->device, &grh->dgid, &port_num,
					 &gid_index);
		if (ret)
			return ERR_PTR(ret);

		ah_attr.grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr.grh.flow_label = flow_class & 0xFFFFF;
		ah_attr.grh.traffic_class = (flow_class >> 20) & 0xFF;
		ah_attr.grh.hop_limit = grh->hop_limit;
	}

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);
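
/*
 * Drop the AH's PD reference only if the driver actually destroyed it.
 */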
int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */
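
/*
 * SRQs are optional, so the method pointer is checked first.  A new
 * SRQ pins its PD and starts with a use count of zero; QPs created on
 * top of it will raise that count.
 */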
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device        = pd->device;
		srq->pd            = pd;
		srq->uobject       = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq(srq, srq_attr, srq_attr_mask);
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);
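
/*
 * An SRQ cannot be destroyed while any QP still refers to it.
 */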
int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;

	ret = srq->device->destroy_srq(srq);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */
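
/*
 * A new QP takes references on its PD, its send and receive CQs and,
 * if one is used, its SRQ, keeping them alive while the QP exists.
 */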
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp;

	qp = pd->device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device        = pd->device;
		qp->pd            = pd;
		qp->send_cq       = qp_init_attr->send_cq;
		qp->recv_cq       = qp_init_attr->recv_cq;
		qp->srq           = qp_init_attr->srq;
		qp->uobject       = NULL;
		qp->event_handler = qp_init_attr->event_handler;
		qp->qp_context    = qp_init_attr->qp_context;
		qp->qp_type       = qp_init_attr->qp_type;
		atomic_inc(&pd->usecnt);
		atomic_inc(&qp_init_attr->send_cq->usecnt);
		atomic_inc(&qp_init_attr->recv_cq->usecnt);
		if (qp_init_attr->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	return qp->device->modify_qp(qp, qp_attr, qp_attr_mask);
}
EXPORT_SYMBOL(ib_modify_qp);

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);
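
/*
 * Save pointers to the related objects before the QP memory is freed,
 * then release the references taken in ib_create_qp().
 */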
int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	pd  = qp->pd;
	scq = qp->send_cq;
	rcq = qp->recv_cq;
	srq = qp->srq;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		atomic_dec(&scq->usecnt);
		atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */
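
/*
 * CQs belong to the device rather than to a PD.  @cqe is the minimum
 * number of entries required; the driver may allocate more.
 */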
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cqe, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */
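
/*
 * Get a memory region for use with local DMA addresses.
 */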
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);

struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start)
{
	struct ib_mr *mr;

	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
				     mr_access_flags, iova_start);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);
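
/*
 * Re-registration is refused while the MR is in use (its use count
 * tracks memory window bindings), and a PD change moves the MR's
 * reference from the old PD to the new one.
 */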
int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start)
{
	struct ib_pd *old_pd;
	int ret;

	if (!mr->device->rereg_phys_mr)
		return -ENOSYS;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	old_pd = mr->pd;

	ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
					phys_buf_array, num_phys_buf,
					mr_access_flags, iova_start);

	if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
		atomic_dec(&old_pd->usecnt);
		atomic_inc(&pd->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return mr->device->query_mr ?
		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/* Memory windows */
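
/*
 * Memory windows are optional; drivers that lack them leave alloc_mw
 * NULL.
 */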
struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd);
	if (!IS_ERR(mw)) {
		mw->device  = pd->device;
		mw->pd      = pd;
		mw->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);

/* "Fast" memory regions */
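
/*
 * Fast memory regions can be remapped cheaply without a full
 * deregister/reregister cycle; support is optional.
 */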
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd     = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);
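
/*
 * The whole list is handed to the driver in one call, so every FMR on
 * it must come from the same device.
 */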
int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */
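
/*
 * Only UD QPs may be attached to multicast groups, and the GID must
 * be a multicast GID (prefix byte 0xff).
 */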
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	if (!qp->device->attach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	return qp->device->attach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	return qp->device->detach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_detach_mcast);