/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: uverbs_cmd.c 2708 2005-06-24 17:27:21Z roland $
 */
#include <linux/file.h>
#include <linux/fs.h>

#include <asm/uaccess.h>

#include "uverbs.h"

#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)               \
        do {                                                    \
                (udata)->inbuf  = (void __user *) (ibuf);       \
                (udata)->outbuf = (void __user *) (obuf);       \
                (udata)->inlen  = (ilen);                       \
                (udata)->outlen = (olen);                       \
        } while (0)
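/*
 * Each ib_uverbs_* handler below follows the same calling convention:
 * "buf" points at the command structure copied in from userspace,
 * in_len/out_len give the sizes of the command and of the response
 * buffer the caller supplied, and the response is written back through
 * the user pointer carried in cmd.response.  Handlers return in_len on
 * success or a negative errno on failure.
 */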
ssize_t ib_uverbs_query_params(struct ib_uverbs_file *file,
                               const char __user *buf,
                               int in_len, int out_len)
{
        struct ib_uverbs_query_params      cmd;
        struct ib_uverbs_query_params_resp resp;

        if (out_len < sizeof resp)
                return -ENOSPC;
        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        memset(&resp, 0, sizeof resp);
        resp.num_cq_events = file->device->num_comp;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}
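/*
 * ib_uverbs_get_context allocates the per-process ib_ucontext for this
 * file, initializes the per-object lists hanging off it, and hands the
 * asynchronous and completion event file descriptors back to userspace.
 */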
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
                              const char __user *buf,
                              int in_len, int out_len)
{
        struct ib_uverbs_get_context      cmd;
        struct ib_uverbs_get_context_resp resp;
        struct ib_udata                   udata;
        struct ib_device                 *ibdev = file->device->ib_dev;
        struct ib_ucontext               *ucontext;
        int i;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;
        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        ucontext = ibdev->alloc_ucontext(ibdev, &udata);
        if (IS_ERR(ucontext))
                return PTR_ERR(ucontext);

        ucontext->device = ibdev;
        INIT_LIST_HEAD(&ucontext->pd_list);
        INIT_LIST_HEAD(&ucontext->mr_list);
        INIT_LIST_HEAD(&ucontext->mw_list);
        INIT_LIST_HEAD(&ucontext->cq_list);
        INIT_LIST_HEAD(&ucontext->qp_list);
        INIT_LIST_HEAD(&ucontext->srq_list);
        INIT_LIST_HEAD(&ucontext->ah_list);

        resp.async_fd = file->async_file.fd;
        for (i = 0; i < file->device->num_comp; ++i)
                if (copy_to_user((void __user *) (unsigned long) cmd.cq_fd_tab +
                                 i * sizeof (__u32),
                                 &file->comp_file[i].fd, sizeof (__u32))) {
                        ret = -EFAULT;
                        goto err_free;
                }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_free;
        }

        file->ucontext = ucontext;

        return in_len;

err_free:
        ibdev->dealloc_ucontext(ucontext);
        return ret;
}
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
                               const char __user *buf,
                               int in_len, int out_len)
{
        struct ib_uverbs_query_device      cmd;
        struct ib_uverbs_query_device_resp resp;
        struct ib_device_attr              attr;
        int                                ret;

        if (out_len < sizeof resp)
                return -ENOSPC;
        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        ret = ib_query_device(file->device->ib_dev, &attr);
        if (ret)
                return ret;

        memset(&resp, 0, sizeof resp);

        resp.fw_ver                    = attr.fw_ver;
        resp.node_guid                 = attr.node_guid;
        resp.sys_image_guid            = attr.sys_image_guid;
        resp.max_mr_size               = attr.max_mr_size;
        resp.page_size_cap             = attr.page_size_cap;
        resp.vendor_id                 = attr.vendor_id;
        resp.vendor_part_id            = attr.vendor_part_id;
        resp.hw_ver                    = attr.hw_ver;
        resp.max_qp                    = attr.max_qp;
        resp.max_qp_wr                 = attr.max_qp_wr;
        resp.device_cap_flags          = attr.device_cap_flags;
        resp.max_sge                   = attr.max_sge;
        resp.max_sge_rd                = attr.max_sge_rd;
        resp.max_cq                    = attr.max_cq;
        resp.max_cqe                   = attr.max_cqe;
        resp.max_mr                    = attr.max_mr;
        resp.max_pd                    = attr.max_pd;
        resp.max_qp_rd_atom            = attr.max_qp_rd_atom;
        resp.max_ee_rd_atom            = attr.max_ee_rd_atom;
        resp.max_res_rd_atom           = attr.max_res_rd_atom;
        resp.max_qp_init_rd_atom       = attr.max_qp_init_rd_atom;
        resp.max_ee_init_rd_atom       = attr.max_ee_init_rd_atom;
        resp.atomic_cap                = attr.atomic_cap;
        resp.max_ee                    = attr.max_ee;
        resp.max_rdd                   = attr.max_rdd;
        resp.max_mw                    = attr.max_mw;
        resp.max_raw_ipv6_qp           = attr.max_raw_ipv6_qp;
        resp.max_raw_ethy_qp           = attr.max_raw_ethy_qp;
        resp.max_mcast_grp             = attr.max_mcast_grp;
        resp.max_mcast_qp_attach       = attr.max_mcast_qp_attach;
        resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
        resp.max_ah                    = attr.max_ah;
        resp.max_fmr                   = attr.max_fmr;
        resp.max_map_per_fmr           = attr.max_map_per_fmr;
        resp.max_srq                   = attr.max_srq;
        resp.max_srq_wr                = attr.max_srq_wr;
        resp.max_srq_sge               = attr.max_srq_sge;
        resp.max_pkeys                 = attr.max_pkeys;
        resp.local_ca_ack_delay        = attr.local_ca_ack_delay;
        resp.phys_port_cnt             = file->device->ib_dev->phys_port_cnt;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
                             const char __user *buf,
                             int in_len, int out_len)
{
        struct ib_uverbs_query_port      cmd;
        struct ib_uverbs_query_port_resp resp;
        struct ib_port_attr              attr;
        int                              ret;

        if (out_len < sizeof resp)
                return -ENOSPC;
        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
        if (ret)
                return ret;

        memset(&resp, 0, sizeof resp);

        resp.state           = attr.state;
        resp.max_mtu         = attr.max_mtu;
        resp.active_mtu      = attr.active_mtu;
        resp.gid_tbl_len     = attr.gid_tbl_len;
        resp.port_cap_flags  = attr.port_cap_flags;
        resp.max_msg_sz      = attr.max_msg_sz;
        resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
        resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
        resp.pkey_tbl_len    = attr.pkey_tbl_len;
        resp.lid             = attr.lid;
        resp.sm_lid          = attr.sm_lid;
        resp.lmc             = attr.lmc;
        resp.max_vl_num      = attr.max_vl_num;
        resp.sm_sl           = attr.sm_sl;
        resp.subnet_timeout  = attr.subnet_timeout;
        resp.init_type_reply = attr.init_type_reply;
        resp.active_width    = attr.active_width;
        resp.active_speed    = attr.active_speed;
        resp.phys_state      = attr.phys_state;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}
ssize_t ib_uverbs_query_gid(struct ib_uverbs_file *file,
                            const char __user *buf,
                            int in_len, int out_len)
{
        struct ib_uverbs_query_gid      cmd;
        struct ib_uverbs_query_gid_resp resp;
        int                             ret;

        if (out_len < sizeof resp)
                return -ENOSPC;
        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        memset(&resp, 0, sizeof resp);

        ret = ib_query_gid(file->device->ib_dev, cmd.port_num, cmd.index,
                           (union ib_gid *) resp.gid);
        if (ret)
                return ret;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}
ssize_t ib_uverbs_query_pkey(struct ib_uverbs_file *file,
                             const char __user *buf,
                             int in_len, int out_len)
{
        struct ib_uverbs_query_pkey      cmd;
        struct ib_uverbs_query_pkey_resp resp;
        int                              ret;

        if (out_len < sizeof resp)
                return -ENOSPC;
        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        memset(&resp, 0, sizeof resp);

        ret = ib_query_pkey(file->device->ib_dev, cmd.port_num, cmd.index,
                            &resp.pkey);
        if (ret)
                return ret;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}
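/*
 * The object-creating handlers that follow (alloc_pd, reg_mr, create_cq,
 * create_qp, create_srq) share a pattern: create the kernel object, then
 * allocate a userspace handle for it with idr_pre_get()/idr_get_new()
 * under ib_uverbs_idr_mutex (retrying on -EAGAIN), link the uobject into
 * the owning ucontext's list, and finally copy the response -- including
 * the new handle -- back to userspace, unwinding all of the above if the
 * copy fails.
 */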
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
                           const char __user *buf,
                           int in_len, int out_len)
{
        struct ib_uverbs_alloc_pd      cmd;
        struct ib_uverbs_alloc_pd_resp resp;
        struct ib_udata                udata;
        struct ib_uobject             *uobj;
        struct ib_pd                  *pd;
        int                            ret;

        if (out_len < sizeof resp)
                return -ENOSPC;
        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        uobj->context = file->ucontext;

        pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
                                            file->ucontext, &udata);
        if (IS_ERR(pd)) {
                ret = PTR_ERR(pd);
                goto err;
        }

        pd->device  = file->device->ib_dev;
        pd->uobject = uobj;
        atomic_set(&pd->usecnt, 0);

retry:
        if (!idr_pre_get(&ib_uverbs_pd_idr, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto err_pd;
        }

        down(&ib_uverbs_idr_mutex);
        ret = idr_get_new(&ib_uverbs_pd_idr, pd, &uobj->id);
        up(&ib_uverbs_idr_mutex);

        if (ret == -EAGAIN)
                goto retry;
        if (ret)
                goto err_pd;

        list_add_tail(&uobj->list, &file->ucontext->pd_list);

        memset(&resp, 0, sizeof resp);
        resp.pd_handle = uobj->id;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_idr;
        }

        return in_len;

err_idr:
        list_del(&uobj->list);

        down(&ib_uverbs_idr_mutex);
        idr_remove(&ib_uverbs_pd_idr, uobj->id);
        up(&ib_uverbs_idr_mutex);

err_pd:
        ib_dealloc_pd(pd);

err:
        kfree(uobj);
        return ret;
}
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
                             const char __user *buf,
                             int in_len, int out_len)
{
        struct ib_uverbs_dealloc_pd cmd;
        struct ib_pd               *pd;
        struct ib_uobject          *uobj;
        int                         ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        down(&ib_uverbs_idr_mutex);

        pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
        if (!pd || pd->uobject->context != file->ucontext)
                goto out;

        uobj = pd->uobject;

        ret = ib_dealloc_pd(pd);
        if (ret)
                goto out;

        idr_remove(&ib_uverbs_pd_idr, cmd.pd_handle);

        list_del(&uobj->list);
        kfree(uobj);

out:
        up(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}
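/*
 * ib_uverbs_reg_mr pins the caller's memory region with ib_umem_get()
 * before asking the device driver to register it, so the driver works on
 * a page list that cannot be swapped out; ib_umem_release() undoes the
 * pinning on any later failure and again at deregistration time.
 */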
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
                         const char __user *buf, int in_len,
                         int out_len)
{
        struct ib_uverbs_reg_mr      cmd;
        struct ib_uverbs_reg_mr_resp resp;
        struct ib_udata              udata;
        struct ib_umem_object       *obj;
        struct ib_pd                *pd;
        struct ib_mr                *mr;
        int                          ret;

        if (out_len < sizeof resp)
                return -ENOSPC;
        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
                return -EINVAL;

        obj = kmalloc(sizeof *obj, GFP_KERNEL);
        if (!obj)
                return -ENOMEM;

        obj->uobject.context = file->ucontext;

        /*
         * We ask for writable memory if any access flags other than
         * "remote read" are set.  "Local write" and "remote write"
         * obviously require write access.  "Remote atomic" can do
         * things like fetch and add, which will modify memory, and
         * "MW bind" can change permissions by binding a window.
         */
        ret = ib_umem_get(file->device->ib_dev, &obj->umem,
                          (void *) (unsigned long) cmd.start, cmd.length,
                          !!(cmd.access_flags & ~IB_ACCESS_REMOTE_READ));
        if (ret)
                goto err_free;

        obj->umem.virt_base = cmd.hca_va;

        down(&ib_uverbs_idr_mutex);

        pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
        if (!pd || pd->uobject->context != file->ucontext) {
                ret = -EINVAL;
                goto err_up;
        }
        if (!pd->device->reg_user_mr) {
                ret = -ENOSYS;
                goto err_up;
        }

        mr = pd->device->reg_user_mr(pd, &obj->umem, cmd.access_flags, &udata);
        if (IS_ERR(mr)) {
                ret = PTR_ERR(mr);
                goto err_up;
        }

        mr->device  = pd->device;
        mr->pd      = pd;
        mr->uobject = &obj->uobject;
        atomic_inc(&pd->usecnt);
        atomic_set(&mr->usecnt, 0);

        memset(&resp, 0, sizeof resp);
        resp.lkey = mr->lkey;
        resp.rkey = mr->rkey;

retry:
        if (!idr_pre_get(&ib_uverbs_mr_idr, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto err_unreg;
        }

        ret = idr_get_new(&ib_uverbs_mr_idr, mr, &obj->uobject.id);
        if (ret == -EAGAIN)
                goto retry;
        if (ret)
                goto err_unreg;

        resp.mr_handle = obj->uobject.id;

        list_add_tail(&obj->uobject.list, &file->ucontext->mr_list);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_list;
        }

        up(&ib_uverbs_idr_mutex);

        return in_len;

err_list:
        list_del(&obj->uobject.list);

err_unreg:
        ib_dereg_mr(mr);

err_up:
        up(&ib_uverbs_idr_mutex);
        ib_umem_release(file->device->ib_dev, &obj->umem);

err_free:
        kfree(obj);
        return ret;
}
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
                           const char __user *buf, int in_len,
                           int out_len)
{
        struct ib_uverbs_dereg_mr cmd;
        struct ib_mr             *mr;
        struct ib_umem_object    *memobj;
        int                       ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        down(&ib_uverbs_idr_mutex);

        mr = idr_find(&ib_uverbs_mr_idr, cmd.mr_handle);
        if (!mr || mr->uobject->context != file->ucontext)
                goto out;

        memobj = container_of(mr->uobject, struct ib_umem_object, uobject);

        ret = ib_dereg_mr(mr);
        if (ret)
                goto out;

        idr_remove(&ib_uverbs_mr_idr, cmd.mr_handle);

        list_del(&memobj->uobject.list);

        ib_umem_release(file->device->ib_dev, &memobj->umem);
        kfree(memobj);

out:
        up(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_create_cq      cmd;
        struct ib_uverbs_create_cq_resp resp;
        struct ib_udata                 udata;
        struct ib_ucq_object           *uobj;
        struct ib_cq                   *cq;
        int                             ret;

        if (out_len < sizeof resp)
                return -ENOSPC;
        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        if (cmd.event_handler >= file->device->num_comp)
                return -EINVAL;

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        uobj->uobject.user_handle   = cmd.user_handle;
        uobj->uobject.context       = file->ucontext;
        uobj->comp_events_reported  = 0;
        uobj->async_events_reported = 0;
        INIT_LIST_HEAD(&uobj->comp_list);
        INIT_LIST_HEAD(&uobj->async_list);

        cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
                                             file->ucontext, &udata);
        if (IS_ERR(cq)) {
                ret = PTR_ERR(cq);
                goto err;
        }

        cq->device        = file->device->ib_dev;
        cq->uobject       = &uobj->uobject;
        cq->comp_handler  = ib_uverbs_comp_handler;
        cq->event_handler = ib_uverbs_cq_event_handler;
        cq->cq_context    = file;
        atomic_set(&cq->usecnt, 0);

retry:
        if (!idr_pre_get(&ib_uverbs_cq_idr, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto err_cq;
        }

        down(&ib_uverbs_idr_mutex);
        ret = idr_get_new(&ib_uverbs_cq_idr, cq, &uobj->uobject.id);
        up(&ib_uverbs_idr_mutex);

        if (ret == -EAGAIN)
                goto retry;
        if (ret)
                goto err_cq;

        list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list);

        memset(&resp, 0, sizeof resp);
        resp.cq_handle = uobj->uobject.id;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_list;
        }

        return in_len;

err_list:
        list_del(&uobj->uobject.list);

        down(&ib_uverbs_idr_mutex);
        idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id);
        up(&ib_uverbs_idr_mutex);

err_cq:
        ib_destroy_cq(cq);

err:
        kfree(uobj);
        return ret;
}
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_destroy_cq      cmd;
        struct ib_uverbs_destroy_cq_resp resp;
        struct ib_cq                    *cq;
        struct ib_ucq_object            *uobj;
        struct ib_uverbs_event          *evt, *tmp;
        u64                              user_handle;
        int                              ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        memset(&resp, 0, sizeof resp);

        down(&ib_uverbs_idr_mutex);

        cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
        if (!cq || cq->uobject->context != file->ucontext)
                goto out;

        user_handle = cq->uobject->user_handle;
        uobj        = container_of(cq->uobject, struct ib_ucq_object, uobject);

        ret = ib_destroy_cq(cq);
        if (ret)
                goto out;

        idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle);

        list_del(&uobj->uobject.list);

        spin_lock_irq(&file->comp_file[0].lock);
        list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
                list_del(&evt->list);
                kfree(evt);
        }
        spin_unlock_irq(&file->comp_file[0].lock);

        spin_lock_irq(&file->async_file.lock);
        list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
                list_del(&evt->list);
                kfree(evt);
        }
        spin_unlock_irq(&file->async_file.lock);

        resp.comp_events_reported  = uobj->comp_events_reported;
        resp.async_events_reported = uobj->async_events_reported;

        kfree(uobj);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out:
        up(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}
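/*
 * ib_uverbs_create_qp has to translate up to four userspace handles
 * (PD, send CQ, receive CQ and, optionally, SRQ) back into kernel
 * objects; the idr_find() lookups and the ownership checks are all done
 * under ib_uverbs_idr_mutex so none of those objects can be destroyed
 * while the QP is being created.
 */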
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_create_qp      cmd;
        struct ib_uverbs_create_qp_resp resp;
        struct ib_udata                 udata;
        struct ib_uevent_object        *uobj;
        struct ib_pd                   *pd;
        struct ib_cq                   *scq, *rcq;
        struct ib_srq                  *srq;
        struct ib_qp                   *qp;
        struct ib_qp_init_attr          attr;
        int                             ret;

        if (out_len < sizeof resp)
                return -ENOSPC;
        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        down(&ib_uverbs_idr_mutex);

        pd  = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
        scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle);
        rcq = idr_find(&ib_uverbs_cq_idr, cmd.recv_cq_handle);
        srq = cmd.is_srq ? idr_find(&ib_uverbs_srq_idr, cmd.srq_handle) : NULL;

        if (!pd  || pd->uobject->context  != file->ucontext ||
            !scq || scq->uobject->context != file->ucontext ||
            !rcq || rcq->uobject->context != file->ucontext ||
            (cmd.is_srq && (!srq || srq->uobject->context != file->ucontext))) {
                ret = -EINVAL;
                goto err_up;
        }

        attr.event_handler = ib_uverbs_qp_event_handler;
        attr.qp_context    = file;
        attr.send_cq       = scq;
        attr.recv_cq       = rcq;
        attr.srq           = srq;
        attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
        attr.qp_type       = cmd.qp_type;

        attr.cap.max_send_wr     = cmd.max_send_wr;
        attr.cap.max_recv_wr     = cmd.max_recv_wr;
        attr.cap.max_send_sge    = cmd.max_send_sge;
        attr.cap.max_recv_sge    = cmd.max_recv_sge;
        attr.cap.max_inline_data = cmd.max_inline_data;

        uobj->uobject.user_handle = cmd.user_handle;
        uobj->uobject.context     = file->ucontext;
        uobj->events_reported     = 0;
        INIT_LIST_HEAD(&uobj->event_list);

        qp = pd->device->create_qp(pd, &attr, &udata);
        if (IS_ERR(qp)) {
                ret = PTR_ERR(qp);
                goto err_up;
        }

        qp->device        = pd->device;
        qp->pd            = pd;
        qp->send_cq       = attr.send_cq;
        qp->recv_cq       = attr.recv_cq;
        qp->srq           = attr.srq;
        qp->uobject       = &uobj->uobject;
        qp->event_handler = attr.event_handler;
        qp->qp_context    = attr.qp_context;
        qp->qp_type       = attr.qp_type;
        atomic_inc(&pd->usecnt);
        atomic_inc(&attr.send_cq->usecnt);
        atomic_inc(&attr.recv_cq->usecnt);
        if (attr.srq)
                atomic_inc(&attr.srq->usecnt);

        memset(&resp, 0, sizeof resp);
        resp.qpn = qp->qp_num;

retry:
        if (!idr_pre_get(&ib_uverbs_qp_idr, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto err_destroy;
        }

        ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->uobject.id);
        if (ret == -EAGAIN)
                goto retry;
        if (ret)
                goto err_destroy;

        resp.qp_handle = uobj->uobject.id;

        list_add_tail(&uobj->uobject.list, &file->ucontext->qp_list);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_list;
        }

        up(&ib_uverbs_idr_mutex);

        return in_len;

err_list:
        list_del(&uobj->uobject.list);

err_destroy:
        ib_destroy_qp(qp);

err_up:
        up(&ib_uverbs_idr_mutex);

        kfree(uobj);
        return ret;
}
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_modify_qp cmd;
        struct ib_qp              *qp;
        struct ib_qp_attr         *attr;
        int                        ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr)
                return -ENOMEM;

        down(&ib_uverbs_idr_mutex);

        qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
        if (!qp || qp->uobject->context != file->ucontext) {
                ret = -EINVAL;
                goto out;
        }

        attr->qp_state            = cmd.qp_state;
        attr->cur_qp_state        = cmd.cur_qp_state;
        attr->path_mtu            = cmd.path_mtu;
        attr->path_mig_state      = cmd.path_mig_state;
        attr->qkey                = cmd.qkey;
        attr->rq_psn              = cmd.rq_psn;
        attr->sq_psn              = cmd.sq_psn;
        attr->dest_qp_num         = cmd.dest_qp_num;
        attr->qp_access_flags     = cmd.qp_access_flags;
        attr->pkey_index          = cmd.pkey_index;
        attr->alt_pkey_index      = cmd.alt_pkey_index;
        attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
        attr->max_rd_atomic       = cmd.max_rd_atomic;
        attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
        attr->min_rnr_timer       = cmd.min_rnr_timer;
        attr->port_num            = cmd.port_num;
        attr->timeout             = cmd.timeout;
        attr->retry_cnt           = cmd.retry_cnt;
        attr->rnr_retry           = cmd.rnr_retry;
        attr->alt_port_num        = cmd.alt_port_num;
        attr->alt_timeout         = cmd.alt_timeout;

        memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
        attr->ah_attr.grh.flow_label    = cmd.dest.flow_label;
        attr->ah_attr.grh.sgid_index    = cmd.dest.sgid_index;
        attr->ah_attr.grh.hop_limit     = cmd.dest.hop_limit;
        attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
        attr->ah_attr.dlid              = cmd.dest.dlid;
        attr->ah_attr.sl                = cmd.dest.sl;
        attr->ah_attr.src_path_bits     = cmd.dest.src_path_bits;
        attr->ah_attr.static_rate       = cmd.dest.static_rate;
        attr->ah_attr.ah_flags          = cmd.dest.is_global ? IB_AH_GRH : 0;
        attr->ah_attr.port_num          = cmd.dest.port_num;

        memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
        attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
        attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
        attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
        attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
        attr->alt_ah_attr.dlid              = cmd.alt_dest.dlid;
        attr->alt_ah_attr.sl                = cmd.alt_dest.sl;
        attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
        attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
        attr->alt_ah_attr.ah_flags          = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
        attr->alt_ah_attr.port_num          = cmd.alt_dest.port_num;

        ret = ib_modify_qp(qp, attr, cmd.attr_mask);
        if (ret)
                goto out;

        ret = in_len;

out:
        up(&ib_uverbs_idr_mutex);
        kfree(attr);

        return ret;
}
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_destroy_qp      cmd;
        struct ib_uverbs_destroy_qp_resp resp;
        struct ib_qp                    *qp;
        struct ib_uevent_object         *uobj;
        struct ib_uverbs_event          *evt, *tmp;
        int                              ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        memset(&resp, 0, sizeof resp);

        down(&ib_uverbs_idr_mutex);

        qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
        if (!qp || qp->uobject->context != file->ucontext)
                goto out;

        uobj = container_of(qp->uobject, struct ib_uevent_object, uobject);

        ret = ib_destroy_qp(qp);
        if (ret)
                goto out;

        idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle);

        list_del(&uobj->uobject.list);

        spin_lock_irq(&file->async_file.lock);
        list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
                list_del(&evt->list);
                kfree(evt);
        }
        spin_unlock_irq(&file->async_file.lock);

        resp.events_reported = uobj->events_reported;

        kfree(uobj);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out:
        up(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
                               const char __user *buf, int in_len,
                               int out_len)
{
        struct ib_uverbs_attach_mcast cmd;
        struct ib_qp                 *qp;
        int                           ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        down(&ib_uverbs_idr_mutex);

        qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
        if (qp && qp->uobject->context == file->ucontext)
                ret = ib_attach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);

        up(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
                               const char __user *buf, int in_len,
                               int out_len)
{
        struct ib_uverbs_detach_mcast cmd;
        struct ib_qp                 *qp;
        int                           ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        down(&ib_uverbs_idr_mutex);

        qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
        if (qp && qp->uobject->context == file->ucontext)
                ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);

        up(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_create_srq      cmd;
        struct ib_uverbs_create_srq_resp resp;
        struct ib_udata                  udata;
        struct ib_uevent_object         *uobj;
        struct ib_pd                    *pd;
        struct ib_srq                   *srq;
        struct ib_srq_init_attr          attr;
        int                              ret;

        if (out_len < sizeof resp)
                return -ENOSPC;
        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        down(&ib_uverbs_idr_mutex);

        pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
        if (!pd || pd->uobject->context != file->ucontext) {
                ret = -EINVAL;
                goto err_up;
        }

        attr.event_handler  = ib_uverbs_srq_event_handler;
        attr.srq_context    = file;
        attr.attr.max_wr    = cmd.max_wr;
        attr.attr.max_sge   = cmd.max_sge;
        attr.attr.srq_limit = cmd.srq_limit;

        uobj->uobject.user_handle = cmd.user_handle;
        uobj->uobject.context     = file->ucontext;
        uobj->events_reported     = 0;
        INIT_LIST_HEAD(&uobj->event_list);

        srq = pd->device->create_srq(pd, &attr, &udata);
        if (IS_ERR(srq)) {
                ret = PTR_ERR(srq);
                goto err_up;
        }

        srq->device        = pd->device;
        srq->pd            = pd;
        srq->uobject       = &uobj->uobject;
        srq->event_handler = attr.event_handler;
        srq->srq_context   = attr.srq_context;
        atomic_inc(&pd->usecnt);
        atomic_set(&srq->usecnt, 0);

        memset(&resp, 0, sizeof resp);

retry:
        if (!idr_pre_get(&ib_uverbs_srq_idr, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto err_destroy;
        }

        ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->uobject.id);
        if (ret == -EAGAIN)
                goto retry;
        if (ret)
                goto err_destroy;

        resp.srq_handle = uobj->uobject.id;

        list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_list;
        }

        up(&ib_uverbs_idr_mutex);

        return in_len;

err_list:
        list_del(&uobj->uobject.list);

err_destroy:
        ib_destroy_srq(srq);

err_up:
        up(&ib_uverbs_idr_mutex);

        kfree(uobj);
        return ret;
}
ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_modify_srq cmd;
        struct ib_srq              *srq;
        struct ib_srq_attr          attr;
        int                         ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        down(&ib_uverbs_idr_mutex);

        srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
        if (!srq || srq->uobject->context != file->ucontext) {
                ret = -EINVAL;
                goto out;
        }

        attr.max_wr    = cmd.max_wr;
        attr.max_sge   = cmd.max_sge;
        attr.srq_limit = cmd.srq_limit;

        ret = ib_modify_srq(srq, &attr, cmd.attr_mask);

out:
        up(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
                              const char __user *buf, int in_len,
                              int out_len)
{
        struct ib_uverbs_destroy_srq      cmd;
        struct ib_uverbs_destroy_srq_resp resp;
        struct ib_srq                    *srq;
        struct ib_uevent_object          *uobj;
        struct ib_uverbs_event           *evt, *tmp;
        int                               ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        down(&ib_uverbs_idr_mutex);

        memset(&resp, 0, sizeof resp);

        srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
        if (!srq || srq->uobject->context != file->ucontext)
                goto out;

        uobj = container_of(srq->uobject, struct ib_uevent_object, uobject);

        ret = ib_destroy_srq(srq);
        if (ret)
                goto out;

        idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle);

        list_del(&uobj->uobject.list);

        spin_lock_irq(&file->async_file.lock);
        list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
                list_del(&evt->list);
                kfree(evt);
        }
        spin_unlock_irq(&file->async_file.lock);

        resp.events_reported = uobj->events_reported;

        kfree(uobj);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out:
        up(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}