/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ucm.c 2594 2005-06-13 19:46:02Z libor $
 */
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/cdev.h>

#include <asm/uaccess.h>

#include "ucm.h"        /* struct ib_ucm_file/context/event and command ABI */

MODULE_AUTHOR("Libor Michalek");
MODULE_DESCRIPTION("InfiniBand userspace Connection Manager access");
MODULE_LICENSE("Dual BSD/GPL");

/*
 * Fixed device number.  231 is the char major reserved for InfiniBand;
 * minor 255 for the CM device is an assumption recovered from the
 * MKDEV() use below.
 */
enum {
        IB_UCM_MAJOR = 231,
        IB_UCM_MINOR = 255
};

#define IB_UCM_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_MINOR)

static struct semaphore ctx_id_mutex;
static struct idr       ctx_id_table;
static int              ctx_id_rover = 0;

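/*
 * Contexts are addressed by the integer ID handed to userspace.  Each
 * context carries a plain reference count: the creator holds a "user
 * reference" until IB_USER_CM_CMD_DESTROY_ID, and every lookup through
 * ib_ucm_ctx_get() takes a transient "func reference" dropped with
 * ib_ucm_ctx_put().  (Summary inferred from the get/put pairing in
 * this file.)
 */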
static struct ib_ucm_context *ib_ucm_ctx_get(int id)
{
        struct ib_ucm_context *ctx;

        down(&ctx_id_mutex);
        ctx = idr_find(&ctx_id_table, id);
        if (ctx)
                ctx->ref++; /* func reference */
        up(&ctx_id_mutex);

        return ctx;
}

static void ib_ucm_ctx_put(struct ib_ucm_context *ctx)
{
        struct ib_ucm_event *uevent;

        down(&ctx_id_mutex);
        ctx->ref--;
        if (!ctx->ref)
                idr_remove(&ctx_id_table, ctx->id);
        up(&ctx_id_mutex);

        if (ctx->ref)
                return;

        down(&ctx->file->mutex);
        list_del(&ctx->file_list);
        while (!list_empty(&ctx->events)) {
                uevent = list_entry(ctx->events.next,
                                    struct ib_ucm_event, ctx_list);
                list_del(&uevent->file_list);
                list_del(&uevent->ctx_list);

                /* clear incoming connections. */
                if (uevent->cm_id)
                        ib_destroy_cm_id(uevent->cm_id);

                kfree(uevent);
        }
        up(&ctx->file->mutex);

        printk(KERN_DEBUG "UCM: Destroyed CM ID <%d>\n", ctx->id);

        ib_destroy_cm_id(ctx->cm_id);
        kfree(ctx);
}

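/*
 * Context IDs come from the idr, starting just above a rover that is
 * bumped on every allocation, so a freed ID is not reused immediately
 * and a stale ID from userspace is unlikely to land on a new context.
 * (Rationale inferred; the rover wraps at INT_MAX.)
 */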
static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
{
        struct ib_ucm_context *ctx;
        int result;

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return NULL;

        ctx->ref  = 1; /* user reference */
        ctx->file = file;
        INIT_LIST_HEAD(&ctx->events);
        init_MUTEX(&ctx->mutex);
        list_add_tail(&ctx->file_list, &file->ctxs);

        ctx_id_rover = (ctx_id_rover + 1) & INT_MAX;
retry:
        result = idr_pre_get(&ctx_id_table, GFP_KERNEL);
        if (!result)
                goto error;

        down(&ctx_id_mutex);
        result = idr_get_new_above(&ctx_id_table, ctx, ctx_id_rover, &ctx->id);
        up(&ctx_id_mutex);

        if (result == -EAGAIN)
                goto retry;
        if (result)
                goto error;

        printk(KERN_DEBUG "UCM: Allocated CM ID <%d>\n", ctx->id);
        return ctx;
error:
        list_del(&ctx->file_list);
        kfree(ctx);
        return NULL;
}

/*
 * Event portion of the API, handle CM events
 * and allow event polling.
 */
static void ib_ucm_event_path_get(struct ib_ucm_path_rec *upath,
                                  struct ib_sa_path_rec *kpath)
{
        if (!kpath || !upath)
                return;

        memcpy(upath->dgid, kpath->dgid.raw, sizeof(union ib_gid));
        memcpy(upath->sgid, kpath->sgid.raw, sizeof(union ib_gid));

        upath->dlid             = kpath->dlid;
        upath->slid             = kpath->slid;
        upath->raw_traffic      = kpath->raw_traffic;
        upath->flow_label       = kpath->flow_label;
        upath->hop_limit        = kpath->hop_limit;
        upath->traffic_class    = kpath->traffic_class;
        upath->reversible       = kpath->reversible;
        upath->numb_path        = kpath->numb_path;
        upath->pkey             = kpath->pkey;
        upath->sl               = kpath->sl;
        upath->mtu_selector     = kpath->mtu_selector;
        upath->mtu              = kpath->mtu;
        upath->rate_selector    = kpath->rate_selector;
        upath->rate             = kpath->rate;
        upath->packet_life_time = kpath->packet_life_time;
        upath->preference       = kpath->preference;

        upath->packet_life_time_selector =
                kpath->packet_life_time_selector;
}

static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp *ureq,
                                 struct ib_cm_req_event_param *kreq)
{
        ureq->listen_id = (long)kreq->listen_id->context;

        ureq->remote_ca_guid             = kreq->remote_ca_guid;
        ureq->remote_qkey                = kreq->remote_qkey;
        ureq->remote_qpn                 = kreq->remote_qpn;
        ureq->qp_type                    = kreq->qp_type;
        ureq->starting_psn               = kreq->starting_psn;
        ureq->responder_resources        = kreq->responder_resources;
        ureq->initiator_depth            = kreq->initiator_depth;
        ureq->local_cm_response_timeout  = kreq->local_cm_response_timeout;
        ureq->flow_control               = kreq->flow_control;
        ureq->remote_cm_response_timeout = kreq->remote_cm_response_timeout;
        ureq->retry_count                = kreq->retry_count;
        ureq->rnr_retry_count            = kreq->rnr_retry_count;
        ureq->srq                        = kreq->srq;

        ib_ucm_event_path_get(&ureq->primary_path, kreq->primary_path);
        ib_ucm_event_path_get(&ureq->alternate_path, kreq->alternate_path);
}

static void ib_ucm_event_rep_get(struct ib_ucm_rep_event_resp *urep,
                                 struct ib_cm_rep_event_param *krep)
{
        urep->remote_ca_guid      = krep->remote_ca_guid;
        urep->remote_qkey         = krep->remote_qkey;
        urep->remote_qpn          = krep->remote_qpn;
        urep->starting_psn        = krep->starting_psn;
        urep->responder_resources = krep->responder_resources;
        urep->initiator_depth     = krep->initiator_depth;
        urep->target_ack_delay    = krep->target_ack_delay;
        urep->failover_accepted   = krep->failover_accepted;
        urep->flow_control        = krep->flow_control;
        urep->rnr_retry_count     = krep->rnr_retry_count;
        urep->srq                 = krep->srq;
}

static void ib_ucm_event_rej_get(struct ib_ucm_rej_event_resp *urej,
                                 struct ib_cm_rej_event_param *krej)
{
        urej->reason = krej->reason;
}

static void ib_ucm_event_mra_get(struct ib_ucm_mra_event_resp *umra,
                                 struct ib_cm_mra_event_param *kmra)
{
        umra->timeout = kmra->service_timeout;
}

static void ib_ucm_event_lap_get(struct ib_ucm_lap_event_resp *ulap,
                                 struct ib_cm_lap_event_param *klap)
{
        ib_ucm_event_path_get(&ulap->path, klap->alternate_path);
}

static void ib_ucm_event_apr_get(struct ib_ucm_apr_event_resp *uapr,
                                 struct ib_cm_apr_event_param *kapr)
{
        uapr->status = kapr->ap_status;
}

static void ib_ucm_event_sidr_req_get(struct ib_ucm_sidr_req_event_resp *ureq,
                                      struct ib_cm_sidr_req_event_param *kreq)
{
        ureq->listen_id = (long)kreq->listen_id->context;
        ureq->pkey      = kreq->pkey;
}

static void ib_ucm_event_sidr_rep_get(struct ib_ucm_sidr_rep_event_resp *urep,
                                      struct ib_cm_sidr_rep_event_param *krep)
{
        urep->status = krep->status;
        urep->qkey   = krep->qkey;
        urep->qpn    = krep->qpn;
}

static int ib_ucm_event_process(struct ib_cm_event *evt,
                                struct ib_ucm_event *uvt)
{
        void *info = NULL;

        switch (evt->event) {
        case IB_CM_REQ_RECEIVED:
                ib_ucm_event_req_get(&uvt->resp.u.req_resp,
                                     &evt->param.req_rcvd);
                uvt->data_len = IB_CM_REQ_PRIVATE_DATA_SIZE;
                uvt->resp.present |= (evt->param.req_rcvd.primary_path ?
                                      IB_UCM_PRES_PRIMARY : 0);
                uvt->resp.present |= (evt->param.req_rcvd.alternate_path ?
                                      IB_UCM_PRES_ALTERNATE : 0);
                break;
        case IB_CM_REP_RECEIVED:
                ib_ucm_event_rep_get(&uvt->resp.u.rep_resp,
                                     &evt->param.rep_rcvd);
                uvt->data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
                break;
        case IB_CM_RTU_RECEIVED:
                uvt->data_len = IB_CM_RTU_PRIVATE_DATA_SIZE;
                uvt->resp.u.send_status = evt->param.send_status;
                break;
        case IB_CM_DREQ_RECEIVED:
                uvt->data_len = IB_CM_DREQ_PRIVATE_DATA_SIZE;
                uvt->resp.u.send_status = evt->param.send_status;
                break;
        case IB_CM_DREP_RECEIVED:
                uvt->data_len = IB_CM_DREP_PRIVATE_DATA_SIZE;
                uvt->resp.u.send_status = evt->param.send_status;
                break;
        case IB_CM_MRA_RECEIVED:
                ib_ucm_event_mra_get(&uvt->resp.u.mra_resp,
                                     &evt->param.mra_rcvd);
                uvt->data_len = IB_CM_MRA_PRIVATE_DATA_SIZE;
                break;
        case IB_CM_REJ_RECEIVED:
                ib_ucm_event_rej_get(&uvt->resp.u.rej_resp,
                                     &evt->param.rej_rcvd);
                uvt->data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
                uvt->info_len = evt->param.rej_rcvd.ari_length;
                info          = evt->param.rej_rcvd.ari;
                break;
        case IB_CM_LAP_RECEIVED:
                ib_ucm_event_lap_get(&uvt->resp.u.lap_resp,
                                     &evt->param.lap_rcvd);
                uvt->data_len = IB_CM_LAP_PRIVATE_DATA_SIZE;
                uvt->resp.present |= (evt->param.lap_rcvd.alternate_path ?
                                      IB_UCM_PRES_ALTERNATE : 0);
                break;
        case IB_CM_APR_RECEIVED:
                ib_ucm_event_apr_get(&uvt->resp.u.apr_resp,
                                     &evt->param.apr_rcvd);
                uvt->data_len = IB_CM_APR_PRIVATE_DATA_SIZE;
                uvt->info_len = evt->param.apr_rcvd.info_len;
                info          = evt->param.apr_rcvd.apr_info;
                break;
        case IB_CM_SIDR_REQ_RECEIVED:
                ib_ucm_event_sidr_req_get(&uvt->resp.u.sidr_req_resp,
                                          &evt->param.sidr_req_rcvd);
                uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE;
                break;
        case IB_CM_SIDR_REP_RECEIVED:
                ib_ucm_event_sidr_rep_get(&uvt->resp.u.sidr_rep_resp,
                                          &evt->param.sidr_rep_rcvd);
                uvt->data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
                uvt->info_len = evt->param.sidr_rep_rcvd.info_len;
                info          = evt->param.sidr_rep_rcvd.info;
                break;
        default:
                uvt->resp.u.send_status = evt->param.send_status;
                break;
        }

        if (uvt->data_len && evt->private_data) {
                uvt->data = kmalloc(uvt->data_len, GFP_KERNEL);
                if (!uvt->data)
                        return -ENOMEM;

                memcpy(uvt->data, evt->private_data, uvt->data_len);
                uvt->resp.present |= IB_UCM_PRES_DATA;
        }

        if (uvt->info_len && info) {
                uvt->info = kmalloc(uvt->info_len, GFP_KERNEL);
                if (!uvt->info) {
                        kfree(uvt->data);
                        uvt->data = NULL;
                        return -ENOMEM;
                }

                memcpy(uvt->info, info, uvt->info_len);
                uvt->resp.present |= IB_UCM_PRES_INFO;
        }

        return 0;
}

static int ib_ucm_event_handler(struct ib_cm_id *cm_id,
                                struct ib_cm_event *event)
{
        struct ib_ucm_event *uevent;
        struct ib_ucm_context *ctx;
        int result = 0;
        int id;

        /*
         * lookup correct context based on event type.
         */
        switch (event->event) {
        case IB_CM_REQ_RECEIVED:
                id = (long)event->param.req_rcvd.listen_id->context;
                break;
        case IB_CM_SIDR_REQ_RECEIVED:
                id = (long)event->param.sidr_req_rcvd.listen_id->context;
                break;
        default:
                id = (long)cm_id->context;
                break;
        }

        printk(KERN_DEBUG "UCM: Event. CM ID <%d> event <%d>\n",
               id, event->event);

        ctx = ib_ucm_ctx_get(id);
        if (!ctx)
                return -ENOENT;

        if (event->event == IB_CM_REQ_RECEIVED ||
            event->event == IB_CM_SIDR_REQ_RECEIVED)
                id = IB_UCM_CM_ID_INVALID;

        uevent = kmalloc(sizeof(*uevent), GFP_KERNEL);
        if (!uevent) {
                result = -ENOMEM;
                goto done;
        }

        memset(uevent, 0, sizeof(*uevent));

        uevent->resp.id    = id;
        uevent->resp.event = event->event;

        result = ib_ucm_event_process(event, uevent);
        if (result) {
                kfree(uevent);
                goto done;
        }

        /* new connection requests carry the cm_id for a future context */
        uevent->cm_id = ((event->event == IB_CM_REQ_RECEIVED ||
                          event->event == IB_CM_SIDR_REQ_RECEIVED) ?
                         cm_id : NULL);

        down(&ctx->file->mutex);
        list_add_tail(&uevent->file_list, &ctx->file->events);
        list_add_tail(&uevent->ctx_list, &ctx->events);
        wake_up_interruptible(&ctx->file->poll_wait);
        up(&ctx->file->mutex);
done:
        ib_ucm_ctx_put(ctx); /* func reference */
        return result;
}

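/*
 * IB_USER_CM_CMD_EVENT: block (unless O_NONBLOCK) until an event is
 * queued on this open file, then copy it out to userspace.  For
 * incoming REQ/SIDR_REQ events the new kernel cm_id is wrapped in a
 * freshly allocated context so that later commands can address the
 * connection by ID.
 */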
static ssize_t ib_ucm_event(struct ib_ucm_file *file,
                            const char __user *inbuf,
                            int in_len, int out_len)
{
        struct ib_ucm_context *ctx;
        struct ib_ucm_event_get cmd;
        struct ib_ucm_event *uevent = NULL;
        int result = 0;
        DEFINE_WAIT(wait);

        if (out_len < sizeof(struct ib_ucm_event_resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        down(&file->mutex);
        while (list_empty(&file->events)) {
                if (file->filp->f_flags & O_NONBLOCK) {
                        result = -EAGAIN;
                        break;
                }
                if (signal_pending(current)) {
                        result = -ERESTARTSYS;
                        break;
                }

                prepare_to_wait(&file->poll_wait, &wait, TASK_INTERRUPTIBLE);
                up(&file->mutex);
                schedule();
                down(&file->mutex);
                finish_wait(&file->poll_wait, &wait);
        }
        if (result)
                goto done;

        uevent = list_entry(file->events.next, struct ib_ucm_event, file_list);

        if (!uevent->cm_id)
                goto user;

        /* incoming connection: bind the new cm_id to a fresh context */
        ctx = ib_ucm_ctx_alloc(file);
        if (!ctx) {
                result = -ENOMEM;
                goto done;
        }

        ctx->cm_id             = uevent->cm_id;
        ctx->cm_id->cm_handler = ib_ucm_event_handler;
        ctx->cm_id->context    = (void *)(unsigned long)ctx->id;

        uevent->resp.id = ctx->id;
user:
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &uevent->resp, sizeof(uevent->resp))) {
                result = -EFAULT;
                goto done;
        }

        if (uevent->data) {
                if (cmd.data_len < uevent->data_len) {
                        result = -ENOMEM;
                        goto done;
                }
                if (copy_to_user((void __user *)(unsigned long)cmd.data,
                                 uevent->data, uevent->data_len)) {
                        result = -EFAULT;
                        goto done;
                }
        }

        if (uevent->info) {
                if (cmd.info_len < uevent->info_len) {
                        result = -ENOMEM;
                        goto done;
                }
                if (copy_to_user((void __user *)(unsigned long)cmd.info,
                                 uevent->info, uevent->info_len)) {
                        result = -EFAULT;
                        goto done;
                }
        }

        list_del(&uevent->file_list);
        list_del(&uevent->ctx_list);
        kfree(uevent->data);
        kfree(uevent->info);
        kfree(uevent);
done:
        up(&file->mutex);
        return result;
}

static ssize_t ib_ucm_create_id(struct ib_ucm_file *file,
                                const char __user *inbuf,
                                int in_len, int out_len)
{
        struct ib_ucm_create_id cmd;
        struct ib_ucm_create_id_resp resp;
        struct ib_ucm_context *ctx;
        int result;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ib_ucm_ctx_alloc(file);
        if (!ctx)
                return -ENOMEM;

        ctx->cm_id = ib_create_cm_id(ib_ucm_event_handler,
                                     (void *)(unsigned long)ctx->id);
        if (IS_ERR(ctx->cm_id)) {
                result = PTR_ERR(ctx->cm_id);
                goto err_ctx;
        }

        resp.id = ctx->id;
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp))) {
                result = -EFAULT;
                goto err_cm;
        }

        return 0;
err_cm:
        ib_destroy_cm_id(ctx->cm_id);
err_ctx:
        ib_ucm_ctx_put(ctx); /* user reference */
        return result;
}

static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
                                 const char __user *inbuf,
                                 int in_len, int out_len)
{
        struct ib_ucm_destroy_id cmd;
        struct ib_ucm_context *ctx;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ib_ucm_ctx_get(cmd.id);
        if (!ctx)
                return -ENOENT;

        ib_ucm_ctx_put(ctx); /* user reference */
        ib_ucm_ctx_put(ctx); /* func reference */
        return 0;
}

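/*
 * Every command handler below revalidates ownership under the file
 * mutex (ctx->file != file), so a context ID can only be driven
 * through the open file instance that created it.
 */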
static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file,
                              const char __user *inbuf,
                              int in_len, int out_len)
{
        struct ib_ucm_attr_id_resp resp;
        struct ib_ucm_attr_id cmd;
        struct ib_ucm_context *ctx;
        int result = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ib_ucm_ctx_get(cmd.id);
        if (!ctx)
                return -ENOENT;

        down(&ctx->file->mutex);
        if (ctx->file != file) {
                result = -EINVAL;
                goto done;
        }

        resp.service_id   = ctx->cm_id->service_id;
        resp.service_mask = ctx->cm_id->service_mask;
        resp.local_id     = ctx->cm_id->local_id;
        resp.remote_id    = ctx->cm_id->remote_id;

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                result = -EFAULT;
done:
        up(&ctx->file->mutex);
        ib_ucm_ctx_put(ctx); /* func reference */
        return result;
}

static ssize_t ib_ucm_listen(struct ib_ucm_file *file,
                             const char __user *inbuf,
                             int in_len, int out_len)
{
        struct ib_ucm_listen cmd;
        struct ib_ucm_context *ctx;
        int result;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ib_ucm_ctx_get(cmd.id);
        if (!ctx)
                return -ENOENT;

        down(&ctx->file->mutex);
        if (ctx->file != file)
                result = -EINVAL;
        else
                result = ib_cm_listen(ctx->cm_id, cmd.service_id,
                                      cmd.service_mask);
        up(&ctx->file->mutex);
        ib_ucm_ctx_put(ctx); /* func reference */
        return result;
}

static ssize_t ib_ucm_establish(struct ib_ucm_file *file,
                                const char __user *inbuf,
                                int in_len, int out_len)
{
        struct ib_ucm_establish cmd;
        struct ib_ucm_context *ctx;
        int result;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ib_ucm_ctx_get(cmd.id);
        if (!ctx)
                return -ENOENT;

        down(&ctx->file->mutex);
        if (ctx->file != file)
                result = -EINVAL;
        else
                result = ib_cm_establish(ctx->cm_id);
        up(&ctx->file->mutex);
        ib_ucm_ctx_put(ctx); /* func reference */
        return result;
}

static int ib_ucm_alloc_data(const void **dest, u64 src, u32 len)
{
        void *data;

        *dest = NULL;

        if (!len)
                return 0;

        data = kmalloc(len, GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        if (copy_from_user(data, (void __user *)(unsigned long)src, len)) {
                kfree(data);
                return -EFAULT;
        }

        *dest = data;
        return 0;
}

static int ib_ucm_path_get(struct ib_sa_path_rec **path, u64 src)
{
        struct ib_ucm_path_rec ucm_path;
        struct ib_sa_path_rec *sa_path;

        *path = NULL;

        if (!src)
                return 0;

        sa_path = kmalloc(sizeof(*sa_path), GFP_KERNEL);
        if (!sa_path)
                return -ENOMEM;

        if (copy_from_user(&ucm_path, (void __user *)(unsigned long)src,
                           sizeof(ucm_path))) {
                kfree(sa_path);
                return -EFAULT;
        }

        memcpy(sa_path->dgid.raw, ucm_path.dgid, sizeof(union ib_gid));
        memcpy(sa_path->sgid.raw, ucm_path.sgid, sizeof(union ib_gid));

        sa_path->dlid             = ucm_path.dlid;
        sa_path->slid             = ucm_path.slid;
        sa_path->raw_traffic      = ucm_path.raw_traffic;
        sa_path->flow_label       = ucm_path.flow_label;
        sa_path->hop_limit        = ucm_path.hop_limit;
        sa_path->traffic_class    = ucm_path.traffic_class;
        sa_path->reversible       = ucm_path.reversible;
        sa_path->numb_path        = ucm_path.numb_path;
        sa_path->pkey             = ucm_path.pkey;
        sa_path->sl               = ucm_path.sl;
        sa_path->mtu_selector     = ucm_path.mtu_selector;
        sa_path->mtu              = ucm_path.mtu;
        sa_path->rate_selector    = ucm_path.rate_selector;
        sa_path->rate             = ucm_path.rate;
        sa_path->packet_life_time = ucm_path.packet_life_time;
        sa_path->preference       = ucm_path.preference;

        sa_path->packet_life_time_selector =
                ucm_path.packet_life_time_selector;

        *path = sa_path;
        return 0;
}

static ssize_t ib_ucm_send_req(struct ib_ucm_file *file,
                               const char __user *inbuf,
                               int in_len, int out_len)
{
        struct ib_cm_req_param param;
        struct ib_ucm_context *ctx;
        struct ib_ucm_req cmd;
        int result;

        param.private_data   = NULL;
        param.primary_path   = NULL;
        param.alternate_path = NULL;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len);
        if (result)
                goto done;

        result = ib_ucm_path_get(&param.primary_path, cmd.primary_path);
        if (result)
                goto done;

        result = ib_ucm_path_get(&param.alternate_path, cmd.alternate_path);
        if (result)
                goto done;

        param.private_data_len           = cmd.len;
        param.service_id                 = cmd.sid;
        param.qp_num                     = cmd.qpn;
        param.qp_type                    = cmd.qp_type;
        param.starting_psn               = cmd.psn;
        param.peer_to_peer               = cmd.peer_to_peer;
        param.responder_resources        = cmd.responder_resources;
        param.initiator_depth            = cmd.initiator_depth;
        param.remote_cm_response_timeout = cmd.remote_cm_response_timeout;
        param.flow_control               = cmd.flow_control;
        param.local_cm_response_timeout  = cmd.local_cm_response_timeout;
        param.retry_count                = cmd.retry_count;
        param.rnr_retry_count            = cmd.rnr_retry_count;
        param.max_cm_retries             = cmd.max_cm_retries;
        param.srq                        = cmd.srq;

        ctx = ib_ucm_ctx_get(cmd.id);
        if (!ctx) {
                result = -ENOENT;
                goto done;
        }

        down(&ctx->file->mutex);
        if (ctx->file != file)
                result = -EINVAL;
        else
                result = ib_send_cm_req(ctx->cm_id, &param);
        up(&ctx->file->mutex);
        ib_ucm_ctx_put(ctx); /* func reference */
done:
        if (param.private_data)
                kfree(param.private_data);
        if (param.primary_path)
                kfree(param.primary_path);
        if (param.alternate_path)
                kfree(param.alternate_path);
        return result;
}

static ssize_t ib_ucm_send_rep(struct ib_ucm_file *file,
                               const char __user *inbuf,
                               int in_len, int out_len)
{
        struct ib_cm_rep_param param;
        struct ib_ucm_context *ctx;
        struct ib_ucm_rep cmd;
        int result;

        param.private_data = NULL;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len);
        if (result)
                return result;

        param.qp_num              = cmd.qpn;
        param.starting_psn        = cmd.psn;
        param.private_data_len    = cmd.len;
        param.responder_resources = cmd.responder_resources;
        param.initiator_depth     = cmd.initiator_depth;
        param.target_ack_delay    = cmd.target_ack_delay;
        param.failover_accepted   = cmd.failover_accepted;
        param.flow_control        = cmd.flow_control;
        param.rnr_retry_count     = cmd.rnr_retry_count;
        param.srq                 = cmd.srq;

        ctx = ib_ucm_ctx_get(cmd.id);
        if (!ctx) {
                result = -ENOENT;
                goto done;
        }

        down(&ctx->file->mutex);
        if (ctx->file != file)
                result = -EINVAL;
        else
                result = ib_send_cm_rep(ctx->cm_id, &param);
        up(&ctx->file->mutex);
        ib_ucm_ctx_put(ctx); /* func reference */
done:
        if (param.private_data)
                kfree(param.private_data);
        return result;
}

static ssize_t ib_ucm_send_private_data(struct ib_ucm_file *file,
                                        const char __user *inbuf, int in_len,
                                        int (*func)(struct ib_cm_id *cm_id,
                                                    const void *private_data,
                                                    u8 private_data_len))
{
        struct ib_ucm_private_data cmd;
        struct ib_ucm_context *ctx;
        const void *private_data = NULL;
        int result;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        result = ib_ucm_alloc_data(&private_data, cmd.data, cmd.len);
        if (result)
                return result;

        ctx = ib_ucm_ctx_get(cmd.id);
        if (!ctx) {
                result = -ENOENT;
                goto done;
        }

        down(&ctx->file->mutex);
        if (ctx->file != file)
                result = -EINVAL;
        else
                result = func(ctx->cm_id, private_data, cmd.len);
        up(&ctx->file->mutex);
        ib_ucm_ctx_put(ctx); /* func reference */
done:
        kfree(private_data);
        return result;
}

static ssize_t ib_ucm_send_rtu(struct ib_ucm_file *file,
                               const char __user *inbuf,
                               int in_len, int out_len)
{
        return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_rtu);
}

static ssize_t ib_ucm_send_dreq(struct ib_ucm_file *file,
                                const char __user *inbuf,
                                int in_len, int out_len)
{
        return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_dreq);
}

static ssize_t ib_ucm_send_drep(struct ib_ucm_file *file,
                                const char __user *inbuf,
                                int in_len, int out_len)
{
        return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_drep);
}

static ssize_t ib_ucm_send_info(struct ib_ucm_file *file,
                                const char __user *inbuf, int in_len,
                                int (*func)(struct ib_cm_id *cm_id,
                                            int status,
                                            const void *info,
                                            u8 info_len,
                                            const void *data,
                                            u8 data_len))
{
        struct ib_ucm_context *ctx;
        struct ib_ucm_info cmd;
        const void *data = NULL;
        const void *info = NULL;
        int result;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        result = ib_ucm_alloc_data(&data, cmd.data, cmd.data_len);
        if (result)
                goto done;

        result = ib_ucm_alloc_data(&info, cmd.info, cmd.info_len);
        if (result)
                goto done;

        ctx = ib_ucm_ctx_get(cmd.id);
        if (!ctx) {
                result = -ENOENT;
                goto done;
        }

        down(&ctx->file->mutex);
        if (ctx->file != file)
                result = -EINVAL;
        else
                result = func(ctx->cm_id, cmd.status,
                              info, cmd.info_len,
                              data, cmd.data_len);
        up(&ctx->file->mutex);
        ib_ucm_ctx_put(ctx); /* func reference */
done:
        kfree(data);
        kfree(info);
        return result;
}

static ssize_t ib_ucm_send_rej(struct ib_ucm_file *file,
                               const char __user *inbuf,
                               int in_len, int out_len)
{
        return ib_ucm_send_info(file, inbuf, in_len, (void *)ib_send_cm_rej);
}

static ssize_t ib_ucm_send_apr(struct ib_ucm_file *file,
                               const char __user *inbuf,
                               int in_len, int out_len)
{
        return ib_ucm_send_info(file, inbuf, in_len, (void *)ib_send_cm_apr);
}

static ssize_t ib_ucm_send_mra(struct ib_ucm_file *file,
                               const char __user *inbuf,
                               int in_len, int out_len)
{
        struct ib_ucm_context *ctx;
        struct ib_ucm_mra cmd;
        const void *data = NULL;
        int result;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        result = ib_ucm_alloc_data(&data, cmd.data, cmd.len);
        if (result)
                return result;

        ctx = ib_ucm_ctx_get(cmd.id);
        if (!ctx) {
                result = -ENOENT;
                goto done;
        }

        down(&ctx->file->mutex);
        if (ctx->file != file)
                result = -EINVAL;
        else
                result = ib_send_cm_mra(ctx->cm_id, cmd.timeout,
                                        data, cmd.len);
        up(&ctx->file->mutex);
        ib_ucm_ctx_put(ctx); /* func reference */
done:
        kfree(data);
        return result;
}

static ssize_t ib_ucm_send_lap(struct ib_ucm_file *file,
                               const char __user *inbuf,
                               int in_len, int out_len)
{
        struct ib_ucm_context *ctx;
        struct ib_sa_path_rec *path = NULL;
        struct ib_ucm_lap cmd;
        const void *data = NULL;
        int result;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        result = ib_ucm_alloc_data(&data, cmd.data, cmd.len);
        if (result)
                goto done;

        result = ib_ucm_path_get(&path, cmd.path);
        if (result)
                goto done;

        ctx = ib_ucm_ctx_get(cmd.id);
        if (!ctx) {
                result = -ENOENT;
                goto done;
        }

        down(&ctx->file->mutex);
        if (ctx->file != file)
                result = -EINVAL;
        else
                result = ib_send_cm_lap(ctx->cm_id, path, data, cmd.len);
        up(&ctx->file->mutex);
        ib_ucm_ctx_put(ctx); /* func reference */
done:
        kfree(data);
        kfree(path);
        return result;
}

static ssize_t ib_ucm_send_sidr_req(struct ib_ucm_file *file,
                                    const char __user *inbuf,
                                    int in_len, int out_len)
{
        struct ib_cm_sidr_req_param param;
        struct ib_ucm_context *ctx;
        struct ib_ucm_sidr_req cmd;
        int result;

        param.private_data = NULL;
        param.path         = NULL;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len);
        if (result)
                goto done;

        result = ib_ucm_path_get(&param.path, cmd.path);
        if (result)
                goto done;

        param.private_data_len = cmd.len;
        param.service_id       = cmd.sid;
        param.timeout_ms       = cmd.timeout;
        param.max_cm_retries   = cmd.max_cm_retries;
        param.pkey             = cmd.pkey;

        ctx = ib_ucm_ctx_get(cmd.id);
        if (!ctx) {
                result = -ENOENT;
                goto done;
        }

        down(&ctx->file->mutex);
        if (ctx->file != file)
                result = -EINVAL;
        else
                result = ib_send_cm_sidr_req(ctx->cm_id, &param);
        up(&ctx->file->mutex);
        ib_ucm_ctx_put(ctx); /* func reference */
done:
        if (param.private_data)
                kfree(param.private_data);
        if (param.path)
                kfree(param.path);
        return result;
}

static ssize_t ib_ucm_send_sidr_rep(struct ib_ucm_file *file,
                                    const char __user *inbuf,
                                    int in_len, int out_len)
{
        struct ib_cm_sidr_rep_param param;
        struct ib_ucm_sidr_rep cmd;
        struct ib_ucm_context *ctx;
        int result;

        param.info         = NULL;
        param.private_data = NULL;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        result = ib_ucm_alloc_data(&param.private_data,
                                   cmd.data, cmd.data_len);
        if (result)
                goto done;

        result = ib_ucm_alloc_data(&param.info, cmd.info, cmd.info_len);
        if (result)
                goto done;

        param.qp_num           = cmd.qpn;
        param.qkey             = cmd.qkey;
        param.status           = cmd.status;
        param.info_length      = cmd.info_len;
        param.private_data_len = cmd.data_len;

        ctx = ib_ucm_ctx_get(cmd.id);
        if (!ctx) {
                result = -ENOENT;
                goto done;
        }

        down(&ctx->file->mutex);
        if (ctx->file != file)
                result = -EINVAL;
        else
                result = ib_send_cm_sidr_rep(ctx->cm_id, &param);
        up(&ctx->file->mutex);
        ib_ucm_ctx_put(ctx); /* func reference */
done:
        if (param.private_data)
                kfree(param.private_data);
        if (param.info)
                kfree(param.info);
        return result;
}

static ssize_t (*ucm_cmd_table[])(struct ib_ucm_file *file,
                                  const char __user *inbuf,
                                  int in_len, int out_len) = {
        [IB_USER_CM_CMD_CREATE_ID]     = ib_ucm_create_id,
        [IB_USER_CM_CMD_DESTROY_ID]    = ib_ucm_destroy_id,
        [IB_USER_CM_CMD_ATTR_ID]       = ib_ucm_attr_id,
        [IB_USER_CM_CMD_LISTEN]        = ib_ucm_listen,
        [IB_USER_CM_CMD_ESTABLISH]     = ib_ucm_establish,
        [IB_USER_CM_CMD_SEND_REQ]      = ib_ucm_send_req,
        [IB_USER_CM_CMD_SEND_REP]      = ib_ucm_send_rep,
        [IB_USER_CM_CMD_SEND_RTU]      = ib_ucm_send_rtu,
        [IB_USER_CM_CMD_SEND_DREQ]     = ib_ucm_send_dreq,
        [IB_USER_CM_CMD_SEND_DREP]     = ib_ucm_send_drep,
        [IB_USER_CM_CMD_SEND_REJ]      = ib_ucm_send_rej,
        [IB_USER_CM_CMD_SEND_MRA]      = ib_ucm_send_mra,
        [IB_USER_CM_CMD_SEND_LAP]      = ib_ucm_send_lap,
        [IB_USER_CM_CMD_SEND_APR]      = ib_ucm_send_apr,
        [IB_USER_CM_CMD_SEND_SIDR_REQ] = ib_ucm_send_sidr_req,
        [IB_USER_CM_CMD_SEND_SIDR_REP] = ib_ucm_send_sidr_rep,
        [IB_USER_CM_CMD_EVENT]         = ib_ucm_event,
};

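/*
 * Commands are issued by write()ing a struct ib_ucm_cmd_hdr followed
 * by hdr.in bytes of command-specific payload; hdr.cmd indexes
 * ucm_cmd_table above.  A userspace caller might drive it roughly like
 * this (sketch only; assumes the ib_user_cm.h ABI structs and an
 * already-opened device fd):
 *
 *        struct ib_ucm_cmd_hdr hdr;
 *        struct ib_ucm_create_id cmd;
 *        struct ib_ucm_create_id_resp resp;
 *        char buf[sizeof(hdr) + sizeof(cmd)];
 *
 *        hdr.cmd = IB_USER_CM_CMD_CREATE_ID;
 *        hdr.in  = sizeof(cmd);
 *        hdr.out = sizeof(resp);
 *        cmd.response = (unsigned long)&resp;
 *        memcpy(buf, &hdr, sizeof(hdr));
 *        memcpy(buf + sizeof(hdr), &cmd, sizeof(cmd));
 *        write(fd, buf, sizeof(buf));
 */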
static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
                            size_t len, loff_t *pos)
{
        struct ib_ucm_file *file = filp->private_data;
        struct ib_ucm_cmd_hdr hdr;
        ssize_t result;

        if (len < sizeof(hdr))
                return -EINVAL;

        if (copy_from_user(&hdr, buf, sizeof(hdr)))
                return -EFAULT;

        printk(KERN_DEBUG "UCM: Write. cmd <%d> in <%d> out <%d> len <%Zu>\n",
               hdr.cmd, hdr.in, hdr.out, len);

        if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucm_cmd_table))
                return -EINVAL;

        if (hdr.in + sizeof(hdr) > len)
                return -EINVAL;

        result = ucm_cmd_table[hdr.cmd](file, buf + sizeof(hdr),
                                        hdr.in, hdr.out);
        if (!result)
                result = len;

        return result;
}

static unsigned int ib_ucm_poll(struct file *filp,
                                struct poll_table_struct *wait)
{
        struct ib_ucm_file *file = filp->private_data;
        unsigned int mask = 0;

        poll_wait(filp, &file->poll_wait, wait);

        if (!list_empty(&file->events))
                mask = POLLIN | POLLRDNORM;

        return mask;
}

static int ib_ucm_open(struct inode *inode, struct file *filp)
{
        struct ib_ucm_file *file;

        file = kmalloc(sizeof(*file), GFP_KERNEL);
        if (!file)
                return -ENOMEM;

        INIT_LIST_HEAD(&file->events);
        INIT_LIST_HEAD(&file->ctxs);
        init_waitqueue_head(&file->poll_wait);

        init_MUTEX(&file->mutex);

        filp->private_data = file;
        file->filp = filp;

        printk(KERN_DEBUG "UCM: Created struct\n");
        return 0;
}

static int ib_ucm_close(struct inode *inode, struct file *filp)
{
        struct ib_ucm_file *file = filp->private_data;
        struct ib_ucm_context *ctx;

        down(&file->mutex);
        while (!list_empty(&file->ctxs)) {
                ctx = list_entry(file->ctxs.next,
                                 struct ib_ucm_context, file_list);

                /* ib_ucm_ctx_put() takes the file mutex itself */
                up(&ctx->file->mutex);
                ib_ucm_ctx_put(ctx); /* user reference */
                down(&file->mutex);
        }
        up(&file->mutex);

        kfree(file);

        printk(KERN_DEBUG "UCM: Deleted struct\n");
        return 0;
}

static struct file_operations ib_ucm_fops = {
        .owner   = THIS_MODULE,
        .open    = ib_ucm_open,
        .release = ib_ucm_close,
        .write   = ib_ucm_write,
        .poll    = ib_ucm_poll,
};

static struct class *ib_ucm_class;
static struct cdev   ib_ucm_cdev;

static int __init ib_ucm_init(void)
{
        int result;

        result = register_chrdev_region(IB_UCM_DEV, 1, "infiniband_cm");
        if (result) {
                printk(KERN_ERR "UCM: Error <%d> registering dev\n", result);
                goto err_chrdev;
        }

        cdev_init(&ib_ucm_cdev, &ib_ucm_fops);

        result = cdev_add(&ib_ucm_cdev, IB_UCM_DEV, 1);
        if (result) {
                printk(KERN_ERR "UCM: Error <%d> adding cdev\n", result);
                goto err_cdev;
        }

        ib_ucm_class = class_create(THIS_MODULE, "infiniband_cm");
        if (IS_ERR(ib_ucm_class)) {
                result = PTR_ERR(ib_ucm_class);
                printk(KERN_ERR "UCM: Error <%d> creating class\n", result);
                goto err_class;
        }

        class_device_create(ib_ucm_class, IB_UCM_DEV, NULL, "ucm");

        idr_init(&ctx_id_table);
        init_MUTEX(&ctx_id_mutex);

        return 0;
err_class:
        cdev_del(&ib_ucm_cdev);
err_cdev:
        unregister_chrdev_region(IB_UCM_DEV, 1);
err_chrdev:
        return result;
}

static void __exit ib_ucm_cleanup(void)
{
        class_device_destroy(ib_ucm_class, IB_UCM_DEV);
        class_destroy(ib_ucm_class);
        cdev_del(&ib_ucm_cdev);
        unregister_chrdev_region(IB_UCM_DEV, 1);
}

module_init(ib_ucm_init);
module_exit(ib_ucm_cleanup);