2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * $Id: ucm.c 2594 2005-06-13 19:46:02Z libor $
34 #include <linux/init.h>
36 #include <linux/module.h>
37 #include <linux/device.h>
38 #include <linux/err.h>
39 #include <linux/poll.h>
40 #include <linux/file.h>
41 #include <linux/mount.h>
42 #include <linux/cdev.h>
44 #include <asm/uaccess.h>
/* Module identity: dual GPL/BSD licensed, like the rest of the IB core. */
48 MODULE_AUTHOR("Libor Michalek");
49 MODULE_DESCRIPTION("InfiniBand userspace Connection Manager access");
50 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
/* debug_level > 0 turns on ucm_dbg() tracing; 0644 lets root toggle it
 * at runtime via /sys/module/.../parameters/debug_level. */
52 static int ucm_debug_level;
54 module_param_named(debug_level, ucm_debug_level, int, 0644);
55 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0")
/* Fixed device number for the single ucm char device. */
62 #define IB_UCM_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_MINOR)
/* Debug printk gated on the debug_level module parameter.
 * NOTE(review): surrounding do{}while(0) lines are not visible in this
 * view — confirm the macro is statement-safe in the full file. */
66 #define ucm_dbg(format, arg...) \
68 if (ucm_debug_level > 0) \
69 printk(KERN_DEBUG PFX format, ## arg); \
/* Global id -> ib_ucm_context map shared by all open files.
 * ctx_id_mutex guards ctx_id_table/ctx_id_rover; the rover seeds
 * idr_get_new_above() so ids are not reused immediately. */
72 static struct semaphore ctx_id_mutex;
73 static struct idr ctx_id_table;
74 static int ctx_id_rover = 0;
/*
 * Look up a ucm context by its idr id.
 * NOTE(review): the reference-count and ctx_id_mutex lock/unlock lines
 * are elided from this view — presumably idr_find() runs under
 * ctx_id_mutex and a "func reference" is taken before returning, as the
 * paired ib_ucm_ctx_put() calls elsewhere suggest. Confirm in full file.
 */
76 static struct ib_ucm_context *ib_ucm_ctx_get(int id)
78 struct ib_ucm_context *ctx;
81 ctx = idr_find(&ctx_id_table, id);
/*
 * Drop a reference on a context; on last put, tear it down:
 * remove it from the global idr, unlink it from its file, destroy the
 * cm_ids of any still-queued incoming-connection events, and finally
 * destroy the context's own cm_id.  The file mutex serializes the
 * event/ctx list manipulation against the event handler and readers.
 */
89 static void ib_ucm_ctx_put(struct ib_ucm_context *ctx)
91 struct ib_ucm_event *uevent;
97 idr_remove(&ctx_id_table, ctx->id);
104 down(&ctx->file->mutex);
106 list_del(&ctx->file_list);
107 while (!list_empty(&ctx->events)) {
109 uevent = list_entry(ctx->events.next,
110 struct ib_ucm_event, ctx_list);
111 list_del(&uevent->file_list);
112 list_del(&uevent->ctx_list);
114 /* clear incoming connections. */
116 ib_destroy_cm_id(uevent->cm_id);
121 up(&ctx->file->mutex);
123 ucm_dbg("Destroyed CM ID <%d>\n", ctx->id);
125 ib_destroy_cm_id(ctx->cm_id);
/*
 * Allocate a context owned by @file, give it one user reference, and
 * install it in the global idr with an id seeded by ctx_id_rover
 * (rover wraps via & INT_MAX to stay non-negative).  -EAGAIN from
 * idr_get_new_above() retries after another idr_pre_get().  On failure
 * the ctx is unlinked from the file list again (cleanup path partially
 * elided from this view).
 */
129 static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
131 struct ib_ucm_context *ctx;
134 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
138 ctx->ref = 1; /* user reference */
141 INIT_LIST_HEAD(&ctx->events);
142 init_MUTEX(&ctx->mutex);
144 list_add_tail(&ctx->file_list, &file->ctxs);
146 ctx_id_rover = (ctx_id_rover + 1) & INT_MAX;
148 result = idr_pre_get(&ctx_id_table, GFP_KERNEL);
153 result = idr_get_new_above(&ctx_id_table, ctx, ctx_id_rover, &ctx->id);
156 if (result == -EAGAIN)
161 ucm_dbg("Allocated CM ID <%d>\n", ctx->id);
165 list_del(&ctx->file_list);
171 * Event portion of the API, handle CM events
172 * and allow event polling.
/*
 * Copy a kernel ib_sa_path_rec into the flat userspace ib_ucm_path_rec,
 * field by field.  A NULL source or destination is a no-op (early
 * return elided from this view).
 */
174 static void ib_ucm_event_path_get(struct ib_ucm_path_rec *upath,
175 struct ib_sa_path_rec *kpath)
177 if (!kpath || !upath)
180 memcpy(upath->dgid, kpath->dgid.raw, sizeof(union ib_gid));
181 memcpy(upath->sgid, kpath->sgid.raw, sizeof(union ib_gid));
183 upath->dlid = kpath->dlid;
184 upath->slid = kpath->slid;
185 upath->raw_traffic = kpath->raw_traffic;
186 upath->flow_label = kpath->flow_label;
187 upath->hop_limit = kpath->hop_limit;
188 upath->traffic_class = kpath->traffic_class;
189 upath->reversible = kpath->reversible;
190 upath->numb_path = kpath->numb_path;
191 upath->pkey = kpath->pkey;
192 upath->sl = kpath->sl;
193 upath->mtu_selector = kpath->mtu_selector;
194 upath->mtu = kpath->mtu;
195 upath->rate_selector = kpath->rate_selector;
196 upath->rate = kpath->rate;
197 upath->packet_life_time = kpath->packet_life_time;
198 upath->preference = kpath->preference;
200 upath->packet_life_time_selector =
201 kpath->packet_life_time_selector;
/*
 * Flatten a REQ-received event's parameters for userspace.  The
 * listening context's id was stashed in listen_id->context as a
 * pointer-sized integer, hence the (long) cast.
 */
204 static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp *ureq,
205 struct ib_cm_req_event_param *kreq)
207 ureq->listen_id = (long)kreq->listen_id->context;
209 ureq->remote_ca_guid = kreq->remote_ca_guid;
210 ureq->remote_qkey = kreq->remote_qkey;
211 ureq->remote_qpn = kreq->remote_qpn;
212 ureq->qp_type = kreq->qp_type;
213 ureq->starting_psn = kreq->starting_psn;
214 ureq->responder_resources = kreq->responder_resources;
215 ureq->initiator_depth = kreq->initiator_depth;
216 ureq->local_cm_response_timeout = kreq->local_cm_response_timeout;
217 ureq->flow_control = kreq->flow_control;
218 ureq->remote_cm_response_timeout = kreq->remote_cm_response_timeout;
219 ureq->retry_count = kreq->retry_count;
220 ureq->rnr_retry_count = kreq->rnr_retry_count;
221 ureq->srq = kreq->srq;
/* Paths may be NULL; ib_ucm_event_path_get() handles that. */
223 ib_ucm_event_path_get(&ureq->primary_path, kreq->primary_path);
224 ib_ucm_event_path_get(&ureq->alternate_path, kreq->alternate_path);
/* Flatten a REP-received event's parameters for userspace. */
227 static void ib_ucm_event_rep_get(struct ib_ucm_rep_event_resp *urep,
228 struct ib_cm_rep_event_param *krep)
230 urep->remote_ca_guid = krep->remote_ca_guid;
231 urep->remote_qkey = krep->remote_qkey;
232 urep->remote_qpn = krep->remote_qpn;
233 urep->starting_psn = krep->starting_psn;
234 urep->responder_resources = krep->responder_resources;
235 urep->initiator_depth = krep->initiator_depth;
236 urep->target_ack_delay = krep->target_ack_delay;
237 urep->failover_accepted = krep->failover_accepted;
238 urep->flow_control = krep->flow_control;
239 urep->rnr_retry_count = krep->rnr_retry_count;
240 urep->srq = krep->srq;
/* Copy the REJ reason code out for userspace. */
243 static void ib_ucm_event_rej_get(struct ib_ucm_rej_event_resp *urej,
244 struct ib_cm_rej_event_param *krej)
246 urej->reason = krej->reason;
/* Copy the MRA service timeout out for userspace. */
249 static void ib_ucm_event_mra_get(struct ib_ucm_mra_event_resp *umra,
250 struct ib_cm_mra_event_param *kmra)
252 umra->timeout = kmra->service_timeout;
/* Copy the LAP alternate path out for userspace (NULL path is a no-op). */
255 static void ib_ucm_event_lap_get(struct ib_ucm_lap_event_resp *ulap,
256 struct ib_cm_lap_event_param *klap)
258 ib_ucm_event_path_get(&ulap->path, klap->alternate_path);
/* Copy the APR status out for userspace. */
261 static void ib_ucm_event_apr_get(struct ib_ucm_apr_event_resp *uapr,
262 struct ib_cm_apr_event_param *kapr)
264 uapr->status = kapr->ap_status;
/* Flatten SIDR REQ params; listen_id->context holds the listener's id. */
267 static void ib_ucm_event_sidr_req_get(struct ib_ucm_sidr_req_event_resp *ureq,
268 struct ib_cm_sidr_req_event_param *kreq)
270 ureq->listen_id = (long)kreq->listen_id->context;
271 ureq->pkey = kreq->pkey;
/* Flatten SIDR REP params for userspace. */
274 static void ib_ucm_event_sidr_rep_get(struct ib_ucm_sidr_rep_event_resp *urep,
275 struct ib_cm_sidr_rep_event_param *krep)
277 urep->status = krep->status;
278 urep->qkey = krep->qkey;
279 urep->qpn = krep->qpn;
/*
 * Translate a kernel CM event into the userspace representation held in
 * @uvt: dispatch on event type to fill resp.u, record the fixed
 * private-data length for the message class, and note any extra info
 * blob (REJ ARI, APR info, SIDR REP info).  Afterwards, private data
 * and info are duplicated into kmalloc'd buffers and flagged in
 * resp.present so the read path knows what to copy out.
 * NOTE(review): break statements and the error-unwind lines between
 * cases are elided from this view.
 */
282 static int ib_ucm_event_process(struct ib_cm_event *evt,
283 struct ib_ucm_event *uvt)
288 switch (evt->event) {
289 case IB_CM_REQ_RECEIVED:
290 ib_ucm_event_req_get(&uvt->resp.u.req_resp,
291 &evt->param.req_rcvd);
292 uvt->data_len = IB_CM_REQ_PRIVATE_DATA_SIZE;
/* Presence bits tell userspace which optional paths were filled in. */
293 uvt->resp.present |= (evt->param.req_rcvd.primary_path ?
294 IB_UCM_PRES_PRIMARY : 0);
295 uvt->resp.present |= (evt->param.req_rcvd.alternate_path ?
296 IB_UCM_PRES_ALTERNATE : 0);
298 case IB_CM_REP_RECEIVED:
299 ib_ucm_event_rep_get(&uvt->resp.u.rep_resp,
300 &evt->param.rep_rcvd);
301 uvt->data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
304 case IB_CM_RTU_RECEIVED:
305 uvt->data_len = IB_CM_RTU_PRIVATE_DATA_SIZE;
306 uvt->resp.u.send_status = evt->param.send_status;
309 case IB_CM_DREQ_RECEIVED:
310 uvt->data_len = IB_CM_DREQ_PRIVATE_DATA_SIZE;
311 uvt->resp.u.send_status = evt->param.send_status;
314 case IB_CM_DREP_RECEIVED:
315 uvt->data_len = IB_CM_DREP_PRIVATE_DATA_SIZE;
316 uvt->resp.u.send_status = evt->param.send_status;
319 case IB_CM_MRA_RECEIVED:
320 ib_ucm_event_mra_get(&uvt->resp.u.mra_resp,
321 &evt->param.mra_rcvd);
322 uvt->data_len = IB_CM_MRA_PRIVATE_DATA_SIZE;
325 case IB_CM_REJ_RECEIVED:
326 ib_ucm_event_rej_get(&uvt->resp.u.rej_resp,
327 &evt->param.rej_rcvd);
328 uvt->data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
329 uvt->info_len = evt->param.rej_rcvd.ari_length;
330 info = evt->param.rej_rcvd.ari;
333 case IB_CM_LAP_RECEIVED:
334 ib_ucm_event_lap_get(&uvt->resp.u.lap_resp,
335 &evt->param.lap_rcvd);
336 uvt->data_len = IB_CM_LAP_PRIVATE_DATA_SIZE;
337 uvt->resp.present |= (evt->param.lap_rcvd.alternate_path ?
338 IB_UCM_PRES_ALTERNATE : 0);
340 case IB_CM_APR_RECEIVED:
341 ib_ucm_event_apr_get(&uvt->resp.u.apr_resp,
342 &evt->param.apr_rcvd);
343 uvt->data_len = IB_CM_APR_PRIVATE_DATA_SIZE;
344 uvt->info_len = evt->param.apr_rcvd.info_len;
345 info = evt->param.apr_rcvd.apr_info;
348 case IB_CM_SIDR_REQ_RECEIVED:
349 ib_ucm_event_sidr_req_get(&uvt->resp.u.sidr_req_resp,
350 &evt->param.sidr_req_rcvd);
351 uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE;
354 case IB_CM_SIDR_REP_RECEIVED:
355 ib_ucm_event_sidr_rep_get(&uvt->resp.u.sidr_rep_resp,
356 &evt->param.sidr_rep_rcvd);
357 uvt->data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
358 uvt->info_len = evt->param.sidr_rep_rcvd.info_len;
359 info = evt->param.sidr_rep_rcvd.info;
/* Default case: plain send-status events carry no extra payload. */
363 uvt->resp.u.send_status = evt->param.send_status;
/* Duplicate private data so it survives after the CM event returns. */
368 if (uvt->data_len && evt->private_data) {
370 uvt->data = kmalloc(uvt->data_len, GFP_KERNEL);
376 memcpy(uvt->data, evt->private_data, uvt->data_len);
377 uvt->resp.present |= IB_UCM_PRES_DATA;
380 if (uvt->info_len && info) {
382 uvt->info = kmalloc(uvt->info_len, GFP_KERNEL);
388 memcpy(uvt->info, info, uvt->info_len);
389 uvt->resp.present |= IB_UCM_PRES_INFO;
/*
 * CM callback: queue the event for userspace.  For new-connection
 * events (REQ/SIDR REQ) the owning context is the *listener*, found via
 * listen_id->context, and the event's resp.id is left INVALID until the
 * read path allocates a fresh context for the new cm_id.  The uevent is
 * appended to both the file-wide and per-context lists and pollers are
 * woken.
 */
399 static int ib_ucm_event_handler(struct ib_cm_id *cm_id,
400 struct ib_cm_event *event)
402 struct ib_ucm_event *uevent;
403 struct ib_ucm_context *ctx;
407 * lookup correct context based on event type.
409 switch (event->event) {
410 case IB_CM_REQ_RECEIVED:
411 id = (long)event->param.req_rcvd.listen_id->context;
413 case IB_CM_SIDR_REQ_RECEIVED:
414 id = (long)event->param.sidr_req_rcvd.listen_id->context;
417 id = (long)cm_id->context;
421 ucm_dbg("Event. CM ID <%d> event <%d>\n", id, event->event);
423 ctx = ib_ucm_ctx_get(id);
/* New connections get their own context later; don't expose the
 * listener's id as this event's id. */
427 if (event->event == IB_CM_REQ_RECEIVED ||
428 event->event == IB_CM_SIDR_REQ_RECEIVED)
429 id = IB_UCM_CM_ID_INVALID;
431 uevent = kmalloc(sizeof(*uevent), GFP_KERNEL);
437 memset(uevent, 0, sizeof(*uevent));
439 uevent->resp.id = id;
440 uevent->resp.event = event->event;
442 result = ib_ucm_event_process(event, uevent);
/* Keep the new cm_id only for connection-request events. */
447 uevent->cm_id = ((event->event == IB_CM_REQ_RECEIVED ||
448 event->event == IB_CM_SIDR_REQ_RECEIVED ) ?
451 down(&ctx->file->mutex);
453 list_add_tail(&uevent->file_list, &ctx->file->events);
454 list_add_tail(&uevent->ctx_list, &ctx->events);
456 wake_up_interruptible(&ctx->file->poll_wait);
458 up(&ctx->file->mutex);
461 ib_ucm_ctx_put(ctx); /* func reference */
/*
 * IB_USER_CM_CMD_EVENT: block (unless O_NONBLOCK) until an event is
 * queued on this file, then copy it to userspace.  If the event carries
 * a new cm_id (incoming REQ/SIDR REQ), a fresh context is allocated
 * here and the cm_id is re-pointed at it before the response goes out.
 * User buffers that are too small for data/info fail rather than
 * truncate.  The uevent is unlinked once fully delivered.
 * NOTE(review): locking around the wait loop and the error-unwind
 * labels are elided from this view.
 */
465 static ssize_t ib_ucm_event(struct ib_ucm_file *file,
466 const char __user *inbuf,
467 int in_len, int out_len)
469 struct ib_ucm_context *ctx;
470 struct ib_ucm_event_get cmd;
471 struct ib_ucm_event *uevent = NULL;
475 if (out_len < sizeof(struct ib_ucm_event_resp))
478 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
485 while (list_empty(&file->events)) {
487 if (file->filp->f_flags & O_NONBLOCK) {
492 if (signal_pending(current)) {
493 result = -ERESTARTSYS;
497 prepare_to_wait(&file->poll_wait, &wait, TASK_INTERRUPTIBLE);
503 finish_wait(&file->poll_wait, &wait);
509 uevent = list_entry(file->events.next, struct ib_ucm_event, file_list);
/* Incoming connection: bind the new cm_id to a brand-new context. */
514 ctx = ib_ucm_ctx_alloc(file);
520 ctx->cm_id = uevent->cm_id;
521 ctx->cm_id->cm_handler = ib_ucm_event_handler;
522 ctx->cm_id->context = (void *)(unsigned long)ctx->id;
524 uevent->resp.id = ctx->id;
527 if (copy_to_user((void __user *)(unsigned long)cmd.response,
528 &uevent->resp, sizeof(uevent->resp))) {
535 if (cmd.data_len < uevent->data_len) {
540 if (copy_to_user((void __user *)(unsigned long)cmd.data,
541 uevent->data, uevent->data_len)) {
549 if (cmd.info_len < uevent->info_len) {
554 if (copy_to_user((void __user *)(unsigned long)cmd.info,
555 uevent->info, uevent->info_len)) {
561 list_del(&uevent->file_list);
562 list_del(&uevent->ctx_list);
/*
 * IB_USER_CM_CMD_CREATE_ID: allocate a context, create its kernel
 * cm_id (whose context field carries the ucm id for event routing),
 * and return the new id to userspace.  On copy-out failure the cm_id
 * and the user reference are rolled back.
 */
573 static ssize_t ib_ucm_create_id(struct ib_ucm_file *file,
574 const char __user *inbuf,
575 int in_len, int out_len)
577 struct ib_ucm_create_id cmd;
578 struct ib_ucm_create_id_resp resp;
579 struct ib_ucm_context *ctx;
582 if (out_len < sizeof(resp))
585 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
588 ctx = ib_ucm_ctx_alloc(file);
592 ctx->cm_id = ib_create_cm_id(ib_ucm_event_handler,
593 (void *)(unsigned long)ctx->id);
600 if (copy_to_user((void __user *)(unsigned long)cmd.response,
601 &resp, sizeof(resp))) {
608 ib_destroy_cm_id(ctx->cm_id);
610 ib_ucm_ctx_put(ctx); /* user reference */
/*
 * IB_USER_CM_CMD_DESTROY_ID: drop both the lookup ("func") reference
 * and the long-lived user reference, triggering teardown in
 * ib_ucm_ctx_put() once the count reaches zero.
 */
615 static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
616 const char __user *inbuf,
617 int in_len, int out_len)
619 struct ib_ucm_destroy_id cmd;
620 struct ib_ucm_context *ctx;
622 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
625 ctx = ib_ucm_ctx_get(cmd.id);
629 ib_ucm_ctx_put(ctx); /* user reference */
630 ib_ucm_ctx_put(ctx); /* func reference */
/*
 * IB_USER_CM_CMD_ATTR_ID: report the cm_id's service/local/remote ids
 * to userspace.  The ctx->file != file check stops a caller from using
 * another open file's context ids.
 */
635 static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file,
636 const char __user *inbuf,
637 int in_len, int out_len)
639 struct ib_ucm_attr_id_resp resp;
640 struct ib_ucm_attr_id cmd;
641 struct ib_ucm_context *ctx;
644 if (out_len < sizeof(resp))
647 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
650 ctx = ib_ucm_ctx_get(cmd.id);
654 down(&ctx->file->mutex);
655 if (ctx->file != file) {
660 resp.service_id = ctx->cm_id->service_id;
661 resp.service_mask = ctx->cm_id->service_mask;
662 resp.local_id = ctx->cm_id->local_id;
663 resp.remote_id = ctx->cm_id->remote_id;
665 if (copy_to_user((void __user *)(unsigned long)cmd.response,
666 &resp, sizeof(resp)))
670 up(&ctx->file->mutex);
671 ib_ucm_ctx_put(ctx); /* func reference */
/*
 * IB_USER_CM_CMD_LISTEN: start listening on the context's cm_id for
 * the requested service id (ownership-checked against @file).
 */
675 static ssize_t ib_ucm_listen(struct ib_ucm_file *file,
676 const char __user *inbuf,
677 int in_len, int out_len)
679 struct ib_ucm_listen cmd;
680 struct ib_ucm_context *ctx;
683 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
686 ctx = ib_ucm_ctx_get(cmd.id);
690 down(&ctx->file->mutex);
691 if (ctx->file != file)
694 result = ib_cm_listen(ctx->cm_id, cmd.service_id,
697 up(&ctx->file->mutex);
698 ib_ucm_ctx_put(ctx); /* func reference */
/*
 * IB_USER_CM_CMD_ESTABLISH: tell the CM the connection is established
 * (used when the RTU is consumed by the QP rather than the CM).
 */
702 static ssize_t ib_ucm_establish(struct ib_ucm_file *file,
703 const char __user *inbuf,
704 int in_len, int out_len)
706 struct ib_ucm_establish cmd;
707 struct ib_ucm_context *ctx;
710 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
713 ctx = ib_ucm_ctx_get(cmd.id);
717 down(&ctx->file->mutex);
718 if (ctx->file != file)
721 result = ib_cm_establish(ctx->cm_id);
723 up(&ctx->file->mutex);
724 ib_ucm_ctx_put(ctx); /* func reference */
/*
 * Copy @len bytes from the user address packed into @src into a fresh
 * kmalloc buffer stored at *dest.  Caller owns (and kfree's) the
 * buffer.  NOTE(review): the len==0 early-out and error returns are
 * elided from this view.
 */
728 static int ib_ucm_alloc_data(const void **dest, u64 src, u32 len)
737 data = kmalloc(len, GFP_KERNEL);
741 if (copy_from_user(data, (void __user *)(unsigned long)src, len)) {
/*
 * Copy a userspace ib_ucm_path_rec at address @src into a freshly
 * kmalloc'd kernel ib_sa_path_rec stored at *path (the inverse of
 * ib_ucm_event_path_get()).  Caller frees.  NOTE(review): the src==0
 * early-out and error unwinding are elided from this view.
 */
750 static int ib_ucm_path_get(struct ib_sa_path_rec **path, u64 src)
752 struct ib_ucm_path_rec ucm_path;
753 struct ib_sa_path_rec *sa_path;
760 sa_path = kmalloc(sizeof(*sa_path), GFP_KERNEL);
764 if (copy_from_user(&ucm_path, (void __user *)(unsigned long)src,
771 memcpy(sa_path->dgid.raw, ucm_path.dgid, sizeof(union ib_gid));
772 memcpy(sa_path->sgid.raw, ucm_path.sgid, sizeof(union ib_gid));
774 sa_path->dlid = ucm_path.dlid;
775 sa_path->slid = ucm_path.slid;
776 sa_path->raw_traffic = ucm_path.raw_traffic;
777 sa_path->flow_label = ucm_path.flow_label;
778 sa_path->hop_limit = ucm_path.hop_limit;
779 sa_path->traffic_class = ucm_path.traffic_class;
780 sa_path->reversible = ucm_path.reversible;
781 sa_path->numb_path = ucm_path.numb_path;
782 sa_path->pkey = ucm_path.pkey;
783 sa_path->sl = ucm_path.sl;
784 sa_path->mtu_selector = ucm_path.mtu_selector;
785 sa_path->mtu = ucm_path.mtu;
786 sa_path->rate_selector = ucm_path.rate_selector;
787 sa_path->rate = ucm_path.rate;
788 sa_path->packet_life_time = ucm_path.packet_life_time;
789 sa_path->preference = ucm_path.preference;
791 sa_path->packet_life_time_selector =
792 ucm_path.packet_life_time_selector;
/*
 * IB_USER_CM_CMD_SEND_REQ: build an ib_cm_req_param from the user
 * command (private data and both paths copied in via helpers), then
 * send the REQ on the ownership-checked cm_id.  All three kmalloc'd
 * buffers are freed unconditionally at the end (kfree(NULL) is fine).
 */
798 static ssize_t ib_ucm_send_req(struct ib_ucm_file *file,
799 const char __user *inbuf,
800 int in_len, int out_len)
802 struct ib_cm_req_param param;
803 struct ib_ucm_context *ctx;
804 struct ib_ucm_req cmd;
/* NULL defaults so cleanup can kfree them even on early failure. */
807 param.private_data = NULL;
808 param.primary_path = NULL;
809 param.alternate_path = NULL;
811 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
814 result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len);
818 result = ib_ucm_path_get(&param.primary_path, cmd.primary_path);
822 result = ib_ucm_path_get(&param.alternate_path, cmd.alternate_path);
826 param.private_data_len = cmd.len;
827 param.service_id = cmd.sid;
828 param.qp_num = cmd.qpn;
829 param.qp_type = cmd.qp_type;
830 param.starting_psn = cmd.psn;
831 param.peer_to_peer = cmd.peer_to_peer;
832 param.responder_resources = cmd.responder_resources;
833 param.initiator_depth = cmd.initiator_depth;
834 param.remote_cm_response_timeout = cmd.remote_cm_response_timeout;
835 param.flow_control = cmd.flow_control;
836 param.local_cm_response_timeout = cmd.local_cm_response_timeout;
837 param.retry_count = cmd.retry_count;
838 param.rnr_retry_count = cmd.rnr_retry_count;
839 param.max_cm_retries = cmd.max_cm_retries;
842 ctx = ib_ucm_ctx_get(cmd.id);
848 down(&ctx->file->mutex);
849 if (ctx->file != file)
852 result = ib_send_cm_req(ctx->cm_id, &param);
854 up(&ctx->file->mutex);
855 ib_ucm_ctx_put(ctx); /* func reference */
857 kfree(param.private_data);
858 kfree(param.primary_path);
859 kfree(param.alternate_path);
/*
 * IB_USER_CM_CMD_SEND_REP: build an ib_cm_rep_param (private data
 * copied in) and send the REP on the ownership-checked cm_id; the
 * private-data buffer is freed on every path.
 */
864 static ssize_t ib_ucm_send_rep(struct ib_ucm_file *file,
865 const char __user *inbuf,
866 int in_len, int out_len)
868 struct ib_cm_rep_param param;
869 struct ib_ucm_context *ctx;
870 struct ib_ucm_rep cmd;
873 param.private_data = NULL;
875 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
878 result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len);
882 param.qp_num = cmd.qpn;
883 param.starting_psn = cmd.psn;
884 param.private_data_len = cmd.len;
885 param.responder_resources = cmd.responder_resources;
886 param.initiator_depth = cmd.initiator_depth;
887 param.target_ack_delay = cmd.target_ack_delay;
888 param.failover_accepted = cmd.failover_accepted;
889 param.flow_control = cmd.flow_control;
890 param.rnr_retry_count = cmd.rnr_retry_count;
893 ctx = ib_ucm_ctx_get(cmd.id);
899 down(&ctx->file->mutex);
900 if (ctx->file != file)
903 result = ib_send_cm_rep(ctx->cm_id, &param);
905 up(&ctx->file->mutex);
906 ib_ucm_ctx_put(ctx); /* func reference */
908 kfree(param.private_data);
/*
 * Shared implementation for RTU/DREQ/DREP: copy in optional private
 * data and forward it to the given ib_send_cm_* function on the
 * ownership-checked cm_id.  @func supplies the message-specific send.
 */
913 static ssize_t ib_ucm_send_private_data(struct ib_ucm_file *file,
914 const char __user *inbuf, int in_len,
915 int (*func)(struct ib_cm_id *cm_id,
916 const void *private_data,
917 u8 private_data_len))
919 struct ib_ucm_private_data cmd;
920 struct ib_ucm_context *ctx;
921 const void *private_data = NULL;
924 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
927 result = ib_ucm_alloc_data(&private_data, cmd.data, cmd.len);
931 ctx = ib_ucm_ctx_get(cmd.id);
937 down(&ctx->file->mutex);
938 if (ctx->file != file)
941 result = func(ctx->cm_id, private_data, cmd.len);
943 up(&ctx->file->mutex);
944 ib_ucm_ctx_put(ctx); /* func reference */
/* IB_USER_CM_CMD_SEND_RTU: thin wrapper over the private-data sender. */
951 static ssize_t ib_ucm_send_rtu(struct ib_ucm_file *file,
952 const char __user *inbuf,
953 int in_len, int out_len)
955 return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_rtu);
/* IB_USER_CM_CMD_SEND_DREQ: thin wrapper over the private-data sender. */
958 static ssize_t ib_ucm_send_dreq(struct ib_ucm_file *file,
959 const char __user *inbuf,
960 int in_len, int out_len)
962 return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_dreq);
/* IB_USER_CM_CMD_SEND_DREP: thin wrapper over the private-data sender. */
965 static ssize_t ib_ucm_send_drep(struct ib_ucm_file *file,
966 const char __user *inbuf,
967 int in_len, int out_len)
969 return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_drep);
/*
 * Shared implementation for REJ/APR: copy in both the private-data and
 * info blobs and forward them with the status to @func on the
 * ownership-checked cm_id.  NOTE(review): the function-pointer
 * signature lines and the final kfree()s are elided from this view.
 */
972 static ssize_t ib_ucm_send_info(struct ib_ucm_file *file,
973 const char __user *inbuf, int in_len,
974 int (*func)(struct ib_cm_id *cm_id,
981 struct ib_ucm_context *ctx;
982 struct ib_ucm_info cmd;
983 const void *data = NULL;
984 const void *info = NULL;
987 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
990 result = ib_ucm_alloc_data(&data, cmd.data, cmd.data_len);
994 result = ib_ucm_alloc_data(&info, cmd.info, cmd.info_len);
998 ctx = ib_ucm_ctx_get(cmd.id);
1004 down(&ctx->file->mutex);
1005 if (ctx->file != file)
1008 result = func(ctx->cm_id, cmd.status,
1010 data, cmd.data_len);
1012 up(&ctx->file->mutex);
1013 ib_ucm_ctx_put(ctx); /* func reference */
/* IB_USER_CM_CMD_SEND_REJ via the info sender.
 * NOTE(review): the (void *) cast erases the function-pointer type of
 * ib_send_cm_rej — calling through a mismatched pointer type is UB;
 * flagging rather than changing since the real prototype isn't visible. */
1025 return ib_ucm_send_info(file, inbuf, in_len, (void *)ib_send_cm_rej);
/* IB_USER_CM_CMD_SEND_APR via the info sender (same (void *) cast
 * caveat as ib_ucm_send_rej). */
1028 static ssize_t ib_ucm_send_apr(struct ib_ucm_file *file,
1029 const char __user *inbuf,
1030 int in_len, int out_len)
1032 return ib_ucm_send_info(file, inbuf, in_len, (void *)ib_send_cm_apr);
/*
 * IB_USER_CM_CMD_SEND_MRA: copy in optional private data and send an
 * MRA with the requested service timeout on the ownership-checked
 * cm_id.
 */
1035 static ssize_t ib_ucm_send_mra(struct ib_ucm_file *file,
1036 const char __user *inbuf,
1037 int in_len, int out_len)
1039 struct ib_ucm_context *ctx;
1040 struct ib_ucm_mra cmd;
1041 const void *data = NULL;
1044 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1047 result = ib_ucm_alloc_data(&data, cmd.data, cmd.len);
1051 ctx = ib_ucm_ctx_get(cmd.id);
1057 down(&ctx->file->mutex);
1058 if (ctx->file != file)
1061 result = ib_send_cm_mra(ctx->cm_id, cmd.timeout,
1064 up(&ctx->file->mutex);
1065 ib_ucm_ctx_put(ctx); /* func reference */
/*
 * IB_USER_CM_CMD_SEND_LAP: copy in the alternate path record and
 * optional private data, then send the LAP on the ownership-checked
 * cm_id.  NOTE(review): the trailing kfree(path)/kfree(data) lines are
 * elided from this view.
 */
1072 static ssize_t ib_ucm_send_lap(struct ib_ucm_file *file,
1073 const char __user *inbuf,
1074 int in_len, int out_len)
1076 struct ib_ucm_context *ctx;
1077 struct ib_sa_path_rec *path = NULL;
1078 struct ib_ucm_lap cmd;
1079 const void *data = NULL;
1082 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1085 result = ib_ucm_alloc_data(&data, cmd.data, cmd.len);
1089 result = ib_ucm_path_get(&path, cmd.path);
1093 ctx = ib_ucm_ctx_get(cmd.id);
1099 down(&ctx->file->mutex);
1100 if (ctx->file != file)
1103 result = ib_send_cm_lap(ctx->cm_id, path, data, cmd.len);
1105 up(&ctx->file->mutex);
1106 ib_ucm_ctx_put(ctx); /* func reference */
/*
 * IB_USER_CM_CMD_SEND_SIDR_REQ: build an ib_cm_sidr_req_param
 * (private data and path copied in) and send the SIDR REQ on the
 * ownership-checked cm_id.
 */
1114 static ssize_t ib_ucm_send_sidr_req(struct ib_ucm_file *file,
1115 const char __user *inbuf,
1116 int in_len, int out_len)
1118 struct ib_cm_sidr_req_param param;
1119 struct ib_ucm_context *ctx;
1120 struct ib_ucm_sidr_req cmd;
1123 param.private_data = NULL;
1126 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1129 result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len);
1133 result = ib_ucm_path_get(&param.path, cmd.path);
1137 param.private_data_len = cmd.len;
1138 param.service_id = cmd.sid;
1139 param.timeout_ms = cmd.timeout;
1140 param.max_cm_retries = cmd.max_cm_retries;
1141 param.pkey = cmd.pkey;
1143 ctx = ib_ucm_ctx_get(cmd.id);
1149 down(&ctx->file->mutex);
1150 if (ctx->file != file)
1153 result = ib_send_cm_sidr_req(ctx->cm_id, &param);
1155 up(&ctx->file->mutex);
1156 ib_ucm_ctx_put(ctx); /* func reference */
1158 kfree(param.private_data);
/*
 * IB_USER_CM_CMD_SEND_SIDR_REP: build an ib_cm_sidr_rep_param
 * (private data and info blobs copied in) and send the SIDR REP on
 * the ownership-checked cm_id.  NOTE(review): the kfree(param.info)
 * line is elided from this view.
 */
1164 static ssize_t ib_ucm_send_sidr_rep(struct ib_ucm_file *file,
1165 const char __user *inbuf,
1166 int in_len, int out_len)
1168 struct ib_cm_sidr_rep_param param;
1169 struct ib_ucm_sidr_rep cmd;
1170 struct ib_ucm_context *ctx;
1175 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1178 result = ib_ucm_alloc_data(&param.private_data,
1179 cmd.data, cmd.data_len);
1183 result = ib_ucm_alloc_data(&param.info, cmd.info, cmd.info_len);
1187 param.qp_num = cmd.qpn;
1188 param.qkey = cmd.qkey;
1189 param.status = cmd.status;
1190 param.info_length = cmd.info_len;
1191 param.private_data_len = cmd.data_len;
1193 ctx = ib_ucm_ctx_get(cmd.id);
1199 down(&ctx->file->mutex);
1200 if (ctx->file != file)
1203 result = ib_send_cm_sidr_rep(ctx->cm_id, &param);
1205 up(&ctx->file->mutex);
1206 ib_ucm_ctx_put(ctx); /* func reference */
1208 kfree(param.private_data);
/*
 * Dispatch table for write(): indexed by the IB_USER_CM_CMD_* opcode
 * in the command header.  Designated initializers keep slot order
 * independent of declaration order.
 */
1214 static ssize_t (*ucm_cmd_table[])(struct ib_ucm_file *file,
1215 const char __user *inbuf,
1216 int in_len, int out_len) = {
1217 [IB_USER_CM_CMD_CREATE_ID] = ib_ucm_create_id,
1218 [IB_USER_CM_CMD_DESTROY_ID] = ib_ucm_destroy_id,
1219 [IB_USER_CM_CMD_ATTR_ID] = ib_ucm_attr_id,
1220 [IB_USER_CM_CMD_LISTEN] = ib_ucm_listen,
1221 [IB_USER_CM_CMD_ESTABLISH] = ib_ucm_establish,
1222 [IB_USER_CM_CMD_SEND_REQ] = ib_ucm_send_req,
1223 [IB_USER_CM_CMD_SEND_REP] = ib_ucm_send_rep,
1224 [IB_USER_CM_CMD_SEND_RTU] = ib_ucm_send_rtu,
1225 [IB_USER_CM_CMD_SEND_DREQ] = ib_ucm_send_dreq,
1226 [IB_USER_CM_CMD_SEND_DREP] = ib_ucm_send_drep,
1227 [IB_USER_CM_CMD_SEND_REJ] = ib_ucm_send_rej,
1228 [IB_USER_CM_CMD_SEND_MRA] = ib_ucm_send_mra,
1229 [IB_USER_CM_CMD_SEND_LAP] = ib_ucm_send_lap,
1230 [IB_USER_CM_CMD_SEND_APR] = ib_ucm_send_apr,
1231 [IB_USER_CM_CMD_SEND_SIDR_REQ] = ib_ucm_send_sidr_req,
1232 [IB_USER_CM_CMD_SEND_SIDR_REP] = ib_ucm_send_sidr_rep,
1233 [IB_USER_CM_CMD_EVENT] = ib_ucm_event,
/*
 * write() entry point: validate the ib_ucm_cmd_hdr (opcode in range,
 * declared input length fits the write), then dispatch to the handler
 * with the payload that follows the header.
 */
1236 static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
1237 size_t len, loff_t *pos)
1239 struct ib_ucm_file *file = filp->private_data;
1240 struct ib_ucm_cmd_hdr hdr;
1243 if (len < sizeof(hdr))
1246 if (copy_from_user(&hdr, buf, sizeof(hdr)))
1249 ucm_dbg("Write. cmd <%d> in <%d> out <%d> len <%Zu>\n",
1250 hdr.cmd, hdr.in, hdr.out, len);
1252 if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucm_cmd_table))
1255 if (hdr.in + sizeof(hdr) > len)
1258 result = ucm_cmd_table[hdr.cmd](file, buf + sizeof(hdr),
/*
 * poll() entry point: readable (POLLIN | POLLRDNORM) whenever the
 * file's event queue is non-empty; the event handler wakes poll_wait.
 */
1266 static unsigned int ib_ucm_poll(struct file *filp,
1267 struct poll_table_struct *wait)
1269 struct ib_ucm_file *file = filp->private_data;
1270 unsigned int mask = 0;
1272 poll_wait(filp, &file->poll_wait, wait);
1274 if (!list_empty(&file->events))
1275 mask = POLLIN | POLLRDNORM;
/*
 * open() entry point: allocate the per-open ib_ucm_file state (event
 * and context lists, poll waitqueue, mutex) and stash it in
 * filp->private_data for the other fops.
 */
1280 static int ib_ucm_open(struct inode *inode, struct file *filp)
1282 struct ib_ucm_file *file;
1284 file = kmalloc(sizeof(*file), GFP_KERNEL);
1288 INIT_LIST_HEAD(&file->events);
1289 INIT_LIST_HEAD(&file->ctxs);
1290 init_waitqueue_head(&file->poll_wait);
1292 init_MUTEX(&file->mutex);
1294 filp->private_data = file;
1297 ucm_dbg("Created struct\n");
/*
 * release() entry point: drop the user reference on every context
 * still owned by this file (which frees their queued events and
 * cm_ids via ib_ucm_ctx_put), then free the file state.
 * NOTE(review): the matching down() before the visible up() is elided
 * from this view — confirm lock pairing in the full file.
 */
1302 static int ib_ucm_close(struct inode *inode, struct file *filp)
1304 struct ib_ucm_file *file = filp->private_data;
1305 struct ib_ucm_context *ctx;
1309 while (!list_empty(&file->ctxs)) {
1311 ctx = list_entry(file->ctxs.next,
1312 struct ib_ucm_context, file_list);
1314 up(&ctx->file->mutex);
1315 ib_ucm_ctx_put(ctx); /* user reference */
1323 ucm_dbg("Deleted struct\n");
/* Char-device file operations: command writes, event polling. */
1327 static struct file_operations ib_ucm_fops = {
1328 .owner = THIS_MODULE,
1329 .open = ib_ucm_open,
1330 .release = ib_ucm_close,
1331 .write = ib_ucm_write,
1332 .poll = ib_ucm_poll,
/* Device-model objects registered at module init. */
1336 static struct class *ib_ucm_class;
1337 static struct cdev ib_ucm_cdev;
/*
 * Module init: reserve the fixed device number, register the cdev,
 * create the "infiniband_cm" class and its /dev node, then set up the
 * global idr and its mutex.  Visible trailing lines are the error
 * unwind (cdev_del / unregister_chrdev_region); the goto labels
 * themselves are elided from this view.
 */
1339 static int __init ib_ucm_init(void)
1343 result = register_chrdev_region(IB_UCM_DEV, 1, "infiniband_cm");
1345 ucm_dbg("Error <%d> registering dev\n", result);
1349 cdev_init(&ib_ucm_cdev, &ib_ucm_fops);
1351 result = cdev_add(&ib_ucm_cdev, IB_UCM_DEV, 1);
1353 ucm_dbg("Error <%d> adding cdev\n", result);
1357 ib_ucm_class = class_create(THIS_MODULE, "infiniband_cm");
1358 if (IS_ERR(ib_ucm_class)) {
1359 result = PTR_ERR(ib_ucm_class);
1360 ucm_dbg("Error <%d> creating class\n", result);
1364 class_device_create(ib_ucm_class, IB_UCM_DEV, NULL, "ucm");
1366 idr_init(&ctx_id_table);
1367 init_MUTEX(&ctx_id_mutex);
1371 cdev_del(&ib_ucm_cdev);
1373 unregister_chrdev_region(IB_UCM_DEV, 1);
/* Module exit: undo init in reverse order (node, class, cdev, region). */
1378 static void __exit ib_ucm_cleanup(void)
1380 class_device_destroy(ib_ucm_class, IB_UCM_DEV);
1381 class_destroy(ib_ucm_class);
1382 cdev_del(&ib_ucm_cdev);
1383 unregister_chrdev_region(IB_UCM_DEV, 1);
1386 module_init(ib_ucm_init);
1387 module_exit(ib_ucm_cleanup);