2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * $Id: ucm.c 2594 2005-06-13 19:46:02Z libor $
35 #include <linux/init.h>
37 #include <linux/module.h>
38 #include <linux/device.h>
39 #include <linux/err.h>
40 #include <linux/poll.h>
41 #include <linux/file.h>
42 #include <linux/mount.h>
43 #include <linux/cdev.h>
45 #include <asm/uaccess.h>
/* Module identification and licensing. */
49 MODULE_AUTHOR("Libor Michalek");
50 MODULE_DESCRIPTION("InfiniBand userspace Connection Manager access");
51 MODULE_LICENSE("Dual BSD/GPL");
/* Debug tracing is off (0) by default; writable at runtime via sysfs (mode 0644). */
53 static int ucm_debug_level;
55 module_param_named(debug_level, ucm_debug_level, int, 0644);
56 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
/* Fixed char-device number for the ucm device node. */
63 #define IB_UCM_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_MINOR)
/* Conditional debug printk; compiles to nothing useful unless debug_level > 0. */
67 #define ucm_dbg(format, arg...) \
69 if (ucm_debug_level > 0) \
70 printk(KERN_DEBUG PFX format, ## arg); \
/*
 * Global id -> context mapping shared by all open files; ctx_id_mutex
 * serializes access, ctx_id_rover is the hint for the next id to hand out.
 */
73 static struct semaphore ctx_id_mutex;
74 static struct idr ctx_id_table;
75 static int ctx_id_rover = 0;
/*
 * Look up a connection context by user-visible id and take a reference
 * on it.  Yields ERR_PTR(-ENOENT) if no such id exists, ERR_PTR(-EINVAL)
 * if the id belongs to a different open file (callers must not touch
 * contexts they do not own).  NOTE(review): the braces, locking and
 * return are elided in this excerpt; lookup is presumably done under
 * ctx_id_mutex — confirm against the full source.
 */
77 static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
79 struct ib_ucm_context *ctx;
82 ctx = idr_find(&ctx_id_table, id);
84 ctx = ERR_PTR(-ENOENT);
85 else if (ctx->file != file)
86 ctx = ERR_PTR(-EINVAL);
/* Pin the context so it cannot be destroyed while the caller uses it. */
88 atomic_inc(&ctx->ref);
/*
 * Drop a reference taken by ib_ucm_ctx_get().  When the count hits zero
 * the waiter in ib_ucm_destroy_ctx() is presumably woken so teardown can
 * proceed (the wake-up line is elided in this excerpt — TODO confirm).
 */
94 static void ib_ucm_ctx_put(struct ib_ucm_context *ctx)
96 if (atomic_dec_and_test(&ctx->ref))
/*
 * Tear down one connection context: unhash it from the global idr,
 * wait for all outstanding references to drain, destroy the underlying
 * cm_id, then free any events still queued for delivery to userspace.
 * Returns an error if @id is unknown or owned by another file.
 */
100 static ssize_t ib_ucm_destroy_ctx(struct ib_ucm_file *file, int id)
102 struct ib_ucm_context *ctx;
103 struct ib_ucm_event *uevent;
106 ctx = idr_find(&ctx_id_table, id);
108 ctx = ERR_PTR(-ENOENT);
109 else if (ctx->file != file)
110 ctx = ERR_PTR(-EINVAL);
/* Remove from the idr first so no new lookups can find this context. */
112 idr_remove(&ctx_id_table, ctx->id);
/* Drop our own reference, then block until every other holder is done. */
118 atomic_dec(&ctx->ref);
119 wait_event(ctx->wait, !atomic_read(&ctx->ref));
121 /* No new events will be generated after destroying the cm_id. */
122 if (!IS_ERR(ctx->cm_id))
123 ib_destroy_cm_id(ctx->cm_id);
125 /* Cleanup events not yet reported to the user. */
127 list_del(&ctx->file_list);
128 while (!list_empty(&ctx->events)) {
130 uevent = list_entry(ctx->events.next,
131 struct ib_ucm_event, ctx_list);
132 list_del(&uevent->file_list);
133 list_del(&uevent->ctx_list);
135 /* clear incoming connections. */
137 ib_destroy_cm_id(uevent->cm_id);
/*
 * Allocate and initialize a new connection context for @file, link it on
 * the file's context list, and assign it a unique id from the global idr.
 * The rover makes ids increase monotonically (masked to stay positive) so
 * recently-freed ids are not reused immediately.  On idr failure the
 * context is unlinked again (cleanup path at the bottom).
 */
147 static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
149 struct ib_ucm_context *ctx;
152 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
/* One initial reference, owned by the creator. */
156 atomic_set(&ctx->ref, 1);
157 init_waitqueue_head(&ctx->wait);
160 INIT_LIST_HEAD(&ctx->events);
162 list_add_tail(&ctx->file_list, &file->ctxs);
164 ctx_id_rover = (ctx_id_rover + 1) & INT_MAX;
/* idr_pre_get()/idr_get_new_above() is the old two-step idr alloc API. */
166 result = idr_pre_get(&ctx_id_table, GFP_KERNEL);
171 result = idr_get_new_above(&ctx_id_table, ctx, ctx_id_rover, &ctx->id);
/* -EAGAIN means the preallocation raced away; retry (loop lines elided). */
174 if (result == -EAGAIN)
179 ucm_dbg("Allocated CM ID <%d>\n", ctx->id);
/* Error path: unlink from the file before freeing. */
183 list_del(&ctx->file_list);
189 * Event portion of the API, handle CM events
190 * and allow event polling.
/*
 * Copy a kernel SA path record into its userspace-ABI representation.
 * A NULL kernel path (e.g. no alternate path) leaves @upath untouched;
 * callers signal presence via the IB_UCM_PRES_* bits instead.
 * NOTE(review): `sizeof *upath->dgid` is the size of a single element,
 * not of the whole gid array — if dgid/sgid are byte arrays this copies
 * only one byte; confirm the field types in ib_user_cm.h.
 */
192 static void ib_ucm_event_path_get(struct ib_ucm_path_rec *upath,
193 struct ib_sa_path_rec *kpath)
195 if (!kpath || !upath)
198 memcpy(upath->dgid, kpath->dgid.raw, sizeof *upath->dgid);
199 memcpy(upath->sgid, kpath->sgid.raw, sizeof *upath->sgid);
/* Field-by-field copy; wire byte order is preserved as-is. */
201 upath->dlid = kpath->dlid;
202 upath->slid = kpath->slid;
203 upath->raw_traffic = kpath->raw_traffic;
204 upath->flow_label = kpath->flow_label;
205 upath->hop_limit = kpath->hop_limit;
206 upath->traffic_class = kpath->traffic_class;
207 upath->reversible = kpath->reversible;
208 upath->numb_path = kpath->numb_path;
209 upath->pkey = kpath->pkey;
210 upath->sl = kpath->sl;
211 upath->mtu_selector = kpath->mtu_selector;
212 upath->mtu = kpath->mtu;
213 upath->rate_selector = kpath->rate_selector;
214 upath->rate = kpath->rate;
215 upath->packet_life_time = kpath->packet_life_time;
216 upath->preference = kpath->preference;
218 upath->packet_life_time_selector =
219 kpath->packet_life_time_selector;
/*
 * Translate a kernel REQ-received event into the userspace ABI struct.
 * @ctx is the *listening* context, so userspace can match the incoming
 * request to the listen id it created.
 */
222 static void ib_ucm_event_req_get(struct ib_ucm_context *ctx,
223 struct ib_ucm_req_event_resp *ureq,
224 struct ib_cm_req_event_param *kreq)
226 ureq->listen_id = ctx->id;
228 ureq->remote_ca_guid = kreq->remote_ca_guid;
229 ureq->remote_qkey = kreq->remote_qkey;
230 ureq->remote_qpn = kreq->remote_qpn;
231 ureq->qp_type = kreq->qp_type;
232 ureq->starting_psn = kreq->starting_psn;
233 ureq->responder_resources = kreq->responder_resources;
234 ureq->initiator_depth = kreq->initiator_depth;
235 ureq->local_cm_response_timeout = kreq->local_cm_response_timeout;
236 ureq->flow_control = kreq->flow_control;
237 ureq->remote_cm_response_timeout = kreq->remote_cm_response_timeout;
238 ureq->retry_count = kreq->retry_count;
239 ureq->rnr_retry_count = kreq->rnr_retry_count;
240 ureq->srq = kreq->srq;
/* Paths are copied separately; alternate_path may be NULL (no-op). */
242 ib_ucm_event_path_get(&ureq->primary_path, kreq->primary_path);
243 ib_ucm_event_path_get(&ureq->alternate_path, kreq->alternate_path);
/* Translate a kernel REP-received event into the userspace ABI struct. */
246 static void ib_ucm_event_rep_get(struct ib_ucm_rep_event_resp *urep,
247 struct ib_cm_rep_event_param *krep)
249 urep->remote_ca_guid = krep->remote_ca_guid;
250 urep->remote_qkey = krep->remote_qkey;
251 urep->remote_qpn = krep->remote_qpn;
252 urep->starting_psn = krep->starting_psn;
253 urep->responder_resources = krep->responder_resources;
254 urep->initiator_depth = krep->initiator_depth;
255 urep->target_ack_delay = krep->target_ack_delay;
256 urep->failover_accepted = krep->failover_accepted;
257 urep->flow_control = krep->flow_control;
258 urep->rnr_retry_count = krep->rnr_retry_count;
259 urep->srq = krep->srq;
/*
 * Translate a SIDR REQ-received event; like the REQ case, report the
 * listening context's id so userspace can match it.
 */
262 static void ib_ucm_event_sidr_req_get(struct ib_ucm_context *ctx,
263 struct ib_ucm_sidr_req_event_resp *ureq,
264 struct ib_cm_sidr_req_event_param *kreq)
266 ureq->listen_id = ctx->id;
267 ureq->pkey = kreq->pkey;
/* Translate a SIDR REP-received event into the userspace ABI struct. */
270 static void ib_ucm_event_sidr_rep_get(struct ib_ucm_sidr_rep_event_resp *urep,
271 struct ib_cm_sidr_rep_event_param *krep)
273 urep->status = krep->status;
274 urep->qkey = krep->qkey;
275 urep->qpn = krep->qpn;
/*
 * Convert one ib_cm event into a userspace event record (@uvt): fill the
 * per-event response union, record how much private data accompanies the
 * event, and for REJ/APR/SIDR_REP capture the additional info blob.
 * Copies of the private data and info are kmalloc'ed at the bottom so the
 * event can outlive the callback.  Returns 0 on success, negative errno
 * on allocation failure (error-handling lines are elided in this excerpt).
 */
278 static int ib_ucm_event_process(struct ib_ucm_context *ctx,
279 struct ib_cm_event *evt,
280 struct ib_ucm_event *uvt)
284 switch (evt->event) {
285 case IB_CM_REQ_RECEIVED:
286 ib_ucm_event_req_get(ctx, &uvt->resp.u.req_resp,
287 &evt->param.req_rcvd);
288 uvt->data_len = IB_CM_REQ_PRIVATE_DATA_SIZE;
/* A REQ always has a primary path; alternate is optional. */
289 uvt->resp.present = IB_UCM_PRES_PRIMARY;
290 uvt->resp.present |= (evt->param.req_rcvd.alternate_path ?
291 IB_UCM_PRES_ALTERNATE : 0);
293 case IB_CM_REP_RECEIVED:
294 ib_ucm_event_rep_get(&uvt->resp.u.rep_resp,
295 &evt->param.rep_rcvd);
296 uvt->data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
298 case IB_CM_RTU_RECEIVED:
299 uvt->data_len = IB_CM_RTU_PRIVATE_DATA_SIZE;
300 uvt->resp.u.send_status = evt->param.send_status;
302 case IB_CM_DREQ_RECEIVED:
303 uvt->data_len = IB_CM_DREQ_PRIVATE_DATA_SIZE;
304 uvt->resp.u.send_status = evt->param.send_status;
306 case IB_CM_DREP_RECEIVED:
307 uvt->data_len = IB_CM_DREP_PRIVATE_DATA_SIZE;
308 uvt->resp.u.send_status = evt->param.send_status;
310 case IB_CM_MRA_RECEIVED:
311 uvt->resp.u.mra_resp.timeout =
312 evt->param.mra_rcvd.service_timeout;
313 uvt->data_len = IB_CM_MRA_PRIVATE_DATA_SIZE;
315 case IB_CM_REJ_RECEIVED:
316 uvt->resp.u.rej_resp.reason = evt->param.rej_rcvd.reason;
317 uvt->data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
/* REJ may carry Additional Rejection Information (ARI). */
318 uvt->info_len = evt->param.rej_rcvd.ari_length;
319 info = evt->param.rej_rcvd.ari;
321 case IB_CM_LAP_RECEIVED:
322 ib_ucm_event_path_get(&uvt->resp.u.lap_resp.path,
323 evt->param.lap_rcvd.alternate_path);
324 uvt->data_len = IB_CM_LAP_PRIVATE_DATA_SIZE;
325 uvt->resp.present = IB_UCM_PRES_ALTERNATE;
327 case IB_CM_APR_RECEIVED:
328 uvt->resp.u.apr_resp.status = evt->param.apr_rcvd.ap_status;
329 uvt->data_len = IB_CM_APR_PRIVATE_DATA_SIZE;
330 uvt->info_len = evt->param.apr_rcvd.info_len;
331 info = evt->param.apr_rcvd.apr_info;
333 case IB_CM_SIDR_REQ_RECEIVED:
334 ib_ucm_event_sidr_req_get(ctx, &uvt->resp.u.sidr_req_resp,
335 &evt->param.sidr_req_rcvd);
336 uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE;
338 case IB_CM_SIDR_REP_RECEIVED:
339 ib_ucm_event_sidr_rep_get(&uvt->resp.u.sidr_rep_resp,
340 &evt->param.sidr_rep_rcvd);
341 uvt->data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
342 uvt->info_len = evt->param.sidr_rep_rcvd.info_len;
343 info = evt->param.sidr_rep_rcvd.info;
/* Default: locally-generated send-completion events. */
346 uvt->resp.u.send_status = evt->param.send_status;
/* Duplicate the private data so it survives past this callback. */
351 uvt->data = kmalloc(uvt->data_len, GFP_KERNEL);
355 memcpy(uvt->data, evt->private_data, uvt->data_len);
356 uvt->resp.present |= IB_UCM_PRES_DATA;
/* Likewise for the optional ARI/APR/SIDR info blob. */
360 uvt->info = kmalloc(uvt->info_len, GFP_KERNEL);
364 memcpy(uvt->info, info, uvt->info_len);
365 uvt->resp.present |= IB_UCM_PRES_INFO;
/*
 * ib_cm callback: package @event for userspace and queue it on the
 * owning file.  REQ/SIDR_REQ events arrive on a brand-new cm_id that has
 * no userspace context yet, so they are marked IB_UCM_CM_ID_INVALID and
 * the new cm_id is stashed in the event for ib_ucm_event() to adopt.
 * Returning nonzero tells the CM core to destroy the cm_id — done here
 * only on failure for those new, unadopted ids.
 */
375 static int ib_ucm_event_handler(struct ib_cm_id *cm_id,
376 struct ib_cm_event *event)
378 struct ib_ucm_event *uevent;
379 struct ib_ucm_context *ctx;
383 ctx = cm_id->context;
385 if (event->event == IB_CM_REQ_RECEIVED ||
386 event->event == IB_CM_SIDR_REQ_RECEIVED)
387 id = IB_UCM_CM_ID_INVALID;
391 uevent = kmalloc(sizeof(*uevent), GFP_KERNEL);
395 memset(uevent, 0, sizeof(*uevent));
396 uevent->resp.id = id;
397 uevent->resp.event = event->event;
399 result = ib_ucm_event_process(ctx, event, uevent);
/* Keep the new cm_id only for connection-request events. */
404 uevent->cm_id = (id == IB_UCM_CM_ID_INVALID) ? cm_id : NULL;
/* Queue under the file mutex and wake any poll()/read() waiters. */
406 down(&ctx->file->mutex);
407 list_add_tail(&uevent->file_list, &ctx->file->events);
408 list_add_tail(&uevent->ctx_list, &ctx->events);
409 wake_up_interruptible(&ctx->file->poll_wait);
410 up(&ctx->file->mutex);
416 /* Destroy new cm_id's */
417 return (id == IB_UCM_CM_ID_INVALID);
/*
 * IB_USER_CM_CMD_EVENT: deliver the next queued event to userspace.
 * Blocks (interruptibly) until an event is available unless the file is
 * O_NONBLOCK.  For connection-request events a fresh context is
 * allocated here to adopt the new cm_id, and its id is patched into the
 * response before the copy-out.  Event payload and info are copied only
 * if the user supplied big-enough buffers.
 */
420 static ssize_t ib_ucm_event(struct ib_ucm_file *file,
421 const char __user *inbuf,
422 int in_len, int out_len)
424 struct ib_ucm_context *ctx;
425 struct ib_ucm_event_get cmd;
426 struct ib_ucm_event *uevent = NULL;
430 if (out_len < sizeof(struct ib_ucm_event_resp))
433 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
/* Wait loop: recheck the queue each wakeup (classic prepare_to_wait). */
440 while (list_empty(&file->events)) {
442 if (file->filp->f_flags & O_NONBLOCK) {
447 if (signal_pending(current)) {
448 result = -ERESTARTSYS;
452 prepare_to_wait(&file->poll_wait, &wait, TASK_INTERRUPTIBLE);
458 finish_wait(&file->poll_wait, &wait);
464 uevent = list_entry(file->events.next, struct ib_ucm_event, file_list);
/* New incoming connection: create a context to own the new cm_id. */
469 ctx = ib_ucm_ctx_alloc(file);
475 ctx->cm_id = uevent->cm_id;
476 ctx->cm_id->context = ctx;
478 uevent->resp.id = ctx->id;
481 if (copy_to_user((void __user *)(unsigned long)cmd.response,
482 &uevent->resp, sizeof(uevent->resp))) {
/* Refuse truncation: user buffer must hold all private data. */
489 if (cmd.data_len < uevent->data_len) {
494 if (copy_to_user((void __user *)(unsigned long)cmd.data,
495 uevent->data, uevent->data_len)) {
503 if (cmd.info_len < uevent->info_len) {
508 if (copy_to_user((void __user *)(unsigned long)cmd.info,
509 uevent->info, uevent->info_len)) {
/* Event fully consumed: unlink from both file and context lists. */
515 list_del(&uevent->file_list);
516 list_del(&uevent->ctx_list);
/*
 * IB_USER_CM_CMD_CREATE_ID: allocate a context plus a kernel cm_id and
 * return the new user-visible id.  On any failure after allocation the
 * context is torn down again via ib_ucm_destroy_ctx().
 */
527 static ssize_t ib_ucm_create_id(struct ib_ucm_file *file,
528 const char __user *inbuf,
529 int in_len, int out_len)
531 struct ib_ucm_create_id cmd;
532 struct ib_ucm_create_id_resp resp;
533 struct ib_ucm_context *ctx;
536 if (out_len < sizeof(resp))
539 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
543 ctx = ib_ucm_ctx_alloc(file);
/* Events on this cm_id are routed back through ib_ucm_event_handler. */
548 ctx->cm_id = ib_create_cm_id(ib_ucm_event_handler, ctx);
549 if (IS_ERR(ctx->cm_id)) {
550 result = PTR_ERR(ctx->cm_id);
555 if (copy_to_user((void __user *)(unsigned long)cmd.response,
556 &resp, sizeof(resp))) {
/* Error path: undo everything, including the cm_id. */
564 ib_ucm_destroy_ctx(file, ctx->id);
/* IB_USER_CM_CMD_DESTROY_ID: thin wrapper around ib_ucm_destroy_ctx(). */
568 static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
569 const char __user *inbuf,
570 int in_len, int out_len)
572 struct ib_ucm_destroy_id cmd;
574 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
577 return ib_ucm_destroy_ctx(file, cmd.id);
/*
 * IB_USER_CM_CMD_ATTR_ID: report the cm_id's identifying attributes
 * (service id/mask, local and remote comm ids) back to userspace.
 */
580 static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file,
581 const char __user *inbuf,
582 int in_len, int out_len)
584 struct ib_ucm_attr_id_resp resp;
585 struct ib_ucm_attr_id cmd;
586 struct ib_ucm_context *ctx;
589 if (out_len < sizeof(resp))
592 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
595 ctx = ib_ucm_ctx_get(file, cmd.id);
599 resp.service_id = ctx->cm_id->service_id;
600 resp.service_mask = ctx->cm_id->service_mask;
601 resp.local_id = ctx->cm_id->local_id;
602 resp.remote_id = ctx->cm_id->remote_id;
604 if (copy_to_user((void __user *)(unsigned long)cmd.response,
605 &resp, sizeof(resp)))
/*
 * IB_USER_CM_CMD_LISTEN: start listening for connection requests on the
 * given service id/mask.  (ctx_put on the looked-up context is elided in
 * this excerpt.)
 */
612 static ssize_t ib_ucm_listen(struct ib_ucm_file *file,
613 const char __user *inbuf,
614 int in_len, int out_len)
616 struct ib_ucm_listen cmd;
617 struct ib_ucm_context *ctx;
620 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
623 ctx = ib_ucm_ctx_get(file, cmd.id);
627 result = ib_cm_listen(ctx->cm_id, cmd.service_id, cmd.service_mask);
/*
 * IB_USER_CM_CMD_ESTABLISH: tell the CM the connection is established
 * (used when the RTU is implicit, e.g. data arrived first).
 */
632 static ssize_t ib_ucm_establish(struct ib_ucm_file *file,
633 const char __user *inbuf,
634 int in_len, int out_len)
636 struct ib_ucm_establish cmd;
637 struct ib_ucm_context *ctx;
640 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
643 ctx = ib_ucm_ctx_get(file, cmd.id);
647 result = ib_cm_establish(ctx->cm_id);
/*
 * Copy a user buffer (@src is a u64-encoded user pointer, @len bytes)
 * into a fresh kmalloc'ed buffer stored in *@dest.  Caller owns and must
 * kfree the result.  A zero @len presumably leaves *@dest untouched
 * (the early-return lines are elided in this excerpt — TODO confirm).
 */
652 static int ib_ucm_alloc_data(const void **dest, u64 src, u32 len)
661 data = kmalloc(len, GFP_KERNEL);
665 if (copy_from_user(data, (void __user *)(unsigned long)src, len)) {
/*
 * Read a userspace ib_ucm_path_rec (at user address @src) and convert it
 * into a freshly kmalloc'ed kernel ib_sa_path_rec in *@path.  Inverse of
 * ib_ucm_event_path_get().  Caller owns and must kfree the result.
 */
674 static int ib_ucm_path_get(struct ib_sa_path_rec **path, u64 src)
676 struct ib_ucm_path_rec ucm_path;
677 struct ib_sa_path_rec *sa_path;
684 sa_path = kmalloc(sizeof(*sa_path), GFP_KERNEL);
688 if (copy_from_user(&ucm_path, (void __user *)(unsigned long)src,
695 memcpy(sa_path->dgid.raw, ucm_path.dgid, sizeof sa_path->dgid);
696 memcpy(sa_path->sgid.raw, ucm_path.sgid, sizeof sa_path->sgid);
698 sa_path->dlid = ucm_path.dlid;
699 sa_path->slid = ucm_path.slid;
700 sa_path->raw_traffic = ucm_path.raw_traffic;
701 sa_path->flow_label = ucm_path.flow_label;
702 sa_path->hop_limit = ucm_path.hop_limit;
703 sa_path->traffic_class = ucm_path.traffic_class;
704 sa_path->reversible = ucm_path.reversible;
705 sa_path->numb_path = ucm_path.numb_path;
706 sa_path->pkey = ucm_path.pkey;
707 sa_path->sl = ucm_path.sl;
708 sa_path->mtu_selector = ucm_path.mtu_selector;
709 sa_path->mtu = ucm_path.mtu;
710 sa_path->rate_selector = ucm_path.rate_selector;
711 sa_path->rate = ucm_path.rate;
712 sa_path->packet_life_time = ucm_path.packet_life_time;
713 sa_path->preference = ucm_path.preference;
715 sa_path->packet_life_time_selector =
716 ucm_path.packet_life_time_selector;
/*
 * IB_USER_CM_CMD_SEND_REQ: build an ib_cm_req_param from the userspace
 * command (copying in private data and both path records) and send the
 * connection REQ.  The three kmalloc'ed buffers are unconditionally
 * freed at the bottom — kfree(NULL) is a no-op, so partial failures are
 * handled by the same exit path.
 */
722 static ssize_t ib_ucm_send_req(struct ib_ucm_file *file,
723 const char __user *inbuf,
724 int in_len, int out_len)
726 struct ib_cm_req_param param;
727 struct ib_ucm_context *ctx;
728 struct ib_ucm_req cmd;
/* NULL-init so the common cleanup path is safe on early failure. */
731 param.private_data = NULL;
732 param.primary_path = NULL;
733 param.alternate_path = NULL;
735 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
738 result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len);
742 result = ib_ucm_path_get(&param.primary_path, cmd.primary_path);
746 result = ib_ucm_path_get(&param.alternate_path, cmd.alternate_path);
750 param.private_data_len = cmd.len;
751 param.service_id = cmd.sid;
752 param.qp_num = cmd.qpn;
753 param.qp_type = cmd.qp_type;
754 param.starting_psn = cmd.psn;
755 param.peer_to_peer = cmd.peer_to_peer;
756 param.responder_resources = cmd.responder_resources;
757 param.initiator_depth = cmd.initiator_depth;
758 param.remote_cm_response_timeout = cmd.remote_cm_response_timeout;
759 param.flow_control = cmd.flow_control;
760 param.local_cm_response_timeout = cmd.local_cm_response_timeout;
761 param.retry_count = cmd.retry_count;
762 param.rnr_retry_count = cmd.rnr_retry_count;
763 param.max_cm_retries = cmd.max_cm_retries;
766 ctx = ib_ucm_ctx_get(file, cmd.id);
768 result = ib_send_cm_req(ctx->cm_id, &param);
771 result = PTR_ERR(ctx);
774 kfree(param.private_data);
775 kfree(param.primary_path);
776 kfree(param.alternate_path);
/*
 * IB_USER_CM_CMD_SEND_REP: build an ib_cm_rep_param from the userspace
 * command and send the connection REP.  Private data is copied in and
 * freed on exit regardless of outcome.
 */
780 static ssize_t ib_ucm_send_rep(struct ib_ucm_file *file,
781 const char __user *inbuf,
782 int in_len, int out_len)
784 struct ib_cm_rep_param param;
785 struct ib_ucm_context *ctx;
786 struct ib_ucm_rep cmd;
789 param.private_data = NULL;
791 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
794 result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len);
798 param.qp_num = cmd.qpn;
799 param.starting_psn = cmd.psn;
800 param.private_data_len = cmd.len;
801 param.responder_resources = cmd.responder_resources;
802 param.initiator_depth = cmd.initiator_depth;
803 param.target_ack_delay = cmd.target_ack_delay;
804 param.failover_accepted = cmd.failover_accepted;
805 param.flow_control = cmd.flow_control;
806 param.rnr_retry_count = cmd.rnr_retry_count;
809 ctx = ib_ucm_ctx_get(file, cmd.id);
811 result = ib_send_cm_rep(ctx->cm_id, &param);
814 result = PTR_ERR(ctx);
816 kfree(param.private_data);
/*
 * Shared implementation for RTU/DREQ/DREP: all three messages carry only
 * optional private data, so the actual send is parameterized via @func
 * (ib_send_cm_rtu and friends, which share this signature).
 */
820 static ssize_t ib_ucm_send_private_data(struct ib_ucm_file *file,
821 const char __user *inbuf, int in_len,
822 int (*func)(struct ib_cm_id *cm_id,
823 const void *private_data,
824 u8 private_data_len))
826 struct ib_ucm_private_data cmd;
827 struct ib_ucm_context *ctx;
828 const void *private_data = NULL;
831 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
834 result = ib_ucm_alloc_data(&private_data, cmd.data, cmd.len);
838 ctx = ib_ucm_ctx_get(file, cmd.id);
840 result = func(ctx->cm_id, private_data, cmd.len);
843 result = PTR_ERR(ctx);
/* IB_USER_CM_CMD_SEND_RTU: send Ready-To-Use. */
849 static ssize_t ib_ucm_send_rtu(struct ib_ucm_file *file,
850 const char __user *inbuf,
851 int in_len, int out_len)
853 return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_rtu);
/* IB_USER_CM_CMD_SEND_DREQ: send Disconnect Request. */
856 static ssize_t ib_ucm_send_dreq(struct ib_ucm_file *file,
857 const char __user *inbuf,
858 int in_len, int out_len)
860 return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_dreq);
/* IB_USER_CM_CMD_SEND_DREP: send Disconnect Reply. */
863 static ssize_t ib_ucm_send_drep(struct ib_ucm_file *file,
864 const char __user *inbuf,
865 int in_len, int out_len)
867 return ib_ucm_send_private_data(file, inbuf, in_len, ib_send_cm_drep);
/*
 * Shared implementation for REJ/APR: both carry a status, an info blob
 * and private data; the send is parameterized via @func (its full
 * prototype is elided in this excerpt).
 */
870 static ssize_t ib_ucm_send_info(struct ib_ucm_file *file,
871 const char __user *inbuf, int in_len,
872 int (*func)(struct ib_cm_id *cm_id,
879 struct ib_ucm_context *ctx;
880 struct ib_ucm_info cmd;
881 const void *data = NULL;
882 const void *info = NULL;
885 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
888 result = ib_ucm_alloc_data(&data, cmd.data, cmd.data_len);
892 result = ib_ucm_alloc_data(&info, cmd.info, cmd.info_len);
896 ctx = ib_ucm_ctx_get(file, cmd.id);
898 result = func(ctx->cm_id, cmd.status, info, cmd.info_len,
902 result = PTR_ERR(ctx);
/*
 * IB_USER_CM_CMD_SEND_REJ / IB_USER_CM_CMD_SEND_APR.
 * NOTE(review): the (void *) casts paper over signature differences
 * between ib_send_cm_rej/ib_send_cm_apr and ib_ucm_send_info's @func —
 * calling through a mismatched function-pointer type is undefined
 * behavior in ISO C; confirm the prototypes actually match.
 */
910 static ssize_t ib_ucm_send_rej(struct ib_ucm_file *file,
911 const char __user *inbuf,
912 int in_len, int out_len)
914 return ib_ucm_send_info(file, inbuf, in_len, (void *)ib_send_cm_rej);
917 static ssize_t ib_ucm_send_apr(struct ib_ucm_file *file,
918 const char __user *inbuf,
919 int in_len, int out_len)
921 return ib_ucm_send_info(file, inbuf, in_len, (void *)ib_send_cm_apr);
/*
 * IB_USER_CM_CMD_SEND_MRA: send Message Receipt Acknowledgement with the
 * requested service timeout and optional private data.
 */
924 static ssize_t ib_ucm_send_mra(struct ib_ucm_file *file,
925 const char __user *inbuf,
926 int in_len, int out_len)
928 struct ib_ucm_context *ctx;
929 struct ib_ucm_mra cmd;
930 const void *data = NULL;
933 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
936 result = ib_ucm_alloc_data(&data, cmd.data, cmd.len);
940 ctx = ib_ucm_ctx_get(file, cmd.id);
942 result = ib_send_cm_mra(ctx->cm_id, cmd.timeout, data, cmd.len);
945 result = PTR_ERR(ctx);
/*
 * IB_USER_CM_CMD_SEND_LAP: send a Load Alternate Path request carrying
 * the user-supplied alternate path record and optional private data.
 */
951 static ssize_t ib_ucm_send_lap(struct ib_ucm_file *file,
952 const char __user *inbuf,
953 int in_len, int out_len)
955 struct ib_ucm_context *ctx;
956 struct ib_sa_path_rec *path = NULL;
957 struct ib_ucm_lap cmd;
958 const void *data = NULL;
961 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
964 result = ib_ucm_alloc_data(&data, cmd.data, cmd.len);
968 result = ib_ucm_path_get(&path, cmd.path);
972 ctx = ib_ucm_ctx_get(file, cmd.id);
974 result = ib_send_cm_lap(ctx->cm_id, path, data, cmd.len);
977 result = PTR_ERR(ctx);
/*
 * IB_USER_CM_CMD_SEND_SIDR_REQ: send a Service ID Resolution request.
 * Builds ib_cm_sidr_req_param from the command, including the path
 * record and optional private data; private data is freed on exit.
 */
985 static ssize_t ib_ucm_send_sidr_req(struct ib_ucm_file *file,
986 const char __user *inbuf,
987 int in_len, int out_len)
989 struct ib_cm_sidr_req_param param;
990 struct ib_ucm_context *ctx;
991 struct ib_ucm_sidr_req cmd;
994 param.private_data = NULL;
997 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1000 result = ib_ucm_alloc_data(&param.private_data, cmd.data, cmd.len);
1004 result = ib_ucm_path_get(&param.path, cmd.path);
1008 param.private_data_len = cmd.len;
1009 param.service_id = cmd.sid;
1010 param.timeout_ms = cmd.timeout;
1011 param.max_cm_retries = cmd.max_cm_retries;
1012 param.pkey = cmd.pkey;
1014 ctx = ib_ucm_ctx_get(file, cmd.id);
1016 result = ib_send_cm_sidr_req(ctx->cm_id, &param);
1017 ib_ucm_ctx_put(ctx);
1019 result = PTR_ERR(ctx);
1022 kfree(param.private_data);
/*
 * IB_USER_CM_CMD_SEND_SIDR_REP: send a Service ID Resolution reply with
 * status, qpn/qkey and optional info + private data blobs.  Buffers are
 * freed on exit.
 */
1027 static ssize_t ib_ucm_send_sidr_rep(struct ib_ucm_file *file,
1028 const char __user *inbuf,
1029 int in_len, int out_len)
1031 struct ib_cm_sidr_rep_param param;
1032 struct ib_ucm_sidr_rep cmd;
1033 struct ib_ucm_context *ctx;
1038 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1041 result = ib_ucm_alloc_data(&param.private_data,
1042 cmd.data, cmd.data_len);
1046 result = ib_ucm_alloc_data(&param.info, cmd.info, cmd.info_len);
1050 param.qp_num = cmd.qpn;
1051 param.qkey = cmd.qkey;
1052 param.status = cmd.status;
1053 param.info_length = cmd.info_len;
1054 param.private_data_len = cmd.data_len;
1056 ctx = ib_ucm_ctx_get(file, cmd.id);
1058 result = ib_send_cm_sidr_rep(ctx->cm_id, &param);
1059 ib_ucm_ctx_put(ctx);
1061 result = PTR_ERR(ctx);
1064 kfree(param.private_data);
/*
 * Dispatch table for write() commands, indexed by the IB_USER_CM_CMD_*
 * value from the command header.  All handlers share one signature.
 */
1069 static ssize_t (*ucm_cmd_table[])(struct ib_ucm_file *file,
1070 const char __user *inbuf,
1071 int in_len, int out_len) = {
1072 [IB_USER_CM_CMD_CREATE_ID] = ib_ucm_create_id,
1073 [IB_USER_CM_CMD_DESTROY_ID] = ib_ucm_destroy_id,
1074 [IB_USER_CM_CMD_ATTR_ID] = ib_ucm_attr_id,
1075 [IB_USER_CM_CMD_LISTEN] = ib_ucm_listen,
1076 [IB_USER_CM_CMD_ESTABLISH] = ib_ucm_establish,
1077 [IB_USER_CM_CMD_SEND_REQ] = ib_ucm_send_req,
1078 [IB_USER_CM_CMD_SEND_REP] = ib_ucm_send_rep,
1079 [IB_USER_CM_CMD_SEND_RTU] = ib_ucm_send_rtu,
1080 [IB_USER_CM_CMD_SEND_DREQ] = ib_ucm_send_dreq,
1081 [IB_USER_CM_CMD_SEND_DREP] = ib_ucm_send_drep,
1082 [IB_USER_CM_CMD_SEND_REJ] = ib_ucm_send_rej,
1083 [IB_USER_CM_CMD_SEND_MRA] = ib_ucm_send_mra,
1084 [IB_USER_CM_CMD_SEND_LAP] = ib_ucm_send_lap,
1085 [IB_USER_CM_CMD_SEND_APR] = ib_ucm_send_apr,
1086 [IB_USER_CM_CMD_SEND_SIDR_REQ] = ib_ucm_send_sidr_req,
1087 [IB_USER_CM_CMD_SEND_SIDR_REP] = ib_ucm_send_sidr_rep,
1088 [IB_USER_CM_CMD_EVENT] = ib_ucm_event,
/*
 * write() entry point: parse the fixed command header, validate the
 * command index and claimed input length, then dispatch through
 * ucm_cmd_table.  NOTE(review): if hdr.cmd is an unsigned type the
 * `hdr.cmd < 0` test is always false (harmless but dead) — confirm
 * against struct ib_ucm_cmd_hdr.
 */
1091 static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
1092 size_t len, loff_t *pos)
1094 struct ib_ucm_file *file = filp->private_data;
1095 struct ib_ucm_cmd_hdr hdr;
1098 if (len < sizeof(hdr))
1101 if (copy_from_user(&hdr, buf, sizeof(hdr)))
1104 ucm_dbg("Write. cmd <%d> in <%d> out <%d> len <%Zu>\n",
1105 hdr.cmd, hdr.in, hdr.out, len);
1107 if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucm_cmd_table))
/* The caller must supply at least hdr.in bytes of command payload. */
1110 if (hdr.in + sizeof(hdr) > len)
1113 result = ucm_cmd_table[hdr.cmd](file, buf + sizeof(hdr),
/*
 * poll() entry point: readable whenever the file's event queue is
 * non-empty.  The device is never "writable-blocked", so no POLLOUT
 * handling is needed.
 */
1121 static unsigned int ib_ucm_poll(struct file *filp,
1122 struct poll_table_struct *wait)
1124 struct ib_ucm_file *file = filp->private_data;
1125 unsigned int mask = 0;
1127 poll_wait(filp, &file->poll_wait, wait);
1129 if (!list_empty(&file->events))
1130 mask = POLLIN | POLLRDNORM;
/*
 * open() entry point: allocate the per-open-file state (event queue,
 * context list, waitqueue, mutex) and stash it in filp->private_data.
 */
1135 static int ib_ucm_open(struct inode *inode, struct file *filp)
1137 struct ib_ucm_file *file;
1139 file = kmalloc(sizeof(*file), GFP_KERNEL);
1143 INIT_LIST_HEAD(&file->events);
1144 INIT_LIST_HEAD(&file->ctxs);
1145 init_waitqueue_head(&file->poll_wait);
1147 init_MUTEX(&file->mutex);
1149 filp->private_data = file;
1152 ucm_dbg("Created struct\n");
/*
 * release() entry point: destroy every context still owned by this file
 * (each destroy also reaps that context's queued events), then the
 * per-file state is freed (elided in this excerpt).
 */
1157 static int ib_ucm_close(struct inode *inode, struct file *filp)
1159 struct ib_ucm_file *file = filp->private_data;
1160 struct ib_ucm_context *ctx;
1163 while (!list_empty(&file->ctxs)) {
1165 ctx = list_entry(file->ctxs.next,
1166 struct ib_ucm_context, file_list);
1169 ib_ucm_destroy_ctx(file, ctx->id);
/* Char-device operations: write-command + poll interface, no read(). */
1177 static struct file_operations ib_ucm_fops = {
1178 .owner = THIS_MODULE,
1179 .open = ib_ucm_open,
1180 .release = ib_ucm_close,
1181 .write = ib_ucm_write,
1182 .poll = ib_ucm_poll,
/* Device-model handles created at init, destroyed at module unload. */
1186 static struct class *ib_ucm_class;
1187 static struct cdev ib_ucm_cdev;
/*
 * Module init: reserve the device number, register the cdev, create the
 * sysfs class and /dev node, and set up the global idr state.  Error
 * unwinding (the labels at the bottom) releases in reverse order.
 */
1189 static int __init ib_ucm_init(void)
1193 result = register_chrdev_region(IB_UCM_DEV, 1, "infiniband_cm");
1195 ucm_dbg("Error <%d> registering dev\n", result);
1199 cdev_init(&ib_ucm_cdev, &ib_ucm_fops);
1201 result = cdev_add(&ib_ucm_cdev, IB_UCM_DEV, 1);
1203 ucm_dbg("Error <%d> adding cdev\n", result);
1207 ib_ucm_class = class_create(THIS_MODULE, "infiniband_cm");
1208 if (IS_ERR(ib_ucm_class)) {
1209 result = PTR_ERR(ib_ucm_class);
1210 ucm_dbg("Error <%d> creating class\n", result);
1214 class_device_create(ib_ucm_class, IB_UCM_DEV, NULL, "ucm");
1216 idr_init(&ctx_id_table);
1217 init_MUTEX(&ctx_id_mutex);
/* Error unwinding: undo cdev_add then the chrdev reservation. */
1221 cdev_del(&ib_ucm_cdev);
1223 unregister_chrdev_region(IB_UCM_DEV, 1);
/* Module exit: tear down init's registrations in strict reverse order. */
1228 static void __exit ib_ucm_cleanup(void)
1230 class_device_destroy(ib_ucm_class, IB_UCM_DEV);
1231 class_destroy(ib_ucm_class);
1232 cdev_del(&ib_ucm_cdev);
1233 unregister_chrdev_region(IB_UCM_DEV, 1);
1236 module_init(ib_ucm_init);
1237 module_exit(ib_ucm_cleanup);