/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
        UCMA_MAX_BACKLOG        = 128
};

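/*
 * Bookkeeping for the userspace CM device: each open file (ucma_file) owns
 * the contexts created through it and a queue of events waiting to be read.
 * A context wraps one rdma_cm_id; an event wraps one rdma_cm_event that has
 * been translated into the user-visible rdma_ucm_event_resp form.  file->mut
 * protects both the context list and the event list.
 */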
struct ucma_file {
        struct mutex            mut;
        struct file             *filp;
        struct list_head        ctx_list;
        struct list_head        event_list;
        wait_queue_head_t       poll_wait;
};

struct ucma_context {
        int                     id;
        struct completion       comp;
        atomic_t                ref;
        int                     events_reported;
        int                     backlog;

        struct ucma_file        *file;
        struct rdma_cm_id       *cm_id;
        u64                     uid;

        struct list_head        list;
};

struct ucma_event {
        struct ucma_context     *ctx;
        struct list_head        list;
        struct rdma_cm_id       *cm_id;
        struct rdma_ucm_event_resp resp;
};

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);

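/*
 * ctx_idr maps the context ids handed to userspace back to ucma_context
 * structures; mut serializes all insertions, lookups and removals in it.
 */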
static inline struct ucma_context *_ucma_find_context(int id,
                                                       struct ucma_file *file)
{
        struct ucma_context *ctx;

        ctx = idr_find(&ctx_idr, id);
        if (!ctx)
                ctx = ERR_PTR(-ENOENT);
        else if (ctx->file != file)
                ctx = ERR_PTR(-EINVAL);
        return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
        struct ucma_context *ctx;

        mutex_lock(&mut);
        ctx = _ucma_find_context(id, file);
        if (!IS_ERR(ctx))
                atomic_inc(&ctx->ref);
        mutex_unlock(&mut);
        return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
        if (atomic_dec_and_test(&ctx->ref))
                complete(&ctx->comp);
}

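/*
 * Contexts are reference counted: ucma_get_ctx() takes a reference while a
 * command is using the context, ucma_put_ctx() drops it, and the final put
 * completes ctx->comp so that destroy can wait for outstanding users.
 */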
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
        struct ucma_context *ctx;
        int ret;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return NULL;

        atomic_set(&ctx->ref, 1);
        init_completion(&ctx->comp);
        ctx->file = file;

        do {
                ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
                if (!ret)
                        goto error;

                mutex_lock(&mut);
                ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
                mutex_unlock(&mut);
        } while (ret == -EAGAIN);

        if (ret)
                goto error;

        list_add_tail(&ctx->list, &file->ctx_list);
        return ctx;

error:
        kfree(ctx);
        return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
                                 struct rdma_conn_param *src)
{
        if (src->private_data_len)
                memcpy(dst->private_data, src->private_data,
                       src->private_data_len);
        dst->private_data_len = src->private_data_len;
        dst->responder_resources = src->responder_resources;
        dst->initiator_depth = src->initiator_depth;
        dst->flow_control = src->flow_control;
        dst->retry_count = src->retry_count;
        dst->rnr_retry_count = src->rnr_retry_count;
        dst->srq = src->srq;
        dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
                               struct rdma_ud_param *src)
{
        if (src->private_data_len)
                memcpy(dst->private_data, src->private_data,
                       src->private_data_len);
        dst->private_data_len = src->private_data_len;
        ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
        dst->qp_num = src->qp_num;
        dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
                                   struct rdma_cm_event *event,
                                   struct ucma_event *uevent)
{
        uevent->ctx = ctx;
        uevent->resp.uid = ctx->uid;
        uevent->resp.id = ctx->id;
}

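/*
 * rdma_cm event callback: translate the rdma_cm_event into the
 * rdma_ucm_event_resp form, queue it on the owning file's event list and
 * wake anyone blocked in poll() or in ucma_get_event().
 */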
static int ucma_event_handler(struct rdma_cm_id *cm_id,
                              struct rdma_cm_event *event)
{
        struct ucma_event *uevent;
        struct ucma_context *ctx = cm_id->context;
        int ret = 0;

        uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
        if (!uevent)
                return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

        uevent->cm_id = cm_id;
        ucma_set_event_context(ctx, event, uevent);
        uevent->resp.event = event->event;
        uevent->resp.status = event->status;
        if (cm_id->ps == RDMA_PS_UDP)
                ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
        else
                ucma_copy_conn_event(&uevent->resp.param.conn,
                                     &event->param.conn);

        mutex_lock(&ctx->file->mut);
        if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
                if (!ctx->backlog) {
                        ret = -ENOMEM;
                        kfree(uevent);
                        goto out;
                }
                ctx->backlog--;
        } else if (!ctx->uid) {
                /*
                 * We ignore events for new connections until userspace has set
                 * their context.  This can only happen if an error occurs on a
                 * new connection before the user accepts it.  This is okay,
                 * since the accept will just fail later.
                 */
                kfree(uevent);
                goto out;
        }

        list_add_tail(&uevent->list, &ctx->file->event_list);
        wake_up_interruptible(&ctx->file->poll_wait);
out:
        mutex_unlock(&ctx->file->mut);
        return ret;
}

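/*
 * GET_EVENT command: sleep until an event is queued on this file (unless the
 * fd is O_NONBLOCK), then copy it to userspace.  For connect requests a new
 * context is allocated here, so the caller gets back an id that it can
 * accept, reject or destroy.
 */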
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
                              int in_len, int out_len)
{
        struct ucma_context *ctx;
        struct rdma_ucm_get_event cmd;
        struct ucma_event *uevent;
        int ret = 0;
        DEFINE_WAIT(wait);

        if (out_len < sizeof uevent->resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&file->mut);
        while (list_empty(&file->event_list)) {
                if (file->filp->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }

                prepare_to_wait(&file->poll_wait, &wait, TASK_INTERRUPTIBLE);
                mutex_unlock(&file->mut);
                schedule();
                mutex_lock(&file->mut);
                finish_wait(&file->poll_wait, &wait);
        }

        if (ret)
                goto done;

        uevent = list_entry(file->event_list.next, struct ucma_event, list);

        if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
                ctx = ucma_alloc_ctx(file);
                if (!ctx) {
                        ret = -ENOMEM;
                        goto done;
                }
                uevent->ctx->backlog++;
                ctx->cm_id = uevent->cm_id;
                ctx->cm_id->context = ctx;
                uevent->resp.id = ctx->id;
        }

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &uevent->resp, sizeof uevent->resp)) {
                ret = -EFAULT;
                goto done;
        }

        list_del(&uevent->list);
        uevent->ctx->events_reported++;
        kfree(uevent);
done:
        mutex_unlock(&file->mut);
        return ret;
}

static ssize_t ucma_create_id(struct ucma_file *file,
                              const char __user *inbuf,
                              int in_len, int out_len)
{
        struct rdma_ucm_create_id cmd;
        struct rdma_ucm_create_id_resp resp;
        struct ucma_context *ctx;
        int ret;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&file->mut);
        ctx = ucma_alloc_ctx(file);
        mutex_unlock(&file->mut);
        if (!ctx)
                return -ENOMEM;

        ctx->uid = cmd.uid;
        ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps);
        if (IS_ERR(ctx->cm_id)) {
                ret = PTR_ERR(ctx->cm_id);
                goto err1;
        }

        resp.id = ctx->id;
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp))) {
                ret = -EFAULT;
                goto err2;
        }
        return 0;

err2:
        rdma_destroy_id(ctx->cm_id);
err1:
        mutex_lock(&mut);
        idr_remove(&ctx_idr, ctx->id);
        mutex_unlock(&mut);
        kfree(ctx);
        return ret;
}

static void ucma_cleanup_events(struct ucma_context *ctx)
{
        struct ucma_event *uevent, *tmp;

        list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
                if (uevent->ctx != ctx)
                        continue;

                list_del(&uevent->list);

                /* clear incoming connections. */
                if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
                        rdma_destroy_id(uevent->cm_id);

                kfree(uevent);
        }
}

static int ucma_free_ctx(struct ucma_context *ctx)
{
        int events_reported;

        /* No new events will be generated after destroying the id. */
        rdma_destroy_id(ctx->cm_id);

        /* Cleanup events not yet reported to the user. */
        mutex_lock(&ctx->file->mut);
        ucma_cleanup_events(ctx);
        list_del(&ctx->list);
        mutex_unlock(&ctx->file->mut);

        events_reported = ctx->events_reported;
        kfree(ctx);
        return events_reported;
}

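/*
 * DESTROY_ID command: remove the context from the idr so it can no longer be
 * looked up, drop the base reference taken in ucma_alloc_ctx(), wait for any
 * commands still holding a reference to finish, then free the context.
 */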
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_destroy_id cmd;
        struct rdma_ucm_destroy_id_resp resp;
        struct ucma_context *ctx;
        int ret = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&mut);
        ctx = _ucma_find_context(cmd.id, file);
        if (!IS_ERR(ctx))
                idr_remove(&ctx_idr, ctx->id);
        mutex_unlock(&mut);

        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ucma_put_ctx(ctx);
        wait_for_completion(&ctx->comp);
        resp.events_reported = ucma_free_ctx(ctx);

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

        return ret;
}

static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
                              int in_len, int out_len)
{
        struct rdma_ucm_bind_addr cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
                                 const char __user *inbuf,
                                 int in_len, int out_len)
{
        struct rdma_ucm_resolve_addr cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
                                (struct sockaddr *) &cmd.dst_addr,
                                cmd.timeout_ms);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
                                  const char __user *inbuf,
                                  int in_len, int out_len)
{
        struct rdma_ucm_resolve_route cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
        ucma_put_ctx(ctx);
        return ret;
}

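/*
 * Fill in the IB routing portion of a QUERY_ROUTE response.  With no
 * resolved paths, report the GIDs and pkey taken from the bound device
 * address; with one or two path records, copy them to the user-visible
 * format.
 */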
static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
                               struct rdma_route *route)
{
        struct rdma_dev_addr *dev_addr;

        resp->num_paths = route->num_paths;
        switch (route->num_paths) {
        case 0:
                dev_addr = &route->addr.dev_addr;
                ib_addr_get_dgid(dev_addr,
                                 (union ib_gid *) &resp->ib_route[0].dgid);
                ib_addr_get_sgid(dev_addr,
                                 (union ib_gid *) &resp->ib_route[0].sgid);
                resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
                break;
        case 2:
                ib_copy_path_rec_to_user(&resp->ib_route[1],
                                         &route->path_rec[1]);
                /* fall through */
        case 1:
                ib_copy_path_rec_to_user(&resp->ib_route[0],
                                         &route->path_rec[0]);
                break;
        default:
                break;
        }
}

static ssize_t ucma_query_route(struct ucma_file *file,
                                const char __user *inbuf,
                                int in_len, int out_len)
{
        struct rdma_ucm_query_route cmd;
        struct rdma_ucm_query_route_resp resp;
        struct ucma_context *ctx;
        struct sockaddr *addr;
        int ret = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        memset(&resp, 0, sizeof resp);
        addr = &ctx->cm_id->route.addr.src_addr;
        memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
                                     sizeof(struct sockaddr_in) :
                                     sizeof(struct sockaddr_in6));
        addr = &ctx->cm_id->route.addr.dst_addr;
        memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
                                     sizeof(struct sockaddr_in) :
                                     sizeof(struct sockaddr_in6));
        if (!ctx->cm_id->device)
                goto out;

        resp.node_guid = ctx->cm_id->device->node_guid;
        resp.port_num = ctx->cm_id->port_num;
        switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
        case RDMA_TRANSPORT_IB:
                ucma_copy_ib_route(&resp, &ctx->cm_id->route);
                break;
        default:
                break;
        }

out:
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

        ucma_put_ctx(ctx);
        return ret;
}

static void ucma_copy_conn_param(struct rdma_conn_param *dst,
                                 struct rdma_ucm_conn_param *src)
{
        dst->private_data = src->private_data;
        dst->private_data_len = src->private_data_len;
        dst->responder_resources = src->responder_resources;
        dst->initiator_depth = src->initiator_depth;
        dst->flow_control = src->flow_control;
        dst->retry_count = src->retry_count;
        dst->rnr_retry_count = src->rnr_retry_count;
        dst->srq = src->srq;
        dst->qp_num = src->qp_num;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
                            int in_len, int out_len)
{
        struct rdma_ucm_connect cmd;
        struct rdma_conn_param conn_param;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        if (!cmd.conn_param.valid)
                return -EINVAL;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ucma_copy_conn_param(&conn_param, &cmd.conn_param);
        ret = rdma_connect(ctx->cm_id, &conn_param);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_listen cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ctx->backlog = cmd.backlog > 0 && cmd.backlog < UCMA_MAX_BACKLOG ?
                       cmd.backlog : UCMA_MAX_BACKLOG;
        ret = rdma_listen(ctx->cm_id, ctx->backlog);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_accept cmd;
        struct rdma_conn_param conn_param;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        if (cmd.conn_param.valid) {
                ctx->uid = cmd.uid;
                ucma_copy_conn_param(&conn_param, &cmd.conn_param);
                ret = rdma_accept(ctx->cm_id, &conn_param);
        } else
                ret = rdma_accept(ctx->cm_id, NULL);

        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_reject cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_disconnect cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_disconnect(ctx->cm_id);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
                                 const char __user *inbuf,
                                 int in_len, int out_len)
{
        struct rdma_ucm_init_qp_attr cmd;
        struct ib_uverbs_qp_attr resp;
        struct ucma_context *ctx;
        struct ib_qp_attr qp_attr;
        int ret;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        resp.qp_attr_mask = 0;
        memset(&qp_attr, 0, sizeof qp_attr);
        qp_attr.qp_state = cmd.qp_state;
        ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
        if (ret)
                goto out;

        ib_copy_qp_attr_to_user(&resp, &qp_attr);
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

out:
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_notify cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
                                   const char __user *inbuf,
                                   int in_len, int out_len) = {
        [RDMA_USER_CM_CMD_CREATE_ID]    = ucma_create_id,
        [RDMA_USER_CM_CMD_DESTROY_ID]   = ucma_destroy_id,
        [RDMA_USER_CM_CMD_BIND_ADDR]    = ucma_bind_addr,
        [RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr,
        [RDMA_USER_CM_CMD_RESOLVE_ROUTE]= ucma_resolve_route,
        [RDMA_USER_CM_CMD_QUERY_ROUTE]  = ucma_query_route,
        [RDMA_USER_CM_CMD_CONNECT]      = ucma_connect,
        [RDMA_USER_CM_CMD_LISTEN]       = ucma_listen,
        [RDMA_USER_CM_CMD_ACCEPT]       = ucma_accept,
        [RDMA_USER_CM_CMD_REJECT]       = ucma_reject,
        [RDMA_USER_CM_CMD_DISCONNECT]   = ucma_disconnect,
        [RDMA_USER_CM_CMD_INIT_QP_ATTR] = ucma_init_qp_attr,
        [RDMA_USER_CM_CMD_GET_EVENT]    = ucma_get_event,
        [RDMA_USER_CM_CMD_GET_OPTION]   = NULL,
        [RDMA_USER_CM_CMD_SET_OPTION]   = NULL,
        [RDMA_USER_CM_CMD_NOTIFY]       = ucma_notify,
};

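/*
 * All commands are issued by writing a struct rdma_ucm_cmd_hdr followed by
 * the command-specific payload to the device; the header selects the handler
 * from ucma_cmd_table and gives the payload and response buffer sizes.
 */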
static ssize_t ucma_write(struct file *filp, const char __user *buf,
                          size_t len, loff_t *pos)
{
        struct ucma_file *file = filp->private_data;
        struct rdma_ucm_cmd_hdr hdr;
        ssize_t ret;

        if (len < sizeof(hdr))
                return -EINVAL;

        if (copy_from_user(&hdr, buf, sizeof(hdr)))
                return -EFAULT;

        if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
                return -EINVAL;

        if (hdr.in + sizeof(hdr) > len)
                return -EINVAL;

        if (!ucma_cmd_table[hdr.cmd])
                return -ENOSYS;

        ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
        if (!ret)
                ret = len;

        return ret;
}

static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
        struct ucma_file *file = filp->private_data;
        unsigned int mask = 0;

        poll_wait(filp, &file->poll_wait, wait);

        if (!list_empty(&file->event_list))
                mask = POLLIN | POLLRDNORM;

        return mask;
}

static int ucma_open(struct inode *inode, struct file *filp)
{
        struct ucma_file *file;

        file = kmalloc(sizeof *file, GFP_KERNEL);
        if (!file)
                return -ENOMEM;

        INIT_LIST_HEAD(&file->event_list);
        INIT_LIST_HEAD(&file->ctx_list);
        init_waitqueue_head(&file->poll_wait);
        mutex_init(&file->mut);

        filp->private_data = file;
        file->filp = filp;
        return 0;
}

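/*
 * Release handler: tear down every context still owned by this file.  The
 * file mutex is dropped around each ucma_free_ctx() call because freeing a
 * context retakes it while cleaning up that context's queued events.
 */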
static int ucma_close(struct inode *inode, struct file *filp)
{
        struct ucma_file *file = filp->private_data;
        struct ucma_context *ctx, *tmp;

        mutex_lock(&file->mut);
        list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
                mutex_unlock(&file->mut);

                mutex_lock(&mut);
                idr_remove(&ctx_idr, ctx->id);
                mutex_unlock(&mut);

                ucma_free_ctx(ctx);
                mutex_lock(&file->mut);
        }
        mutex_unlock(&file->mut);
        kfree(file);
        return 0;
}

static struct file_operations ucma_fops = {
        .owner   = THIS_MODULE,
        .open    = ucma_open,
        .release = ucma_close,
        .write   = ucma_write,
        .poll    = ucma_poll,
};

static struct miscdevice ucma_misc = {
        .minor  = MISC_DYNAMIC_MINOR,
        .name   = "rdma_cm",
        .fops   = &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
        int ret;

        ret = misc_register(&ucma_misc);
        if (ret)
                return ret;

        ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
        if (ret) {
                printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
                goto err;
        }
        return 0;
err:
        misc_deregister(&ucma_misc);
        return ret;
}

static void __exit ucma_cleanup(void)
{
        device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
        misc_deregister(&ucma_misc);
        idr_destroy(&ctx_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);