/*
 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
	UCMA_MAX_BACKLOG	= 128
};

struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
};

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);
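
/*
 * The global "mut" mutex protects ctx_idr and multicast_idr.  Each
 * ucma_file's own mutex protects that file's ctx_list and event_list.
 * Contexts are reference counted: ucma_put_ctx() completes ctx->comp when
 * the last reference is dropped, which lets ucma_destroy_id() wait out
 * in-flight users before freeing the context.
 */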
static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx))
		atomic_inc(&ctx->ref);
	mutex_unlock(&mut);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}
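
/*
 * Allocate a context and publish it in ctx_idr.  The idr_pre_get()/
 * idr_get_new() pairing is the pre-idr_alloc() API of this kernel
 * generation: preload, then retry the allocation while it reports
 * -EAGAIN.
 */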
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	do {
		ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
		if (!ret)
			goto error;

		mutex_lock(&mut);
		ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
		mutex_unlock(&mut);
	} while (ret == -EAGAIN);

	if (ret)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;
	int ret;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	do {
		ret = idr_pre_get(&multicast_idr, GFP_KERNEL);
		if (!ret)
			goto error;

		mutex_lock(&mut);
		ret = idr_get_new(&multicast_idr, mc, &mc->id);
		mutex_unlock(&mut);
	} while (ret == -EAGAIN);

	if (ret)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}
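
/*
 * The ucma_copy_*_event() helpers below marshal kernel rdma_cm event
 * parameters into the fixed-layout userspace ABI structures.  The ABI
 * structs reserve RDMA_MAX_PRIVATE_DATA bytes for private_data, which
 * bounds the memcpy.
 */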
static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}
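
/*
 * Multicast join/error events carry the ucma_multicast pointer in
 * param.ud.private_data (ucma_join_multicast() passes the mc as the
 * rdma_join_multicast() context), so report the mc's uid and id instead
 * of the context's.
 */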
static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}
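
/*
 * rdma_cm event callback: queue the event on the owning file and wake up
 * pollers.  Connect requests consume a backlog slot; events arriving on a
 * context whose uid userspace has not set yet are dropped.  On allocation
 * failure, returning non-zero for a connect request tells the rdma_cm to
 * destroy the new id.
 */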
static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->ps == RDMA_PS_UDP || cm_id->ps == RDMA_PS_IPOIB)
		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	mutex_lock(&ctx->file->mut);
	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context.  This can only happen if an error occurs on a
		 * new connection before the user accepts it.  This is okay,
		 * since the accept will just fail later.
		 */
		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}
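
/*
 * Hand the oldest queued event to userspace, blocking unless O_NONBLOCK
 * is set.  A connect request gets a fresh context allocated here so the
 * caller receives an id it can later accept or reject.
 */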
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;
	DEFINE_WAIT(wait);

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

static ssize_t ucma_create_id(struct ucma_file *file,
			      const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps);
	if (IS_ERR(ctx->cm_id)) {
		ret = PTR_ERR(ctx->cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}
	return 0;

err2:
	rdma_destroy_id(ctx->cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	kfree(ctx);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_events(struct ucma_context *ctx)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx != ctx)
			continue;

		list_del(&uevent->list);

		/* clear incoming connections. */
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);

		kfree(uevent);
	}
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;

	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	ucma_cleanup_events(ctx);
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}
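
/*
 * Destroy sequence: remove the id from ctx_idr under "mut" so no new
 * references can be taken, drop the initial reference, then wait on
 * ctx->comp for in-flight users to release theirs before freeing.
 */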
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	resp.events_reported = ucma_free_ctx(ctx);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_bind_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr,
				cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}
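
/*
 * num_paths == 0 means the route has not been resolved yet, so the GIDs
 * and pkey are synthesized from the bound device address; with resolved
 * paths, index 0 is the primary and index 1 the alternate path record.
 */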
static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		ib_addr_get_dgid(dev_addr,
				 (union ib_gid *) &resp->ib_route[0].dgid);
		ib_addr_get_sgid(dev_addr,
				 (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query_route cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;
	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
		break;
	default:
		break;
	}

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_conn_param(struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(&conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < UCMA_MAX_BACKLOG ?
		       cmd.backlog : UCMA_MAX_BACKLOG;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ctx->uid = cmd.uid;
		ucma_copy_conn_param(&conn_param, &cmd.conn_param);
		ret = rdma_accept(ctx->cm_id, &conn_param);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(&resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = kmalloc(cmd.optlen, GFP_KERNEL);
	if (!optval) {
		ret = -ENOMEM;
		goto out1;
	}

	if (copy_from_user(optval, (void __user *) (unsigned long) cmd.optval,
			   cmd.optlen)) {
		ret = -EFAULT;
		goto out2;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
out2:
	kfree(optval);
out1:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}

	mc->uid = cmd.uid;
	memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else {
		idr_remove(&multicast_idr, mc->id);
		atomic_inc(&mc->ctx->ref);
	}
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire mutex's based on pointer comparison to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock(&file2->mut);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock(&file1->mut);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}

static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct file *filp;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	filp = fget(cmd.fd);
	if (!filp)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(filp->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fput(filp);
	return ret;
}
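
/*
 * Userspace drives this device through write(): a struct rdma_ucm_cmd_hdr
 * followed, in the same buffer, by the command payload.  hdr.cmd indexes
 * the table below; hdr.in and hdr.out give the payload and response
 * sizes.  A hypothetical caller (illustrative sketch only, not part of
 * this file) would look like:
 *
 *	struct {
 *		struct rdma_ucm_cmd_hdr hdr;
 *		struct rdma_ucm_create_id cmd;
 *	} msg;
 *
 *	msg.hdr.cmd = RDMA_USER_CM_CMD_CREATE_ID;
 *	msg.hdr.in  = sizeof(msg.cmd);
 *	msg.hdr.out = sizeof(struct rdma_ucm_create_id_resp);
 *	write(fd, &msg, sizeof(msg));
 */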
static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID]	= ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	= ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_ADDR]	= ucma_bind_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	= ucma_resolve_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE]= ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	= ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	= ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	= ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	= ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	= ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	= ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	= ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	= ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	= NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	= ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	= ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	= ucma_join_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	= ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	= ucma_migrate_id
};

static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = POLLIN | POLLRDNORM;

	return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;
	return 0;
}

static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner 	 = THIS_MODULE,
	.open 	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll    = ucma_poll,
};

static struct miscdevice ucma_misc = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "rdma_cm",
	.fops	= &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
		goto err;
	}
	return 0;
err:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);