/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
        UCMA_MAX_BACKLOG        = 128
};
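
/*
 * Per-open-file state: each open of the rdma_cm misc device gets a
 * ucma_file that owns a list of contexts (one per rdma_cm_id) and a
 * queue of events waiting to be read by userspace.  file->mut protects
 * both lists; poll_wait is woken whenever an event is queued.
 */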
struct ucma_file {
        struct mutex            mut;
        struct file             *filp;
        struct list_head        ctx_list;
        struct list_head        event_list;
        wait_queue_head_t       poll_wait;
};

struct ucma_context {
        int                     id;
        struct completion       comp;
        atomic_t                ref;
        int                     events_reported;
        int                     backlog;

        struct ucma_file        *file;
        struct rdma_cm_id       *cm_id;
        u64                     uid;

        struct list_head        list;
        struct list_head        mc_list;
};

struct ucma_multicast {
        struct ucma_context     *ctx;
        int                     id;
        int                     events_reported;

        u64                     uid;
        struct list_head        list;
        struct sockaddr         addr;
        u8                      pad[sizeof(struct sockaddr_in6) -
                                    sizeof(struct sockaddr)];
};

struct ucma_event {
        struct ucma_context     *ctx;
        struct ucma_multicast   *mc;
        struct list_head        list;
        struct rdma_cm_id       *cm_id;
        struct rdma_ucm_event_resp resp;
};

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);
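
/*
 * The global 'mut' mutex protects both IDR tables.  _ucma_find_context()
 * must be called with 'mut' held; it also verifies that the context
 * belongs to the calling file, so one process cannot address another
 * process's ids.
 */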
static inline struct ucma_context *_ucma_find_context(int id,
                                                       struct ucma_file *file)
{
        struct ucma_context *ctx;

        ctx = idr_find(&ctx_idr, id);
        if (!ctx)
                ctx = ERR_PTR(-ENOENT);
        else if (ctx->file != file)
                ctx = ERR_PTR(-EINVAL);
        return ctx;
}
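
/*
 * ucma_get_ctx()/ucma_put_ctx() implement a simple reference count:
 * lookups take a reference under 'mut', and the final put completes
 * ctx->comp so that ucma_destroy_id() can wait until every outstanding
 * command reference has been dropped before tearing the context down.
 */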
static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
        struct ucma_context *ctx;

        mutex_lock(&mut);
        ctx = _ucma_find_context(id, file);
        if (!IS_ERR(ctx))
                atomic_inc(&ctx->ref);
        mutex_unlock(&mut);
        return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
        if (atomic_dec_and_test(&ctx->ref))
                complete(&ctx->comp);
}
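
/*
 * Allocate a context for a new rdma_cm_id and publish it in ctx_idr so
 * userspace can refer to it by the returned id.  Callers hold file->mut,
 * which protects the file's ctx_list.
 */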
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
        struct ucma_context *ctx;
        int ret;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return NULL;

        atomic_set(&ctx->ref, 1);
        init_completion(&ctx->comp);
        INIT_LIST_HEAD(&ctx->mc_list);
        ctx->file = file;

        do {
                ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
                if (!ret)
                        goto error;
                mutex_lock(&mut);
                ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
                mutex_unlock(&mut);
        } while (ret == -EAGAIN);
        if (ret)
                goto error;

        list_add_tail(&ctx->list, &file->ctx_list);
        return ctx;

error:
        kfree(ctx);
        return NULL;
}

static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
{
        struct ucma_multicast *mc;
        int ret;

        mc = kzalloc(sizeof(*mc), GFP_KERNEL);
        if (!mc)
                return NULL;

        do {
                ret = idr_pre_get(&multicast_idr, GFP_KERNEL);
                if (!ret)
                        goto error;
                mutex_lock(&mut);
                ret = idr_get_new(&multicast_idr, mc, &mc->id);
                mutex_unlock(&mut);
        } while (ret == -EAGAIN);
        if (ret)
                goto error;

        mc->ctx = ctx;
        list_add_tail(&mc->list, &ctx->mc_list);
        return mc;

error:
        kfree(mc);
        return NULL;
}
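
/*
 * The ucma_copy_*_event() helpers marshal kernel rdma_cm event
 * parameters into the fixed-layout structures of the userspace ABI
 * (struct rdma_ucm_event_resp) before an event is queued.
 */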
static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
                                 struct rdma_conn_param *src)
{
        if (src->private_data_len)
                memcpy(dst->private_data, src->private_data,
                       src->private_data_len);
        dst->private_data_len = src->private_data_len;
        dst->responder_resources = src->responder_resources;
        dst->initiator_depth = src->initiator_depth;
        dst->flow_control = src->flow_control;
        dst->retry_count = src->retry_count;
        dst->rnr_retry_count = src->rnr_retry_count;
        dst->srq = src->srq;
        dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
                               struct rdma_ud_param *src)
{
        if (src->private_data_len)
                memcpy(dst->private_data, src->private_data,
                       src->private_data_len);
        dst->private_data_len = src->private_data_len;
        ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
        dst->qp_num = src->qp_num;
        dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
                                   struct rdma_cm_event *event,
                                   struct ucma_event *uevent)
{
        uevent->ctx = ctx;
        switch (event->event) {
        case RDMA_CM_EVENT_MULTICAST_JOIN:
        case RDMA_CM_EVENT_MULTICAST_ERROR:
                uevent->mc = (struct ucma_multicast *)
                             event->param.ud.private_data;
                uevent->resp.uid = uevent->mc->uid;
                uevent->resp.id = uevent->mc->id;
                break;
        default:
                uevent->resp.uid = ctx->uid;
                uevent->resp.id = ctx->id;
                break;
        }
}
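
/*
 * rdma_cm callback: package the event for userspace, queue it on the
 * owning file's event_list, and wake any pollers/readers.  A connect
 * request consumes a slot from the listener's backlog; events on ids
 * whose userspace context has not been set yet are dropped (see the
 * comment in the body below).
 */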
static int ucma_event_handler(struct rdma_cm_id *cm_id,
                              struct rdma_cm_event *event)
{
        struct ucma_event *uevent;
        struct ucma_context *ctx = cm_id->context;
        int ret = 0;

        uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
        if (!uevent)
                return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

        uevent->cm_id = cm_id;
        ucma_set_event_context(ctx, event, uevent);
        uevent->resp.event = event->event;
        uevent->resp.status = event->status;
        if (cm_id->ps == RDMA_PS_UDP || cm_id->ps == RDMA_PS_IPOIB)
                ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
        else
                ucma_copy_conn_event(&uevent->resp.param.conn,
                                     &event->param.conn);

        mutex_lock(&ctx->file->mut);
        if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
                if (!ctx->backlog) {
                        ret = -ENOMEM;
                        kfree(uevent);
                        goto out;
                }
                ctx->backlog--;
        } else if (!ctx->uid) {
                /*
                 * We ignore events for new connections until userspace has set
                 * their context.  This can only happen if an error occurs on a
                 * new connection before the user accepts it.  This is okay,
                 * since the accept will just fail later.
                 */
                kfree(uevent);
                goto out;
        }

        list_add_tail(&uevent->list, &ctx->file->event_list);
        wake_up_interruptible(&ctx->file->poll_wait);
out:
        mutex_unlock(&ctx->file->mut);
        return ret;
}
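
/*
 * Read one queued event.  Blocks (unless O_NONBLOCK) until an event is
 * available.  For a connect request a fresh context is allocated here so
 * the new id can be reported to userspace, and the listener's backlog
 * slot taken in ucma_event_handler() is returned.
 */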
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
                              int in_len, int out_len)
{
        struct ucma_context *ctx;
        struct rdma_ucm_get_event cmd;
        struct ucma_event *uevent;
        int ret = 0;
        DEFINE_WAIT(wait);

        if (out_len < sizeof uevent->resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&file->mut);
        while (list_empty(&file->event_list)) {
                if (file->filp->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }

                prepare_to_wait(&file->poll_wait, &wait, TASK_INTERRUPTIBLE);
                mutex_unlock(&file->mut);
                schedule();
                mutex_lock(&file->mut);
                finish_wait(&file->poll_wait, &wait);
        }

        if (ret)
                goto done;

        uevent = list_entry(file->event_list.next, struct ucma_event, list);

        if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
                ctx = ucma_alloc_ctx(file);
                if (!ctx) {
                        ret = -ENOMEM;
                        goto done;
                }
                uevent->ctx->backlog++;
                ctx->cm_id = uevent->cm_id;
                ctx->cm_id->context = ctx;
                uevent->resp.id = ctx->id;
        }

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &uevent->resp, sizeof uevent->resp)) {
                ret = -EFAULT;
                goto done;
        }

        list_del(&uevent->list);
        uevent->ctx->events_reported++;
        if (uevent->mc)
                uevent->mc->events_reported++;
        kfree(uevent);
done:
        mutex_unlock(&file->mut);
        return ret;
}
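
/*
 * RDMA_USER_CM_CMD_CREATE_ID: allocate a context, create the underlying
 * rdma_cm_id with ucma_event_handler() as its callback, and return the
 * new id to userspace.
 */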
static ssize_t ucma_create_id(struct ucma_file *file,
                              const char __user *inbuf,
                              int in_len, int out_len)
{
        struct rdma_ucm_create_id cmd;
        struct rdma_ucm_create_id_resp resp;
        struct ucma_context *ctx;
        int ret;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&file->mut);
        ctx = ucma_alloc_ctx(file);
        mutex_unlock(&file->mut);
        if (!ctx)
                return -ENOMEM;

        ctx->uid = cmd.uid;
        ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps);
        if (IS_ERR(ctx->cm_id)) {
                ret = PTR_ERR(ctx->cm_id);
                goto err1;
        }

        resp.id = ctx->id;
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp))) {
                ret = -EFAULT;
                goto err2;
        }
        return 0;

err2:
        rdma_destroy_id(ctx->cm_id);
err1:
        mutex_lock(&mut);
        idr_remove(&ctx_idr, ctx->id);
        mutex_unlock(&mut);
        kfree(ctx);
        return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
        struct ucma_multicast *mc, *tmp;

        mutex_lock(&mut);
        list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
                list_del(&mc->list);
                idr_remove(&multicast_idr, mc->id);
                kfree(mc);
        }
        mutex_unlock(&mut);
}

static void ucma_cleanup_events(struct ucma_context *ctx)
{
        struct ucma_event *uevent, *tmp;

        list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
                if (uevent->ctx != ctx)
                        continue;

                list_del(&uevent->list);

                /* clear incoming connections. */
                if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
                        rdma_destroy_id(uevent->cm_id);

                kfree(uevent);
        }
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
        struct ucma_event *uevent, *tmp;

        list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
                if (uevent->mc != mc)
                        continue;

                list_del(&uevent->list);
                kfree(uevent);
        }
}

static int ucma_free_ctx(struct ucma_context *ctx)
{
        int events_reported;

        /* No new events will be generated after destroying the id. */
        rdma_destroy_id(ctx->cm_id);

        ucma_cleanup_multicast(ctx);

        /* Cleanup events not yet reported to the user. */
        mutex_lock(&ctx->file->mut);
        ucma_cleanup_events(ctx);
        list_del(&ctx->list);
        mutex_unlock(&ctx->file->mut);

        events_reported = ctx->events_reported;
        kfree(ctx);
        return events_reported;
}
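
/*
 * RDMA_USER_CM_CMD_DESTROY_ID: remove the context from the idr first so
 * no new commands can find it, drop the initial reference, and wait for
 * any in-flight commands to release theirs before freeing everything.
 * The number of events already reported for this id is returned in the
 * response.
 */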
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_destroy_id cmd;
        struct rdma_ucm_destroy_id_resp resp;
        struct ucma_context *ctx;
        int ret = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&mut);
        ctx = _ucma_find_context(cmd.id, file);
        if (!IS_ERR(ctx))
                idr_remove(&ctx_idr, ctx->id);
        mutex_unlock(&mut);

        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ucma_put_ctx(ctx);
        wait_for_completion(&ctx->comp);
        resp.events_reported = ucma_free_ctx(ctx);

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

        return ret;
}

static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
                              int in_len, int out_len)
{
        struct rdma_ucm_bind_addr cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
                                 const char __user *inbuf,
                                 int in_len, int out_len)
{
        struct rdma_ucm_resolve_addr cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
                                (struct sockaddr *) &cmd.dst_addr,
                                cmd.timeout_ms);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
                                  const char __user *inbuf,
                                  int in_len, int out_len)
{
        struct rdma_ucm_resolve_route cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
        ucma_put_ctx(ctx);
        return ret;
}
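
/*
 * Fill in the IB-specific portion of a route query.  num_paths encodes
 * how far resolution has progressed: 0 means only address resolution has
 * completed, so the GIDs and pkey are taken from the device address;
 * 1 or 2 mean that many resolved path records are copied out.
 */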
static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
                               struct rdma_route *route)
{
        struct rdma_dev_addr *dev_addr;

        resp->num_paths = route->num_paths;
        switch (route->num_paths) {
        case 0:
                dev_addr = &route->addr.dev_addr;
                ib_addr_get_dgid(dev_addr,
                                 (union ib_gid *) &resp->ib_route[0].dgid);
                ib_addr_get_sgid(dev_addr,
                                 (union ib_gid *) &resp->ib_route[0].sgid);
                resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
                break;
        case 2:
                ib_copy_path_rec_to_user(&resp->ib_route[1],
                                         &route->path_rec[1]);
                /* fall through */
        case 1:
                ib_copy_path_rec_to_user(&resp->ib_route[0],
                                         &route->path_rec[0]);
                break;
        default:
                break;
        }
}

static ssize_t ucma_query_route(struct ucma_file *file,
                                const char __user *inbuf,
                                int in_len, int out_len)
{
        struct rdma_ucm_query_route cmd;
        struct rdma_ucm_query_route_resp resp;
        struct ucma_context *ctx;
        struct sockaddr *addr;
        int ret = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        memset(&resp, 0, sizeof resp);
        addr = &ctx->cm_id->route.addr.src_addr;
        memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
                                     sizeof(struct sockaddr_in) :
                                     sizeof(struct sockaddr_in6));
        addr = &ctx->cm_id->route.addr.dst_addr;
        memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
                                     sizeof(struct sockaddr_in) :
                                     sizeof(struct sockaddr_in6));
        if (!ctx->cm_id->device)
                goto out;

        resp.node_guid = ctx->cm_id->device->node_guid;
        resp.port_num = ctx->cm_id->port_num;
        switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
        case RDMA_TRANSPORT_IB:
                ucma_copy_ib_route(&resp, &ctx->cm_id->route);
                break;
        default:
                break;
        }

out:
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

        ucma_put_ctx(ctx);
        return ret;
}

static void ucma_copy_conn_param(struct rdma_conn_param *dst,
                                 struct rdma_ucm_conn_param *src)
{
        dst->private_data = src->private_data;
        dst->private_data_len = src->private_data_len;
        dst->responder_resources = src->responder_resources;
        dst->initiator_depth = src->initiator_depth;
        dst->flow_control = src->flow_control;
        dst->retry_count = src->retry_count;
        dst->rnr_retry_count = src->rnr_retry_count;
        dst->srq = src->srq;
        dst->qp_num = src->qp_num;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
                            int in_len, int out_len)
{
        struct rdma_ucm_connect cmd;
        struct rdma_conn_param conn_param;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        if (!cmd.conn_param.valid)
                return -EINVAL;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ucma_copy_conn_param(&conn_param, &cmd.conn_param);
        ret = rdma_connect(ctx->cm_id, &conn_param);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_listen cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ctx->backlog = cmd.backlog > 0 && cmd.backlog < UCMA_MAX_BACKLOG ?
                       cmd.backlog : UCMA_MAX_BACKLOG;
        ret = rdma_listen(ctx->cm_id, ctx->backlog);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_accept cmd;
        struct rdma_conn_param conn_param;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        if (cmd.conn_param.valid) {
                ctx->uid = cmd.uid;
                ucma_copy_conn_param(&conn_param, &cmd.conn_param);
                ret = rdma_accept(ctx->cm_id, &conn_param);
        } else
                ret = rdma_accept(ctx->cm_id, NULL);

        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_reject cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_disconnect cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_disconnect(ctx->cm_id);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
                                 const char __user *inbuf,
                                 int in_len, int out_len)
{
        struct rdma_ucm_init_qp_attr cmd;
        struct ib_uverbs_qp_attr resp;
        struct ucma_context *ctx;
        struct ib_qp_attr qp_attr;
        int ret;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        resp.qp_attr_mask = 0;
        memset(&qp_attr, 0, sizeof qp_attr);
        qp_attr.qp_state = cmd.qp_state;
        ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
        if (ret)
                goto out;

        ib_copy_qp_attr_to_user(&resp, &qp_attr);
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

out:
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_notify cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
        ucma_put_ctx(ctx);
        return ret;
}
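
/*
 * RDMA_USER_CM_CMD_JOIN_MCAST: allocate a ucma_multicast tracking
 * structure, join the group via rdma_join_multicast() (completion is
 * reported later as an RDMA_CM_EVENT_MULTICAST_JOIN/ERROR event), and
 * hand the multicast id back to userspace.
 */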
static ssize_t ucma_join_multicast(struct ucma_file *file,
                                   const char __user *inbuf,
                                   int in_len, int out_len)
{
        struct rdma_ucm_join_mcast cmd;
        struct rdma_ucm_create_id_resp resp;
        struct ucma_context *ctx;
        struct ucma_multicast *mc;
        int ret;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        mutex_lock(&file->mut);
        mc = ucma_alloc_multicast(ctx);
        if (!mc) {
                ret = -ENOMEM;
                goto err1;
        }

        mc->uid = cmd.uid;
        memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
        ret = rdma_join_multicast(ctx->cm_id, &mc->addr, mc);
        if (ret)
                goto err2;

        resp.id = mc->id;
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp))) {
                ret = -EFAULT;
                goto err3;
        }

        mutex_unlock(&file->mut);
        ucma_put_ctx(ctx);
        return 0;

err3:
        rdma_leave_multicast(ctx->cm_id, &mc->addr);
        ucma_cleanup_mc_events(mc);
err2:
        mutex_lock(&mut);
        idr_remove(&multicast_idr, mc->id);
        mutex_unlock(&mut);
        list_del(&mc->list);
        kfree(mc);
err1:
        mutex_unlock(&file->mut);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
                                    const char __user *inbuf,
                                    int in_len, int out_len)
{
        struct rdma_ucm_destroy_id cmd;
        struct rdma_ucm_destroy_id_resp resp;
        struct ucma_multicast *mc;
        int ret = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&mut);
        mc = idr_find(&multicast_idr, cmd.id);
        if (!mc)
                mc = ERR_PTR(-ENOENT);
        else if (mc->ctx->file != file)
                mc = ERR_PTR(-EINVAL);
        else {
                idr_remove(&multicast_idr, mc->id);
                atomic_inc(&mc->ctx->ref);
        }
        mutex_unlock(&mut);

        if (IS_ERR(mc)) {
                ret = PTR_ERR(mc);
                goto out;
        }

        rdma_leave_multicast(mc->ctx->cm_id, &mc->addr);
        mutex_lock(&mc->ctx->file->mut);
        ucma_cleanup_mc_events(mc);
        list_del(&mc->list);
        mutex_unlock(&mc->ctx->file->mut);

        ucma_put_ctx(mc->ctx);
        resp.events_reported = mc->events_reported;
        kfree(mc);

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;
out:
        return ret;
}
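
/*
 * Command dispatch: userspace issues commands by write()ing a
 * struct rdma_ucm_cmd_hdr followed by the command-specific payload;
 * hdr.cmd indexes this table.
 */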
static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
                                   const char __user *inbuf,
                                   int in_len, int out_len) = {
        [RDMA_USER_CM_CMD_CREATE_ID]    = ucma_create_id,
        [RDMA_USER_CM_CMD_DESTROY_ID]   = ucma_destroy_id,
        [RDMA_USER_CM_CMD_BIND_ADDR]    = ucma_bind_addr,
        [RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr,
        [RDMA_USER_CM_CMD_RESOLVE_ROUTE]= ucma_resolve_route,
        [RDMA_USER_CM_CMD_QUERY_ROUTE]  = ucma_query_route,
        [RDMA_USER_CM_CMD_CONNECT]      = ucma_connect,
        [RDMA_USER_CM_CMD_LISTEN]       = ucma_listen,
        [RDMA_USER_CM_CMD_ACCEPT]       = ucma_accept,
        [RDMA_USER_CM_CMD_REJECT]       = ucma_reject,
        [RDMA_USER_CM_CMD_DISCONNECT]   = ucma_disconnect,
        [RDMA_USER_CM_CMD_INIT_QP_ATTR] = ucma_init_qp_attr,
        [RDMA_USER_CM_CMD_GET_EVENT]    = ucma_get_event,
        [RDMA_USER_CM_CMD_GET_OPTION]   = NULL,
        [RDMA_USER_CM_CMD_SET_OPTION]   = NULL,
        [RDMA_USER_CM_CMD_NOTIFY]       = ucma_notify,
        [RDMA_USER_CM_CMD_JOIN_MCAST]   = ucma_join_multicast,
        [RDMA_USER_CM_CMD_LEAVE_MCAST]  = ucma_leave_multicast,
};

static ssize_t ucma_write(struct file *filp, const char __user *buf,
                          size_t len, loff_t *pos)
{
        struct ucma_file *file = filp->private_data;
        struct rdma_ucm_cmd_hdr hdr;
        ssize_t ret;

        if (len < sizeof(hdr))
                return -EINVAL;

        if (copy_from_user(&hdr, buf, sizeof(hdr)))
                return -EFAULT;

        if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
                return -EINVAL;

        if (hdr.in + sizeof(hdr) > len)
                return -EINVAL;

        if (!ucma_cmd_table[hdr.cmd])
                return -ENOSYS;

        ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
        if (!ret)
                ret = len;

        return ret;
}

static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
        struct ucma_file *file = filp->private_data;
        unsigned int mask = 0;

        poll_wait(filp, &file->poll_wait, wait);

        if (!list_empty(&file->event_list))
                mask = POLLIN | POLLRDNORM;

        return mask;
}

static int ucma_open(struct inode *inode, struct file *filp)
{
        struct ucma_file *file;

        file = kmalloc(sizeof *file, GFP_KERNEL);
        if (!file)
                return -ENOMEM;

        INIT_LIST_HEAD(&file->event_list);
        INIT_LIST_HEAD(&file->ctx_list);
        init_waitqueue_head(&file->poll_wait);
        mutex_init(&file->mut);

        filp->private_data = file;
        file->filp = filp;
        return 0;
}
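
/*
 * Release: destroy every context still owned by this file.  file->mut is
 * dropped around ucma_free_ctx() because freeing a context re-acquires
 * it to clean up queued events.
 */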
static int ucma_close(struct inode *inode, struct file *filp)
{
        struct ucma_file *file = filp->private_data;
        struct ucma_context *ctx, *tmp;

        mutex_lock(&file->mut);
        list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
                mutex_unlock(&file->mut);

                mutex_lock(&mut);
                idr_remove(&ctx_idr, ctx->id);
                mutex_unlock(&mut);

                ucma_free_ctx(ctx);
                mutex_lock(&file->mut);
        }
        mutex_unlock(&file->mut);
        kfree(file);
        return 0;
}

static const struct file_operations ucma_fops = {
        .owner   = THIS_MODULE,
        .open    = ucma_open,
        .release = ucma_close,
        .write   = ucma_write,
        .poll    = ucma_poll,
};

static struct miscdevice ucma_misc = {
        .minor  = MISC_DYNAMIC_MINOR,
        .name   = "rdma_cm",
        .fops   = &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
        int ret;

        ret = misc_register(&ucma_misc);
        if (ret)
                return ret;

        ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
        if (ret) {
                printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
                goto err;
        }
        return 0;
err:
        misc_deregister(&ucma_misc);
        return ret;
}

static void __exit ucma_cleanup(void)
{
        device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
        misc_deregister(&ucma_misc);
        idr_destroy(&ctx_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);