/*
 * linux/fs/9p/trans_rdma.c
 *
 * RDMA transport layer based on the trans_fd.c implementation.
 *
 *  Copyright (C) 2008 by Tom Tucker <tom@opengridcomputing.com>
 *  Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
 *  Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 *  Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */
#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/semaphore.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#define P9_PORT			5640
#define P9_RDMA_SQ_DEPTH	32
#define P9_RDMA_RQ_DEPTH	32
#define P9_RDMA_SEND_SGE	4
#define P9_RDMA_RECV_SGE	4
#define P9_RDMA_IRD		0
#define P9_RDMA_ORD		0
#define P9_RDMA_TIMEOUT		30000		/* 30 seconds */
#define P9_RDMA_MAXSIZE		(4*4096)	/* Min SGE is 4, so we can
						 * safely advertise a maxsize
						 * of 16k (4 * PAGE_SIZE) */
#define P9_RDMA_MAX_SGE (P9_RDMA_MAXSIZE >> PAGE_SHIFT)
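
/*
 * Worked example (illustrative): with the usual 4KB pages
 * (PAGE_SHIFT == 12), P9_RDMA_MAX_SGE is 16384 >> 12 == 4,
 * i.e. one SGE per page of the largest message we advertise.
 */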
/**
 * struct p9_trans_rdma - RDMA transport instance
 *
 * @state: tracks the transport state machine for connection setup and tear down
 * @cm_id: The RDMA CM ID
 * @pd: Protection Domain pointer
 * @qp: Queue Pair pointer
 * @cq: Completion Queue pointer
 * @dma_mr: DMA Memory Region pointer
 * @lkey: The local access only memory region key
 * @timeout: Number of msecs to wait for connection management events
 * @sq_depth: The depth of the Send Queue
 * @sq_sem: Semaphore for the SQ
 * @rq_depth: The depth of the Receive Queue.
 * @rq_count: Count of receive buffers posted to the RQ
 * @addr: The remote peer's address
 * @req_lock: Protects the active request list
 * @cm_done: Completion event for connection management tracking
 */
struct p9_trans_rdma {
	enum {
		P9_RDMA_INIT,
		P9_RDMA_ADDR_RESOLVED,
		P9_RDMA_ROUTE_RESOLVED,
		P9_RDMA_CONNECTED,
		P9_RDMA_FLUSHING,
		P9_RDMA_CLOSING,
		P9_RDMA_CLOSED,
	} state;
	struct rdma_cm_id *cm_id;
	struct ib_pd *pd;
	struct ib_qp *qp;
	struct ib_cq *cq;
	struct ib_mr *dma_mr;
	u32 lkey;
	long timeout;
	int sq_depth;
	struct semaphore sq_sem;
	int rq_depth;
	atomic_t rq_count;
	struct sockaddr_in addr;
	spinlock_t req_lock;

	struct completion cm_done;
};
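
/*
 * Connection setup advances the state field above as CM events arrive
 * (see p9_cm_event_handler() below):
 *
 *	P9_RDMA_INIT -> P9_RDMA_ADDR_RESOLVED -> P9_RDMA_ROUTE_RESOLVED
 *		     -> P9_RDMA_CONNECTED
 *
 * The FLUSHING, CLOSING and CLOSED states are used only on the error
 * and teardown paths.
 */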
/**
 * p9_rdma_context - Keeps track of in-process WR
 *
 * @wc_op: The original WR op for when the CQE completes in error.
 * @busa: Bus address to unmap when the WR completes
 * @req: Keeps track of requests (send)
 * @rc: Keeps track of replies (receive)
 */
struct p9_rdma_context {
	enum ib_wc_opcode wc_op;
	dma_addr_t busa;
	union {
		struct p9_req_t *req;
		struct p9_fcall *rc;
	};
};
/**
 * p9_rdma_opts - Collection of mount options
 * @port: port of connection
 * @sq_depth: The requested depth of the SQ. This really doesn't need
 * to be any deeper than the number of threads used in the client
 * @rq_depth: The depth of the RQ. Should be greater than or equal to SQ depth
 * @timeout: Time to wait in msecs for CM events
 */
struct p9_rdma_opts {
	short port;
	int sq_depth;
	int rq_depth;
	long timeout;
};
/*
 * Option Parsing (code inspired by NFS code)
 */
enum {
	/* Options that take integer arguments */
	Opt_port, Opt_rq_depth, Opt_sq_depth, Opt_timeout, Opt_err,
};

static match_table_t tokens = {
	{Opt_port, "port=%u"},
	{Opt_sq_depth, "sq=%u"},
	{Opt_rq_depth, "rq=%u"},
	{Opt_timeout, "timeout=%u"},
	{Opt_err, NULL},
};
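
/*
 * Illustrative example (values are hypothetical): an option string such as
 *
 *	"port=5640,sq=16,rq=32,timeout=30000"
 *
 * is split on ',' by parse_opts() below and each "name=value" token is
 * matched against this table.
 */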
/**
 * parse_opts - parse mount options into transport structure
 * @params: options string passed from mount
 * @opts: transport-specific structure to parse options into
 *
 * Returns 0 upon success, -ERRNO upon failure
 */
static int parse_opts(char *params, struct p9_rdma_opts *opts)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int option;
	char *options;

	opts->port = P9_PORT;
	opts->sq_depth = P9_RDMA_SQ_DEPTH;
	opts->rq_depth = P9_RDMA_RQ_DEPTH;
	opts->timeout = P9_RDMA_TIMEOUT;

	if (!params)
		return 0;

	options = kstrdup(params, GFP_KERNEL);
	if (!options) {
		P9_DPRINTK(P9_DEBUG_ERROR,
			   "failed to allocate copy of option string\n");
		return -ENOMEM;
	}

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		int r;

		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		r = match_int(&args[0], &option);
		if (r < 0) {
			P9_DPRINTK(P9_DEBUG_ERROR,
				   "integer field, but no integer?\n");
			continue;
		}

		switch (token) {
		case Opt_port:
			opts->port = option;
			break;
		case Opt_sq_depth:
			opts->sq_depth = option;
			break;
		case Opt_rq_depth:
			opts->rq_depth = option;
			break;
		case Opt_timeout:
			opts->timeout = option;
			break;
		default:
			continue;
		}
	}
	/* RQ must be at least as large as the SQ */
	opts->rq_depth = max(opts->rq_depth, opts->sq_depth);

	kfree(options);
	return 0;
}
static int
p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct p9_client *c = id->context;
	struct p9_trans_rdma *rdma = c->trans;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		BUG_ON(rdma->state != P9_RDMA_INIT);
		rdma->state = P9_RDMA_ADDR_RESOLVED;
		break;

	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED);
		rdma->state = P9_RDMA_ROUTE_RESOLVED;
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED);
		rdma->state = P9_RDMA_CONNECTED;
		break;

	case RDMA_CM_EVENT_DISCONNECTED:
		if (rdma)
			rdma->state = P9_RDMA_CLOSED;
		if (c)
			c->status = Disconnected;
		break;

	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		break;

	/* Treat the remaining CM events as fatal for this connection */
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
	case RDMA_CM_EVENT_REJECTED:
	case RDMA_CM_EVENT_CONNECT_REQUEST:
	case RDMA_CM_EVENT_CONNECT_RESPONSE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		c->status = Disconnected;
		rdma_disconnect(rdma->cm_id);
		break;

	default:
		BUG();
	}
	complete(&rdma->cm_done);
	return 0;
}
static void
handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
	    struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
{
	struct p9_req_t *req;
	int err = 0;
	int16_t tag;

	req = NULL;
	ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
							 DMA_FROM_DEVICE);

	if (status != IB_WC_SUCCESS)
		goto err_out;

	err = p9_parse_header(c->rc, NULL, NULL, &tag, 1);
	if (err)
		goto err_out;

	req = p9_tag_lookup(client, tag);
	if (!req)
		goto err_out;

	req->rc = c->rc;
	p9_client_cb(client, req);

	return;

 err_out:
	P9_DPRINTK(P9_DEBUG_ERROR, "req %p err %d status %d\n",
		   req, err, status);
	rdma->state = P9_RDMA_FLUSHING;
	client->status = Disconnected;
}
static void
handle_send(struct p9_client *client, struct p9_trans_rdma *rdma,
	    struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
{
	ib_dma_unmap_single(rdma->cm_id->device,
			    c->busa, c->req->tc->size,
			    DMA_TO_DEVICE);
}
static void qp_event_handler(struct ib_event *event, void *context)
{
	P9_DPRINTK(P9_DEBUG_ERROR, "QP event %d context %p\n", event->event,
								context);
}

static void cq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct p9_client *client = cq_context;
	struct p9_trans_rdma *rdma = client->trans;
	int ret;
	struct ib_wc wc;

	ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
	while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
		struct p9_rdma_context *c = (void *) (unsigned long) wc.wr_id;

		switch (c->wc_op) {
		case IB_WC_RECV:
			atomic_dec(&rdma->rq_count);
			handle_recv(client, rdma, c, wc.status, wc.byte_len);
			break;

		case IB_WC_SEND:
			handle_send(client, rdma, c, wc.status, wc.byte_len);
			/* A send slot freed up; release one waiter (pairs
			 * with down_interruptible() in rdma_request()) */
			up(&rdma->sq_sem);
			break;

		default:
			printk(KERN_ERR "9prdma: unexpected completion type, "
			       "c->wc_op=%d, wc.opcode=%d, status=%d\n",
			       c->wc_op, wc.opcode, wc.status);
			break;
		}
		kfree(c);
	}
}
static void cq_event_handler(struct ib_event *e, void *v)
{
	P9_DPRINTK(P9_DEBUG_ERROR, "CQ event %d context %p\n", e->event, v);
}
static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
{
	if (!rdma)
		return;

	if (rdma->dma_mr && !IS_ERR(rdma->dma_mr))
		ib_dereg_mr(rdma->dma_mr);

	if (rdma->qp && !IS_ERR(rdma->qp))
		ib_destroy_qp(rdma->qp);

	if (rdma->pd && !IS_ERR(rdma->pd))
		ib_dealloc_pd(rdma->pd);

	if (rdma->cq && !IS_ERR(rdma->cq))
		ib_destroy_cq(rdma->cq);

	if (rdma->cm_id && !IS_ERR(rdma->cm_id))
		rdma_destroy_id(rdma->cm_id);

	kfree(rdma);
}
static int
post_recv(struct p9_client *client, struct p9_rdma_context *c)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge sge;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->rc->sdata, client->msize,
				    DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
		goto error;

	sge.addr = c->busa;
	sge.length = client->msize;
	sge.lkey = rdma->lkey;

	wr.next = NULL;
	c->wc_op = IB_WC_RECV;
	wr.wr_id = (unsigned long) c;
	wr.sg_list = &sge;
	wr.num_sge = 1;
	return ib_post_recv(rdma->qp, &wr, &bad_wr);

 error:
	P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
	return -EIO;
}
static int rdma_request(struct p9_client *client, struct p9_req_t *req)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_send_wr wr, *bad_wr;
	struct ib_sge sge;
	int err = 0;
	unsigned long flags;
	struct p9_rdma_context *c = NULL;
	struct p9_rdma_context *rpl_context = NULL;

	/* Allocate an fcall for the reply */
	rpl_context = kmalloc(sizeof *rpl_context, GFP_KERNEL);
	if (!rpl_context) {
		err = -ENOMEM;
		goto err_close;
	}

	/*
	 * If the request has a buffer, steal it, otherwise
	 * allocate a new one. Typically, requests should already
	 * have receive buffers allocated and just swap them around
	 */
	if (!req->rc) {
		req->rc = kmalloc(sizeof(struct p9_fcall) + client->msize,
				  GFP_KERNEL);
		if (req->rc) {
			req->rc->sdata = (char *) req->rc +
						sizeof(struct p9_fcall);
			req->rc->capacity = client->msize;
		}
	}
	rpl_context->rc = req->rc;
	if (!rpl_context->rc) {
		err = -ENOMEM;
		kfree(rpl_context);
		goto err_close;
	}

	/*
	 * Post a receive buffer for this request. We need to ensure
	 * there is a reply buffer available for every outstanding
	 * request. A flushed request can result in no reply for an
	 * outstanding request, so we must keep a count to avoid
	 * overflowing the RQ.
	 */
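	/*
	 * Worked example (illustrative): with rq_depth == 32, a 33rd
	 * in-flight request would push rq_count to 33; the check below
	 * then skips post_recv() and immediately drops the count back
	 * to 32, so we never post more receive WRs than the RQ can hold.
	 */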
	if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) {
		err = post_recv(client, rpl_context);
		if (err) {
			kfree(rpl_context->rc);
			kfree(rpl_context);
			goto err_close;
		}
	} else
		atomic_dec(&rdma->rq_count);

	/* remove posted receive buffer from request structure */
	req->rc = NULL;

	/* Post the request */
	c = kmalloc(sizeof *c, GFP_KERNEL);
	if (!c) {
		err = -ENOMEM;
		goto err_close;
	}
	c->req = req;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->req->tc->sdata, c->req->tc->size,
				    DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
		goto error;

	sge.addr = c->busa;
	sge.length = c->req->tc->size;
	sge.lkey = rdma->lkey;

	wr.next = NULL;
	c->wc_op = IB_WC_SEND;
	wr.wr_id = (unsigned long) c;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;
	wr.sg_list = &sge;
	wr.num_sge = 1;

	/* Wait for an SQ slot; released by up(&rdma->sq_sem) when the
	 * send completion is reaped in cq_comp_handler() */
	if (down_interruptible(&rdma->sq_sem))
		goto error;

	return ib_post_send(rdma->qp, &wr, &bad_wr);

 error:
	P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
	return -EIO;

 err_close:
	spin_lock_irqsave(&rdma->req_lock, flags);
	if (rdma->state < P9_RDMA_CLOSING) {
		rdma->state = P9_RDMA_CLOSING;
		spin_unlock_irqrestore(&rdma->req_lock, flags);
		rdma_disconnect(rdma->cm_id);
	} else
		spin_unlock_irqrestore(&rdma->req_lock, flags);
	return err;
}
static void rdma_close(struct p9_client *client)
{
	struct p9_trans_rdma *rdma;

	if (!client)
		return;

	rdma = client->trans;
	if (!rdma)
		return;

	client->status = Disconnected;
	rdma_disconnect(rdma->cm_id);
	rdma_destroy_trans(rdma);
}
/**
 * alloc_rdma - Allocate and initialize the rdma transport structure
 * @opts: Mount options structure
 */
static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts)
{
	struct p9_trans_rdma *rdma;

	rdma = kzalloc(sizeof(struct p9_trans_rdma), GFP_KERNEL);
	if (!rdma)
		return NULL;

	rdma->sq_depth = opts->sq_depth;
	rdma->rq_depth = opts->rq_depth;
	rdma->timeout = opts->timeout;
	spin_lock_init(&rdma->req_lock);
	init_completion(&rdma->cm_done);
	sema_init(&rdma->sq_sem, rdma->sq_depth);
	atomic_set(&rdma->rq_count, 0);

	return rdma;
}
/* It's not clear to me we can do anything after a send has been posted */
static int rdma_cancel(struct p9_client *client, struct p9_req_t *req)
{
	return 1;
}
/**
 * rdma_create_trans - Transport method for creating a transport instance
 * @client: client instance
 * @addr: IP address string
 * @args: Mount options string
 */
static int
rdma_create_trans(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct p9_rdma_opts opts;
	struct p9_trans_rdma *rdma;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;
	struct ib_device_attr devattr;

	/* Parse the transport specific mount options */
	err = parse_opts(args, &opts);
	if (err < 0)
		return err;

	/* Create and initialize the RDMA transport structure */
	rdma = alloc_rdma(&opts);
	if (!rdma)
		return -ENOMEM;

	/* Create the RDMA CM ID */
	rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP);
	if (IS_ERR(rdma->cm_id))
		goto error;

	/* Associate the client with the transport */
	client->trans = rdma;

	/* Resolve the server's address */
	rdma->addr.sin_family = AF_INET;
	rdma->addr.sin_addr.s_addr = in_aton(addr);
	rdma->addr.sin_port = htons(opts.port);
	err = rdma_resolve_addr(rdma->cm_id, NULL,
				(struct sockaddr *)&rdma->addr,
				rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ADDR_RESOLVED))
		goto error;

	/* Resolve the route to the server */
	err = rdma_resolve_route(rdma->cm_id, rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED))
		goto error;

	/* Query the device attributes */
	err = ib_query_device(rdma->cm_id->device, &devattr);
	if (err)
		goto error;

	/* Create the Completion Queue */
	rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler,
				cq_event_handler, client,
				opts.sq_depth + opts.rq_depth + 1, 0);
	if (IS_ERR(rdma->cq))
		goto error;
	ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);

	/* Create the Protection Domain */
	rdma->pd = ib_alloc_pd(rdma->cm_id->device);
	if (IS_ERR(rdma->pd))
		goto error;

	/* Cache the DMA lkey in the transport */
	rdma->dma_mr = NULL;
	if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		rdma->lkey = rdma->cm_id->device->local_dma_lkey;
	else {
		rdma->dma_mr = ib_get_dma_mr(rdma->pd, IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(rdma->dma_mr))
			goto error;
		rdma->lkey = rdma->dma_mr->lkey;
	}

	/* Create the Queue Pair */
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = client;
	qp_attr.cap.max_send_wr = opts.sq_depth;
	qp_attr.cap.max_recv_wr = opts.rq_depth;
	qp_attr.cap.max_send_sge = P9_RDMA_SEND_SGE;
	qp_attr.cap.max_recv_sge = P9_RDMA_RECV_SGE;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = rdma->cq;
	qp_attr.recv_cq = rdma->cq;
	err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr);
	if (err)
		goto error;
	rdma->qp = rdma->cm_id->qp;

	/* Request a connection */
	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.private_data = NULL;
	conn_param.private_data_len = 0;
	conn_param.responder_resources = P9_RDMA_IRD;
	conn_param.initiator_depth = P9_RDMA_ORD;
	err = rdma_connect(rdma->cm_id, &conn_param);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_CONNECTED))
		goto error;

	client->status = Connected;

	return 0;

error:
	rdma_destroy_trans(rdma);
	return -ENOTCONN;
}
static struct p9_trans_module p9_rdma_trans = {
	.name = "rdma",
	.maxsize = P9_RDMA_MAXSIZE,
	.def = 0,
	.owner = THIS_MODULE,
	.create = rdma_create_trans,
	.close = rdma_close,
	.request = rdma_request,
	.cancel = rdma_cancel,
};
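
/*
 * Illustrative usage (server address and mount point are hypothetical):
 * this transport is selected at mount time by the .name above, e.g.
 *
 *	mount -t 9p -o trans=rdma,port=5640 192.168.1.1 /mnt/9p
 */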
/**
 * p9_trans_rdma_init - Register the 9P RDMA transport driver
 */
static int __init p9_trans_rdma_init(void)
{
	v9fs_register_trans(&p9_rdma_trans);
	return 0;
}

static void __exit p9_trans_rdma_exit(void)
{
	v9fs_unregister_trans(&p9_rdma_trans);
}
module_init(p9_trans_rdma_init);
module_exit(p9_trans_rdma_exit);

MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
MODULE_DESCRIPTION("RDMA Transport for 9P");
MODULE_LICENSE("Dual BSD/GPL");