/*
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct sockaddr *sa, int salen,
					int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void dto_tasklet_func(unsigned long data);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static void rq_cq_reap(struct svcxprt_rdma *xprt);
static void sq_cq_reap(struct svcxprt_rdma *xprt);

DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
static DEFINE_SPINLOCK(dto_lock);
static LIST_HEAD(dto_xprt_q);

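/*
 * One global tasklet drains completions for all svcrdma transports:
 * the completion handlers below only mark a transport pending and
 * link it on dto_xprt_q under dto_lock; dto_tasklet_func does the
 * actual CQ reaping.
 */
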
static struct svc_xprt_ops svc_rdma_ops = {
	.xpo_create = svc_rdma_create,
	.xpo_recvfrom = svc_rdma_recvfrom,
	.xpo_sendto = svc_rdma_sendto,
	.xpo_release_rqst = svc_rdma_release_rqst,
	.xpo_detach = svc_rdma_detach,
	.xpo_free = svc_rdma_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_has_wspace = svc_rdma_has_wspace,
	.xpo_accept = svc_rdma_accept,
};

struct svc_xprt_class svc_rdma_class = {
	.xcl_name = "rdma",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
};

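/*
 * Registration with the generic svc transport switch happens outside
 * this file; the module setup code in svc_rdma.c is expected to call
 * svc_reg_xprt_class(&svc_rdma_class).
 */
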
static int rdma_bump_context_cache(struct svcxprt_rdma *xprt)
{
	int target;
	int at_least_one = 0;
	struct svc_rdma_op_ctxt *ctxt;

	target = min(xprt->sc_ctxt_cnt + xprt->sc_ctxt_bump,
		     xprt->sc_ctxt_max);

	spin_lock_bh(&xprt->sc_ctxt_lock);
	while (xprt->sc_ctxt_cnt < target) {
		xprt->sc_ctxt_cnt++;
		spin_unlock_bh(&xprt->sc_ctxt_lock);

		ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);

		spin_lock_bh(&xprt->sc_ctxt_lock);
		if (ctxt) {
			at_least_one = 1;
			INIT_LIST_HEAD(&ctxt->free_list);
			list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
		} else {
			/* kmalloc failed...give up for now */
			xprt->sc_ctxt_cnt--;
			break;
		}
	}
	spin_unlock_bh(&xprt->sc_ctxt_lock);
	dprintk("svcrdma: sc_ctxt_max=%d, sc_ctxt_cnt=%d\n",
		xprt->sc_ctxt_max, xprt->sc_ctxt_cnt);
	return at_least_one;
}

struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt;

	while (1) {
		spin_lock_bh(&xprt->sc_ctxt_lock);
		if (unlikely(list_empty(&xprt->sc_ctxt_free))) {
			/* Try to bump my cache. */
			spin_unlock_bh(&xprt->sc_ctxt_lock);

			if (rdma_bump_context_cache(xprt))
				continue;

			printk(KERN_INFO "svcrdma: sleeping waiting for "
			       "context memory on xprt=%p\n",
			       xprt);
			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			continue;
		}
		ctxt = list_entry(xprt->sc_ctxt_free.next,
				  struct svc_rdma_op_ctxt,
				  free_list);
		list_del_init(&ctxt->free_list);
		spin_unlock_bh(&xprt->sc_ctxt_lock);
		ctxt->xprt = xprt;
		INIT_LIST_HEAD(&ctxt->dto_q);
		ctxt->count = 0;
		atomic_inc(&xprt->sc_ctxt_used);
		break;
	}
	return ctxt;
}

static void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	int i;
	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
		atomic_dec(&xprt->sc_dma_used);
		ib_dma_unmap_single(xprt->sc_cm_id->device,
				    ctxt->sge[i].addr,
				    ctxt->sge[i].length,
				    ctxt->direction);
	}
}

void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
	struct svcxprt_rdma *xprt;
	int i;

	xprt = ctxt->xprt;
	if (free_pages)
		for (i = 0; i < ctxt->count; i++)
			put_page(ctxt->pages[i]);

	spin_lock_bh(&xprt->sc_ctxt_lock);
	list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
	spin_unlock_bh(&xprt->sc_ctxt_lock);
	atomic_dec(&xprt->sc_ctxt_used);
}

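/*
 * Context-cache usage, as a minimal illustrative sketch (not lifted
 * from any caller in this file):
 *
 *	ctxt = svc_rdma_get_context(rdma_xprt);
 *	ctxt->direction = DMA_FROM_DEVICE;
 *	... fill ctxt->pages[] and ctxt->sge[], then post a WR ...
 *	svc_rdma_put_context(ctxt, 1);
 *
 * Passing free_pages=1 also releases ctxt->pages[]; passing 0 leaves
 * page ownership with the caller (see the RDMA_WRITE completions in
 * sq_cq_reap).
 */
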
/* Temporary NFS request map cache. Created in svc_rdma.c  */
extern struct kmem_cache *svc_rdma_map_cachep;

/*
 * Temporary NFS req mappings are shared across all transport
 * instances. These are short lived and should be bounded by the number
 * of concurrent server threads * depth of the SQ.
 */
struct svc_rdma_req_map *svc_rdma_get_req_map(void)
{
	struct svc_rdma_req_map *map;
	while (1) {
		map = kmem_cache_alloc(svc_rdma_map_cachep, GFP_KERNEL);
		if (map)
			break;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
	}
	map->count = 0;
	return map;
}

void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
{
	kmem_cache_free(svc_rdma_map_cachep, map);
}

/* ib_cq event handler */
static void cq_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;
	dprintk("svcrdma: received CQ event id=%d, context=%p\n",
		event->event, context);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
}

/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	switch (event->event) {
	/* These are considered benign events */
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_COMM_EST:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		dprintk("svcrdma: QP event %d received for QP=%p\n",
			event->event, event->element.qp);
		break;
	/* These are considered fatal events */
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_DEVICE_FATAL:
	default:
		dprintk("svcrdma: QP ERROR event %d received for QP=%p, "
			"closing transport\n",
			event->event, event->element.qp);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		break;
	}
}

/*
 * Data Transfer Operation Tasklet
 *
 * Walks a list of transports with I/O pending, removing entries as
 * they are added to the server's I/O pending list. Two bits indicate
 * if SQ, RQ, or both have I/O pending. The dto_lock is an irqsave
 * spinlock that serializes access to the transport list with the RQ
 * and SQ interrupt handlers.
 */
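/*
 * Note: dto_lock is dropped while each transport is reaped and
 * re-taken to fetch the next list entry, so the interrupt-time
 * handlers can keep queuing transports while the tasklet runs.
 */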
static void dto_tasklet_func(unsigned long data)
{
	struct svcxprt_rdma *xprt;
	unsigned long flags;

	spin_lock_irqsave(&dto_lock, flags);
	while (!list_empty(&dto_xprt_q)) {
		xprt = list_entry(dto_xprt_q.next,
				  struct svcxprt_rdma, sc_dto_q);
		list_del_init(&xprt->sc_dto_q);
		spin_unlock_irqrestore(&dto_lock, flags);

		rq_cq_reap(xprt);
		sq_cq_reap(xprt);

		svc_xprt_put(&xprt->sc_xprt);
		spin_lock_irqsave(&dto_lock, flags);
	}
	spin_unlock_irqrestore(&dto_lock, flags);
}

/*
 * Receive Queue Completion Handler
 *
 * Since an RQ completion handler is called on interrupt context, we
 * need to defer the handling of the I/O to a tasklet.
 */
static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an SQ
	 * completion.
	 */
	set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}

/*
 * rq_cq_reap - Process the RQ CQ.
 *
 * Take all completing WC off the CQE and enqueue the associated DTO
 * context on the dto_q for the transport.
 *
 * Note that caller must hold a transport reference.
 */
static void rq_cq_reap(struct svcxprt_rdma *xprt)
{
	int ret;
	struct ib_wc wc;
	struct svc_rdma_op_ctxt *ctxt = NULL;

	if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_rq_poll);

	while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		ctxt->wc_status = wc.status;
		ctxt->byte_len = wc.byte_len;
		svc_rdma_unmap_dma(ctxt);
		if (wc.status != IB_WC_SUCCESS) {
			/* Close the transport */
			dprintk("svcrdma: transport closing putting ctxt %p\n",
				ctxt);
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			svc_rdma_put_context(ctxt, 1);
			svc_xprt_put(&xprt->sc_xprt);
			continue;
		}
		spin_lock_bh(&xprt->sc_rq_dto_lock);
		list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
		spin_unlock_bh(&xprt->sc_rq_dto_lock);
		svc_xprt_put(&xprt->sc_xprt);
	}

	if (ctxt)
		atomic_inc(&rdma_stat_rq_prod);

	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
	/*
	 * If data arrived before established event,
	 * don't enqueue. This defers RPC I/O until the
	 * RDMA connection is complete.
	 */
	if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
		svc_xprt_enqueue(&xprt->sc_xprt);
}

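/*
 * Reference note: svc_rdma_post_recv() takes a transport reference
 * for each receive WR it posts; the loop above drops that reference
 * once per reaped completion, on both the success and error paths.
 */
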
/*
 * Send Queue Completion Handler - potentially called on interrupt context.
 *
 * Note that caller must hold a transport reference.
 */
static void sq_cq_reap(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct ib_wc wc;
	struct ib_cq *cq = xprt->sc_sq_cq;
	int ret;

	if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_sq_poll);
	while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		xprt = ctxt->xprt;

		svc_rdma_unmap_dma(ctxt);
		if (wc.status != IB_WC_SUCCESS)
			/* Close the transport */
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);

		/* Decrement used SQ WR count */
		atomic_dec(&xprt->sc_sq_count);
		wake_up(&xprt->sc_send_wait);

		switch (ctxt->wr_op) {
		case IB_WR_SEND:
			svc_rdma_put_context(ctxt, 1);
			break;

		case IB_WR_RDMA_WRITE:
			svc_rdma_put_context(ctxt, 0);
			break;

		case IB_WR_RDMA_READ:
			if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
				struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
				BUG_ON(!read_hdr);
				set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
				spin_lock_bh(&xprt->sc_read_complete_lock);
				list_add_tail(&read_hdr->dto_q,
					      &xprt->sc_read_complete_q);
				spin_unlock_bh(&xprt->sc_read_complete_lock);
				svc_xprt_enqueue(&xprt->sc_xprt);
			}
			svc_rdma_put_context(ctxt, 0);
			break;

		default:
			printk(KERN_ERR "svcrdma: unexpected completion type, "
			       "opcode=%d, status=%d\n",
			       wc.opcode, wc.status);
			break;
		}
		svc_xprt_put(&xprt->sc_xprt);
	}

	if (ctxt)
		atomic_inc(&rdma_stat_sq_prod);
}

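/*
 * Completion dispatch summary: SEND completions free the context and
 * its pages; RDMA_WRITE completions free only the context (the pages
 * are owned by the reply path); for RDMA_READ, only the context
 * marked RDMACTXT_F_LAST_CTXT hands the deferred request back to the
 * server via sc_read_complete_q.
 */
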
static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an RQ
	 * completion.
	 */
	set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}

static void create_context_cache(struct svcxprt_rdma *xprt,
				 int ctxt_count, int ctxt_bump, int ctxt_max)
{
	struct svc_rdma_op_ctxt *ctxt;
	int i;

	xprt->sc_ctxt_max = ctxt_max;
	xprt->sc_ctxt_bump = ctxt_bump;
	xprt->sc_ctxt_cnt = 0;
	atomic_set(&xprt->sc_ctxt_used, 0);

	INIT_LIST_HEAD(&xprt->sc_ctxt_free);
	for (i = 0; i < ctxt_count; i++) {
		ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
		if (ctxt) {
			INIT_LIST_HEAD(&ctxt->free_list);
			list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
			xprt->sc_ctxt_cnt++;
		}
	}
}

static void destroy_context_cache(struct svcxprt_rdma *xprt)
{
	while (!list_empty(&xprt->sc_ctxt_free)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_entry(xprt->sc_ctxt_free.next,
				  struct svc_rdma_op_ctxt,
				  free_list);
		list_del_init(&ctxt->free_list);
		kfree(ctxt);
	}
}

static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
					     int listener)
{
	struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

	if (!cma_xprt)
		return NULL;
	svc_xprt_init(&svc_rdma_class, &cma_xprt->sc_xprt, serv);
	INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
	INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
	init_waitqueue_head(&cma_xprt->sc_send_wait);

	spin_lock_init(&cma_xprt->sc_lock);
	spin_lock_init(&cma_xprt->sc_read_complete_lock);
	spin_lock_init(&cma_xprt->sc_ctxt_lock);
	spin_lock_init(&cma_xprt->sc_rq_dto_lock);

	cma_xprt->sc_ord = svcrdma_ord;

	cma_xprt->sc_max_req_size = svcrdma_max_req_size;
	cma_xprt->sc_max_requests = svcrdma_max_requests;
	cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
	atomic_set(&cma_xprt->sc_sq_count, 0);
	atomic_set(&cma_xprt->sc_ctxt_used, 0);

	if (!listener) {
		int reqs = cma_xprt->sc_max_requests;
		create_context_cache(cma_xprt,
				     reqs << 1, /* starting size */
				     reqs,	/* bump amount */
				     reqs +
				     cma_xprt->sc_sq_depth +
				     RPCRDMA_MAX_THREADS + 1); /* max */
		if (list_empty(&cma_xprt->sc_ctxt_free)) {
			kfree(cma_xprt);
			return NULL;
		}
		clear_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);
	} else
		set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);

	return cma_xprt;
}

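/*
 * Only connected (non-listener) transports get a context cache above:
 * a listener never posts WRs itself, it only hands new cma_ids to
 * handle_connect_req() below.
 */
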
struct page *svc_rdma_get_page(void)
{
	struct page *page;

	while ((page = alloc_page(GFP_KERNEL)) == NULL) {
		/* If we can't get memory, wait a bit and try again */
		printk(KERN_INFO "svcrdma: out of memory...retrying in 1000 "
		       "jiffies.\n");
		schedule_timeout_uninterruptible(msecs_to_jiffies(1000));
	}
	return page;
}

int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct svc_rdma_op_ctxt *ctxt;
	struct page *page;
	unsigned long pa;
	int sge_no;
	int buflen;
	int ret;

	ctxt = svc_rdma_get_context(xprt);
	buflen = 0;
	ctxt->direction = DMA_FROM_DEVICE;
	for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
		BUG_ON(sge_no >= xprt->sc_max_sge);
		page = svc_rdma_get_page();
		ctxt->pages[sge_no] = page;
		atomic_inc(&xprt->sc_dma_used);
		pa = ib_dma_map_page(xprt->sc_cm_id->device,
				     page, 0, PAGE_SIZE,
				     DMA_FROM_DEVICE);
		ctxt->sge[sge_no].addr = pa;
		ctxt->sge[sge_no].length = PAGE_SIZE;
		ctxt->sge[sge_no].lkey = xprt->sc_phys_mr->lkey;
		buflen += PAGE_SIZE;
	}
	ctxt->count = sge_no;
	recv_wr.next = NULL;
	recv_wr.sg_list = &ctxt->sge[0];
	recv_wr.num_sge = ctxt->count;
	recv_wr.wr_id = (u64)(unsigned long)ctxt;

	svc_xprt_get(&xprt->sc_xprt);
	ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
	if (ret) {
		svc_xprt_put(&xprt->sc_xprt);
		svc_rdma_put_context(ctxt, 1);
	}
	return ret;
}

/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listening xprt. When the listen thread is kicked,
 * it will call the recvfrom method on the listen xprt which will accept the
 * new connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird)
{
	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
	struct svcxprt_rdma *newxprt;
	struct sockaddr *sa;

	/* Create a new transport */
	newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
	if (!newxprt) {
		dprintk("svcrdma: failed to create new transport\n");
		return;
	}
	newxprt->sc_cm_id = new_cma_id;
	new_cma_id->context = newxprt;
	dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
		newxprt, newxprt->sc_cm_id, listen_xprt);

	/* Save client advertised inbound read limit for use later in accept. */
	newxprt->sc_ord = client_ird;

	/* Set the local and remote addresses in the transport */
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

	/*
	 * Enqueue the new transport on the accept queue of the listening
	 * transport
	 */
	spin_lock_bh(&listen_xprt->sc_lock);
	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
	spin_unlock_bh(&listen_xprt->sc_lock);

	/*
	 * Can't use svc_xprt_received here because we are not on a
	 * rqstp thread.
	 */
	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/*
 * Handles events generated on the listening endpoint. These events
 * will either be incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
			       struct rdma_cm_event *event)
{
	struct svcxprt_rdma *xprt = cma_id->context;
	int ret = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
			"event=%d\n", cma_id, cma_id->context, event->event);
		handle_connect_req(cma_id,
				   event->param.conn.responder_resources);
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		break;

	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt)
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		break;

	default:
		dprintk("svcrdma: Unexpected event on listening endpoint %p, "
			"event=%d\n", cma_id, event->event);
		break;
	}

	return ret;
}

static int rdma_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct svc_xprt *xprt = cma_id->context;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		svc_xprt_get(xprt);
		dprintk("svcrdma: Connection completed on DTO xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
		svc_xprt_enqueue(xprt);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
			"event=%d\n", cma_id, xprt, event->event);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
		}
		break;
	default:
		dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
			"event=%d\n", cma_id, event->event);
		break;
	}
	return 0;
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct sockaddr *sa, int salen,
					int flags)
{
	struct rdma_cm_id *listen_id;
	struct svcxprt_rdma *cma_xprt;
	struct svc_xprt *xprt;
	int ret;

	dprintk("svcrdma: Creating RDMA socket\n");

	cma_xprt = rdma_create_xprt(serv, 1);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);
	xprt = &cma_xprt->sc_xprt;

	listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP);
	if (IS_ERR(listen_id)) {
		ret = PTR_ERR(listen_id);
		dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
		goto err0;
	}

	ret = rdma_bind_addr(listen_id, sa);
	if (ret) {
		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
		goto err1;
	}
	cma_xprt->sc_cm_id = listen_id;

	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
	if (ret) {
		dprintk("svcrdma: rdma_listen failed = %d\n", ret);
		goto err1;
	}

	/*
	 * We need to use the address from the cm_id in case the
	 * caller specified 0 for the port number.
	 */
	sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

	return &cma_xprt->sc_xprt;

 err1:
	rdma_destroy_id(listen_id);
 err0:
	kfree(cma_xprt);
	return ERR_PTR(ret);
}

/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *listen_rdma;
	struct svcxprt_rdma *newxprt = NULL;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;
	struct ib_device_attr devattr;
	int ret;
	int i;

	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
	clear_bit(XPT_CONN, &xprt->xpt_flags);
	/* Get the next entry off the accept list */
	spin_lock_bh(&listen_rdma->sc_lock);
	if (!list_empty(&listen_rdma->sc_accept_q)) {
		newxprt = list_entry(listen_rdma->sc_accept_q.next,
				     struct svcxprt_rdma, sc_accept_q);
		list_del_init(&newxprt->sc_accept_q);
	}
	if (!list_empty(&listen_rdma->sc_accept_q))
		set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
	spin_unlock_bh(&listen_rdma->sc_lock);
	if (!newxprt)
		return NULL;

	dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
		newxprt, newxprt->sc_cm_id);

	ret = ib_query_device(newxprt->sc_cm_id->device, &devattr);
	if (ret) {
		dprintk("svcrdma: could not query device attributes on "
			"device %p, rc=%d\n", newxprt->sc_cm_id->device, ret);
		goto errout;
	}

	/* Qualify the transport resource defaults with the
	 * capabilities of this particular device */
	newxprt->sc_max_sge = min((size_t)devattr.max_sge,
				  (size_t)RPCSVC_MAXPAGES);
	newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
				       (size_t)svcrdma_max_requests);
	newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;

	/*
	 * Limit ORD based on client limit, local device limit, and
	 * configured svcrdma limit.
	 */
	newxprt->sc_ord = min_t(size_t, devattr.max_qp_rd_atom, newxprt->sc_ord);
	newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);

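	/*
	 * At this point sc_ord bounds the RDMA Reads this server may
	 * issue to the client: it started as the client's advertised
	 * responder_resources (saved in handle_connect_req) and is
	 * clamped by the device's max_qp_rd_atom and the svcrdma_ord
	 * setting. It is passed as initiator_depth to rdma_accept()
	 * below.
	 */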
	newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device);
	if (IS_ERR(newxprt->sc_pd)) {
		dprintk("svcrdma: error creating PD for connect request\n");
		goto errout;
	}
	newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
					 sq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 newxprt->sc_sq_depth,
					 0);
	if (IS_ERR(newxprt->sc_sq_cq)) {
		dprintk("svcrdma: error creating SQ CQ for connect request\n");
		goto errout;
	}
	newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
					 rq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 newxprt->sc_max_requests,
					 0);
	if (IS_ERR(newxprt->sc_rq_cq)) {
		dprintk("svcrdma: error creating RQ CQ for connect request\n");
		goto errout;
	}

	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = &newxprt->sc_xprt;
	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
	qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
	qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
	qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = newxprt->sc_sq_cq;
	qp_attr.recv_cq = newxprt->sc_rq_cq;
	dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
		"    cm_id->device=%p, sc_pd->device=%p\n"
		"    cap.max_send_wr = %d\n"
		"    cap.max_recv_wr = %d\n"
		"    cap.max_send_sge = %d\n"
		"    cap.max_recv_sge = %d\n",
		newxprt->sc_cm_id, newxprt->sc_pd,
		newxprt->sc_cm_id->device, newxprt->sc_pd->device,
		qp_attr.cap.max_send_wr,
		qp_attr.cap.max_recv_wr,
		qp_attr.cap.max_send_sge,
		qp_attr.cap.max_recv_sge);

	ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
	if (ret) {
		/*
		 * XXX: This is a hack. We need a xx_request_qp interface
		 * that will adjust the qp_attr's with a best-effort
		 * number
		 */
		qp_attr.cap.max_send_sge -= 2;
		qp_attr.cap.max_recv_sge -= 2;
		ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd,
				     &qp_attr);
		if (ret) {
			dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
			goto errout;
		}
		newxprt->sc_max_sge = qp_attr.cap.max_send_sge;
		newxprt->sc_max_sge = qp_attr.cap.max_recv_sge;
		newxprt->sc_sq_depth = qp_attr.cap.max_send_wr;
		newxprt->sc_max_requests = qp_attr.cap.max_recv_wr;
	}
	newxprt->sc_qp = newxprt->sc_cm_id->qp;

	/* Register all of physical memory */
	newxprt->sc_phys_mr = ib_get_dma_mr(newxprt->sc_pd,
					    IB_ACCESS_LOCAL_WRITE |
					    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(newxprt->sc_phys_mr)) {
		dprintk("svcrdma: Failed to create DMA MR ret=%d\n", ret);
		goto errout;
	}

	/* Post receive buffers */
	for (i = 0; i < newxprt->sc_max_requests; i++) {
		ret = svc_rdma_post_recv(newxprt);
		if (ret) {
			dprintk("svcrdma: failure posting receive buffers\n");
			goto errout;
		}
	}

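	/*
	 * A receive WR now backs each of the sc_max_requests credits,
	 * so the client's first RPC cannot arrive before a receive
	 * buffer is in place.
	 */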
	/* Swap out the handler */
	newxprt->sc_cm_id->event_handler = rdma_cma_handler;

	/*
	 * Arm the CQs for the SQ and RQ before accepting so we can't
	 * miss the first message
	 */
	ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);

	/* Accept Connection */
	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 0;
	conn_param.initiator_depth = newxprt->sc_ord;
	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
	if (ret) {
		dprintk("svcrdma: failed to accept new connection, ret=%d\n",
			ret);
		goto errout;
	}

	dprintk("svcrdma: new connection %p accepted with the following "
		"attributes:\n"
		"    local_ip        : %d.%d.%d.%d\n"
		"    local_port      : %d\n"
		"    remote_ip       : %d.%d.%d.%d\n"
		"    remote_port     : %d\n"
		"    max_sge         : %d\n"
		"    sq_depth        : %d\n"
		"    max_requests    : %d\n"
		"    ord             : %d\n",
		newxprt,
		NIPQUAD(((struct sockaddr_in *)&newxprt->sc_cm_id->
			 route.addr.src_addr)->sin_addr.s_addr),
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.src_addr)->sin_port),
		NIPQUAD(((struct sockaddr_in *)&newxprt->sc_cm_id->
			 route.addr.dst_addr)->sin_addr.s_addr),
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.dst_addr)->sin_port),
		newxprt->sc_max_sge,
		newxprt->sc_sq_depth,
		newxprt->sc_max_requests,
		newxprt->sc_ord);

	return &newxprt->sc_xprt;

 errout:
	dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
	/* Take a reference in case the DTO handler runs */
	svc_xprt_get(&newxprt->sc_xprt);
	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
		ib_destroy_qp(newxprt->sc_qp);
	rdma_destroy_id(newxprt->sc_cm_id);
	/* This call to put will destroy the transport */
	svc_xprt_put(&newxprt->sc_xprt);
	return NULL;
}

static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum, one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	dprintk("svc: svc_rdma_detach(%p)\n", xprt);

	/* Disconnect and flush posted WQE */
	rdma_disconnect(rdma->sc_cm_id);
}

static void __svc_rdma_free(struct work_struct *work)
{
	struct svcxprt_rdma *rdma =
		container_of(work, struct svcxprt_rdma, sc_work);
	dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);

	/* We should only be called from kref_put */
	BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);

	/*
	 * Destroy queued, but not processed read completions. Note
	 * that this cleanup has to be done before destroying the
	 * cm_id because the device ptr is needed to unmap the dma in
	 * svc_rdma_put_context.
	 */
	while (!list_empty(&rdma->sc_read_complete_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_entry(rdma->sc_read_complete_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Destroy queued, but not processed recv completions */
	while (!list_empty(&rdma->sc_rq_dto_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_entry(rdma->sc_rq_dto_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Warn if we leaked a resource or under-referenced */
	WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
	WARN_ON(atomic_read(&rdma->sc_dma_used) != 0);

	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_destroy_qp(rdma->sc_qp);

	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
		ib_destroy_cq(rdma->sc_sq_cq);

	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
		ib_destroy_cq(rdma->sc_rq_cq);

	if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
		ib_dereg_mr(rdma->sc_phys_mr);

	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
		ib_dealloc_pd(rdma->sc_pd);

	/* Destroy the CM ID */
	rdma_destroy_id(rdma->sc_cm_id);

	destroy_context_cache(rdma);
	kfree(rdma);
}

static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
	schedule_work(&rdma->sc_work);
}

static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/*
	 * If there are fewer SQ WR available than required to send a
	 * simple response, return false.
	 */
	if ((rdma->sc_sq_depth - atomic_read(&rdma->sc_sq_count) < 3))
		return 0;

	/*
	 * ...or there are already waiters on the SQ,
	 * return false.
	 */
	if (waitqueue_active(&rdma->sc_send_wait))
		return 0;

	/* Otherwise return true. */
	return 1;
}

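/*
 * The headroom of 3 WRs in the check above is a heuristic: enough
 * send-queue slots for the few WRs a simple reply consumes. Callers
 * needing more may still block in svc_rdma_send() below.
 */
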
int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr;
	int ret;

	if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
		return -ENOTCONN;

	BUG_ON(wr->send_flags != IB_SEND_SIGNALED);
	BUG_ON(((struct svc_rdma_op_ctxt *)(unsigned long)wr->wr_id)->wr_op !=
	       wr->opcode);
	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		spin_lock_bh(&xprt->sc_lock);
		if (xprt->sc_sq_depth == atomic_read(&xprt->sc_sq_count)) {
			spin_unlock_bh(&xprt->sc_lock);
			atomic_inc(&rdma_stat_sq_starve);

			/* See if we can opportunistically reap SQ WR to make room */
			sq_cq_reap(xprt);

			/* Wait until SQ WR available if SQ still full */
			wait_event(xprt->sc_send_wait,
				   atomic_read(&xprt->sc_sq_count) <
				   xprt->sc_sq_depth);
			if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
				return 0;
			continue;
		}
		/* Bump used SQ WR count and post */
		svc_xprt_get(&xprt->sc_xprt);
		ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
		if (!ret)
			atomic_inc(&xprt->sc_sq_count);
		else {
			svc_xprt_put(&xprt->sc_xprt);
			dprintk("svcrdma: failed to post SQ WR rc=%d, "
				"sc_sq_count=%d, sc_sq_depth=%d\n",
				ret, atomic_read(&xprt->sc_sq_count),
				xprt->sc_sq_depth);
		}
		spin_unlock_bh(&xprt->sc_lock);
		break;
	}
	return ret;
}

void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
			 enum rpcrdma_errcode err)
{
	struct ib_send_wr err_wr;
	struct ib_sge sge;
	struct page *p;
	struct svc_rdma_op_ctxt *ctxt;
	u32 *va;
	int length;
	int ret;

	p = svc_rdma_get_page();
	va = page_address(p);

	/* XDR encode error */
	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);

	/* Prepare SGE for local address */
	atomic_inc(&xprt->sc_dma_used);
	sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
				   p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	sge.lkey = xprt->sc_phys_mr->lkey;
	sge.length = length;

	ctxt = svc_rdma_get_context(xprt);
	ctxt->count = 1;
	ctxt->pages[0] = p;

	/* Prepare SEND WR */
	memset(&err_wr, 0, sizeof err_wr);
	ctxt->wr_op = IB_WR_SEND;
	err_wr.wr_id = (unsigned long)ctxt;
	err_wr.sg_list = &sge;
	err_wr.num_sge = 1;
	err_wr.opcode = IB_WR_SEND;
	err_wr.send_flags = IB_SEND_SIGNALED;

	/* Post It */
	ret = svc_rdma_send(xprt, &err_wr);
	if (ret) {
		dprintk("svcrdma: Error %d posting send for protocol error\n",
			ret);
		svc_rdma_put_context(ctxt, 1);
	}
}