/*
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

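/*
 * Overview of the receive path implemented in this file:
 *
 * - rdma_build_arg_xdr() swaps the pages from the RDMA_RECV completion
 *   into rqstp->rq_pages and points the XDR argument buffer at them.
 * - If the RPC/RDMA header carries a read chunk list, rdma_read_xdr()
 *   maps the local receive pages into SGEs and posts RDMA_READ work
 *   requests to pull the chunk data from the client; the request is
 *   deferred until those reads complete.
 * - When the last read completes, the transport is enqueued again and
 *   svc_rdma_recvfrom() finishes the request via rdma_read_complete().
 */
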
/*
 * Replace the pages in the rq_argpages array with the pages from the SGE in
 * the RDMA_RECV completion. The SGL should contain full pages up until the
 * last one.
 */
static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
			       struct svc_rdma_op_ctxt *ctxt,
			       u32 byte_count)
{
	struct page *page;
	int sge_no;
	u32 bc;

	/* Swap the page in the SGE with the page in argpages */
	page = ctxt->pages[0];
	put_page(rqstp->rq_pages[0]);
	rqstp->rq_pages[0] = page;

	/* Set up the XDR head */
	rqstp->rq_arg.head[0].iov_base = page_address(page);
	rqstp->rq_arg.head[0].iov_len = min(byte_count, ctxt->sge[0].length);
	rqstp->rq_arg.len = byte_count;
	rqstp->rq_arg.buflen = byte_count;

	/* Compute bytes past head in the SGL */
	bc = byte_count - rqstp->rq_arg.head[0].iov_len;

	/* If data remains, store it in the pagelist */
	rqstp->rq_arg.page_len = bc;
	rqstp->rq_arg.page_base = 0;
	rqstp->rq_arg.pages = &rqstp->rq_pages[1];
	sge_no = 1;
	while (bc && sge_no < ctxt->count) {
		page = ctxt->pages[sge_no];
		put_page(rqstp->rq_pages[sge_no]);
		rqstp->rq_pages[sge_no] = page;
		bc -= min(bc, ctxt->sge[sge_no].length);
		rqstp->rq_arg.buflen += ctxt->sge[sge_no].length;
		sge_no++;
	}
	rqstp->rq_respages = &rqstp->rq_pages[sge_no];

	/* We should never run out of SGE because the limit is defined to
	 * support the max allowed RPC data length
	 */
	BUG_ON(bc && (sge_no == ctxt->count));
	BUG_ON((rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len)
	       != byte_count);
	BUG_ON(rqstp->rq_arg.len != byte_count);

	/* If not all pages were used from the SGL, free the remaining ones */
	bc = sge_no;
	while (sge_no < ctxt->count) {
		page = ctxt->pages[sge_no++];
		put_page(page);
	}
	ctxt->count = bc;

	/* Set up tail */
	rqstp->rq_arg.tail[0].iov_base = NULL;
	rqstp->rq_arg.tail[0].iov_len = 0;
}

struct chunk_sge {
	int start;		/* sge no for this chunk */
	int count;		/* sge count for this chunk */
};

/* Encode a read-chunk-list as an array of IB SGE
 *
 * Assumptions:
 * - chunk[0]->position points to pages[0] at an offset of 0
 * - pages[] is not physically or virtually contiguous and consists of
 *   PAGE_SIZE elements.
 *
 * Output:
 * - sge array pointing into pages[] array.
 * - chunk_sge array specifying sge index and count for each
 *   chunk in the read list
 */
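/*
 * Example (assuming 4KB pages): a single 10000-byte read chunk at
 * position 0 is split at page boundaries into three SGEs of 4096, 4096
 * and 1808 bytes; ch_sge_ary[0] then records start = 0, count = 3 so
 * that the caller knows which SGEs belong to that chunk.
 */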
static int rdma_rcl_to_sge(struct svcxprt_rdma *xprt,
			   struct svc_rqst *rqstp,
			   struct svc_rdma_op_ctxt *head,
			   struct rpcrdma_msg *rmsgp,
			   struct ib_sge *sge,
			   struct chunk_sge *ch_sge_ary,
			   int ch_count,
			   int byte_count)
{
	struct rpcrdma_read_chunk *ch;
	int sge_no, sge_bytes, page_off, page_no, ch_bytes, ch_no;

	sge_no = page_no = page_off = ch_no = 0;
	ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
	ch_bytes = ch->rc_target.rs_length;
	head->arg.head[0] = rqstp->rq_arg.head[0];
	head->arg.tail[0] = rqstp->rq_arg.tail[0];
	head->arg.pages = &head->pages[head->count];
	head->sge[0].length = head->count; /* save count of hdr pages */
	head->arg.page_base = 0;
	head->arg.page_len = ch_bytes;
	head->arg.len = rqstp->rq_arg.len + ch_bytes;
	head->arg.buflen = rqstp->rq_arg.buflen + ch_bytes;
	head->count++;
	ch_sge_ary[0].start = 0;
	while (byte_count) {
		sge_bytes = min_t(int, PAGE_SIZE-page_off, ch_bytes);
		sge[sge_no].addr =
			ib_dma_map_page(xprt->sc_cm_id->device,
					rqstp->rq_arg.pages[page_no],
					page_off, sge_bytes, DMA_FROM_DEVICE);
		sge[sge_no].length = sge_bytes;
		sge[sge_no].lkey = xprt->sc_phys_mr->lkey;
		/* Don't bump head->count here because the same page
		 * may be used by multiple SGE.
		 */
		head->arg.pages[page_no] = rqstp->rq_arg.pages[page_no];
		rqstp->rq_respages = &rqstp->rq_arg.pages[page_no+1];

		byte_count -= sge_bytes;
		ch_bytes -= sge_bytes;
		sge_no++;
		/* If all bytes for this chunk have been mapped to an
		 * SGE, move to the next SGE
		 */
		if (ch_bytes == 0) {
			ch_sge_ary[ch_no].count =
				sge_no - ch_sge_ary[ch_no].start;
			ch_no++;
			ch++;
			ch_sge_ary[ch_no].start = sge_no;
			ch_bytes = ch->rc_target.rs_length;
			/* If bytes remaining account for next chunk */
			if (byte_count) {
				head->arg.page_len += ch_bytes;
				head->arg.len += ch_bytes;
				head->arg.buflen += ch_bytes;
			}
		}
		/* If this SGE consumed all of the page, move to the
		 * next page
		 */
		if ((sge_bytes + page_off) == PAGE_SIZE) {
			page_no++;
			page_off = 0;
			/* If there are still bytes left to map, bump
			 * the page count
			 */
			if (byte_count)
				head->count++;
		} else
			page_off += sge_bytes;
	}
	BUG_ON(byte_count != 0);
	return sge_no;
}

static void rdma_set_ctxt_sge(struct svc_rdma_op_ctxt *ctxt,
			      struct ib_sge *sge, u64 *sgl_offset, int count)
{
	int i;

	ctxt->count = count;
	for (i = 0; i < count; i++) {
		ctxt->sge[i].addr = sge[i].addr;
		ctxt->sge[i].length = sge[i].length;
		*sgl_offset = *sgl_offset + sge[i].length;
	}
}

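/*
 * Limit the number of SGEs used for a single RDMA_READ. On iWARP
 * transports the read is restricted to one SGE (presumably because some
 * iWARP adapters cannot handle multi-SGE RDMA_READ work requests);
 * otherwise the transport's sc_max_sge caps the count.
 */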
static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count)
{
	if ((RDMA_TRANSPORT_IWARP ==
	     rdma_node_get_transport(xprt->sc_cm_id->device->node_type))
	    && sge_count > 1)
		return 1;
	else
		return min_t(int, sge_count, xprt->sc_max_sge);
}

/*
 * Use RDMA_READ to read data from the advertised client buffer into the
 * XDR stream starting at rq_arg.head[0].iov_base.
 * Each chunk in the array contains the following fields:
 * discrim  - '1', This isn't used for data placement
 * position - The xdr stream offset (the same for every chunk)
 * handle   - RMR for client memory region
 * length   - data transfer length
 * offset   - 64 bit tagged offset in remote memory region
 *
 * On our side, we need to read into a pagelist. The first page immediately
 * follows the RPC header.
 *
 * This function returns:
 * 0 - No error and no read-list found.
 *
 * 1 - Successful read-list processing. The data is not yet in
 * the pagelist and therefore the RPC request must be deferred. The
 * I/O completion will enqueue the transport again and
 * svc_rdma_recvfrom will complete the request.
 *
 * <0 - Error processing/posting read-list.
 *
 * NOTE: The ctxt must not be touched after the last WR has been posted
 * because the I/O completion processing may occur on another
 * processor and free / modify the context. Ne touche pas!
 */
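/*
 * The contexts for the posted RDMA_READs are chained through ctxt->next.
 * On the last read the chain is closed into a circle: the final read
 * context points at hdr_ctxt, and hdr_ctxt->next points back at the head
 * context. rdma_read_complete() later walks and frees this list when the
 * deferred request is resumed.
 */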
static int rdma_read_xdr(struct svcxprt_rdma *xprt,
			 struct rpcrdma_msg *rmsgp,
			 struct svc_rqst *rqstp,
			 struct svc_rdma_op_ctxt *hdr_ctxt)
{
	struct ib_send_wr read_wr;
	int err = 0;
	int ch_no;
	struct ib_sge *sge;
	int ch_count;
	int byte_count;
	int sge_count;
	u64 sgl_offset;
	struct rpcrdma_read_chunk *ch;
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct svc_rdma_op_ctxt *head;
	struct svc_rdma_op_ctxt *tmp_sge_ctxt;
	struct svc_rdma_op_ctxt *tmp_ch_ctxt;
	struct chunk_sge *ch_sge_ary;

	/* If no read list is present, return 0 */
	ch = svc_rdma_get_read_chunk(rmsgp);
	if (!ch)
		return 0;

	/* Allocate temporary contexts to keep SGE */
	BUG_ON(sizeof(struct ib_sge) < sizeof(struct chunk_sge));
	tmp_sge_ctxt = svc_rdma_get_context(xprt);
	sge = tmp_sge_ctxt->sge;
	tmp_ch_ctxt = svc_rdma_get_context(xprt);
	ch_sge_ary = (struct chunk_sge *)tmp_ch_ctxt->sge;

	svc_rdma_rcl_chunk_counts(ch, &ch_count, &byte_count);
	sge_count = rdma_rcl_to_sge(xprt, rqstp, hdr_ctxt, rmsgp,
				    sge, ch_sge_ary,
				    ch_count, byte_count);
	head = svc_rdma_get_context(xprt);
	sgl_offset = 0;
	ch_no = 0;

	for (ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
	     ch->rc_discrim != 0; ch++, ch_no++) {
next_sge:
		if (!ctxt)
			ctxt = head;
		else {
			ctxt->next = svc_rdma_get_context(xprt);
			ctxt = ctxt->next;
		}
		ctxt->next = NULL;
		ctxt->direction = DMA_FROM_DEVICE;
		clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);

		/* Prepare READ WR */
		memset(&read_wr, 0, sizeof read_wr);
		ctxt->wr_op = IB_WR_RDMA_READ;
		read_wr.wr_id = (unsigned long)ctxt;
		read_wr.opcode = IB_WR_RDMA_READ;
		read_wr.send_flags = IB_SEND_SIGNALED;
		read_wr.wr.rdma.rkey = ch->rc_target.rs_handle;
		read_wr.wr.rdma.remote_addr =
			get_unaligned(&(ch->rc_target.rs_offset)) +
			sgl_offset;
		read_wr.sg_list = &sge[ch_sge_ary[ch_no].start];
		read_wr.num_sge =
			rdma_read_max_sge(xprt, ch_sge_ary[ch_no].count);
		rdma_set_ctxt_sge(ctxt, &sge[ch_sge_ary[ch_no].start],
				  &sgl_offset, read_wr.num_sge);

		if (((ch+1)->rc_discrim == 0) &&
		    (read_wr.num_sge == ch_sge_ary[ch_no].count)) {
			/*
			 * Mark the last RDMA_READ with a bit to
			 * indicate all RPC data has been fetched from
			 * the client and the RPC needs to be enqueued.
			 */
			set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
			ctxt->next = hdr_ctxt;
			hdr_ctxt->next = head;
		}

		/* Post the read */
		err = svc_rdma_send(xprt, &read_wr);
		if (err) {
			printk(KERN_ERR "svcrdma: Error posting send = %d\n",
			       err);
			/*
			 * Break the circular list so free knows when
			 * to stop if the error happened to occur on
			 * the last read
			 */
			ctxt->next = NULL;
			goto out;
		}
		atomic_inc(&rdma_stat_read);

		if (read_wr.num_sge < ch_sge_ary[ch_no].count) {
			ch_sge_ary[ch_no].count -= read_wr.num_sge;
			ch_sge_ary[ch_no].start += read_wr.num_sge;
			goto next_sge;
		}
		sgl_offset = 0;
		err = 0;
	}

 out:
	svc_rdma_put_context(tmp_sge_ctxt, 0);
	svc_rdma_put_context(tmp_ch_ctxt, 0);

	/* Detach arg pages. svc_recv will replenish them */
	for (ch_no = 0; &rqstp->rq_pages[ch_no] < rqstp->rq_respages; ch_no++)
		rqstp->rq_pages[ch_no] = NULL;

	/*
	 * Detach res pages. svc_release must see a resused count of
	 * zero or it will attempt to put them.
	 */
	while (rqstp->rq_resused)
		rqstp->rq_respages[--rqstp->rq_resused] = NULL;

	if (err) {
		printk(KERN_ERR "svcrdma : RDMA_READ error = %d\n", err);
		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		/* Free the linked list of read contexts */
		while (head != NULL) {
			ctxt = head->next;
			svc_rdma_put_context(head, 1);
			head = ctxt;
		}
		return 0;
	}

	return 1;
}

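/*
 * Finish a request that was deferred while RDMA_READs were in flight:
 * move the pages holding the read data from the saved head context into
 * rqstp->rq_pages, rebuild rq_arg around them, free the chain of read
 * contexts, and return the total number of bytes in the RPC argument.
 */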
static int rdma_read_complete(struct svc_rqst *rqstp,
			      struct svc_rdma_op_ctxt *data)
{
	struct svc_rdma_op_ctxt *head = data->next;
	int page_no;
	int ret;

	BUG_ON(!head);

	/* Copy RPC pages */
	for (page_no = 0; page_no < head->count; page_no++) {
		put_page(rqstp->rq_pages[page_no]);
		rqstp->rq_pages[page_no] = head->pages[page_no];
	}
	/* Point rq_arg.pages past header */
	rqstp->rq_arg.pages = &rqstp->rq_pages[head->sge[0].length];
	rqstp->rq_arg.page_len = head->arg.page_len;
	rqstp->rq_arg.page_base = head->arg.page_base;

	/* rq_respages starts after the last arg page */
	rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
	rqstp->rq_resused = 0;

	/* Rebuild rq_arg head and tail. */
	rqstp->rq_arg.head[0] = head->arg.head[0];
	rqstp->rq_arg.tail[0] = head->arg.tail[0];
	rqstp->rq_arg.len = head->arg.len;
	rqstp->rq_arg.buflen = head->arg.buflen;

	/* XXX: What should this be? */
	rqstp->rq_prot = IPPROTO_MAX;

	/*
	 * Free the contexts we used to build the RDMA_READ. We have
	 * to be careful here because the context list uses the same
	 * next pointer used to chain the contexts associated with the
	 * RDMA_READ
	 */
	data->next = NULL;	/* terminate circular list */
	do {
		data = head->next;
		svc_rdma_put_context(head, 0);
		head = data;
	} while (head != NULL);

	ret = rqstp->rq_arg.head[0].iov_len
		+ rqstp->rq_arg.page_len
		+ rqstp->rq_arg.tail[0].iov_len;
	dprintk("svcrdma: deferred read ret=%d, rq_arg.len =%d, "
		"rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len = %zd\n",
		ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base,
		rqstp->rq_arg.head[0].iov_len);

	svc_xprt_received(rqstp->rq_xprt);
	return ret;
}

/*
 * Set up the rqstp thread context to point to the RQ buffer. If
 * necessary, pull additional data from the client with an RDMA_READ
 * request.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct rpcrdma_msg *rmsgp;
	int ret = 0;
	int len;

	dprintk("svcrdma: rqstp=%p\n", rqstp);

	spin_lock_bh(&rdma_xprt->sc_read_complete_lock);
	if (!list_empty(&rdma_xprt->sc_read_complete_q)) {
		ctxt = list_entry(rdma_xprt->sc_read_complete_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
	}
	spin_unlock_bh(&rdma_xprt->sc_read_complete_lock);
	if (ctxt)
		return rdma_read_complete(rqstp, ctxt);

	spin_lock_bh(&rdma_xprt->sc_rq_dto_lock);
	if (!list_empty(&rdma_xprt->sc_rq_dto_q)) {
		ctxt = list_entry(rdma_xprt->sc_rq_dto_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
	} else {
		atomic_inc(&rdma_stat_rq_starve);
		clear_bit(XPT_DATA, &xprt->xpt_flags);
		ctxt = NULL;
	}
	spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
	if (!ctxt) {
		/* This is the EAGAIN path. The svc_recv routine will
		 * return -EAGAIN, the nfsd thread will go to call into
		 * svc_recv again and we shouldn't be on the active
		 * transport list
		 */
		if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
			goto close_out;

		BUG_ON(ret);
		goto out;
	}
	dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
		ctxt, rdma_xprt, rqstp, ctxt->wc_status);
	BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
	atomic_inc(&rdma_stat_recv);

	/* Build up the XDR from the receive buffers. */
	rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);

	/* Decode the RDMA header. */
	len = svc_rdma_xdr_decode_req(&rmsgp, rqstp);
	rqstp->rq_xprt_hlen = len;

	/* If the request is invalid, reply with an error */
	if (len < 0) {
		if (len == -ENOSYS)
			(void)svc_rdma_send_error(rdma_xprt, rmsgp, ERR_VERS);
		goto close_out;
	}

	/* Read read-list data. */
	ret = rdma_read_xdr(rdma_xprt, rmsgp, rqstp, ctxt);
	if (ret > 0) {
		/* read-list posted, defer until data received from client. */
		svc_xprt_received(xprt);
		return 0;
	}
	if (ret < 0) {
		/* Post of read-list failed, free context. */
		svc_rdma_put_context(ctxt, 1);
		return 0;
	}

	ret = rqstp->rq_arg.head[0].iov_len
		+ rqstp->rq_arg.page_len
		+ rqstp->rq_arg.tail[0].iov_len;
	svc_rdma_put_context(ctxt, 0);
 out:
	dprintk("svcrdma: ret = %d, rq_arg.len =%d, "
		"rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len = %zd\n",
		ret, rqstp->rq_arg.len,
		rqstp->rq_arg.head[0].iov_base,
		rqstp->rq_arg.head[0].iov_len);
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	svc_xprt_received(xprt);
	return ret;

 close_out:
	if (ctxt)
		svc_rdma_put_context(ctxt, 1);
	dprintk("svcrdma: transport %p is closing\n", xprt);
	/*
	 * Set the close bit and enqueue it. svc_recv will see the
	 * close bit and call svc_xprt_delete
	 */
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	svc_xprt_received(xprt);
	return 0;
}