/*
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT
/*
 * Replace the pages in the rq_pages array with the pages from the SGE in
 * the RDMA_RECV completion. The SGL should contain full pages up until the
 * last one.
 */
static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
			       struct svc_rdma_op_ctxt *ctxt,
			       u32 byte_count)
{
	struct page *page;
	int sge_no;
	u32 bc;

	/* Swap the page in the SGE with the page in argpages */
	page = ctxt->pages[0];
	put_page(rqstp->rq_pages[0]);
	rqstp->rq_pages[0] = page;

	/* Set up the XDR head */
	rqstp->rq_arg.head[0].iov_base = page_address(page);
	rqstp->rq_arg.head[0].iov_len = min(byte_count, ctxt->sge[0].length);
	rqstp->rq_arg.len = byte_count;
	rqstp->rq_arg.buflen = byte_count;

	/* Compute bytes past head in the SGL */
	bc = byte_count - rqstp->rq_arg.head[0].iov_len;

	/* If data remains, store it in the pagelist */
	rqstp->rq_arg.page_len = bc;
	rqstp->rq_arg.page_base = 0;
	rqstp->rq_arg.pages = &rqstp->rq_pages[1];
	sge_no = 1;
	while (bc && sge_no < ctxt->count) {
		page = ctxt->pages[sge_no];
		put_page(rqstp->rq_pages[sge_no]);
		rqstp->rq_pages[sge_no] = page;
		bc -= min(bc, ctxt->sge[sge_no].length);
		rqstp->rq_arg.buflen += ctxt->sge[sge_no].length;
		sge_no++;
	}
	rqstp->rq_respages = &rqstp->rq_pages[sge_no];

	/* We should never run out of SGE because the limit is defined to
	 * support the max allowed RPC data length
	 */
	BUG_ON(bc && (sge_no == ctxt->count));
	BUG_ON((rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len)
	       != byte_count);
	BUG_ON(rqstp->rq_arg.len != byte_count);

	/* If not all pages were used from the SGL, free the remaining ones */
	bc = sge_no;
	while (sge_no < ctxt->count) {
		page = ctxt->pages[sge_no++];
		put_page(page);
	}
	ctxt->count = bc;

	/* Set up tail */
	rqstp->rq_arg.tail[0].iov_base = NULL;
	rqstp->rq_arg.tail[0].iov_len = 0;
}
/* Encode a read-chunk-list as an array of IB SGE
 *
 * Assumptions:
 * - chunk[0]->position points to pages[0] at an offset of 0
 * - pages[] is not physically or virtually contiguous and consists of
 *   PAGE_SIZE elements.
 *
 * Output:
 * - sge array pointing into pages[] array.
 * - chunk_sge array specifying sge index and count for each
 *   chunk in the read list
 */
static int rdma_rcl_to_sge(struct svcxprt_rdma *xprt,
			   struct svc_rqst *rqstp,
			   struct svc_rdma_op_ctxt *head,
			   struct rpcrdma_msg *rmsgp,
			   struct svc_rdma_req_map *rpl_map,
			   struct svc_rdma_req_map *chl_map,
			   int ch_count,
			   int byte_count)
{
	int sge_no;
	int sge_bytes;
	int page_off;
	int page_no;
	int ch_bytes;
	int ch_no;
	struct rpcrdma_read_chunk *ch;

	sge_no = 0;
	page_no = 0;
	page_off = 0;
	ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
	ch_no = 0;
	ch_bytes = ch->rc_target.rs_length;
	head->arg.head[0] = rqstp->rq_arg.head[0];
	head->arg.tail[0] = rqstp->rq_arg.tail[0];
	head->arg.pages = &head->pages[head->count];
	head->hdr_count = head->count; /* save count of hdr pages */
	head->arg.page_base = 0;
	head->arg.page_len = ch_bytes;
	head->arg.len = rqstp->rq_arg.len + ch_bytes;
	head->arg.buflen = rqstp->rq_arg.buflen + ch_bytes;
	head->count++;
	chl_map->ch[0].start = 0;
	while (byte_count) {
		rpl_map->sge[sge_no].iov_base =
			page_address(rqstp->rq_arg.pages[page_no]) + page_off;
		sge_bytes = min_t(int, PAGE_SIZE-page_off, ch_bytes);
		rpl_map->sge[sge_no].iov_len = sge_bytes;
		/*
		 * Don't bump head->count here because the same page
		 * may be used by multiple SGE.
		 */
		head->arg.pages[page_no] = rqstp->rq_arg.pages[page_no];
		rqstp->rq_respages = &rqstp->rq_arg.pages[page_no+1];

		byte_count -= sge_bytes;
		ch_bytes -= sge_bytes;
		sge_no++;
		/*
		 * If all bytes for this chunk have been mapped to an
		 * SGE, move to the next SGE
		 */
		if (ch_bytes == 0) {
			chl_map->ch[ch_no].count =
				sge_no - chl_map->ch[ch_no].start;
			ch_no++;
			ch++;
			chl_map->ch[ch_no].start = sge_no;
			ch_bytes = ch->rc_target.rs_length;
			/* If bytes remaining account for next chunk */
			if (byte_count) {
				head->arg.page_len += ch_bytes;
				head->arg.len += ch_bytes;
				head->arg.buflen += ch_bytes;
			}
		}
		/*
		 * If this SGE consumed all of the page, move to the
		 * next page
		 */
		if ((sge_bytes + page_off) == PAGE_SIZE) {
			page_no++;
			page_off = 0;
			/*
			 * If there are still bytes left to map, bump
			 * the page count
			 */
			if (byte_count)
				head->count++;
		} else
			page_off += sge_bytes;
	}
	BUG_ON(byte_count != 0);
	return sge_no;
}
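/*
 * DMA-map each kvec in the supplied vector into an entry of the
 * context's SGE array for a subsequent RDMA Read, advancing the
 * running offset into the client's chunk by the bytes mapped.
 */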
static void rdma_set_ctxt_sge(struct svcxprt_rdma *xprt,
			      struct svc_rdma_op_ctxt *ctxt,
			      struct kvec *vec, u64 *sgl_offset, int count)
{
	int i;

	ctxt->count = count;
	ctxt->direction = DMA_FROM_DEVICE;
	for (i = 0; i < count; i++) {
		ctxt->sge[i].addr =
			ib_dma_map_single(xprt->sc_cm_id->device,
					  vec[i].iov_base, vec[i].iov_len,
					  DMA_FROM_DEVICE);
		ctxt->sge[i].length = vec[i].iov_len;
		ctxt->sge[i].lkey = xprt->sc_phys_mr->lkey;
		*sgl_offset = *sgl_offset + vec[i].iov_len;
	}
}
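/*
 * Limit the number of SGEs a single RDMA Read may use: iWARP devices
 * are restricted to one SGE per Read on this path; otherwise cap the
 * count at the device's sc_max_sge.
 */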
static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count)
{
	if ((RDMA_TRANSPORT_IWARP ==
	     rdma_node_get_transport(xprt->sc_cm_id->device->node_type)) &&
	    sge_count > 1)
		return 1;
	return min_t(int, sge_count, xprt->sc_max_sge);
}
/*
 * Use RDMA_READ to read data from the advertised client buffer into the
 * XDR stream starting at rq_arg.head[0].iov_base.
 * Each chunk in the array contains the following fields:
 * discrim      - '1', this is not used for data placement
 * position     - The xdr stream offset (the same for every chunk)
 * handle       - RMR for client memory region
 * length       - data transfer length
 * offset       - 64 bit tagged offset in remote memory region
 *
 * On our side, we need to read into a pagelist. The first page immediately
 * follows the RPC header.
 *
 * This function returns:
 * 0 - No error and no read-list found.
 *
 * 1 - Successful read-list processing. The data is not yet in
 * the pagelist and therefore the RPC request must be deferred. The
 * I/O completion will enqueue the transport again and
 * svc_rdma_recvfrom will complete the request.
 *
 * <0 - Error processing/posting read-list.
 *
 * NOTE: The ctxt must not be touched after the last WR has been posted
 * because the I/O completion processing may occur on another
 * processor and free / modify the context. Do not touch it!
 */
static int rdma_read_xdr(struct svcxprt_rdma *xprt,
			 struct rpcrdma_msg *rmsgp,
			 struct svc_rqst *rqstp,
			 struct svc_rdma_op_ctxt *hdr_ctxt)
{
	struct ib_send_wr read_wr;
	int err = 0;
	int ch_no;
	int ch_count;
	int byte_count;
	int sge_count;
	u64 sgl_offset;
	struct rpcrdma_read_chunk *ch;
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct svc_rdma_req_map *rpl_map;
	struct svc_rdma_req_map *chl_map;

	/* If no read list is present, return 0 */
	ch = svc_rdma_get_read_chunk(rmsgp);
	if (!ch)
		return 0;

	/* Allocate temporary reply and chunk maps */
	rpl_map = svc_rdma_get_req_map();
	chl_map = svc_rdma_get_req_map();

	svc_rdma_rcl_chunk_counts(ch, &ch_count, &byte_count);
	if (ch_count > RPCSVC_MAXPAGES)
		return -EINVAL;
	sge_count = rdma_rcl_to_sge(xprt, rqstp, hdr_ctxt, rmsgp,
				    rpl_map, chl_map,
				    ch_count, byte_count);
	sgl_offset = 0;
	ch_no = 0;
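	/*
	 * Build and post one RDMA Read work request per chunk in the
	 * read list, using the SGE map built above to locate the local
	 * pages that receive each chunk's data.
	 */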
	for (ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
	     ch->rc_discrim != 0; ch++, ch_no++) {
 next_sge:
		ctxt = svc_rdma_get_context(xprt);
		ctxt->direction = DMA_FROM_DEVICE;
		clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);

		/* Prepare READ WR */
		memset(&read_wr, 0, sizeof read_wr);
		ctxt->wr_op = IB_WR_RDMA_READ;
		read_wr.wr_id = (unsigned long)ctxt;
		read_wr.opcode = IB_WR_RDMA_READ;
		read_wr.send_flags = IB_SEND_SIGNALED;
		read_wr.wr.rdma.rkey = ch->rc_target.rs_handle;
		read_wr.wr.rdma.remote_addr =
			get_unaligned(&(ch->rc_target.rs_offset)) +
			sgl_offset;
		read_wr.sg_list = ctxt->sge;
		read_wr.num_sge =
			rdma_read_max_sge(xprt, chl_map->ch[ch_no].count);
		rdma_set_ctxt_sge(xprt, ctxt,
				  &rpl_map->sge[chl_map->ch[ch_no].start],
				  &sgl_offset,
				  read_wr.num_sge);
		if (((ch+1)->rc_discrim == 0) &&
		    (read_wr.num_sge == chl_map->ch[ch_no].count)) {
			/*
			 * Mark the last RDMA_READ with a bit to
			 * indicate all RPC data has been fetched from
			 * the client and the RPC needs to be enqueued.
			 */
			set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
			ctxt->read_hdr = hdr_ctxt;
		}
		/* Post the read */
		err = svc_rdma_send(xprt, &read_wr);
		if (err) {
			printk(KERN_ERR "svcrdma: Error %d posting RDMA_READ\n",
			       err);
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			svc_rdma_put_context(ctxt, 0);
			goto out;
		}
		atomic_inc(&rdma_stat_read);
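		/*
		 * If this chunk required more SGEs than one Read could
		 * carry, adjust the chunk map and post another Read for
		 * the remaining SGEs before moving to the next chunk.
		 */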
		if (read_wr.num_sge < chl_map->ch[ch_no].count) {
			chl_map->ch[ch_no].count -= read_wr.num_sge;
			chl_map->ch[ch_no].start += read_wr.num_sge;
			goto next_sge;
		}
		sgl_offset = 0;
		err = 1;
	}

 out:
	svc_rdma_put_req_map(rpl_map);
	svc_rdma_put_req_map(chl_map);

	/* Detach arg pages. svc_recv will replenish them */
	for (ch_no = 0; &rqstp->rq_pages[ch_no] < rqstp->rq_respages; ch_no++)
		rqstp->rq_pages[ch_no] = NULL;

	/* Detach res pages. svc_release must see an rq_resused count of
	 * zero or it will attempt to put them.
	 */
	while (rqstp->rq_resused)
		rqstp->rq_respages[--rqstp->rq_resused] = NULL;
	return err;
}
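/*
 * A deferred request's RDMA Reads have completed. Swap the pages that
 * were read from the client into rqstp and rebuild rq_arg from the
 * saved header context so the RPC can now be processed.
 */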
static int rdma_read_complete(struct svc_rqst *rqstp,
			      struct svc_rdma_op_ctxt *head)
{
	int page_no;
	int ret;

	BUG_ON(!head);

	/* Copy RPC pages */
	for (page_no = 0; page_no < head->count; page_no++) {
		put_page(rqstp->rq_pages[page_no]);
		rqstp->rq_pages[page_no] = head->pages[page_no];
	}
	/* Point rq_arg.pages past header */
	rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count];
	rqstp->rq_arg.page_len = head->arg.page_len;
	rqstp->rq_arg.page_base = head->arg.page_base;

	/* rq_respages starts after the last arg page */
	rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
	rqstp->rq_resused = 0;

	/* Rebuild rq_arg head and tail. */
	rqstp->rq_arg.head[0] = head->arg.head[0];
	rqstp->rq_arg.tail[0] = head->arg.tail[0];
	rqstp->rq_arg.len = head->arg.len;
	rqstp->rq_arg.buflen = head->arg.buflen;

	/* Free the context */
	svc_rdma_put_context(head, 0);

	/* XXX: What should this be? */
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, rqstp->rq_xprt);

	ret = rqstp->rq_arg.head[0].iov_len
		+ rqstp->rq_arg.page_len
		+ rqstp->rq_arg.tail[0].iov_len;
	dprintk("svcrdma: deferred read ret=%d, rq_arg.len=%d, "
		"rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zd\n",
		ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base,
		rqstp->rq_arg.head[0].iov_len);

	svc_xprt_received(rqstp->rq_xprt);
	return ret;
}
/*
 * Set up the rqstp thread context to point to the RQ buffer. If
 * necessary, pull additional data from the client with an RDMA_READ.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct rpcrdma_msg *rmsgp;
	int ret = 0;
	int len;

	dprintk("svcrdma: rqstp=%p\n", rqstp);
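	/*
	 * If an earlier RDMA Read has completed, its context is queued
	 * on sc_read_complete_q; finish assembling that deferred request
	 * first.
	 */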
	spin_lock_bh(&rdma_xprt->sc_read_complete_lock);
	if (!list_empty(&rdma_xprt->sc_read_complete_q)) {
		ctxt = list_entry(rdma_xprt->sc_read_complete_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
	}
	spin_unlock_bh(&rdma_xprt->sc_read_complete_lock);
	if (ctxt)
		return rdma_read_complete(rqstp, ctxt);
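	/*
	 * Otherwise dequeue the next completed receive; an empty queue
	 * is recorded as a starvation and XPT_DATA is cleared.
	 */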
	spin_lock_bh(&rdma_xprt->sc_rq_dto_lock);
	if (!list_empty(&rdma_xprt->sc_rq_dto_q)) {
		ctxt = list_entry(rdma_xprt->sc_rq_dto_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
	} else {
		atomic_inc(&rdma_stat_rq_starve);
		clear_bit(XPT_DATA, &xprt->xpt_flags);
		ctxt = NULL;
	}
	spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
	if (!ctxt) {
		/* This is the EAGAIN path. The svc_recv routine will
		 * return -EAGAIN, the nfsd thread will call into
		 * svc_recv again, and we should not be on the active
		 * transport list.
		 */
		if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
			goto close_out;

		BUG_ON(ret);
		goto out;
	}
	dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
		ctxt, rdma_xprt, rqstp, ctxt->wc_status);
	BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
	atomic_inc(&rdma_stat_recv);

	/* Build up the XDR from the receive buffers. */
	rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);

	/* Decode the RDMA header. */
	len = svc_rdma_xdr_decode_req(&rmsgp, rqstp);
	rqstp->rq_xprt_hlen = len;

	/* If the request is invalid, reply with an error */
	if (len < 0) {
		if (len == -ENOSYS)
			svc_rdma_send_error(rdma_xprt, rmsgp, ERR_VERS);
		goto close_out;
	}
	/* Read read-list data. */
	ret = rdma_read_xdr(rdma_xprt, rmsgp, rqstp, ctxt);
	if (ret > 0) {
		/* read-list posted, defer until data received from client. */
		svc_xprt_received(xprt);
		return 0;
	}
	if (ret < 0) {
		/* Post of read-list failed, free context. */
		svc_rdma_put_context(ctxt, 1);
		return 0;
	}

	ret = rqstp->rq_arg.head[0].iov_len
		+ rqstp->rq_arg.page_len
		+ rqstp->rq_arg.tail[0].iov_len;
	svc_rdma_put_context(ctxt, 0);
 out:
	dprintk("svcrdma: ret=%d, rq_arg.len=%d, "
		"rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zd\n",
		ret, rqstp->rq_arg.len,
		rqstp->rq_arg.head[0].iov_base,
		rqstp->rq_arg.head[0].iov_len);
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	svc_xprt_received(xprt);
	return ret;

 close_out:
	if (ctxt)
		svc_rdma_put_context(ctxt, 1);
	dprintk("svcrdma: transport %p is closing\n", xprt);
	/*
	 * Set the close bit and enqueue it. svc_recv will see the
	 * close bit and call svc_xprt_delete.
	 */
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	svc_xprt_received(xprt);
	return 0;
}