/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>	/* for sg_init_table() and friends */
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = htonl(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL(xdr_encode_netobj);
__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len  = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL(xdr_decode_netobj);
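
/*
 * Illustrative sketch (not part of the original file): round-trip a small
 * netobj through a caller-supplied scratch buffer. The buffer must hold
 * the length word plus the quad-aligned data; the names are hypothetical.
 */
static void xdr_netobj_example(void)
{
	static u8 greeting[] = "hello";
	struct xdr_netobj in = { .len = 5, .data = greeting };
	struct xdr_netobj out;
	__be32 scratch[4];	/* 1 length word + 2 data/padding words + slack */
	__be32 *p;

	p = xdr_encode_netobj(scratch, &in);	/* returns position past the object */
	if (xdr_decode_netobj(scratch, &out) != p)
		printk(KERN_DEBUG "xdr example: decode mismatch\n");
}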
/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL(xdr_encode_opaque_fixed);
/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = htonl(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL(xdr_encode_opaque);
__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL(xdr_encode_string);
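
/*
 * Illustrative sketch (not part of the original file): encode a C string
 * as a counted XDR opaque. xdr_encode_opaque() emits the length word
 * itself; xdr_encode_opaque_fixed() is for fields whose length is fixed
 * by the protocol, such as a verifier. 'p' must have room for the result;
 * the function name is hypothetical.
 */
static __be32 *xdr_opaque_example(__be32 *p)
{
	const char *owner = "data";

	p = xdr_encode_opaque(p, owner, strlen(owner));	/* <len><bytes><pad> */
	return xdr_encode_opaque_fixed(p, owner, 4);	/* <bytes><pad> only */
}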
__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
			  unsigned int *lenp, unsigned int maxlen)
{
	u32 len;

	len = ntohl(*p++);
	if (len > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL(xdr_decode_string_inplace);
void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct kvec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len  = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}
EXPORT_SYMBOL(xdr_encode_pages);
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len  = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}
EXPORT_SYMBOL(xdr_inline_pages);
/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *            they point to may overlap.
 */
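/*
 * Worked example of the convention above (illustrative numbers): with
 * 4096-byte pages, page vector address 0x1804 denotes page pages[1]
 * (0x1804 >> PAGE_CACHE_SHIFT) at offset 0x804 (0x1804 & ~PAGE_CACHE_MASK),
 * which is exactly how the helpers below split their base arguments.
 */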
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto, KM_USER0);
		vfrom = kmap_atomic(*pgfrom, KM_USER1);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vfrom, KM_USER1);
		kunmap_atomic(vto, KM_USER0);

	} while ((len -= copy) != 0);
}
/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto, KM_USER0);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;

	} while ((len -= copy) != 0);
	flush_dcache_page(*pgto);
}
/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom, KM_USER0);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON (len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	char *p;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	BUG_ON (len > pglen);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		p = (char *)tail->iov_base + len;
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove(p, tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > tail->iov_len)
			copy = tail->iov_len;
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL(xdr_shift_buf);
/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL(xdr_init_encode);
/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL(xdr_reserve_space);
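
/*
 * Illustrative sketch (not part of the original file): typical encode-side
 * usage. The caller owns 'buf', whose head kvec supplies the scratch
 * space; 'xdr_encode_example' is a hypothetical name.
 */
static int xdr_encode_example(struct xdr_buf *buf, u32 value)
{
	struct xdr_stream xdr;
	__be32 *p;

	xdr_init_encode(&xdr, buf, NULL);
	p = xdr_reserve_space(&xdr, 4);		/* NULL if the buffer is full */
	if (p == NULL)
		return -EMSGSIZE;
	*p = htonl(value);
	return 0;
}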
/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len  = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len  += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL(xdr_write_pages);
/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	unsigned int len = iov->iov_len;

	if (len > buf->len)
		len = buf->len;
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = p;
	xdr->end = (__be32 *)((char *)iov->iov_base + len);
}
EXPORT_SYMBOL(xdr_init_decode);
/**
 * xdr_inline_decode - Retrieve non-page XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q = p + XDR_QUADLEN(nbytes);

	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	return p;
}
EXPORT_SYMBOL(xdr_inline_decode);
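
/*
 * Illustrative sketch (not part of the original file): typical decode-side
 * usage, pulling one 32-bit word out of the head buffer; the name is
 * hypothetical.
 */
static int xdr_decode_example(struct xdr_buf *buf, u32 *value)
{
	struct xdr_stream xdr;
	__be32 *p;

	xdr_init_decode(&xdr, buf, buf->head[0].iov_base);
	p = xdr_inline_decode(&xdr, 4);		/* NULL if the buffer is short */
	if (p == NULL)
		return -EIO;
	*value = ntohl(*p);
	return 0;
}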
/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	ssize_t shift;
	unsigned int end;
	int padding;

	/* Realign pages to current pointer position */
	iov  = buf->head;
	shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
	if (shift > 0)
		xdr_shrink_bufhead(buf, shift);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	padding = (XDR_QUADLEN(len) << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length. */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (shift < end)
		end -= shift;
	else if (shift > 0)
		end = 0;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL(xdr_read_pages);
/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	char * kaddr = page_address(xdr->buf->pages[0]);
	xdr_read_pages(xdr, len);
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
		len = PAGE_CACHE_SIZE - xdr->buf->page_base;
	xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
	xdr->end = (__be32 *)((char *)xdr->p + len);
}
EXPORT_SYMBOL(xdr_enter_page);
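
/*
 * Illustrative sketch (not part of the original file): decode a reply laid
 * out as <count><count bytes of page data>. After xdr_read_pages() the
 * stream continues in the tail; xdr_enter_page() would instead leave it
 * pointing at the first page of data. The name is hypothetical.
 */
static int xdr_pages_example(struct xdr_stream *xdr)
{
	__be32 *p = xdr_inline_decode(xdr, 4);
	u32 count;

	if (p == NULL)
		return -EIO;
	count = ntohl(*p);
	xdr_read_pages(xdr, count);	/* align 'count' bytes of page data */
	return count;
}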
static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL(xdr_buf_from_iov);
/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		subbuf->head[0].iov_base = NULL;
		subbuf->head[0].iov_len = 0;
		base -= buf->head[0].iov_len;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_CACHE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		subbuf->tail[0].iov_base = NULL;
		subbuf->tail[0].iov_len = 0;
		base -= buf->tail[0].iov_len;
	}

	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL(xdr_buf_subsegment);
static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}
/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL(read_bytes_from_xdr_buf);
static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}
/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL(write_bytes_to_xdr_buf);
int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32	raw;
	int	status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = ntohl(raw);
	return 0;
}
EXPORT_SYMBOL(xdr_decode_word);
int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32	raw = htonl(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL(xdr_encode_word);
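
/*
 * Illustrative sketch (not part of the original file): patch a 32-bit word
 * in place at a given offset, e.g. to fix up a count after the fact. The
 * name is hypothetical.
 */
static int xdr_word_example(struct xdr_buf *buf, unsigned int base)
{
	u32 val;
	int err;

	err = xdr_decode_word(buf, base, &val);
	if (err)
		return err;
	return xdr_encode_word(buf, base, val + 1);
}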
/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;
	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 * to worry about doing a potentially overlapping copy.
	 * This assumes the object is at most half the length of the
	 * tail.) */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}
EXPORT_SYMBOL(xdr_buf_read_netobj);
/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					    desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
				 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}
int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL(xdr_decode_array2);
int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL(xdr_encode_array2);
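
/*
 * Illustrative sketch (not part of the original file): decode an array of
 * 32-bit words with xdr_decode_array2(). The xcode callback sees each raw
 * element in wire (big-endian) order; 'word_array_desc', 'word_xcode' and
 * 'xdr_array2_example' are hypothetical names.
 */
struct word_array_desc {
	struct xdr_array2_desc	desc;
	u32			sum;
};

static int word_xcode(struct xdr_array2_desc *desc, void *elem)
{
	struct word_array_desc *wad =
		container_of(desc, struct word_array_desc, desc);

	wad->sum += ntohl(*(__be32 *) elem);
	return 0;			/* non-zero aborts the walk */
}

static int xdr_array2_example(struct xdr_buf *buf, unsigned int base)
{
	struct word_array_desc wad = {
		.desc = {
			.elem_size	= 4,
			.array_maxlen	= 64,
			.xcode		= word_xcode,
		},
	};

	return xdr_decode_array2(buf, base, &wad.desc);
}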
int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned page_len, thislen, page_offset;
	struct scatterlist      sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
		thislen = PAGE_CACHE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_CACHE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL(xdr_process_buf);
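
/*
 * Illustrative sketch (not part of the original file): an actor that
 * counts the bytes it is shown; xdr_process_buf() invokes it once per
 * contiguous region, e.g. xdr_process_buf(buf, 0, buf->len, count_bytes,
 * &total). 'count_bytes' is a hypothetical name.
 */
static int count_bytes(struct scatterlist *sg, void *data)
{
	unsigned int *total = data;

	*total += sg->length;
	return 0;			/* non-zero aborts the walk */
}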