/*
 * linux/net/sunrpc/xdr.c
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
        unsigned int    quadlen = XDR_QUADLEN(obj->len);

        p[quadlen] = 0;         /* zero trailing bytes */
        *p++ = htonl(obj->len);
        memcpy(p, obj->data, obj->len);
        return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL(xdr_encode_netobj);
__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
        unsigned int    len;

        if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ)
                return NULL;
        obj->len  = len;
        obj->data = (u8 *) p;
        return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL(xdr_decode_netobj);
/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
        if (likely(nbytes != 0)) {
                unsigned int quadlen = XDR_QUADLEN(nbytes);
                unsigned int padding = (quadlen << 2) - nbytes;

                if (ptr != NULL)
                        memcpy(p, ptr, nbytes);
                if (padding != 0)
                        memset((char *)p + nbytes, 0, padding);
                p += quadlen;
        }
        return p;
}
EXPORT_SYMBOL(xdr_encode_opaque_fixed);
/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
        *p++ = htonl(nbytes);
        return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL(xdr_encode_opaque);
__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
        return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL(xdr_encode_string);
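/*
 * Illustrative sketch, not part of the original file: encoding a
 * counted string and eight bytes of fixed-length opaque data into a
 * caller-supplied, quad-aligned scratch buffer.  The buffer size and
 * the "verf" array are assumptions made only for this example.
 */
#if 0
static __be32 *xdr_example_encode_inline(__be32 *buf)
{
        static const u8 verf[8];        /* example verifier bytes */
        __be32 *p = buf;

        /* 4-byte length word, the string bytes, then zero padding up
         * to the next 32-bit boundary */
        p = xdr_encode_string(p, "hello");
        /* raw bytes plus padding, no length word */
        p = xdr_encode_opaque_fixed(p, verf, sizeof(verf));
        return p;                       /* next free position */
}
#endif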
__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp, int *lenp, int maxlen)
{
        unsigned int    len;

        if ((len = ntohl(*p++)) > maxlen)
                return NULL;
        *lenp = len;
        *sp = (char *) p;
        return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL(xdr_decode_string_inplace);
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
        struct kvec *tail = xdr->tail;
        xdr->page_base = base;
        p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
                unsigned int pad = 4 - (len & 3);
                tail->iov_base = (char *)p + (len & 3);
EXPORT_SYMBOL(xdr_encode_pages);
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
                 struct page **pages, unsigned int base, unsigned int len)
{
        struct kvec *head = xdr->head;
        struct kvec *tail = xdr->tail;
        char *buf = (char *)head->iov_base;
        unsigned int buflen = head->iov_len;

        head->iov_len  = offset;

        xdr->pages = pages;
        xdr->page_base = base;
        xdr->page_len = len;

        tail->iov_base = buf + offset;
        tail->iov_len = buflen - offset;

        xdr->buflen += len;
}
EXPORT_SYMBOL(xdr_inline_pages);
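/*
 * Illustrative sketch, not part of the original file: laying out a
 * request buffer that carries page data (a write-style call).  The
 * page vector "req_pages" and the 4096-byte payload length are example
 * values; the head kvec is assumed to already hold the encoded header.
 */
#if 0
static void xdr_example_attach_payload(struct xdr_buf *buf,
                                       struct page **req_pages)
{
        /* payload starts at offset 0 of req_pages[0] and is 4096 bytes
         * long; the tail is positioned after the head and picks up any
         * padding needed to keep the stream quad-aligned */
        xdr_encode_pages(buf, req_pages, 0, 4096);
}
#endif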
/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *      they point to may overlap.
 */
_shift_data_right_pages(struct page **pages, size_t pgto_base,
                size_t pgfrom_base, size_t len)
        struct page **pgfrom, **pgto;
        BUG_ON(pgto_base <= pgfrom_base);
        pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
        pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);
        pgto_base &= ~PAGE_CACHE_MASK;
        pgfrom_base &= ~PAGE_CACHE_MASK;
                /* Are any pointers crossing a page boundary? */
                if (pgto_base == 0) {
                        pgto_base = PAGE_CACHE_SIZE;
                if (pgfrom_base == 0) {
                        pgfrom_base = PAGE_CACHE_SIZE;
                if (copy > pgto_base)
                if (copy > pgfrom_base)
                vto = kmap_atomic(*pgto, KM_USER0);
                vfrom = kmap_atomic(*pgfrom, KM_USER1);
                memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
                flush_dcache_page(*pgto);
                kunmap_atomic(vfrom, KM_USER1);
                kunmap_atomic(vto, KM_USER0);
        } while ((len -= copy) != 0);
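/*
 * Illustrative sketch, not part of the original file: how a "page
 * vector address" as used above maps back onto a page index and an
 * in-page offset.  The numbers are arbitrary example values.
 */
#if 0
static void xdr_example_page_vector_address(struct page **pages)
{
        size_t addr = (2 << PAGE_CACHE_SHIFT) + 100;    /* byte 100 of pages[2] */
        struct page *pg = pages[addr >> PAGE_CACHE_SHIFT];      /* pages[2] */
        size_t offset = addr & ~PAGE_CACHE_MASK;                /* 100 */

        (void)pg;
        (void)offset;
}
#endif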
/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
        pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
        pgbase &= ~PAGE_CACHE_MASK;
                copy = PAGE_CACHE_SIZE - pgbase;
                vto = kmap_atomic(*pgto, KM_USER0);
                memcpy(vto + pgbase, p, copy);
                kunmap_atomic(vto, KM_USER0);
                if (pgbase == PAGE_CACHE_SIZE) {
                        flush_dcache_page(*pgto);
        } while ((len -= copy) != 0);
        flush_dcache_page(*pgto);
/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
        struct page **pgfrom;
        pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
        pgbase &= ~PAGE_CACHE_MASK;
                copy = PAGE_CACHE_SIZE - pgbase;
                vfrom = kmap_atomic(*pgfrom, KM_USER0);
                memcpy(p, vfrom + pgbase, copy);
                kunmap_atomic(vfrom, KM_USER0);
                if (pgbase == PAGE_CACHE_SIZE) {
        } while ((len -= copy) != 0);
/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
        struct kvec *head, *tail;
        unsigned int pglen = buf->page_len;
        BUG_ON (len > head->iov_len);
        /* Shift the tail first */
        if (tail->iov_len != 0) {
                if (tail->iov_len > len) {
                        copy = tail->iov_len - len;
                        memmove((char *)tail->iov_base + len,
                                        tail->iov_base, copy);
                /* Copy from the inlined pages into the tail */
                if (offs >= tail->iov_len)
                else if (copy > tail->iov_len - offs)
                        copy = tail->iov_len - offs;
                        _copy_from_pages((char *)tail->iov_base + offs,
                                        buf->page_base + pglen + offs - len,
                /* Do we also need to copy data from the head into the tail ? */
                        offs = copy = len - pglen;
                        if (copy > tail->iov_len)
                                copy = tail->iov_len;
                        memcpy(tail->iov_base,
                                        (char *)head->iov_base +
                                        head->iov_len - offs,
        /* Now handle pages */
                _shift_data_right_pages(buf->pages,
                                buf->page_base + len,
                _copy_to_pages(buf->pages, buf->page_base,
                                (char *)head->iov_base + head->iov_len - len,
        head->iov_len -= len;
        /* Have we truncated the message? */
        if (buf->len > buf->buflen)
                buf->len = buf->buflen;
/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
        unsigned int pglen = buf->page_len;
        BUG_ON (len > pglen);
        /* Shift the tail first */
        if (tail->iov_len != 0) {
                p = (char *)tail->iov_base + len;
                if (tail->iov_len > len) {
                        copy = tail->iov_len - len;
                        memmove(p, tail->iov_base, copy);
                /* Copy from the inlined pages into the tail */
                if (copy > tail->iov_len)
                        copy = tail->iov_len;
                _copy_from_pages((char *)tail->iov_base,
                                buf->pages, buf->page_base + pglen - len,
        buf->page_len -= len;
        /* Have we truncated the message? */
        if (buf->len > buf->buflen)
                buf->len = buf->buflen;
void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
        xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL(xdr_shift_buf);
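/*
 * Illustrative sketch, not part of the original file: shrinking the
 * head kvec of an assumed, fully initialized xdr_buf.  The last four
 * bytes of head[0] are pushed into the inlined pages (and, if needed,
 * the tail); nothing is discarded.
 */
#if 0
static void xdr_example_shift(struct xdr_buf *buf)
{
        xdr_shift_buf(buf, 4);          /* head[0].iov_len drops by 4 */
}
#endif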
/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *       scratch buffer in the xdr_buf's header kvec. Previously this
 *       meant we needed to call xdr_adjust_iovec() after encoding the
 *       data. With the new scheme, the xdr_stream manages the details
 *       of the buffer length, and takes care of adjusting the kvec
 *       length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
        struct kvec *iov = buf->head;
        int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

        BUG_ON(scratch_len < 0);
        xdr->buf = buf;
        xdr->iov = iov;
        xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
        xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
        BUG_ON(iov->iov_len > scratch_len);

        if (p != xdr->p && p != NULL) {
                size_t len;

                BUG_ON(p < xdr->p || p > xdr->end);
                len = (char *)p - (char *)xdr->p;
                xdr->p = p;
                buf->len += len;
                iov->iov_len += len;
        }
}
EXPORT_SYMBOL(xdr_init_encode);
/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
        __be32 *p = xdr->p;
        __be32 *q;

        /* align nbytes on the next 32-bit boundary */
        nbytes += 3;
        nbytes &= ~3;
        q = p + (nbytes >> 2);
        if (unlikely(q > xdr->end || q < p))
                return NULL;
        xdr->p = q;
        xdr->iov->iov_len += nbytes;
        xdr->buf->len += nbytes;
        return p;
}
EXPORT_SYMBOL(xdr_reserve_space);
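/*
 * Illustrative sketch, not part of the original file: a typical encode
 * sequence.  The caller owns "buf", whose head kvec points at a
 * preallocated scratch area; the three-word payload is invented purely
 * for this example.
 */
#if 0
static int xdr_example_encode_stream(struct xdr_buf *buf, __be32 *start)
{
        struct xdr_stream xdr;
        __be32 *p;

        xdr_init_encode(&xdr, buf, start);
        p = xdr_reserve_space(&xdr, 3 * sizeof(__be32));
        if (p == NULL)
                return -EMSGSIZE;               /* scratch buffer too small */
        *p++ = htonl(42);                       /* three 32-bit words */
        *p++ = htonl(0);
        *p++ = htonl(7);
        return 0;                               /* buf->len already updated */
}
#endif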
/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
        struct xdr_buf *buf = xdr->buf;
        struct kvec *iov = buf->tail;
        buf->page_base = base;
        iov->iov_base = (char *)xdr->p;
                unsigned int pad = 4 - (len & 3);
                BUG_ON(xdr->p >= xdr->end);
                iov->iov_base = (char *)xdr->p + (len & 3);
EXPORT_SYMBOL(xdr_write_pages);
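/*
 * Illustrative sketch, not part of the original file: attaching page
 * payload once the header has been encoded through the xdr_stream.
 * "pages" and "count" are assumed to describe data the caller already
 * owns.
 */
#if 0
static void xdr_example_write_pages(struct xdr_stream *xdr,
                                    struct page **pages, unsigned int count)
{
        /* pages become buf->pages; any non-quad remainder is padded
         * through the tail kvec */
        xdr_write_pages(xdr, pages, 0, count);
}
#endif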
/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
        struct kvec *iov = buf->head;
        unsigned int len = iov->iov_len;

        if (len > buf->len)
                len = buf->len;
        xdr->buf = buf;
        xdr->iov = iov;
        xdr->p = p;
        xdr->end = (__be32 *)((char *)iov->iov_base + len);
}
EXPORT_SYMBOL(xdr_init_decode);
/**
 * xdr_inline_decode - Retrieve non-page XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
        __be32 *p = xdr->p;
        __be32 *q = p + XDR_QUADLEN(nbytes);

        if (unlikely(q > xdr->end || q < p))
                return NULL;
        xdr->p = q;
        return p;
}
EXPORT_SYMBOL(xdr_inline_decode);
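/*
 * Illustrative sketch, not part of the original file: a typical decode
 * sequence over a received reply.  "buf" and "start" are assumed to
 * come from the transport; the three-word layout mirrors the encode
 * example above and exists only for illustration.
 */
#if 0
static int xdr_example_decode_stream(struct xdr_buf *buf, __be32 *start, u32 *out)
{
        struct xdr_stream xdr;
        __be32 *p;

        xdr_init_decode(&xdr, buf, start);
        p = xdr_inline_decode(&xdr, 3 * sizeof(__be32));
        if (p == NULL)
                return -EIO;            /* reply shorter than expected */
        out[0] = ntohl(*p++);           /* three 32-bit words */
        out[1] = ntohl(*p++);
        out[2] = ntohl(*p);
        return 0;
}
#endif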
/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
        struct xdr_buf *buf = xdr->buf;
        struct kvec *iov;
        ssize_t shift;
        unsigned int end;
        int padding;

        /* Realign pages to current pointer position */
        iov  = buf->head;
        shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
        if (shift > 0)
                xdr_shrink_bufhead(buf, shift);

        /* Truncate page data and move it into the tail */
        if (buf->page_len > len)
                xdr_shrink_pagelen(buf, buf->page_len - len);
        padding = (XDR_QUADLEN(len) << 2) - len;
        xdr->iov = iov = buf->tail;
        /* Compute remaining message length. */
        end = iov->iov_len;
        shift = buf->buflen - buf->len;
        if (shift < end)
                end -= shift;
        else if (shift > 0)
                end = 0;
        /*
         * Position current pointer at beginning of tail, and
         * set remaining message length.
         */
        xdr->p = (__be32 *)((char *)iov->iov_base + padding);
        xdr->end = (__be32 *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL(xdr_read_pages);
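/*
 * Illustrative sketch, not part of the original file: decoding a
 * READ-style reply whose opaque payload was steered into the page
 * list.  "count" is an assumed value already decoded from the reply
 * header.
 */
#if 0
static void xdr_example_read_reply(struct xdr_stream *xdr, unsigned int count)
{
        /* Align the page data with the current decode position; anything
         * beyond "count" bytes is pushed into the tail kvec.  Decoding
         * then continues at the (padded) start of the tail. */
        xdr_read_pages(xdr, count);
}
#endif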
/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
        char * kaddr = page_address(xdr->buf->pages[0]);

        xdr_read_pages(xdr, len);
        /*
         * Position current pointer at the beginning of the first page's
         * data, and set remaining message length.
         */
        if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
                len = PAGE_CACHE_SIZE - xdr->buf->page_base;
        xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
        xdr->end = (__be32 *)((char *)xdr->p + len);
}
EXPORT_SYMBOL(xdr_enter_page);
static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
        buf->head[0] = *iov;
        buf->tail[0] = empty_iov;
        buf->page_len = 0;
        buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL(xdr_buf_from_iov);
/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
                        unsigned int base, unsigned int len)
{
        subbuf->buflen = subbuf->len = len;
        if (base < buf->head[0].iov_len) {
                subbuf->head[0].iov_base = buf->head[0].iov_base + base;
                subbuf->head[0].iov_len = min_t(unsigned int, len,
                                                buf->head[0].iov_len - base);
                len -= subbuf->head[0].iov_len;
                base = 0;
        } else {
                subbuf->head[0].iov_base = NULL;
                subbuf->head[0].iov_len = 0;
                base -= buf->head[0].iov_len;
        }

        if (base < buf->page_len) {
                subbuf->page_len = min(buf->page_len - base, len);
                base += buf->page_base;
                subbuf->page_base = base & ~PAGE_CACHE_MASK;
                subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
                len -= subbuf->page_len;
                base = 0;
        } else {
                base -= buf->page_len;
                subbuf->page_len = 0;
        }

        if (base < buf->tail[0].iov_len) {
                subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
                subbuf->tail[0].iov_len = min_t(unsigned int, len,
                                                buf->tail[0].iov_len - base);
                len -= subbuf->tail[0].iov_len;
                base = 0;
        } else {
                subbuf->tail[0].iov_base = NULL;
                subbuf->tail[0].iov_len = 0;
                base -= buf->tail[0].iov_len;
        }

        if (base || len)
                return -1;
        return 0;
}
EXPORT_SYMBOL(xdr_buf_subsegment);
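/*
 * Illustrative sketch, not part of the original file: carving out a
 * 16-byte window that starts 32 bytes into an existing xdr_buf.  The
 * offsets are arbitrary example values.
 */
#if 0
static int xdr_example_subsegment(struct xdr_buf *buf)
{
        struct xdr_buf sub;

        if (xdr_buf_subsegment(buf, &sub, 32, 16))
                return -EINVAL;         /* window falls outside buf */
        /* sub now references the same memory as buf; no data is copied */
        return 0;
}
#endif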
static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
        unsigned int this_len;

        this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
        memcpy(obj, subbuf->head[0].iov_base, this_len);
        len -= this_len;
        obj += this_len;
        this_len = min_t(unsigned int, len, subbuf->page_len);
        if (this_len)
                _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
        len -= this_len;
        obj += this_len;
        this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
        memcpy(obj, subbuf->tail[0].iov_base, this_len);
}
/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
        struct xdr_buf subbuf;
        int status;

        status = xdr_buf_subsegment(buf, &subbuf, base, len);
        if (status != 0)
                return status;
        __read_bytes_from_xdr_buf(&subbuf, obj, len);
        return 0;
}
EXPORT_SYMBOL(read_bytes_from_xdr_buf);
static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
        unsigned int this_len;

        this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
        memcpy(subbuf->head[0].iov_base, obj, this_len);
        len -= this_len;
        obj += this_len;
        this_len = min_t(unsigned int, len, subbuf->page_len);
        if (this_len)
                _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
        len -= this_len;
        obj += this_len;
        this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
        memcpy(subbuf->tail[0].iov_base, obj, this_len);
}
/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
        struct xdr_buf subbuf;
        int status;

        status = xdr_buf_subsegment(buf, &subbuf, base, len);
        if (status != 0)
                return status;
        __write_bytes_to_xdr_buf(&subbuf, obj, len);
        return 0;
}
int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
        __be32  raw;
        int     status;

        status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
        if (status)
                return status;
        *obj = ntohl(raw);
        return 0;
}
EXPORT_SYMBOL(xdr_decode_word);
int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
        __be32  raw = htonl(obj);

        return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL(xdr_encode_word);
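/*
 * Illustrative sketch, not part of the original file: patching a
 * 32-bit word at a known offset and reading it back.  The offset of 8
 * bytes is an arbitrary example value.
 */
#if 0
static int xdr_example_words(struct xdr_buf *buf)
{
        u32 value;
        int err;

        err = xdr_encode_word(buf, 8, 0xdeadbeef);      /* stored big-endian */
        if (err)
                return err;
        err = xdr_decode_word(buf, 8, &value);          /* value == 0xdeadbeef */
        return err;
}
#endif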
/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
        struct xdr_buf subbuf;

        if (xdr_decode_word(buf, offset, &obj->len))
                return -EFAULT;
        if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
                return -EFAULT;

        /* Is the obj contained entirely in the head? */
        obj->data = subbuf.head[0].iov_base;
        if (subbuf.head[0].iov_len == obj->len)
                return 0;
        /* ..or is the obj contained entirely in the tail? */
        obj->data = subbuf.tail[0].iov_base;
        if (subbuf.tail[0].iov_len == obj->len)
                return 0;

        /* use end of tail as storage for obj:
         * (We don't copy to the beginning because then we'd have
         * to worry about doing a potentially overlapping copy.
         * This assumes the object is at most half the length of the
         * tail.) */
        if (obj->len > buf->buflen - buf->len)
                return -ENOMEM;
        if (buf->tail[0].iov_len != 0)
                obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
        else
                obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
        __read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
        return 0;
}
EXPORT_SYMBOL(xdr_buf_read_netobj);
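/*
 * Illustrative sketch, not part of the original file: pulling a
 * length-prefixed opaque object (for instance a GSS checksum) out of a
 * reply buffer.  The offset of 12 bytes is an arbitrary example value.
 */
#if 0
static int xdr_example_netobj(struct xdr_buf *buf)
{
        struct xdr_netobj obj;

        if (xdr_buf_read_netobj(buf, &obj, 12))
                return -EFAULT;
        /* obj.data now points either at the original bytes (if the object
         * sat wholly in the head or tail) or at a linearized copy placed
         * at the end of the tail; obj.len holds its length */
        return 0;
}
#endif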
/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
                 struct xdr_array2_desc *desc, int encode)
{
        char *elem = NULL, *c;
        unsigned int copied = 0, todo, avail_here;
        struct page **ppages = NULL;
                if (xdr_encode_word(buf, base, desc->array_len) != 0)
                if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
                    desc->array_len > desc->array_maxlen ||
                    (unsigned long) base + 4 + desc->array_len *
                                    desc->elem_size > buf->len)
        todo = desc->array_len * desc->elem_size;
        if (todo && base < buf->head->iov_len) {
                c = buf->head->iov_base + base;
                avail_here = min_t(unsigned int, todo,
                                   buf->head->iov_len - base);
                while (avail_here >= desc->elem_size) {
                        err = desc->xcode(desc, c);
                        c += desc->elem_size;
                        avail_here -= desc->elem_size;
                                elem = kmalloc(desc->elem_size, GFP_KERNEL);
                                err = desc->xcode(desc, elem);
                                memcpy(c, elem, avail_here);
                                memcpy(elem, c, avail_here);
                base = buf->head->iov_len;  /* align to start of pages */
        /* process pages array */
        base -= buf->head->iov_len;
        if (todo && base < buf->page_len) {
                unsigned int avail_page;
                avail_here = min(todo, buf->page_len - base);
                base += buf->page_base;
                ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
                base &= ~PAGE_CACHE_MASK;
                avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
                c = kmap(*ppages) + base;
                        avail_here -= avail_page;
                        if (copied || avail_page < desc->elem_size) {
                                unsigned int l = min(avail_page,
                                        desc->elem_size - copied);
                                        elem = kmalloc(desc->elem_size,
                                                        err = desc->xcode(desc, elem);
                                        memcpy(c, elem + copied, l);
                                        if (copied == desc->elem_size)
                                        memcpy(elem + copied, c, l);
                                        if (copied == desc->elem_size) {
                                                err = desc->xcode(desc, elem);
                                while (avail_page >= desc->elem_size) {
                                        err = desc->xcode(desc, c);
                                        c += desc->elem_size;
                                        avail_page -= desc->elem_size;
                                        unsigned int l = min(avail_page,
                                            desc->elem_size - copied);
                                                elem = kmalloc(desc->elem_size,
                                                        err = desc->xcode(desc, elem);
                                                memcpy(c, elem + copied, l);
                                                if (copied == desc->elem_size)
                                                memcpy(elem + copied, c, l);
                                                if (copied == desc->elem_size) {
                                                        err = desc->xcode(desc, elem);
                        avail_page = min(avail_here,
                                 (unsigned int) PAGE_CACHE_SIZE);
                base = buf->page_len;  /* align to start of tail */
        base -= buf->page_len;
                c = buf->tail->iov_base + base;
                        unsigned int l = desc->elem_size - copied;
                                memcpy(c, elem + copied, l);
                                memcpy(elem + copied, c, l);
                        err = desc->xcode(desc, elem);
                        err = desc->xcode(desc, c);
                        c += desc->elem_size;
                        todo -= desc->elem_size;
int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
                  struct xdr_array2_desc *desc)
{
        if (base >= buf->len)
                return -EINVAL;

        return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL(xdr_decode_array2);
int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
                  struct xdr_array2_desc *desc)
{
        if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
            buf->head->iov_len + buf->page_len + buf->tail->iov_len)
                return -EINVAL;

        return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL(xdr_encode_array2);
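/*
 * Illustrative sketch, not part of the original file: decoding an XDR
 * array of fixed-size elements with xdr_decode_array2.  The element
 * size, the maximum count and the xcode callback are all assumptions
 * made only for this example.
 */
#if 0
static int example_elem_xcode(struct xdr_array2_desc *desc, void *elem)
{
        /* called once per element; "elem" points at elem_size bytes */
        return 0;
}

static int xdr_example_array2(struct xdr_buf *buf, unsigned int base)
{
        struct xdr_array2_desc desc = {
                .elem_size    = 8,              /* bytes per element */
                .array_maxlen = 64,             /* refuse larger arrays */
                .xcode        = example_elem_xcode,
        };

        /* reads the element count at "base", then calls example_elem_xcode
         * for each element, linearizing those that straddle pages */
        return xdr_decode_array2(buf, base, &desc);
}
#endif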
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
                int (*actor)(struct scatterlist *, void *), void *data)
        unsigned page_len, thislen, page_offset;
        struct scatterlist      sg[1];
        sg_init_table(sg, 1);
        if (offset >= buf->head[0].iov_len) {
                offset -= buf->head[0].iov_len;
                thislen = buf->head[0].iov_len - offset;
                sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
                ret = actor(sg, data);
        if (offset >= buf->page_len) {
                offset -= buf->page_len;
                page_len = buf->page_len - offset;
                page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
                i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
                thislen = PAGE_CACHE_SIZE - page_offset;
                        if (thislen > page_len)
                        sg_set_page(sg, buf->pages[i], thislen, page_offset);
                        ret = actor(sg, data);
                        page_len -= thislen;
                        thislen = PAGE_CACHE_SIZE;
                } while (page_len != 0);
        if (offset < buf->tail[0].iov_len) {
                thislen = buf->tail[0].iov_len - offset;
                sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
                ret = actor(sg, data);
EXPORT_SYMBOL(xdr_process_buf);
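/*
 * Illustrative sketch, not part of the original file: using
 * xdr_process_buf with a trivial actor that just totals the number of
 * bytes it was shown.  A real caller (such as a hashing routine) would
 * feed each scatterlist entry to a crypto API instead.
 */
#if 0
static int example_count_actor(struct scatterlist *sg, void *data)
{
        unsigned int *total = data;

        *total += sg->length;           /* one head/page/tail chunk at a time */
        return 0;                       /* non-zero would abort the walk */
}

static int xdr_example_process(struct xdr_buf *buf)
{
        unsigned int total = 0;

        return xdr_process_buf(buf, 0, buf->len, example_count_actor, &total);
}
#endif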