2 * "splice": joining two ropes together by interweaving their strands.
4 * This is the "extended pipe" functionality, where a pipe is used as
5 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
6 * buffer that you can use to transfer data from one end to the other.
8 * The traditional unix read/write is extended with a "splice()" operation
9 * that transfers data buffers to or from a pipe buffer.
11 * Named by Larry McVoy, original implementation from Linus, extended by
12 * Jens to support splicing to files, network, direct splicing, etc and
13 * fixing lots of bugs.
15 * Copyright (C) 2005-2006 Jens Axboe <axboe@suse.de>
16 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
17 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
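 *
 * Rough usage sketch (userspace, not part of this file): move data from
 * an open file to a socket through a pipe, one splice() per hop. The
 * fds 'file_fd' and 'sock_fd' are hypothetical and error handling is
 * abbreviated.
 *
 *	int pfd[2];
 *	loff_t off = 0;
 *	ssize_t n;
 *
 *	pipe(pfd);
 *	while ((n = splice(file_fd, &off, pfd[1], NULL,
 *			   65536, SPLICE_F_MOVE)) > 0)
 *		splice(pfd[0], NULL, sock_fd, NULL, n, SPLICE_F_MORE);
 */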

#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/uio.h>

/*
 * Passed to splice_to_pipe
 */
struct splice_pipe_desc {
	struct page **pages;		/* page map */
	struct partial_page *partial;	/* pages[] may not be contig */
	int nr_pages;			/* number of pages in map */
	unsigned int flags;		/* splice flags */
	struct pipe_buf_operations *ops;/* ops associated with output pipe */

/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping = page_mapping(page);

	WARN_ON(!PageUptodate(page));
	/*
	 * At least for ext2 with nobh option, we need to wait on writeback
	 * completing on this page, since we'll remove it from the pagecache.
	 * Otherwise truncate won't wait on the page, allowing the disk
	 * blocks to be reused by someone else before we actually wrote our
	 * data to them. fs corruption ensues.
	 */
	wait_on_page_writeback(page);
	if (PagePrivate(page))
		try_to_release_page(page, mapping_gfp_mask(mapping));

	if (!remove_mapping(mapping, page)) {

	buf->flags |= PIPE_BUF_FLAG_LRU;

static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
					struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
	buf->flags &= ~PIPE_BUF_FLAG_LRU;

static int page_cache_pipe_buf_pin(struct pipe_inode_info *pipe,
				   struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	if (!PageUptodate(page)) {
		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page.
		 */
		if (!page->mapping) {

		/*
		 * Uh oh, read-error from disk.
		 */
		if (!PageUptodate(page)) {

		/*
		 * Page is ok after all, we are done.
		 */

static struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.pin = page_cache_pipe_buf_pin,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
	.get = generic_pipe_buf_get,

static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
		return 1;

	buf->flags |= PIPE_BUF_FLAG_LRU;
	return generic_pipe_buf_steal(pipe, buf);

static struct pipe_buf_operations user_page_pipe_buf_ops = {
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.pin = generic_pipe_buf_pin,
	.release = page_cache_pipe_buf_release,
	.steal = user_page_pipe_buf_steal,
	.get = generic_pipe_buf_get,

/*
 * Pipe output worker. This sets up our pipe format with the page cache
 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
 */
static ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
			      struct splice_pipe_desc *spd)
{
	int ret, do_wakeup, page_nr;

	mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);

		if (pipe->nrbufs < PIPE_BUFFERS) {
			int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
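			/*
			 * PIPE_BUFFERS is a power of two, so the mask above
			 * is a cheap ring-buffer wrap: e.g. with
			 * PIPE_BUFFERS == 16, curbuf == 14 and nrbufs == 3,
			 * the next free slot is (14 + 3) & 15 == 1.
			 */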
			struct pipe_buffer *buf = pipe->bufs + newbuf;

			buf->page = spd->pages[page_nr];
			buf->offset = spd->partial[page_nr].offset;
			buf->len = spd->partial[page_nr].len;

			if (spd->flags & SPLICE_F_GIFT)
				buf->flags |= PIPE_BUF_FLAG_GIFT;

			if (!--spd->nr_pages)
				break;
			if (pipe->nrbufs < PIPE_BUFFERS)
				continue;
		if (spd->flags & SPLICE_F_NONBLOCK) {

		if (signal_pending(current)) {

			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;

	mutex_unlock(&pipe->inode->i_mutex);

	if (waitqueue_active(&pipe->wait))
		wake_up_interruptible(&pipe->wait);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);

	while (page_nr < spd->nr_pages)
		page_cache_release(spd->pages[page_nr++]);

__generic_file_splice_read(struct file *in, loff_t *ppos,
			   struct pipe_inode_info *pipe, size_t len,
			   unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int loff, nr_pages;
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	struct splice_pipe_desc spd = {
		.ops = &page_cache_pipe_buf_ops,

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	nr_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
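	/*
	 * Worked example with 4k pages: *ppos == 5000 and len == 3000
	 * gives index == 1, loff == 904 and nr_pages == 1, since bytes
	 * 5000..7999 all live in page 1 (file offsets 4096..8191).
	 */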
	if (nr_pages > PIPE_BUFFERS)
		nr_pages = PIPE_BUFFERS;

	/*
	 * Initiate read-ahead on this page range. However, don't call into
	 * read-ahead for a single page at a non-zero offset; we are likely
	 * doing a small-chunk splice and the page is already there.
	 */
	if (!loff || nr_pages > 1)
		page_cache_readahead(mapping, &in->f_ra, in, index, nr_pages);

	/*
	 * Now fill in the holes:
	 */

	/*
	 * Lookup the (hopefully) full range of pages we need.
	 */
	spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, pages);

	/*
	 * If find_get_pages_contig() returned fewer pages than we needed,
	 * allocate the rest.
	 */
	index += spd.nr_pages;
	while (spd.nr_pages < nr_pages) {
		/*
		 * Page could be there, find_get_pages_contig() breaks on
		 * the first hole.
		 */
		page = find_get_page(mapping, index);
			/*
			 * Make sure the read-ahead engine is notified
			 * about this failure.
			 */
			handle_ra_miss(mapping, &in->f_ra, index);

			/*
			 * Page didn't exist, allocate one.
			 */
			page = page_cache_alloc_cold(mapping);

			error = add_to_page_cache_lru(page, mapping, index,
						      mapping_gfp_mask(mapping));
			if (unlikely(error)) {
				page_cache_release(page);
				if (error == -EEXIST)
					continue;

			/*
			 * add_to_page_cache() locks the page, unlock it
			 * to avoid convoluting the logic below even more.
			 */
			unlock_page(page);

		pages[spd.nr_pages++] = page;

	/*
	 * Now loop over the map and see if we need to start IO on any
	 * pages, fill in the partial map, etc.
	 */
	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;

	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		/*
		 * this_len is the max we'll use from this page
		 */
		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = pages[page_nr];
		/*
		 * If the page isn't uptodate, we may need to start IO on it
		 */
		if (!PageUptodate(page)) {
			/*
			 * If in nonblock mode then don't block on waiting
			 * for an in-flight IO page
			 */
			if (flags & SPLICE_F_NONBLOCK)
				break;
			/*
			 * Page was truncated, stop here. If this isn't the
			 * first page, we'll just complete what we already
			 * added.
			 */
			if (!page->mapping) {

			/*
			 * Page was already under IO and is now done, great.
			 */
			if (PageUptodate(page)) {

			/*
			 * Need to read in the page.
			 */
			error = mapping->a_ops->readpage(in, page);
			if (unlikely(error)) {
				/*
				 * We really should re-lookup the page here,
				 * but it complicates things a lot. Instead
				 * let's just do what we already stored, and
				 * we'll get it the next time we are called.
				 */
				if (error == AOP_TRUNCATED_PAGE)
					error = 0;
		/*
		 * i_size must be checked after ->readpage().
		 */
		isize = i_size_read(mapping->host);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		/*
		 * If this is the last page, see if we need to shrink
		 * the length and stop.
		 */
		if (end_index == index) {
			loff = PAGE_CACHE_SIZE - (isize & ~PAGE_CACHE_MASK);
			if (total_len + loff > isize)
				break;
			/*
			 * Force quit after adding this page.
			 */
			this_len = min(this_len, loff);

		partial[page_nr].offset = loff;
		partial[page_nr].len = this_len;
		total_len += this_len;

	/*
	 * Release any pages at the end, if we quit early. 'page_nr' is how
	 * far we got, 'nr_pages' is how many pages are in the map.
	 */
	while (page_nr < nr_pages)
		page_cache_release(pages[page_nr++]);

	return splice_to_pipe(pipe, &spd);

/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in:		file to splice from
 * @ppos:	position in @in to read from
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will read pages from given file and fill them into a pipe.
 */
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
		ret = __generic_file_splice_read(in, ppos, pipe, len, flags);

			if (flags & SPLICE_F_NONBLOCK) {

EXPORT_SYMBOL(generic_file_splice_read);

/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage(). Return the number of bytes sent.
 */
static int pipe_to_sendpage(struct pipe_inode_info *pipe,
			    struct pipe_buffer *buf, struct splice_desc *sd)
{
	struct file *file = sd->file;
	loff_t pos = sd->pos;

	ret = buf->ops->pin(pipe, buf);
		more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;

		ret = file->f_op->sendpage(file, buf->page, buf->offset,
					   sd->len, &pos, more);

/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *	- Destination page already exists in the address space and there
 *	  are users of it. For that case we have no other option than
 *	  copying the data. Tough luck.
 *	- Destination page already exists in the address space, but there
 *	  are no users of it. Make sure it's uptodate, then drop it. Fall
 *	  through to last case.
 *	- Destination page does not exist, we can add the pipe page to
 *	  the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	struct file *file = sd->file;
	struct address_space *mapping = file->f_mapping;
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	unsigned int offset, this_len;

	/*
	 * make sure the data in this buffer is uptodate
	 */
	ret = buf->ops->pin(pipe, buf);

	index = sd->pos >> PAGE_CACHE_SHIFT;
	offset = sd->pos & ~PAGE_CACHE_MASK;

	this_len = sd->len;
	if (this_len + offset > PAGE_CACHE_SIZE)
		this_len = PAGE_CACHE_SIZE - offset;
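	/*
	 * E.g. with 4k pages, offset == 3000 and a requested this_len of
	 * 4096: only 1096 bytes fit in this page, so this_len is clamped
	 * to that and the rest is left for the next call.
	 */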

	/*
	 * Reuse buf page, if SPLICE_F_MOVE is set and we are moving a full
	 * page worth of data.
	 */
	if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) {
		/*
		 * If steal succeeds, buf->page is now pruned from the
		 * pagecache and we can reuse it. The page will also be
		 * locked on successful return.
		 */
		if (buf->ops->steal(pipe, buf))
			goto find_page;

		page = buf->page;
		if (add_to_page_cache(page, mapping, index, gfp_mask)) {

		page_cache_get(page);

		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
			lru_cache_add(page);

		page = find_lock_page(mapping, index);
			page = page_cache_alloc_cold(mapping);

			/*
			 * This will also lock the page
			 */
			ret = add_to_page_cache_lru(page, mapping, index,
						    gfp_mask);

	/*
	 * We get here with the page locked. If the page is also
	 * uptodate, we don't need to do more. If it isn't, we
	 * may need to bring it in if we are not going to overwrite
	 * the entire page.
	 */
	if (!PageUptodate(page)) {
		if (this_len < PAGE_CACHE_SIZE) {
			ret = mapping->a_ops->readpage(file, page);

			if (!PageUptodate(page)) {
				/*
				 * Page got invalidated, repeat.
				 */
				if (!page->mapping) {
					unlock_page(page);
					page_cache_release(page);

			SetPageUptodate(page);

	ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);
	if (unlikely(ret)) {
		loff_t isize = i_size_read(mapping->host);

		if (ret != AOP_TRUNCATED_PAGE)
			unlock_page(page);
		page_cache_release(page);
		if (ret == AOP_TRUNCATED_PAGE)
			goto find_page;

		/*
		 * prepare_write() may have instantiated a few blocks
		 * outside i_size. Trim these off again.
		 */
		if (sd->pos + this_len > isize)
			vmtruncate(mapping->host, isize);

	if (buf->page != page) {
		/*
		 * Careful, ->map() uses KM_USER0!
		 */
		char *src = buf->ops->map(pipe, buf, 1);
		char *dst = kmap_atomic(page, KM_USER1);

		memcpy(dst + offset, src + buf->offset, this_len);
		flush_dcache_page(page);
		kunmap_atomic(dst, KM_USER1);
		buf->ops->unmap(pipe, buf, src);

	ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
		/*
		 * Return the number of bytes written and mark page as
		 * accessed, we are now done!
		 */
		ret = this_len;
		mark_page_accessed(page);
		balance_dirty_pages_ratelimited(mapping);
	} else if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);

	page_cache_release(page);

/*
 * Pipe input worker. Most of this logic works like a regular pipe, the
 * key here is the 'actor' worker passed in that actually moves the data
 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 */
ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			 loff_t *ppos, size_t len, unsigned int flags,
			 splice_actor *actor)
{
	int ret, do_wakeup, err;
	struct splice_desc sd;

	mutex_lock(&pipe->inode->i_mutex);

			struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
			struct pipe_buf_operations *ops = buf->ops;

			if (sd.len > sd.total_len)
				sd.len = sd.total_len;

			err = actor(pipe, buf, &sd);
			if (err) {
				if (!ret && err != -ENODATA)
					ret = err;

				ops->release(pipe, buf);
				pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);

		if (!pipe->waiting_writers) {
			if (flags & SPLICE_F_NONBLOCK) {
			if (signal_pending(current)) {

			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);

	mutex_unlock(&pipe->inode->i_mutex);

	if (waitqueue_active(&pipe->wait))
		wake_up_interruptible(&pipe->wait);
	kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);

/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe:	pipe info
 * @out:	file to write to
 * @ppos:	position in @out to write to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file.
 */
ssize_t
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
			  loff_t *ppos, size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	ssize_t ret;

	ret = splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
	if (ret > 0) {
		struct inode *inode = mapping->host;

		/*
		 * If file or inode is SYNC and we actually wrote some data,
		 * sync it.
		 */
		if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
			int err;

			mutex_lock(&inode->i_mutex);
			err = generic_osync_inode(inode, mapping,
						  OSYNC_METADATA|OSYNC_DATA);
			mutex_unlock(&inode->i_mutex);

EXPORT_SYMBOL(generic_file_splice_write);

/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe:	pipe to splice from
 * @out:	socket to write to
 * @ppos:	position in @out
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved.
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
				loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);

EXPORT_SYMBOL(generic_splice_sendpage);
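
/*
 * Userspace sketch of what ends up in generic_splice_sendpage(): splicing
 * the read side of a pipe straight to a connected TCP socket, e.g. to
 * forward the output of a child process without copying it through
 * userspace. 'sock_fd' is hypothetical; short writes and errors are
 * ignored for brevity.
 *
 *	ssize_t n;
 *
 *	while ((n = splice(pfd[0], NULL, sock_fd, NULL,
 *			   65536, SPLICE_F_MORE)) > 0)
 *		;
 */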

/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
			   loff_t *ppos, size_t len, unsigned int flags)
{
	int ret;

	if (unlikely(!out->f_op || !out->f_op->splice_write))
		return -EINVAL;

	if (unlikely(!(out->f_mode & FMODE_WRITE)))
		return -EBADF;

	ret = rw_verify_area(WRITE, out, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	return out->f_op->splice_write(pipe, out, ppos, len, flags);

/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, loff_t *ppos,
			 struct pipe_inode_info *pipe, size_t len,
			 unsigned int flags)
{
	loff_t isize, left;
	int ret;

	if (unlikely(!in->f_op || !in->f_op->splice_read))
		return -EINVAL;

	if (unlikely(!(in->f_mode & FMODE_READ)))
		return -EBADF;

	ret = rw_verify_area(READ, in, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	isize = i_size_read(in->f_mapping->host);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	return in->f_op->splice_read(in, ppos, pipe, len, flags);

long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	umode_t i_mode;

	/*
	 * We require the input to be a regular file, as we don't want to
	 * randomly drop data for e.g. socket -> socket splicing. Use the
	 * piped splicing for that!
	 */
	i_mode = in->f_dentry->d_inode->i_mode;
	if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
		return -EINVAL;

	/*
	 * Neither in nor out is a pipe, set up an internal pipe attached to
	 * 'out' and transfer the wanted data from 'in' to 'out' through that.
	 */
	pipe = current->splice_pipe;
	if (unlikely(!pipe)) {
		pipe = alloc_pipe_info(NULL);

		/*
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the splice_to_pipe(). So set
		 * PIPE_READERS appropriately.
		 */
		pipe->readers = 1;

		current->splice_pipe = pipe;

		size_t read_len, max_read_len;

		/*
		 * Do at most PIPE_BUFFERS pages worth of transfer:
		 */
		max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));

		ret = do_splice_to(in, ppos, pipe, max_read_len, flags);
		if (unlikely(ret < 0))

		/*
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck data in the internal pipe:
		 */
		ret = do_splice_from(pipe, out, &out_off, read_len,
				     flags & ~SPLICE_F_NONBLOCK);
		if (unlikely(ret < 0))

		/*
		 * In nonblocking mode, if we got back a short read then
		 * that was due to either an IO error or due to the
		 * pagecache entry not being there. In the IO error case
		 * the _next_ splice attempt will produce a clean IO error
		 * return value (not a short read), so in both cases it's
		 * correct to break out of the loop here:
		 */
		if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
			break;

	pipe->nrbufs = pipe->curbuf = 0;

	/*
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	 */
	for (i = 0; i < PIPE_BUFFERS; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;

		if (buf->ops) {
			buf->ops->release(pipe, buf);

	pipe->nrbufs = pipe->curbuf = 0;

	/*
	 * If we transferred some data, return the number of bytes:
	 */

EXPORT_SYMBOL(do_splice_direct);

/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, loff_t __user *off_in,
		      struct file *out, loff_t __user *off_out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	loff_t offset, *off;

	pipe = in->f_dentry->d_inode->i_pipe;
		if (out->f_op->llseek == no_llseek)
			return -EINVAL;
		if (copy_from_user(&offset, off_out, sizeof(loff_t)))
			return -EFAULT;

		ret = do_splice_from(pipe, out, off, len, flags);

		if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
			ret = -EFAULT;

	pipe = out->f_dentry->d_inode->i_pipe;
		if (in->f_op->llseek == no_llseek)
			return -EINVAL;
		if (copy_from_user(&offset, off_in, sizeof(loff_t)))
			return -EFAULT;

		ret = do_splice_to(in, off, pipe, len, flags);

		if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
			ret = -EFAULT;

/*
 * Map an iov into an array of pages and offset/length tuples. With the
 * partial_page structure, we can map several non-contiguous ranges into
 * our one pages[] map instead of splitting that operation into pieces.
 * Could easily be exported as a generic helper for other users, in which
 * case one would probably want to add a 'max_nr_pages' parameter as well.
 */
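/*
 * Worked example (4k pages): an iovec with iov_base == 0x10234 and
 * iov_len == 5000 has off == 0x234 (564), so npages == 2. The first
 * page contributes partial[] = { .offset = 564, .len = 3532 }, the
 * second { .offset = 0, .len = 1468 }.
 */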
static int get_iovec_page_array(const struct iovec __user *iov,
				unsigned int nr_vecs, struct page **pages,
				struct partial_page *partial, int aligned)
{
	int buffers = 0, error = 0;

	/*
	 * It's ok to take the mmap_sem for reading, even
	 * across a "get_user()".
	 */
	down_read(&current->mm->mmap_sem);

		unsigned long off, npages;

		/*
		 * Get user address base and length for this iovec.
		 */
		error = get_user(base, &iov->iov_base);
		if (unlikely(error))
			break;
		error = get_user(len, &iov->iov_len);
		if (unlikely(error))
			break;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		if (unlikely(!base))
			break;

		/*
		 * Get this base offset and number of pages, then map
		 * in the user pages.
		 */
		off = (unsigned long) base & ~PAGE_MASK;

		/*
		 * If asked for alignment, the offset must be zero and the
		 * length a multiple of the PAGE_SIZE.
		 */
		if (aligned && (off || len & ~PAGE_MASK))
			break;

		npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (npages > PIPE_BUFFERS - buffers)
			npages = PIPE_BUFFERS - buffers;

		error = get_user_pages(current, current->mm,
				       (unsigned long) base, npages, 0, 0,
				       &pages[buffers], NULL);

		if (unlikely(error <= 0))
			break;

		/*
		 * Fill this contiguous range into the partial page map.
		 */
		for (i = 0; i < error; i++) {
			const int plen = min_t(size_t, len, PAGE_SIZE - off);

			partial[buffers].offset = off;
			partial[buffers].len = plen;

		/*
		 * We didn't complete this iov, stop here since it probably
		 * means we have to move some of this into a pipe to
		 * be able to continue.
		 */

		/*
		 * Don't continue if we mapped fewer pages than we asked for,
		 * or if we mapped the max number of pages that we have
		 * room for.
		 */
		if (error < npages || buffers == PIPE_BUFFERS)
			break;

	up_read(&current->mm->mmap_sem);

/*
 * vmsplice splices a user address range into a pipe. It can be thought of
 * as splice-from-memory, where the regular splice is splice-from-file (or
 * to file). In both cases the output is a pipe, naturally.
 *
 * Note that vmsplice only supports splicing _from_ user memory to a pipe,
 * not the other way around. Splicing from user memory is a simple operation
 * that can be supported without any funky alignment restrictions or nasty
 * vm tricks. We simply map in the user pages and fill them into a pipe.
 * The reverse isn't quite as easy, though. There are two possible solutions
 * for that:
 *
 *	- memcpy() the data internally, at which point we might as well just
 *	  do a regular read() on the buffer anyway.
 *	- Lots of nasty vm tricks, that are neither fast nor flexible (it
 *	  has restrictions on both ends of the pipe).
 *
 * Alas, it isn't here.
 */
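
/*
 * Userspace sketch: gifting a page-aligned buffer into a pipe with
 * vmsplice(). With SPLICE_F_GIFT set, a later splice with SPLICE_F_MOVE
 * may steal the pages (see user_page_pipe_buf_steal() above) instead of
 * copying them. 'buf' is assumed to be page-aligned and page-sized.
 *
 *	struct iovec iov = {
 *		.iov_base = buf,
 *		.iov_len = 4096,
 *	};
 *
 *	vmsplice(pfd[1], &iov, 1, SPLICE_F_GIFT);
 */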
static long do_vmsplice(struct file *file, const struct iovec __user *iov,
			unsigned long nr_segs, unsigned int flags)
{
	struct pipe_inode_info *pipe = file->f_dentry->d_inode->i_pipe;
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct splice_pipe_desc spd = {
		.ops = &user_page_pipe_buf_ops,

	if (unlikely(!pipe))
		return -EBADF;
	if (unlikely(nr_segs > UIO_MAXIOV))
		return -EINVAL;
	else if (unlikely(!nr_segs))
		return 0;

	spd.nr_pages = get_iovec_page_array(iov, nr_segs, pages, partial,
					    flags & SPLICE_F_GIFT);
	if (spd.nr_pages <= 0)
		return spd.nr_pages;

	return splice_to_pipe(pipe, &spd);

asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	file = fget_light(fd, &fput);
	if (file) {
		if (file->f_mode & FMODE_WRITE)
			error = do_vmsplice(file, iov, nr_segs, flags);

		fput_light(file, fput);

asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
			   int fd_out, loff_t __user *off_out,
			   size_t len, unsigned int flags)
{
	struct file *in, *out;
	int fput_in, fput_out;

	in = fget_light(fd_in, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			out = fget_light(fd_out, &fput_out);
			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_splice(in, off_in,
							  out, off_out,
							  len, flags);
				fput_light(out, fput_out);

		fput_light(in, fput_in);

/*
 * Link contents of ipipe to opipe.
 */
static int link_pipe(struct pipe_inode_info *ipipe,
		     struct pipe_inode_info *opipe,
		     size_t len, unsigned int flags)
{
	struct pipe_buffer *ibuf, *obuf;
	int ret, do_wakeup, i, ipipe_first;

	ret = do_wakeup = ipipe_first = 0;
	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by inode address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
	if (ipipe->inode < opipe->inode) {
		ipipe_first = 1;
		mutex_lock(&ipipe->inode->i_mutex);
		mutex_lock(&opipe->inode->i_mutex);
	} else {
		mutex_lock(&opipe->inode->i_mutex);
		mutex_lock(&ipipe->inode->i_mutex);
	}

		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);

		if (ipipe->nrbufs - i) {
			ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1));

			/*
			 * If we have room, fill this buffer
			 */
			if (opipe->nrbufs < PIPE_BUFFERS) {
				int nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1);

				/*
				 * Get a reference to this pipe buffer,
				 * so we can copy the contents over.
				 */
				ibuf->ops->get(ipipe, ibuf);

				obuf = opipe->bufs + nbuf;
				*obuf = *ibuf;

				/*
				 * Don't inherit the gift flag, we need to
				 * prevent multiple steals of this page.
				 */
				obuf->flags &= ~PIPE_BUF_FLAG_GIFT;

				if (obuf->len > len)
					obuf->len = len;

			if (opipe->nrbufs < PIPE_BUFFERS)
				continue;

			/*
			 * We have input available, but no output room.
			 * If we already copied data, return that. If we
			 * need to drop the opipe lock, it must be ordered
			 * last to avoid deadlocks.
			 */
			if ((flags & SPLICE_F_NONBLOCK) || !ipipe_first) {

			if (signal_pending(current)) {

			if (waitqueue_active(&opipe->wait))
				wake_up_interruptible(&opipe->wait);
			kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);

			opipe->waiting_writers++;
			pipe_wait(opipe);
			opipe->waiting_writers--;
		/*
		 * No input buffers; do the usual checks for available
		 * writers, and block and wait if necessary.
		 */
		if (!ipipe->writers)
			break;
		if (!ipipe->waiting_writers) {
			/*
			 * pipe_wait() drops the ipipe mutex. To avoid deadlocks
			 * with another process, we can only safely do that if
			 * the ipipe lock is ordered last.
			 */
			if ((flags & SPLICE_F_NONBLOCK) || ipipe_first) {

			if (signal_pending(current)) {

			if (waitqueue_active(&ipipe->wait))
				wake_up_interruptible_sync(&ipipe->wait);
			kill_fasync(&ipipe->fasync_writers, SIGIO, POLL_OUT);
	mutex_unlock(&ipipe->inode->i_mutex);
	mutex_unlock(&opipe->inode->i_mutex);

	if (waitqueue_active(&opipe->wait))
		wake_up_interruptible(&opipe->wait);
	kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);

/*
 * This is a tee(1) implementation that works on pipes. It doesn't copy
 * any data, it simply references the 'in' pages on the 'out' pipe.
 * The 'flags' used are the SPLICE_F_* variants, currently the only
 * applicable one is SPLICE_F_NONBLOCK.
 */
static long do_tee(struct file *in, struct file *out, size_t len,
		   unsigned int flags)
{
	struct pipe_inode_info *ipipe = in->f_dentry->d_inode->i_pipe;
	struct pipe_inode_info *opipe = out->f_dentry->d_inode->i_pipe;

	/*
	 * Link the contents of ipipe to opipe.
	 */
	return link_pipe(ipipe, opipe, len, flags);
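
/*
 * Userspace sketch: duplicating pipe contents with tee() before consuming
 * them, assuming stdin is a pipe and 'out_fd' (hypothetical) is the write
 * side of a second pipe.
 *
 *	ssize_t n;
 *
 *	do {
 *		n = tee(STDIN_FILENO, out_fd, 65536, SPLICE_F_NONBLOCK);
 *	} while (n == -1 && errno == EAGAIN);
 */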

asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags)
{
	in = fget_light(fdin, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			int fput_out;
			struct file *out = fget_light(fdout, &fput_out);

			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_tee(in, out, len, flags);
				fput_light(out, fput_out);

		fput_light(in, fput_in);