2 * "splice": joining two ropes together by interweaving their strands.
4 * This is the "extended pipe" functionality, where a pipe is used as
5 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
6 * buffer that you can use to transfer data from one end to the other.
8 * The traditional unix read/write is extended with a "splice()" operation
9 * that transfers data buffers to or from a pipe buffer.
11 * Named by Larry McVoy, original implementation from Linus, extended by
12 * Jens to support splicing to files, network, direct splicing, etc and
13 * fixing lots of bugs.
15 * Copyright (C) 2005-2006 Jens Axboe <axboe@suse.de>
16 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
17 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
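/*
 * A minimal user-space sketch (not part of this file) of the model the
 * header comment describes: the pipe acts as a small in-kernel buffer,
 * and splice() moves data into and out of it without a round trip
 * through user memory. "in.txt" and the chunk size are illustrative,
 * and error handling is abbreviated.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("in.txt", O_RDONLY);
	int p[2];
	ssize_t n;

	if (fd < 0 || pipe(p) < 0)
		return 1;

	/* file -> pipe: fill the in-kernel buffer */
	while ((n = splice(fd, NULL, p[1], NULL, 65536, 0)) > 0) {
		/* pipe -> stdout: drain it again, still without copying */
		if (splice(p[0], NULL, STDOUT_FILENO, NULL, n, 0) < 0)
			return 1;
	}
	return n < 0;
}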
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/uio.h>
/*
 * Passed to splice_to_pipe
 */
struct splice_pipe_desc {
	struct page **pages;		/* page map */
	struct partial_page *partial;	/* pages[] may not be contig */
	int nr_pages;			/* number of pages in map */
	unsigned int flags;		/* splice flags */
	struct pipe_buf_operations *ops;/* ops associated with output pipe */
};
/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function; it's already been simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping = page_mapping(page);

	lock_page(page);

	WARN_ON(!PageUptodate(page));

	/*
	 * At least for ext2 with nobh option, we need to wait on writeback
	 * completing on this page, since we'll remove it from the pagecache.
	 * Otherwise truncate won't wait on the page, allowing the disk
	 * blocks to be reused by someone else before we actually wrote our
	 * data to them. fs corruption ensues.
	 */
	wait_on_page_writeback(page);

	if (PagePrivate(page))
		try_to_release_page(page, mapping_gfp_mask(mapping));

	if (!remove_mapping(mapping, page)) {
		unlock_page(page);
		return 1;
	}

	buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
	return 0;
}
static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
					struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
	buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
}
static void *page_cache_pipe_buf_map(struct file *file,
				     struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (!PageUptodate(page)) {
		lock_page(page);
		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page.
		 */
		if (!page->mapping) {
			unlock_page(page);
			return ERR_PTR(-ENODATA);
		}
		/*
		 * Uh oh, read-error from disk.
		 */
		if (!PageUptodate(page)) {
			unlock_page(page);
			return ERR_PTR(-EIO);
		}
		/*
		 * Page is ok after all, fall through to mapping.
		 */
		unlock_page(page);
	}

	return kmap(page);
}
static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
				      struct pipe_buffer *buf)
{
	kunmap(buf->page);
}
static void *user_page_pipe_buf_map(struct file *file,
				    struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	return kmap(buf->page);
}

static void user_page_pipe_buf_unmap(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
{
	kunmap(buf->page);
}
static void page_cache_pipe_buf_get(struct pipe_inode_info *info,
				    struct pipe_buffer *buf)
{
	page_cache_get(buf->page);
}
static struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,
	.map = page_cache_pipe_buf_map,
	.unmap = page_cache_pipe_buf_unmap,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
	.get = page_cache_pipe_buf_get,
};
static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	return 1;
}
static struct pipe_buf_operations user_page_pipe_buf_ops = {
	.can_merge = 0,
	.map = user_page_pipe_buf_map,
	.unmap = user_page_pipe_buf_unmap,
	.release = page_cache_pipe_buf_release,
	.steal = user_page_pipe_buf_steal,
	.get = page_cache_pipe_buf_get,
};
/*
 * Pipe output worker. This sets up our pipe format with the page cache
 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
 */
static ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
			      struct splice_pipe_desc *spd)
{
	int ret, do_wakeup, page_nr;

	ret = 0;
	do_wakeup = 0;
	page_nr = 0;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (pipe->nrbufs < PIPE_BUFFERS) {
			int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;

			buf->page = spd->pages[page_nr];
			buf->offset = spd->partial[page_nr].offset;
			buf->len = spd->partial[page_nr].len;
			buf->ops = spd->ops;
			pipe->nrbufs++;
			page_nr++;
			ret += buf->len;

			if (pipe->inode)
				do_wakeup = 1;

			if (!--spd->nr_pages)
				break;
			if (pipe->nrbufs < PIPE_BUFFERS)
				continue;

			break;
		}

		if (spd->flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (do_wakeup) {
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

	while (page_nr < spd->nr_pages)
		page_cache_release(spd->pages[page_nr++]);

	return ret;
}
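/*
 * A standalone sketch of the ring arithmetic splice_to_pipe() relies on
 * above: PIPE_BUFFERS is a power of two, so "& (PIPE_BUFFERS - 1)" is a
 * cheap modulo that wraps the circular buffer index. RING_SLOTS and the
 * sample values are illustrative only.
 */
#include <assert.h>

#define RING_SLOTS 16	/* stands in for PIPE_BUFFERS */

int main(void)
{
	int curbuf = 14, nrbufs = 3;

	/* next free slot wraps past the end of the array: (14 + 3) % 16 */
	int newbuf = (curbuf + nrbufs) & (RING_SLOTS - 1);
	assert(newbuf == 1);

	/* a reader consuming one buffer advances curbuf the same way */
	curbuf = (curbuf + 1) & (RING_SLOTS - 1);
	assert(curbuf == 15);
	return 0;
}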
static ssize_t
__generic_file_splice_read(struct file *in, loff_t *ppos,
			   struct pipe_inode_info *pipe, size_t len,
			   unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int loff, nr_pages;
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize;
	size_t total_len;
	int error;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
	};

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	nr_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	if (nr_pages > PIPE_BUFFERS)
		nr_pages = PIPE_BUFFERS;
	/*
	 * Initiate read-ahead on this page range. However, don't call into
	 * read-ahead if this is a non-zero offset (we are likely doing small
	 * chunk splice and the page is already there) for a single page.
	 */
	if (!loff || nr_pages > 1)
		do_page_cache_readahead(mapping, in, index, nr_pages);
	/*
	 * Now fill in the holes:
	 */
	error = 0;
	total_len = 0;
	for (spd.nr_pages = 0; spd.nr_pages < nr_pages; spd.nr_pages++, index++) {
		unsigned int this_len;

		/*
		 * this_len is the max we'll use from this page
		 */
		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);

		/*
		 * lookup the page for this index
		 */
		page = find_get_page(mapping, index);
		if (!page) {
			/*
			 * page didn't exist, allocate one
			 */
			page = page_cache_alloc_cold(mapping);
			if (!page)
				break;

			error = add_to_page_cache_lru(page, mapping, index,
						      mapping_gfp_mask(mapping));
			if (unlikely(error)) {
				page_cache_release(page);
				break;
			}
		}
		/*
		 * If the page isn't uptodate, we may need to start io on it
		 */
		if (!PageUptodate(page)) {
			/*
			 * If in nonblock mode then don't block on waiting
			 * for an in-flight io page
			 */
			if (flags & SPLICE_F_NONBLOCK)
				break;

			lock_page(page);

			/*
			 * page was truncated, stop here. if this isn't the
			 * first page, we'll just complete what we already
			 * have.
			 */
			if (!page->mapping) {
				unlock_page(page);
				page_cache_release(page);
				break;
			}
			/*
			 * page was already under io and is now done, great
			 */
			if (PageUptodate(page)) {
				unlock_page(page);
				goto fill_it;
			}
			/*
			 * need to read in the page
			 */
			error = mapping->a_ops->readpage(in, page);

			if (unlikely(error)) {
				page_cache_release(page);
				if (error == AOP_TRUNCATED_PAGE)
					error = 0;
				break;
			}
		}
		/*
		 * i_size must be checked after ->readpage().
		 */
		isize = i_size_read(mapping->host);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index)) {
			page_cache_release(page);
			break;
		}

		/*
		 * if this is the last page, see if we need to shrink
		 * the length and stop
		 */
		if (end_index == index) {
			loff = PAGE_CACHE_SIZE - (isize & ~PAGE_CACHE_MASK);
			if (total_len + loff > isize) {
				page_cache_release(page);
				break;
			}
			/*
			 * force quit after adding this page
			 */
			nr_pages = spd.nr_pages;
			this_len = min(this_len, loff);
		}
fill_it:
		pages[spd.nr_pages] = page;
		partial[spd.nr_pages].offset = loff;
		partial[spd.nr_pages].len = this_len;
		len -= this_len;
		total_len += this_len;
		loff = 0;
	}

	if (spd.nr_pages)
		return splice_to_pipe(pipe, &spd);

	return error;
}
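/*
 * A standalone sketch of the index/offset math at the top of
 * __generic_file_splice_read(), assuming 4096-byte pages. The sample
 * position and length are illustrative.
 */
#include <assert.h>

#define PAGE_SZ	4096UL

int main(void)
{
	unsigned long pos = 10000, len = 9000;

	unsigned long index = pos / PAGE_SZ;		/* page 2 */
	unsigned long loff = pos & (PAGE_SZ - 1);	/* 1808 bytes in */
	unsigned long nr = (len + loff + PAGE_SZ - 1) / PAGE_SZ;

	assert(index == 2 && loff == 1808);
	/* 9000 bytes that start 1808 bytes into a page span 3 pages */
	assert(nr == 3);
	return 0;
}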
/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in:		file to splice from
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will read pages from given file and fill them into a pipe.
 */
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
	ssize_t spliced;
	int ret;

	ret = 0;
	spliced = 0;

	while (len) {
		ret = __generic_file_splice_read(in, ppos, pipe, len, flags);

		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}

		*ppos += ret;
		len -= ret;
		spliced += ret;
	}

	if (spliced)
		return spliced;

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_read);
/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage(). Return the number of bytes sent.
 */
static int pipe_to_sendpage(struct pipe_inode_info *info,
			    struct pipe_buffer *buf, struct splice_desc *sd)
{
	struct file *file = sd->file;
	loff_t pos = sd->pos;
	ssize_t ret;
	void *ptr;
	int more;

	/*
	 * Sub-optimal, but we are limited by the pipe ->map. We don't
	 * need a kmap'ed buffer here, we just want to make sure we
	 * have the page pinned if the pipe page originates from the
	 * page cache.
	 */
	ptr = buf->ops->map(file, info, buf);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;

	ret = file->f_op->sendpage(file, buf->page, buf->offset, sd->len,
				   &pos, more);

	buf->ops->unmap(info, buf);
	return ret;
}
/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *	- Destination page already exists in the address space and there
 *	  are users of it. For that case we have no other option than
 *	  copying the data. Tough luck.
 *	- Destination page already exists in the address space, but there
 *	  are no users of it. Make sure it's uptodate, then drop it. Fall
 *	  through to last case.
 *	- Destination page does not exist, we can add the pipe page to
 *	  the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	struct file *file = sd->file;
	struct address_space *mapping = file->f_mapping;
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	unsigned int offset, this_len;
	struct page *page;
	pgoff_t index;
	char *src;
	int ret;

	/*
	 * make sure the data in this buffer is uptodate
	 */
	src = buf->ops->map(file, info, buf);
	if (IS_ERR(src))
		return PTR_ERR(src);

	index = sd->pos >> PAGE_CACHE_SHIFT;
	offset = sd->pos & ~PAGE_CACHE_MASK;
	this_len = sd->len;
	if (this_len + offset > PAGE_CACHE_SIZE)
		this_len = PAGE_CACHE_SIZE - offset;
	/*
	 * Reuse buf page, if SPLICE_F_MOVE is set.
	 */
	if (sd->flags & SPLICE_F_MOVE) {
		/*
		 * If steal succeeds, buf->page is now pruned from the vm
		 * side (LRU and page cache) and we can reuse it. The page
		 * will also be locked on successful return.
		 */
		if (buf->ops->steal(info, buf))
			goto find_page;

		page = buf->page;
		if (add_to_page_cache(page, mapping, index, gfp_mask))
			goto find_page;

		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
			lru_cache_add(page);
	} else {
find_page:
		page = find_lock_page(mapping, index);
		if (!page) {
			ret = -ENOMEM;
			page = page_cache_alloc_cold(mapping);
			if (unlikely(!page))
				goto out_nomem;

			/*
			 * This will also lock the page
			 */
			ret = add_to_page_cache_lru(page, mapping, index,
						    gfp_mask);
			if (unlikely(ret))
				goto out;
		}

		/*
		 * We get here with the page locked. If the page is also
		 * uptodate, we don't need to do more. If it isn't, we
		 * may need to bring it in if we are not going to overwrite
		 * the full page.
		 */
		if (!PageUptodate(page)) {
			if (this_len < PAGE_CACHE_SIZE) {
				ret = mapping->a_ops->readpage(file, page);
				if (unlikely(ret))
					goto out;

				lock_page(page);

				if (!PageUptodate(page)) {
					/*
					 * Page got invalidated, repeat.
					 */
					if (!page->mapping) {
						unlock_page(page);
						page_cache_release(page);
						goto find_page;
					}
					ret = -EIO;
					goto out;
				}
			} else
				SetPageUptodate(page);
		}
	}
	ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);
	if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto find_page;
	} else if (ret)
		goto out;
	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
		char *dst = kmap_atomic(page, KM_USER0);

		memcpy(dst + offset, src + buf->offset, this_len);
		flush_dcache_page(page);
		kunmap_atomic(dst, KM_USER0);
	}
	ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
	if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto find_page;
	} else if (ret)
		goto out;

	/*
	 * Return the number of bytes written.
	 */
	ret = this_len;
	mark_page_accessed(page);
	balance_dirty_pages_ratelimited(mapping);
out:
	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN))
		page_cache_release(page);

	unlock_page(page);
out_nomem:
	buf->ops->unmap(info, buf);
	return ret;
}
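/*
 * A user-space sketch (not part of this file) of asking for the move
 * optimisation described in the comment above pipe_to_file():
 * SPLICE_F_MOVE is only a hint, and the kernel silently falls back to
 * copying whenever the page cannot be stolen. The helper name and the
 * fixed chunk size are illustrative.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static int move_chunk(int fd_in, int fd_out, size_t len)
{
	int p[2];

	if (pipe(p) < 0)
		return -1;

	/* request page moves on both hops; either may degrade to a copy */
	if (splice(fd_in, NULL, p[1], NULL, len, SPLICE_F_MOVE) < 0 ||
	    splice(p[0], NULL, fd_out, NULL, len, SPLICE_F_MOVE) < 0) {
		close(p[0]);
		close(p[1]);
		return -1;
	}

	close(p[0]);
	close(p[1]);
	return 0;
}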
/*
 * Pipe input worker. Most of this logic works like a regular pipe; the
 * key here is the 'actor' worker passed in that actually moves the data
 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 */
ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			 loff_t *ppos, size_t len, unsigned int flags,
			 splice_actor *actor)
{
	int ret, do_wakeup, err;
	struct splice_desc sd;

	ret = 0;
	do_wakeup = 0;

	sd.total_len = len;
	sd.flags = flags;
	sd.file = out;
	sd.pos = *ppos;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (pipe->nrbufs) {
			struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
			struct pipe_buf_operations *ops = buf->ops;

			sd.len = buf->len;
			if (sd.len > sd.total_len)
				sd.len = sd.total_len;

			err = actor(pipe, buf, &sd);
			if (err) {
				if (!ret && err != -ENODATA)
					ret = err;

				break;
			}

			ret += err;
			buf->offset += err;
			buf->len -= err;

			sd.len -= err;
			sd.pos += err;
			sd.total_len -= err;

			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
				pipe->nrbufs--;
				if (pipe->inode)
					do_wakeup = 1;
			}

			if (!sd.total_len)
				break;
		}

		if (pipe->nrbufs)
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			if (ret)
				break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
			do_wakeup = 0;
		}

		pipe_wait(pipe);
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (do_wakeup) {
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}

	return ret;
}
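/*
 * A hypothetical actor, included only to illustrate the contract that
 * splice_from_pipe() expects from its 'actor' argument (compare
 * pipe_to_file() and pipe_to_sendpage() above): look at up to sd->len
 * bytes of 'buf' and return how many were consumed, or a negative
 * error. This sketch just accepts and discards the data; nothing in
 * the kernel registers an actor like this.
 */
static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	void *ptr;

	/* pin the page, exactly like pipe_to_sendpage() does */
	ptr = buf->ops->map(sd->file, info, buf);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	buf->ops->unmap(info, buf);

	/* claim the whole chunk so the caller advances past it */
	return sd->len;
}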
/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe:	pipe info
 * @out:	file to write to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file.
 */
ssize_t
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
			  loff_t *ppos, size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	ssize_t ret;

	ret = splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
	if (ret > 0) {
		struct inode *inode = mapping->host;

		*ppos += ret;

		/*
		 * If file or inode is SYNC and we actually wrote some data,
		 * sync it.
		 */
		if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
			int err;

			mutex_lock(&inode->i_mutex);
			err = generic_osync_inode(inode, mapping,
						  OSYNC_METADATA|OSYNC_DATA);
			mutex_unlock(&inode->i_mutex);

			if (err)
				ret = err;
		}
	}

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_write);
/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe:	pipe to splice from
 * @out:	socket to write to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved.
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
				loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
}

EXPORT_SYMBOL(generic_splice_sendpage);
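/*
 * A user-space sketch (not part of this file) of the path that ends in
 * generic_splice_sendpage(): draining a pipe straight into a connected
 * TCP socket. 'sock' is assumed to be a connected socket descriptor;
 * the helper name is illustrative.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static ssize_t pipe_to_socket(int pipe_rd, int sock, size_t len)
{
	ssize_t n = 0, total = 0;

	while ((size_t)total < len) {
		/* SPLICE_F_MORE hints that more data follows this chunk */
		n = splice(pipe_rd, NULL, sock, NULL, len - total,
			   SPLICE_F_MORE);
		if (n <= 0)
			break;
		total += n;
	}
	return total ? total : n;
}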
/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
			   loff_t *ppos, size_t len, unsigned int flags)
{
	int ret;

	if (unlikely(!out->f_op || !out->f_op->splice_write))
		return -EINVAL;

	if (unlikely(!(out->f_mode & FMODE_WRITE)))
		return -EBADF;

	ret = rw_verify_area(WRITE, out, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	return out->f_op->splice_write(pipe, out, ppos, len, flags);
}
/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, loff_t *ppos,
			 struct pipe_inode_info *pipe, size_t len,
			 unsigned int flags)
{
	loff_t isize, left;
	int ret;

	if (unlikely(!in->f_op || !in->f_op->splice_read))
		return -EINVAL;

	if (unlikely(!(in->f_mode & FMODE_READ)))
		return -EBADF;

	ret = rw_verify_area(READ, in, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	isize = i_size_read(in->f_mapping->host);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	return in->f_op->splice_read(in, ppos, pipe, len, flags);
}
long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	long ret, bytes;
	loff_t out_off;
	umode_t i_mode;
	int i;

	/*
	 * We require the input to be a regular file, as we don't want to
	 * randomly drop data for e.g. socket -> socket splicing. Use the
	 * piped splicing for that!
	 */
	i_mode = in->f_dentry->d_inode->i_mode;
	if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
		return -EINVAL;

	/*
	 * neither in nor out is a pipe, setup an internal pipe attached to
	 * 'out' and transfer the wanted data from 'in' to 'out' through that
	 */
	pipe = current->splice_pipe;
	if (unlikely(!pipe)) {
		pipe = alloc_pipe_info(NULL);
		if (!pipe)
			return -ENOMEM;

		/*
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the splice_to_pipe(). So set
		 * PIPE_READERS appropriately.
		 */
		pipe->readers = 1;

		current->splice_pipe = pipe;
	}
	/*
	 * Do the splice.
	 */
	ret = 0;
	bytes = 0;
	out_off = 0;

	while (len) {
		size_t read_len, max_read_len;

		/*
		 * Do at most PIPE_BUFFERS pages worth of transfer:
		 */
		max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));

		ret = do_splice_to(in, ppos, pipe, max_read_len, flags);
		if (unlikely(ret < 0))
			goto out_release;

		read_len = ret;

		/*
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck data in the internal pipe:
		 */
		ret = do_splice_from(pipe, out, &out_off, read_len,
				     flags & ~SPLICE_F_NONBLOCK);
		if (unlikely(ret < 0))
			goto out_release;

		bytes += ret;
		len -= ret;

		/*
		 * In nonblocking mode, if we got back a short read then
		 * that was due to either an IO error or due to the
		 * pagecache entry not being there. In the IO error case
		 * the _next_ splice attempt will produce a clean IO error
		 * return value (not a short read), so in both cases it's
		 * correct to break out of the loop here:
		 */
		if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
			break;
	}

	pipe->nrbufs = pipe->curbuf = 0;

	return bytes;

out_release:
	/*
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	 */
	for (i = 0; i < PIPE_BUFFERS; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;

		if (buf->ops) {
			buf->ops->release(pipe, buf);
			buf->ops = NULL;
		}
	}
	pipe->nrbufs = pipe->curbuf = 0;

	/*
	 * If we transferred some data, return the number of bytes:
	 */
	if (bytes > 0)
		return bytes;

	return ret;
}

EXPORT_SYMBOL(do_splice_direct);
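/*
 * A user-space approximation (not part of this file) of what
 * do_splice_direct() does internally: pump data through a private pipe
 * in bounded chunks, never writing the drain side in nonblocking mode.
 * The 64 KiB chunk stands in for PIPE_BUFFERS pages; the helper name
 * is illustrative.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static int copy_via_pipe(int fd_in, int fd_out)
{
	int p[2];
	ssize_t n;

	if (pipe(p) < 0)
		return -1;

	while ((n = splice(fd_in, NULL, p[1], NULL, 65536, 0)) > 0) {
		/* fully drain the pipe before reading the next chunk */
		while (n > 0) {
			ssize_t out = splice(p[0], NULL, fd_out, NULL, n, 0);
			if (out <= 0) {
				close(p[0]);
				close(p[1]);
				return -1;
			}
			n -= out;
		}
	}

	close(p[0]);
	close(p[1]);
	return n < 0 ? -1 : 0;
}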
/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, loff_t __user *off_in,
		      struct file *out, loff_t __user *off_out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	loff_t offset, *off;
	long ret;

	pipe = in->f_dentry->d_inode->i_pipe;
	if (pipe) {
		if (off_out) {
			if (out->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&offset, off_out, sizeof(loff_t)))
				return -EFAULT;
			off = &offset;
		} else
			off = &out->f_pos;

		ret = do_splice_from(pipe, out, off, len, flags);

		if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	pipe = out->f_dentry->d_inode->i_pipe;
	if (pipe) {
		if (off_in) {
			if (in->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&offset, off_in, sizeof(loff_t)))
				return -EFAULT;
			off = &offset;
		} else
			off = &in->f_pos;

		ret = do_splice_to(in, off, pipe, len, flags);

		if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	return -EINVAL;
}
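/*
 * A user-space sketch (not part of this file) of the off_in/off_out
 * handling above: handing splice() an explicit loff_t reads from that
 * position, leaves the file's own position untouched, and the advanced
 * offset is copied back to the caller. The helper name is illustrative.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/types.h>
#include <unistd.h>

static ssize_t splice_at(int fd_in, loff_t start, int pipe_wr, size_t len)
{
	loff_t off = start;

	/* fd_in's f_pos is neither consulted nor modified */
	ssize_t n = splice(fd_in, &off, pipe_wr, NULL, len, 0);

	/* on success, off has advanced by n, as the copy_to_user() above shows */
	return n;
}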
/*
 * Map an iov into an array of pages and offset/length tuples. With the
 * partial_page structure, we can map several non-contiguous ranges into
 * our one pages[] map instead of splitting that operation into pieces.
 * Could easily be exported as a generic helper for other users, in which
 * case one would probably want to add a 'max_nr_pages' parameter as well.
 */
static int get_iovec_page_array(const struct iovec __user *iov,
				unsigned int nr_vecs, struct page **pages,
				struct partial_page *partial)
{
	int buffers = 0, error = 0;
	/*
	 * It's ok to take the mmap_sem for reading, even
	 * across a "get_user()".
	 */
	down_read(&current->mm->mmap_sem);
	while (nr_vecs) {
		unsigned long off, npages;
		void __user *base;
		size_t len;
		int i;

		/*
		 * Get user address base and length for this iovec.
		 */
		error = get_user(base, &iov->iov_base);
		if (unlikely(error))
			break;
		error = get_user(len, &iov->iov_len);
		if (unlikely(error))
			break;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		if (unlikely(!len))
			break;
		if (unlikely(!base))
			break;

		/*
		 * Get this base offset and number of pages, then map
		 * in the user pages.
		 */
		off = (unsigned long) base & ~PAGE_MASK;
		npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (npages > PIPE_BUFFERS - buffers)
			npages = PIPE_BUFFERS - buffers;

		error = get_user_pages(current, current->mm,
				       (unsigned long) base, npages, 0, 0,
				       &pages[buffers], NULL);

		if (unlikely(error <= 0))
			break;

		/*
		 * Fill this contiguous range into the partial page map.
		 */
		for (i = 0; i < error; i++) {
			const int plen = min_t(size_t, len, PAGE_SIZE - off);

			partial[buffers].offset = off;
			partial[buffers].len = plen;

			off = 0;
			len -= plen;
			buffers++;
		}

		/*
		 * We didn't complete this iov, stop here since it probably
		 * means we have to move some of this into a pipe to
		 * be able to continue.
		 */
		if (len)
			break;

		/*
		 * Don't continue if we mapped fewer pages than we asked for,
		 * or if we mapped the max number of pages that we have
		 * room for.
		 */
		if (error < npages || buffers == PIPE_BUFFERS)
			break;

		nr_vecs--;
		iov++;
	}

	up_read(&current->mm->mmap_sem);

	if (buffers)
		return buffers;

	return error;
}
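/*
 * A standalone sketch of the base/length arithmetic above, assuming
 * 4096-byte pages: the sub-page offset of an unaligned user buffer is
 * folded into the page count, mirroring the "off + len" rounding done
 * with PAGE_MASK/PAGE_SHIFT. The sample values are illustrative.
 */
#include <assert.h>

int main(void)
{
	unsigned long page_size = 4096;
	unsigned long base = 0x1000f00, len = 600;

	unsigned long off = base & (page_size - 1);
	unsigned long npages = (off + len + page_size - 1) / page_size;

	assert(off == 0xf00);	/* 3840 bytes into the first page */
	/* 600 bytes starting at offset 3840 spill into a second page */
	assert(npages == 2);
	return 0;
}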
/*
 * vmsplice splices a user address range into a pipe. It can be thought of
 * as splice-from-memory, where the regular splice is splice-from-file (or
 * to file). In both cases the output is a pipe, naturally.
 *
 * Note that vmsplice only supports splicing _from_ user memory to a pipe,
 * not the other way around. Splicing from user memory is a simple operation
 * that can be supported without any funky alignment restrictions or nasty
 * vm tricks. We simply map in the user pages and fill them into a pipe.
 * The reverse isn't quite as easy, though. There are two possible solutions
 * for that:
 *
 *	- memcpy() the data internally, at which point we might as well just
 *	  do a regular read() on the buffer anyway.
 *	- Lots of nasty vm tricks, that are neither fast nor flexible (it
 *	  has restrictions on both ends of the pipe).
 *
 * Alas, it isn't here.
 */
static long do_vmsplice(struct file *file, const struct iovec __user *iov,
			unsigned long nr_segs, unsigned int flags)
{
	struct pipe_inode_info *pipe = file->f_dentry->d_inode->i_pipe;
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &user_page_pipe_buf_ops,
	};

	if (unlikely(!pipe))
		return -EBADF;
	if (unlikely(nr_segs > UIO_MAXIOV))
		return -EINVAL;
	else if (unlikely(!nr_segs))
		return 0;

	spd.nr_pages = get_iovec_page_array(iov, nr_segs, pages, partial);
	if (spd.nr_pages <= 0)
		return spd.nr_pages;

	return splice_to_pipe(pipe, &spd);
}
asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct file *file;
	long error;
	int fput;

	error = -EBADF;
	file = fget_light(fd, &fput);
	if (file) {
		if (file->f_mode & FMODE_WRITE)
			error = do_vmsplice(file, iov, nr_segs, flags);

		fput_light(file, fput);
	}

	return error;
}
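/*
 * A user-space sketch (not part of this file) of the one direction
 * vmsplice() supports: gathering user buffers into a pipe. The strings
 * and byte counts are illustrative; error handling is abbreviated.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char a[] = "hello ", b[] = "world\n";
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) - 1 },
		{ .iov_base = b, .iov_len = sizeof(b) - 1 },
	};
	int p[2];

	if (pipe(p) < 0)
		return 1;

	/* user memory -> pipe; the reverse direction is not implemented */
	if (vmsplice(p[1], iov, 2, 0) < 0)
		return 1;

	/* drain the pipe to stdout to observe the gathered data */
	splice(p[0], NULL, STDOUT_FILENO, NULL, 12, 0);
	return 0;
}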
asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
			   int fd_out, loff_t __user *off_out,
			   size_t len, unsigned int flags)
{
	long error;
	struct file *in, *out;
	int fput_in, fput_out;

	error = -EBADF;
	in = fget_light(fd_in, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			out = fget_light(fd_out, &fput_out);
			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_splice(in, off_in,
							  out, off_out,
							  len, flags);

				fput_light(out, fput_out);
			}
		}

		fput_light(in, fput_in);
	}

	return error;
}
/*
 * Link contents of ipipe to opipe.
 */
static int link_pipe(struct pipe_inode_info *ipipe,
		     struct pipe_inode_info *opipe,
		     size_t len, unsigned int flags)
{
	struct pipe_buffer *ibuf, *obuf;
	int ret, do_wakeup, i, ipipe_first;

	ret = do_wakeup = ipipe_first = 0;

	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by inode address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
	if (ipipe->inode < opipe->inode) {
		ipipe_first = 1;
		mutex_lock(&ipipe->inode->i_mutex);
		mutex_lock(&opipe->inode->i_mutex);
	} else {
		mutex_lock(&opipe->inode->i_mutex);
		mutex_lock(&ipipe->inode->i_mutex);
	}
	i = 0;
	for (;;) {
		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (ipipe->nrbufs - i) {
			ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1));

			/*
			 * If we have room, fill this buffer
			 */
			if (opipe->nrbufs < PIPE_BUFFERS) {
				int nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1);

				/*
				 * Get a reference to this pipe buffer,
				 * so we can copy the contents over.
				 */
				ibuf->ops->get(ipipe, ibuf);

				obuf = opipe->bufs + nbuf;
				*obuf = *ibuf;

				if (obuf->len > len)
					obuf->len = len;

				opipe->nrbufs++;
				do_wakeup = 1;
				ret += obuf->len;
				len -= obuf->len;
				i++;
			}

			if (!len)
				break;
			if (opipe->nrbufs < PIPE_BUFFERS)
				continue;

			/*
			 * We have input available, but no output room.
			 * If we already copied data, return that. If we
			 * need to drop the opipe lock, it must be ordered
			 * last to avoid deadlocks.
			 */
			if ((flags & SPLICE_F_NONBLOCK) || !ipipe_first) {
				if (!ret)
					ret = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				if (!ret)
					ret = -ERESTARTSYS;
				break;
			}

			if (do_wakeup) {
				if (waitqueue_active(&opipe->wait))
					wake_up_interruptible(&opipe->wait);
				kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
				do_wakeup = 0;
			}

			opipe->waiting_writers++;
			pipe_wait(opipe);
			opipe->waiting_writers--;

			continue;
		}
		/*
		 * No input buffers, do the usual checks for available
		 * writers and blocking and wait if necessary
		 */
		if (!ipipe->writers)
			break;
		if (!ipipe->waiting_writers) {
			if (ret)
				break;
		}

		/*
		 * pipe_wait() drops the ipipe mutex. To avoid deadlocks
		 * with another process, we can only safely do that if
		 * the ipipe lock is ordered last.
		 */
		if ((flags & SPLICE_F_NONBLOCK) || ipipe_first) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			if (waitqueue_active(&ipipe->wait))
				wake_up_interruptible_sync(&ipipe->wait);
			kill_fasync(&ipipe->fasync_writers, SIGIO, POLL_OUT);
			do_wakeup = 0;
		}

		pipe_wait(ipipe);
	}
	mutex_unlock(&ipipe->inode->i_mutex);
	mutex_unlock(&opipe->inode->i_mutex);

	if (do_wakeup) {
		if (waitqueue_active(&opipe->wait))
			wake_up_interruptible(&opipe->wait);
		kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
	}

	return ret;
}
/*
 * This is a tee(1) implementation that works on pipes. It doesn't copy
 * any data; it simply references the 'in' pages on the 'out' pipe.
 * The 'flags' used are the SPLICE_F_* variants, currently the only
 * applicable one is SPLICE_F_NONBLOCK.
 */
static long do_tee(struct file *in, struct file *out, size_t len,
		   unsigned int flags)
{
	struct pipe_inode_info *ipipe = in->f_dentry->d_inode->i_pipe;
	struct pipe_inode_info *opipe = out->f_dentry->d_inode->i_pipe;

	/*
	 * Link ipipe to the output pipe, consuming as we go along.
	 */
	if (ipipe && opipe)
		return link_pipe(ipipe, opipe, len, flags);

	return -EINVAL;
}
asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags)
{
	struct file *in;
	int error, fput_in;

	error = -EBADF;
	in = fget_light(fdin, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			int fput_out;
			struct file *out = fget_light(fdout, &fput_out);

			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_tee(in, out, len, flags);

				fput_light(out, fput_out);
			}
		}

		fput_light(in, fput_in);
	}

	return error;
}
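/*
 * A user-space sketch (not part of this file) of sys_tee() in action:
 * duplicate everything arriving on stdin (which must be a pipe) to
 * stdout (also a pipe) while splicing the original data into a file.
 * "log.txt" and the chunk size are illustrative.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("log.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	ssize_t n;

	if (fd < 0)
		return 1;

	for (;;) {
		/* duplicate stdin's pipe buffers onto stdout, copying nothing */
		n = tee(STDIN_FILENO, STDOUT_FILENO, 65536, 0);
		if (n <= 0)
			break;

		/* the data is still readable on stdin; drain it to the file */
		splice(STDIN_FILENO, NULL, fd, NULL, n, 0);
	}

	close(fd);
	return n < 0;
}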