2 * file.c - NTFS kernel file operations. Part of the Linux-NTFS project.
4 * Copyright (c) 2001-2006 Anton Altaparmakov
6 * This program/include file is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as published
8 * by the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program/include file is distributed in the hope that it will be
12 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
13 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program (in the main directory of the Linux-NTFS
18 * distribution in the file COPYING); if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #include <linux/buffer_head.h>
23 #include <linux/pagemap.h>
24 #include <linux/pagevec.h>
25 #include <linux/sched.h>
26 #include <linux/swap.h>
27 #include <linux/uio.h>
28 #include <linux/writeback.h>
31 #include <asm/uaccess.h>
43 * ntfs_file_open - called when an inode is about to be opened
44 * @vi: inode to be opened
45 * @filp: file structure describing the inode
47 * Limit file size to the page cache limit on architectures where unsigned long
48 * is 32-bits. This is the most we can do for now without overflowing the page
49 * cache page index. Doing it this way means we do not run into problems with
50 * existing files that are too large. It would be better to allow the user to read
51 * the beginning of the file but I doubt very much anyone is going to hit this
52 * check on a 32-bit architecture, so there is no point in adding the extra
53 * complexity required to support this.
55 * On 64-bit architectures, the check is hopefully optimized away by the compiler.
58 * After the check passes, just call generic_file_open() to do its work.
60 static int ntfs_file_open(struct inode *vi, struct file *filp)
62 if (sizeof(unsigned long) < 8) {
63 if (i_size_read(vi) > MAX_LFS_FILESIZE)
66 return generic_file_open(vi, filp);
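/*
 * A rough worked example of the limit above (illustrative, assuming 4 KiB
 * pages): on a 32-bit architecture the page cache indexes pages with an
 * unsigned long, so the largest file offset the check permits is roughly
 *
 *	MAX_LFS_FILESIZE ~= PAGE_CACHE_SIZE << (BITS_PER_LONG - 1)
 *			  = 2^12 * 2^31 bytes = 8 TiB
 *
 * Anything larger cannot be addressed through the page cache on such a
 * system, which is why ntfs_file_open() rejects such files there.
 */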
72 * ntfs_attr_extend_initialized - extend the initialized size of an attribute
73 * @ni: ntfs inode of the attribute to extend
74 * @new_init_size: requested new initialized size in bytes
75 * @cached_page: store any allocated but unused page here
76 * @lru_pvec: lru-buffering pagevec of the caller
78 * Extend the initialized size of an attribute described by the ntfs inode @ni
79 * to @new_init_size bytes. This involves zeroing any non-sparse space between
80 * the old initialized size and @new_init_size both in the page cache and on
81 * disk (if relevant complete pages are already uptodate in the page cache then
82 * these are simply marked dirty).
84 * As a side-effect, the file size (vfs inode->i_size) may be incremented as,
85 * in the resident attribute case, it is tied to the initialized size and, in
86 * the non-resident attribute case, it may not fall below the initialized size.
88 * Note that if the attribute is resident, we do not need to touch the page
89 * cache at all. This is because if the page cache page is not uptodate we
90 * bring it uptodate later, when doing the write to the mft record since we
91 * then already have the page mapped. And if the page is uptodate, the
92 * non-initialized region will already have been zeroed when the page was
93 * brought uptodate and the region may in fact already have been overwritten
94 * with new data via mmap() based writes, so we cannot just zero it. And since
95 * POSIX specifies that the behaviour of resizing a file whilst it is mmap()ped
96 * is unspecified, we choose not to do zeroing and thus we do not need to touch
97 * the page at all. For a more detailed explanation see ntfs_truncate() in
100 * @cached_page and @lru_pvec are just optimizations for dealing with multiple pages.
103 * Return 0 on success and -errno on error. In the case that an error is
104 * encountered it is possible that the initialized size will already have been
105 * incremented some way towards @new_init_size but it is guaranteed that if
106 * this is the case, the necessary zeroing will also have happened and that all
107 * metadata is self-consistent.
109 * Locking: i_mutex on the vfs inode corresponding to the ntfs inode @ni must be
110 * held by the caller.
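/*
 * For orientation (a general NTFS invariant rather than anything specific to
 * this function), the size fields involved relate as:
 *
 *	initialized_size <= data_size (i_size) <= allocated_size
 *
 * For a resident attribute the value lives inside the mft record and the
 * initialized size always equals i_size, whereas for a non-resident attribute
 * only the region below initialized_size is guaranteed to contain valid
 * (i.e. written or zeroed) data on disk.
 */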
112 static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size,
113 struct page **cached_page, struct pagevec *lru_pvec)
117 pgoff_t index, end_index;
119 struct inode *vi = VFS_I(ni);
121 MFT_RECORD *m = NULL;
123 ntfs_attr_search_ctx *ctx = NULL;
124 struct address_space *mapping;
125 struct page *page = NULL;
130 read_lock_irqsave(&ni->size_lock, flags);
131 old_init_size = ni->initialized_size;
132 old_i_size = i_size_read(vi);
133 BUG_ON(new_init_size > ni->allocated_size);
134 read_unlock_irqrestore(&ni->size_lock, flags);
135 ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
136 "old_initialized_size 0x%llx, "
137 "new_initialized_size 0x%llx, i_size 0x%llx.",
138 vi->i_ino, (unsigned)le32_to_cpu(ni->type),
139 (unsigned long long)old_init_size,
140 (unsigned long long)new_init_size, old_i_size);
144 base_ni = ni->ext.base_ntfs_ino;
145 /* Use goto to reduce indentation and we need the label below anyway. */
146 if (NInoNonResident(ni))
147 goto do_non_resident_extend;
148 BUG_ON(old_init_size != old_i_size);
149 m = map_mft_record(base_ni);
155 ctx = ntfs_attr_get_search_ctx(base_ni, m);
156 if (unlikely(!ctx)) {
160 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
161 CASE_SENSITIVE, 0, NULL, 0, ctx);
169 BUG_ON(a->non_resident);
170 /* The total length of the attribute value. */
171 attr_len = le32_to_cpu(a->data.resident.value_length);
172 BUG_ON(old_i_size != (loff_t)attr_len);
174 * Do the zeroing in the mft record and update the attribute size in
177 kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
178 memset(kattr + attr_len, 0, new_init_size - attr_len);
179 a->data.resident.value_length = cpu_to_le32((u32)new_init_size);
180 /* Finally, update the sizes in the vfs and ntfs inodes. */
181 write_lock_irqsave(&ni->size_lock, flags);
182 i_size_write(vi, new_init_size);
183 ni->initialized_size = new_init_size;
184 write_unlock_irqrestore(&ni->size_lock, flags);
186 do_non_resident_extend:
188 * If the new initialized size @new_init_size exceeds the current file
189 * size (vfs inode->i_size), we need to extend the file size to the
190 * new initialized size.
192 if (new_init_size > old_i_size) {
193 m = map_mft_record(base_ni);
199 ctx = ntfs_attr_get_search_ctx(base_ni, m);
200 if (unlikely(!ctx)) {
204 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
205 CASE_SENSITIVE, 0, NULL, 0, ctx);
213 BUG_ON(!a->non_resident);
214 BUG_ON(old_i_size != (loff_t)
215 sle64_to_cpu(a->data.non_resident.data_size));
216 a->data.non_resident.data_size = cpu_to_sle64(new_init_size);
217 flush_dcache_mft_record_page(ctx->ntfs_ino);
218 mark_mft_record_dirty(ctx->ntfs_ino);
219 /* Update the file size in the vfs inode. */
220 i_size_write(vi, new_init_size);
221 ntfs_attr_put_search_ctx(ctx);
223 unmap_mft_record(base_ni);
226 mapping = vi->i_mapping;
227 index = old_init_size >> PAGE_CACHE_SHIFT;
228 end_index = (new_init_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
231 * Read the page. If the page is not present, this will zero
232 * the uninitialized regions for us.
234 page = read_mapping_page(mapping, index, NULL);
239 wait_on_page_locked(page);
240 if (unlikely(!PageUptodate(page) || PageError(page))) {
241 page_cache_release(page);
246 * Update the initialized size in the ntfs inode. This is
247 * enough to make ntfs_writepage() work.
249 write_lock_irqsave(&ni->size_lock, flags);
250 ni->initialized_size = (s64)(index + 1) << PAGE_CACHE_SHIFT;
251 if (ni->initialized_size > new_init_size)
252 ni->initialized_size = new_init_size;
253 write_unlock_irqrestore(&ni->size_lock, flags);
254 /* Set the page dirty so it gets written out. */
255 set_page_dirty(page);
256 page_cache_release(page);
258 * Play nice with the vm and the rest of the system. This is
259 * very much needed as we can potentially be modifying the
260 * initialized size from a very small value to a really huge value, e.g.:
262 * f = open(somefile, O_TRUNC);
263 * truncate(f, 10GiB);
266 * And this would mean we would be marking dirty hundreds of
267 * thousands of pages or as in the above example more than
268 * two and a half million pages!
270 * TODO: For sparse pages could optimize this workload by using
271 * the FsMisc / MiscFs page bit as a "PageIsSparse" bit. This
272 * would be set in readpage for sparse pages and here we would
273 * not need to mark dirty any pages which have this bit set.
274 * The only caveat is that we have to clear the bit everywhere
275 * where we allocate any clusters that lie in the page or that
278 * TODO: An even greater optimization would be for us to only
279 * call readpage() on pages which are not in sparse regions as
280 * determined from the runlist. This would greatly reduce the
281 * number of pages we read and make dirty in the case of sparse
284 balance_dirty_pages_ratelimited(mapping);
286 } while (++index < end_index);
287 read_lock_irqsave(&ni->size_lock, flags);
288 BUG_ON(ni->initialized_size != new_init_size);
289 read_unlock_irqrestore(&ni->size_lock, flags);
290 /* Now bring in sync the initialized_size in the mft record. */
291 m = map_mft_record(base_ni);
297 ctx = ntfs_attr_get_search_ctx(base_ni, m);
298 if (unlikely(!ctx)) {
302 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
303 CASE_SENSITIVE, 0, NULL, 0, ctx);
311 BUG_ON(!a->non_resident);
312 a->data.non_resident.initialized_size = cpu_to_sle64(new_init_size);
314 flush_dcache_mft_record_page(ctx->ntfs_ino);
315 mark_mft_record_dirty(ctx->ntfs_ino);
317 ntfs_attr_put_search_ctx(ctx);
319 unmap_mft_record(base_ni);
320 ntfs_debug("Done, initialized_size 0x%llx, i_size 0x%llx.",
321 (unsigned long long)new_init_size, i_size_read(vi));
324 write_lock_irqsave(&ni->size_lock, flags);
325 ni->initialized_size = old_init_size;
326 write_unlock_irqrestore(&ni->size_lock, flags);
329 ntfs_attr_put_search_ctx(ctx);
331 unmap_mft_record(base_ni);
332 ntfs_debug("Failed. Returning error code %i.", err);
337 * ntfs_fault_in_pages_readable -
339 * Fault a number of userspace pages into pagetables.
341 * Unlike include/linux/pagemap.h::fault_in_pages_readable(), this one copes
342 * with more than two userspace pages as well as handling the single page case
345 * If you find this difficult to understand, then think of the while loop as being
346 * the following code, except that we do without the integer variable ret:
349 * ret = __get_user(c, uaddr);
350 * uaddr += PAGE_SIZE;
351 * } while (!ret && uaddr < end);
353 * Note, the final __get_user() may well run out-of-bounds of the user buffer,
354 * but _not_ out-of-bounds of the page the user buffer belongs to, and since
355 * this is only a read and not a write, and since it is still in the same page,
356 * it should not matter and this makes the code much simpler.
358 static inline void ntfs_fault_in_pages_readable(const char __user *uaddr,
361 const char __user *end;
364 /* Set @end to the first byte outside the last page we care about. */
365 end = (const char __user*)PAGE_ALIGN((ptrdiff_t __user)uaddr + bytes);
367 while (!__get_user(c, uaddr) && (uaddr += PAGE_SIZE, uaddr < end))
372 * ntfs_fault_in_pages_readable_iovec -
374 * Same as ntfs_fault_in_pages_readable() but operates on an array of iovecs.
376 static inline void ntfs_fault_in_pages_readable_iovec(const struct iovec *iov,
377 size_t iov_ofs, int bytes)
380 const char __user *buf;
383 buf = iov->iov_base + iov_ofs;
384 len = iov->iov_len - iov_ofs;
387 ntfs_fault_in_pages_readable(buf, len);
395 * __ntfs_grab_cache_pages - obtain a number of locked pages
396 * @mapping: address space mapping from which to obtain page cache pages
397 * @index: starting index in @mapping at which to begin obtaining pages
398 * @nr_pages: number of page cache pages to obtain
399 * @pages: array of pages in which to return the obtained page cache pages
400 * @cached_page: allocated but as yet unused page
401 * @lru_pvec: lru-buffering pagevec of caller
403 * Obtain @nr_pages locked page cache pages from the mapping @mapping and
404 * starting at index @index.
406 * If a page is newly created, increment its refcount and add it to the
407 * caller's lru-buffering pagevec @lru_pvec.
409 * This is the same as mm/filemap.c::__grab_cache_page(), except that @nr_pages
410 * are obtained at once instead of just one page and that 0 is returned on
411 * success and -errno on error.
413 * Note, the page locks are obtained in ascending page index order.
415 static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
416 pgoff_t index, const unsigned nr_pages, struct page **pages,
417 struct page **cached_page, struct pagevec *lru_pvec)
424 pages[nr] = find_lock_page(mapping, index);
427 *cached_page = page_cache_alloc(mapping);
428 if (unlikely(!*cached_page)) {
433 err = add_to_page_cache(*cached_page, mapping, index,
440 pages[nr] = *cached_page;
441 page_cache_get(*cached_page);
442 if (unlikely(!pagevec_add(lru_pvec, *cached_page)))
443 __pagevec_lru_add(lru_pvec);
448 } while (nr < nr_pages);
453 unlock_page(pages[--nr]);
454 page_cache_release(pages[nr]);
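/*
 * A minimal sketch of the expected calling pattern (names below are
 * illustrative, not taken from this file): every page handed back is locked
 * and holds an extra reference, so the caller must unlock and release each
 * one when it is finished, e.g.:
 *
 *	err = __ntfs_grab_cache_pages(mapping, idx, nr, pages, &cached_page,
 *			&lru_pvec);
 *	if (!err) {
 *		... copy the data into the locked pages ...
 *		do {
 *			unlock_page(pages[--nr]);
 *			page_cache_release(pages[nr]);
 *		} while (nr > 0);
 *	}
 */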
459 static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
463 bh->b_end_io = end_buffer_read_sync;
464 return submit_bh(READ, bh);
468 * ntfs_prepare_pages_for_non_resident_write - prepare pages for receiving data
469 * @pages: array of destination pages
470 * @nr_pages: number of pages in @pages
471 * @pos: byte position in file at which the write begins
472 * @bytes: number of bytes to be written
474 * This is called for non-resident attributes from ntfs_file_buffered_write()
475 * with i_mutex held on the inode (@pages[0]->mapping->host). There are
476 * @nr_pages pages in @pages which are locked but not kmap()ped. The source
477 * data has not yet been copied into the @pages.
479 * Need to fill any holes with actual clusters, allocate buffers if necessary,
480 * ensure all the buffers are mapped, and bring uptodate any buffers that are
481 * only partially being written to.
483 * If @nr_pages is greater than one, we are guaranteed that the cluster size is
484 * greater than PAGE_CACHE_SIZE, that all pages in @pages are entirely inside
485 * the same cluster and that they are the entirety of that cluster, and that
486 * the cluster is sparse, i.e. we need to allocate a cluster to fill the hole.
488 * i_size is not to be modified yet.
490 * Return 0 on success or -errno on error.
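/*
 * Example of the multi-page case described above (illustrative sizes): with a
 * 64 KiB cluster size and 4 KiB pages, a write into a sparse cluster is
 * prepared with nr_pages == 16, i.e. all sixteen page cache pages covering
 * that single cluster, so the newly allocated cluster can be initialized
 * consistently in one pass.
 */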
492 static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
493 unsigned nr_pages, s64 pos, size_t bytes)
495 VCN vcn, highest_vcn = 0, cpos, cend, bh_cpos, bh_cend;
497 s64 bh_pos, vcn_len, end, initialized_size;
501 ntfs_inode *ni, *base_ni = NULL;
503 runlist_element *rl, *rl2;
504 struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
505 ntfs_attr_search_ctx *ctx = NULL;
506 MFT_RECORD *m = NULL;
507 ATTR_RECORD *a = NULL;
509 u32 attr_rec_len = 0;
510 unsigned blocksize, u;
512 bool rl_write_locked, was_hole, is_retry;
513 unsigned char blocksize_bits;
516 u8 mft_attr_mapped:1;
519 } status = { 0, 0, 0, 0 };
524 vi = pages[0]->mapping->host;
527 ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
528 "index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
529 vi->i_ino, ni->type, pages[0]->index, nr_pages,
530 (long long)pos, bytes);
531 blocksize = vol->sb->s_blocksize;
532 blocksize_bits = vol->sb->s_blocksize_bits;
535 struct page *page = pages[u];
537 * create_empty_buffers() will create uptodate/dirty buffers if
538 * the page is uptodate/dirty.
540 if (!page_has_buffers(page)) {
541 create_empty_buffers(page, blocksize, 0);
542 if (unlikely(!page_has_buffers(page)))
545 } while (++u < nr_pages);
546 rl_write_locked = false;
553 cpos = pos >> vol->cluster_size_bits;
555 cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits;
557 * Loop over each page and for each page over each buffer. Use goto to
558 * reduce indentation.
563 bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
564 bh = head = page_buffers(page);
570 /* Clear buffer_new on all buffers to reinitialise state. */
572 clear_buffer_new(bh);
573 bh_end = bh_pos + blocksize;
574 bh_cpos = bh_pos >> vol->cluster_size_bits;
575 bh_cofs = bh_pos & vol->cluster_size_mask;
576 if (buffer_mapped(bh)) {
578 * The buffer is already mapped. If it is uptodate,
581 if (buffer_uptodate(bh))
584 * The buffer is not uptodate. If the page is uptodate
585 * set the buffer uptodate and otherwise ignore it.
587 if (PageUptodate(page)) {
588 set_buffer_uptodate(bh);
592 * Neither the page nor the buffer are uptodate. If
593 * the buffer is only partially being written to, we
594 * need to read it in before the write, i.e. now.
596 if ((bh_pos < pos && bh_end > pos) ||
597 (bh_pos < end && bh_end > end)) {
599 * If the buffer is fully or partially within
600 * the initialized size, do an actual read.
601 * Otherwise, simply zero the buffer.
603 read_lock_irqsave(&ni->size_lock, flags);
604 initialized_size = ni->initialized_size;
605 read_unlock_irqrestore(&ni->size_lock, flags);
606 if (bh_pos < initialized_size) {
607 ntfs_submit_bh_for_read(bh);
610 u8 *kaddr = kmap_atomic(page, KM_USER0);
611 memset(kaddr + bh_offset(bh), 0,
613 kunmap_atomic(kaddr, KM_USER0);
614 flush_dcache_page(page);
615 set_buffer_uptodate(bh);
620 /* Unmapped buffer. Need to map it. */
621 bh->b_bdev = vol->sb->s_bdev;
623 * If the current buffer is in the same clusters as the map
624 * cache, there is no need to check the runlist again. The
625 * map cache is made up of @vcn, which is the first cached file
626 * cluster, @vcn_len which is the number of cached file
627 * clusters, @lcn is the device cluster corresponding to @vcn,
628 * and @lcn_block is the block number corresponding to @lcn.
630 cdelta = bh_cpos - vcn;
631 if (likely(!cdelta || (cdelta > 0 && cdelta < vcn_len))) {
634 bh->b_blocknr = lcn_block +
635 (cdelta << (vol->cluster_size_bits -
637 (bh_cofs >> blocksize_bits);
638 set_buffer_mapped(bh);
640 * If the page is uptodate so is the buffer. If the
641 * buffer is fully outside the write, we ignore it if
642 * it was already allocated and we mark it dirty so it
643 * gets written out if we allocated it. On the other
644 * hand, if we allocated the buffer but we are not
645 * marking it dirty we set buffer_new so we can do
648 if (PageUptodate(page)) {
649 if (!buffer_uptodate(bh))
650 set_buffer_uptodate(bh);
651 if (unlikely(was_hole)) {
652 /* We allocated the buffer. */
653 unmap_underlying_metadata(bh->b_bdev,
655 if (bh_end <= pos || bh_pos >= end)
656 mark_buffer_dirty(bh);
662 /* Page is _not_ uptodate. */
663 if (likely(!was_hole)) {
665 * Buffer was already allocated. If it is not
666 * uptodate and is only partially being written
667 * to, we need to read it in before the write,
670 if (!buffer_uptodate(bh) && bh_pos < end &&
675 * If the buffer is fully or partially
676 * within the initialized size, do an
677 * actual read. Otherwise, simply zero
680 read_lock_irqsave(&ni->size_lock,
682 initialized_size = ni->initialized_size;
683 read_unlock_irqrestore(&ni->size_lock,
685 if (bh_pos < initialized_size) {
686 ntfs_submit_bh_for_read(bh);
689 u8 *kaddr = kmap_atomic(page,
691 memset(kaddr + bh_offset(bh),
693 kunmap_atomic(kaddr, KM_USER0);
694 flush_dcache_page(page);
695 set_buffer_uptodate(bh);
700 /* We allocated the buffer. */
701 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
703 * If the buffer is fully outside the write, zero it,
704 * set it uptodate, and mark it dirty so it gets
705 * written out. If it is partially being written to,
706 * zero the region surrounding the write but leave it to
707 * commit write to do anything else. Finally, if the
708 * buffer is fully being overwritten, do nothing.
710 if (bh_end <= pos || bh_pos >= end) {
711 if (!buffer_uptodate(bh)) {
712 u8 *kaddr = kmap_atomic(page, KM_USER0);
713 memset(kaddr + bh_offset(bh), 0,
715 kunmap_atomic(kaddr, KM_USER0);
716 flush_dcache_page(page);
717 set_buffer_uptodate(bh);
719 mark_buffer_dirty(bh);
723 if (!buffer_uptodate(bh) &&
724 (bh_pos < pos || bh_end > end)) {
728 kaddr = kmap_atomic(page, KM_USER0);
730 pofs = bh_pos & ~PAGE_CACHE_MASK;
731 memset(kaddr + pofs, 0, pos - bh_pos);
734 pofs = end & ~PAGE_CACHE_MASK;
735 memset(kaddr + pofs, 0, bh_end - end);
737 kunmap_atomic(kaddr, KM_USER0);
738 flush_dcache_page(page);
743 * Slow path: this is the first buffer in the cluster. If it
744 * is outside allocated size and is not uptodate, zero it and
747 read_lock_irqsave(&ni->size_lock, flags);
748 initialized_size = ni->allocated_size;
749 read_unlock_irqrestore(&ni->size_lock, flags);
750 if (bh_pos > initialized_size) {
751 if (PageUptodate(page)) {
752 if (!buffer_uptodate(bh))
753 set_buffer_uptodate(bh);
754 } else if (!buffer_uptodate(bh)) {
755 u8 *kaddr = kmap_atomic(page, KM_USER0);
756 memset(kaddr + bh_offset(bh), 0, blocksize);
757 kunmap_atomic(kaddr, KM_USER0);
758 flush_dcache_page(page);
759 set_buffer_uptodate(bh);
765 down_read(&ni->runlist.lock);
769 if (likely(rl != NULL)) {
770 /* Seek to element containing target cluster. */
771 while (rl->length && rl[1].vcn <= bh_cpos)
773 lcn = ntfs_rl_vcn_to_lcn(rl, bh_cpos);
774 if (likely(lcn >= 0)) {
776 * Successful remap, setup the map cache and
777 * use that to deal with the buffer.
781 vcn_len = rl[1].vcn - vcn;
782 lcn_block = lcn << (vol->cluster_size_bits -
786 * If the number of remaining clusters touched
787 * by the write is smaller or equal to the
788 * number of cached clusters, unlock the
789 * runlist as the map cache will be used from
792 if (likely(vcn + vcn_len >= cend)) {
793 if (rl_write_locked) {
794 up_write(&ni->runlist.lock);
795 rl_write_locked = false;
797 up_read(&ni->runlist.lock);
800 goto map_buffer_cached;
803 lcn = LCN_RL_NOT_MAPPED;
805 * If it is not a hole and not out of bounds, the runlist is
806 * probably unmapped so try to map it now.
808 if (unlikely(lcn != LCN_HOLE && lcn != LCN_ENOENT)) {
809 if (likely(!is_retry && lcn == LCN_RL_NOT_MAPPED)) {
810 /* Attempt to map runlist. */
811 if (!rl_write_locked) {
813 * We need the runlist locked for
814 * writing, so if it is locked for
815 * reading relock it now and retry in
816 * case it changed whilst we dropped
819 up_read(&ni->runlist.lock);
820 down_write(&ni->runlist.lock);
821 rl_write_locked = true;
824 err = ntfs_map_runlist_nolock(ni, bh_cpos,
831 * If @vcn is out of bounds, pretend @lcn is
832 * LCN_ENOENT. As long as the buffer is out
833 * of bounds this will work fine.
835 if (err == -ENOENT) {
838 goto rl_not_mapped_enoent;
842 /* Failed to map the buffer, even after retrying. */
844 ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
845 "attribute type 0x%x, vcn 0x%llx, "
846 "vcn offset 0x%x, because its "
847 "location on disk could not be "
848 "determined%s (error code %i).",
849 ni->mft_no, ni->type,
850 (unsigned long long)bh_cpos,
852 vol->cluster_size_mask,
853 is_retry ? " even after retrying" : "",
857 rl_not_mapped_enoent:
859 * The buffer is in a hole or out of bounds. We need to fill
860 * the hole, unless the buffer is in a cluster which is not
861 * touched by the write, in which case we just leave the buffer
862 * unmapped. This can only happen when the cluster size is
863 * less than the page cache size.
865 if (unlikely(vol->cluster_size < PAGE_CACHE_SIZE)) {
866 bh_cend = (bh_end + vol->cluster_size - 1) >>
867 vol->cluster_size_bits;
868 if ((bh_cend <= cpos || bh_cpos >= cend)) {
871 * If the buffer is uptodate we skip it. If it
872 * is not but the page is uptodate, we can set
873 * the buffer uptodate. If the page is not
874 * uptodate, we can clear the buffer and set it
875 * uptodate. Whether this is worthwhile is
876 * debatable and this could be removed.
878 if (PageUptodate(page)) {
879 if (!buffer_uptodate(bh))
880 set_buffer_uptodate(bh);
881 } else if (!buffer_uptodate(bh)) {
882 u8 *kaddr = kmap_atomic(page, KM_USER0);
883 memset(kaddr + bh_offset(bh), 0,
885 kunmap_atomic(kaddr, KM_USER0);
886 flush_dcache_page(page);
887 set_buffer_uptodate(bh);
893 * Out of bounds buffer is invalid if it was not really out of
896 BUG_ON(lcn != LCN_HOLE);
898 * We need the runlist locked for writing, so if it is locked
899 * for reading relock it now and retry in case it changed
900 * whilst we dropped the lock.
903 if (!rl_write_locked) {
904 up_read(&ni->runlist.lock);
905 down_write(&ni->runlist.lock);
906 rl_write_locked = true;
909 /* Find the previous last allocated cluster. */
910 BUG_ON(rl->lcn != LCN_HOLE);
913 while (--rl2 >= ni->runlist.rl) {
915 lcn = rl2->lcn + rl2->length;
919 rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE,
923 ntfs_debug("Failed to allocate cluster, error code %i.",
928 rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
933 if (ntfs_cluster_free_from_rl(vol, rl2)) {
934 ntfs_error(vol->sb, "Failed to release "
935 "allocated cluster in error "
936 "code path. Run chkdsk to "
937 "recover the lost cluster.");
944 status.runlist_merged = 1;
945 ntfs_debug("Allocated cluster, lcn 0x%llx.",
946 (unsigned long long)lcn);
947 /* Map and lock the mft record and get the attribute record. */
951 base_ni = ni->ext.base_ntfs_ino;
952 m = map_mft_record(base_ni);
957 ctx = ntfs_attr_get_search_ctx(base_ni, m);
958 if (unlikely(!ctx)) {
960 unmap_mft_record(base_ni);
963 status.mft_attr_mapped = 1;
964 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
965 CASE_SENSITIVE, bh_cpos, NULL, 0, ctx);
974 * Find the runlist element with which the attribute extent
975 * starts. Note, we cannot use the _attr_ version because we
976 * have mapped the mft record. That is ok because we know the
977 * runlist fragment must be mapped already to have ever gotten
978 * here, so we can just use the _rl_ version.
980 vcn = sle64_to_cpu(a->data.non_resident.lowest_vcn);
981 rl2 = ntfs_rl_find_vcn_nolock(rl, vcn);
983 BUG_ON(!rl2->length);
984 BUG_ON(rl2->lcn < LCN_HOLE);
985 highest_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
987 * If @highest_vcn is zero, calculate the real highest_vcn
988 * (which can really be zero).
991 highest_vcn = (sle64_to_cpu(
992 a->data.non_resident.allocated_size) >>
993 vol->cluster_size_bits) - 1;
995 * Determine the size of the mapping pairs array for the new
996 * extent, i.e. the old extent with the hole filled.
998 mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, vcn,
1000 if (unlikely(mp_size <= 0)) {
1001 if (!(err = mp_size))
1003 ntfs_debug("Failed to get size for mapping pairs "
1004 "array, error code %i.", err);
1008 * Resize the attribute record to fit the new mapping pairs
1011 attr_rec_len = le32_to_cpu(a->length);
1012 err = ntfs_attr_record_resize(m, a, mp_size + le16_to_cpu(
1013 a->data.non_resident.mapping_pairs_offset));
1014 if (unlikely(err)) {
1015 BUG_ON(err != -ENOSPC);
1016 // TODO: Deal with this by using the current attribute
1017 // and fill it with as much of the mapping pairs
1018 // array as possible. Then loop over each attribute
1019 // extent rewriting the mapping pairs arrays as we go
1020 // along and if when we reach the end we have not
1021 // enough space, try to resize the last attribute
1022 // extent and if even that fails, add a new attribute
1024 // We could also try to resize at each step in the hope
1025 // that we will not need to rewrite every single extent.
1026 // Note, we may need to decompress some extents to fill
1027 // the runlist as we are walking the extents...
1028 ntfs_error(vol->sb, "Not enough space in the mft "
1029 "record for the extended attribute "
1030 "record. This case is not "
1031 "implemented yet.");
1035 status.mp_rebuilt = 1;
1037 * Generate the mapping pairs array directly into the attribute
1040 err = ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
1041 a->data.non_resident.mapping_pairs_offset),
1042 mp_size, rl2, vcn, highest_vcn, NULL);
1043 if (unlikely(err)) {
1044 ntfs_error(vol->sb, "Cannot fill hole in inode 0x%lx, "
1045 "attribute type 0x%x, because building "
1046 "the mapping pairs failed with error "
1047 "code %i.", vi->i_ino,
1048 (unsigned)le32_to_cpu(ni->type), err);
1052 /* Update the highest_vcn but only if it was not set. */
1053 if (unlikely(!a->data.non_resident.highest_vcn))
1054 a->data.non_resident.highest_vcn =
1055 cpu_to_sle64(highest_vcn);
1057 * If the attribute is sparse/compressed, update the compressed
1058 * size in the ntfs_inode structure and the attribute record.
1060 if (likely(NInoSparse(ni) || NInoCompressed(ni))) {
1062 * If we are not in the first attribute extent, switch
1063 * to it, but first ensure the changes will make it to
1066 if (a->data.non_resident.lowest_vcn) {
1067 flush_dcache_mft_record_page(ctx->ntfs_ino);
1068 mark_mft_record_dirty(ctx->ntfs_ino);
1069 ntfs_attr_reinit_search_ctx(ctx);
1070 err = ntfs_attr_lookup(ni->type, ni->name,
1071 ni->name_len, CASE_SENSITIVE,
1073 if (unlikely(err)) {
1074 status.attr_switched = 1;
1077 /* @m is not used any more so do not set it. */
1080 write_lock_irqsave(&ni->size_lock, flags);
1081 ni->itype.compressed.size += vol->cluster_size;
1082 a->data.non_resident.compressed_size =
1083 cpu_to_sle64(ni->itype.compressed.size);
1084 write_unlock_irqrestore(&ni->size_lock, flags);
1086 /* Ensure the changes make it to disk. */
1087 flush_dcache_mft_record_page(ctx->ntfs_ino);
1088 mark_mft_record_dirty(ctx->ntfs_ino);
1089 ntfs_attr_put_search_ctx(ctx);
1090 unmap_mft_record(base_ni);
1091 /* Successfully filled the hole. */
1092 status.runlist_merged = 0;
1093 status.mft_attr_mapped = 0;
1094 status.mp_rebuilt = 0;
1095 /* Setup the map cache and use that to deal with the buffer. */
1099 lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits);
1102 * If the number of remaining clusters in the @pages is smaller
1103 * or equal to the number of cached clusters, unlock the
1104 * runlist as the map cache will be used from now on.
1106 if (likely(vcn + vcn_len >= cend)) {
1107 up_write(&ni->runlist.lock);
1108 rl_write_locked = false;
1111 goto map_buffer_cached;
1112 } while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
1113 /* If there are no errors, do the next page. */
1114 if (likely(!err && ++u < nr_pages))
1116 /* If there are no errors, release the runlist lock if we took it. */
1118 if (unlikely(rl_write_locked)) {
1119 up_write(&ni->runlist.lock);
1120 rl_write_locked = false;
1121 } else if (unlikely(rl))
1122 up_read(&ni->runlist.lock);
1125 /* If we issued read requests, let them complete. */
1126 read_lock_irqsave(&ni->size_lock, flags);
1127 initialized_size = ni->initialized_size;
1128 read_unlock_irqrestore(&ni->size_lock, flags);
1129 while (wait_bh > wait) {
1132 if (likely(buffer_uptodate(bh))) {
1134 bh_pos = ((s64)page->index << PAGE_CACHE_SHIFT) +
1137 * If the buffer overflows the initialized size, need
1138 * to zero the overflowing region.
1140 if (unlikely(bh_pos + blocksize > initialized_size)) {
1144 if (likely(bh_pos < initialized_size))
1145 ofs = initialized_size - bh_pos;
1146 kaddr = kmap_atomic(page, KM_USER0);
1147 memset(kaddr + bh_offset(bh) + ofs, 0,
1149 kunmap_atomic(kaddr, KM_USER0);
1150 flush_dcache_page(page);
1152 } else /* if (unlikely(!buffer_uptodate(bh))) */
1156 /* Clear buffer_new on all buffers. */
1159 bh = head = page_buffers(pages[u]);
1162 clear_buffer_new(bh);
1163 } while ((bh = bh->b_this_page) != head);
1164 } while (++u < nr_pages);
1165 ntfs_debug("Done.");
1168 if (status.attr_switched) {
1169 /* Get back to the attribute extent we modified. */
1170 ntfs_attr_reinit_search_ctx(ctx);
1171 if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1172 CASE_SENSITIVE, bh_cpos, NULL, 0, ctx)) {
1173 ntfs_error(vol->sb, "Failed to find required "
1174 "attribute extent of attribute in "
1175 "error code path. Run chkdsk to "
1177 write_lock_irqsave(&ni->size_lock, flags);
1178 ni->itype.compressed.size += vol->cluster_size;
1179 write_unlock_irqrestore(&ni->size_lock, flags);
1180 flush_dcache_mft_record_page(ctx->ntfs_ino);
1181 mark_mft_record_dirty(ctx->ntfs_ino);
1183 * The only thing that is now wrong is the compressed
1184 * size of the base attribute extent which chkdsk
1185 * should be able to fix.
1191 status.attr_switched = 0;
1195 * If the runlist has been modified, need to restore it by punching a
1196 * hole into it and we then need to deallocate the on-disk cluster as
1197 * well. Note, we only modify the runlist if we are able to generate a
1198 * new mapping pairs array, i.e. only when the mapped attribute extent
1201 if (status.runlist_merged && !status.attr_switched) {
1202 BUG_ON(!rl_write_locked);
1203 /* Make the file cluster we allocated sparse in the runlist. */
1204 if (ntfs_rl_punch_nolock(vol, &ni->runlist, bh_cpos, 1)) {
1205 ntfs_error(vol->sb, "Failed to punch hole into "
1206 "attribute runlist in error code "
1207 "path. Run chkdsk to recover the "
1210 } else /* if (success) */ {
1211 status.runlist_merged = 0;
1213 * Deallocate the on-disk cluster we allocated but only
1214 * if we succeeded in punching its vcn out of the
1217 down_write(&vol->lcnbmp_lock);
1218 if (ntfs_bitmap_clear_bit(vol->lcnbmp_ino, lcn)) {
1219 ntfs_error(vol->sb, "Failed to release "
1220 "allocated cluster in error "
1221 "code path. Run chkdsk to "
1222 "recover the lost cluster.");
1225 up_write(&vol->lcnbmp_lock);
1229 * Resize the attribute record to its old size and rebuild the mapping
1230 * pairs array. Note, we only can do this if the runlist has been
1231 * restored to its old state which also implies that the mapped
1232 * attribute extent is not switched.
1234 if (status.mp_rebuilt && !status.runlist_merged) {
1235 if (ntfs_attr_record_resize(m, a, attr_rec_len)) {
1236 ntfs_error(vol->sb, "Failed to restore attribute "
1237 "record in error code path. Run "
1238 "chkdsk to recover.");
1240 } else /* if (success) */ {
1241 if (ntfs_mapping_pairs_build(vol, (u8*)a +
1242 le16_to_cpu(a->data.non_resident.
1243 mapping_pairs_offset), attr_rec_len -
1244 le16_to_cpu(a->data.non_resident.
1245 mapping_pairs_offset), ni->runlist.rl,
1246 vcn, highest_vcn, NULL)) {
1247 ntfs_error(vol->sb, "Failed to restore "
1248 "mapping pairs array in error "
1249 "code path. Run chkdsk to "
1253 flush_dcache_mft_record_page(ctx->ntfs_ino);
1254 mark_mft_record_dirty(ctx->ntfs_ino);
1257 /* Release the mft record and the attribute. */
1258 if (status.mft_attr_mapped) {
1259 ntfs_attr_put_search_ctx(ctx);
1260 unmap_mft_record(base_ni);
1262 /* Release the runlist lock. */
1263 if (rl_write_locked)
1264 up_write(&ni->runlist.lock);
1266 up_read(&ni->runlist.lock);
1268 * Zero out any newly allocated blocks to avoid exposing stale data.
1269 * If BH_New is set, we know that the block was newly allocated above
1270 * and that it has not been fully zeroed and marked dirty yet.
1274 end = bh_cpos << vol->cluster_size_bits;
1277 bh = head = page_buffers(page);
1279 if (u == nr_pages &&
1280 ((s64)page->index << PAGE_CACHE_SHIFT) +
1281 bh_offset(bh) >= end)
1283 if (!buffer_new(bh))
1285 clear_buffer_new(bh);
1286 if (!buffer_uptodate(bh)) {
1287 if (PageUptodate(page))
1288 set_buffer_uptodate(bh);
1290 u8 *kaddr = kmap_atomic(page, KM_USER0);
1291 memset(kaddr + bh_offset(bh), 0,
1293 kunmap_atomic(kaddr, KM_USER0);
1294 flush_dcache_page(page);
1295 set_buffer_uptodate(bh);
1298 mark_buffer_dirty(bh);
1299 } while ((bh = bh->b_this_page) != head);
1300 } while (++u <= nr_pages);
1301 ntfs_error(vol->sb, "Failed. Returning error code %i.", err);
1306 * Copy as much as we can into the pages and return the number of bytes which
1307 * were successfully copied. If a fault is encountered then clear the pages
1308 * out to (ofs + bytes) and return the number of bytes which were copied.
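/*
 * For example (illustrative numbers): if asked to copy 1000 bytes but a fault
 * occurs after 300, the remaining 700 bytes of the destination range are
 * zeroed and 300 is returned, mirroring the tail-zeroing behaviour of
 * __copy_from_user().
 */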
1310 static inline size_t ntfs_copy_from_user(struct page **pages,
1311 unsigned nr_pages, unsigned ofs, const char __user *buf,
1314 struct page **last_page = pages + nr_pages;
1321 len = PAGE_CACHE_SIZE - ofs;
1324 kaddr = kmap_atomic(*pages, KM_USER0);
1325 left = __copy_from_user_inatomic(kaddr + ofs, buf, len);
1326 kunmap_atomic(kaddr, KM_USER0);
1327 if (unlikely(left)) {
1328 /* Do it the slow way. */
1329 kaddr = kmap(*pages);
1330 left = __copy_from_user(kaddr + ofs, buf, len);
1341 } while (++pages < last_page);
1345 total += len - left;
1346 /* Zero the rest of the target like __copy_from_user(). */
1347 while (++pages < last_page) {
1351 len = PAGE_CACHE_SIZE;
1354 kaddr = kmap_atomic(*pages, KM_USER0);
1355 memset(kaddr, 0, len);
1356 kunmap_atomic(kaddr, KM_USER0);
1361 static size_t __ntfs_copy_from_user_iovec_inatomic(char *vaddr,
1362 const struct iovec *iov, size_t iov_ofs, size_t bytes)
1367 const char __user *buf = iov->iov_base + iov_ofs;
1371 len = iov->iov_len - iov_ofs;
1374 left = __copy_from_user_inatomic(vaddr, buf, len);
1378 if (unlikely(left)) {
1390 static inline void ntfs_set_next_iovec(const struct iovec **iovp,
1391 size_t *iov_ofsp, size_t bytes)
1393 const struct iovec *iov = *iovp;
1394 size_t iov_ofs = *iov_ofsp;
1399 len = iov->iov_len - iov_ofs;
1404 if (iov->iov_len == iov_ofs) {
1410 *iov_ofsp = iov_ofs;
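/*
 * Worked example (illustrative only): with two iovecs of 1000 and 4096 bytes
 * and @bytes == 3000 starting at *iov_ofsp == 0, the first segment
 * contributes its remaining 1000 bytes and is skipped, and the walk ends on
 * the second segment with *iov_ofsp == 2000, ready for the next copy to
 * continue from there.
 */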
1414 * This has the same side-effects and return value as ntfs_copy_from_user().
1415 * The difference is that on a fault we need to memset the remainder of the
1416 * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s
1417 * single-segment behaviour.
1419 * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both
1420 * when atomic and when not atomic. This is ok because
1421 * __ntfs_copy_from_user_iovec_inatomic() calls __copy_from_user_inatomic()
1422 * and it is ok to call this when non-atomic.
1423 * In fact, the only difference between __copy_from_user_inatomic() and
1424 * __copy_from_user() is that the latter calls might_sleep() and the former
1425 * should not zero the tail of the buffer on error. And on many
1426 * architectures __copy_from_user_inatomic() is just defined to
1427 * __copy_from_user() so it makes no difference at all on those architectures.
1429 static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
1430 unsigned nr_pages, unsigned ofs, const struct iovec **iov,
1431 size_t *iov_ofs, size_t bytes)
1433 struct page **last_page = pages + nr_pages;
1435 size_t copied, len, total = 0;
1438 len = PAGE_CACHE_SIZE - ofs;
1441 kaddr = kmap_atomic(*pages, KM_USER0);
1442 copied = __ntfs_copy_from_user_iovec_inatomic(kaddr + ofs,
1443 *iov, *iov_ofs, len);
1444 kunmap_atomic(kaddr, KM_USER0);
1445 if (unlikely(copied != len)) {
1446 /* Do it the slow way. */
1447 kaddr = kmap(*pages);
1448 copied = __ntfs_copy_from_user_iovec_inatomic(kaddr + ofs,
1449 *iov, *iov_ofs, len);
1451 * Zero the rest of the target like __copy_from_user().
1453 memset(kaddr + ofs + copied, 0, len - copied);
1455 if (unlikely(copied != len))
1462 ntfs_set_next_iovec(iov, iov_ofs, len);
1464 } while (++pages < last_page);
1469 /* Zero the rest of the target like __copy_from_user(). */
1470 while (++pages < last_page) {
1474 len = PAGE_CACHE_SIZE;
1477 kaddr = kmap_atomic(*pages, KM_USER0);
1478 memset(kaddr, 0, len);
1479 kunmap_atomic(kaddr, KM_USER0);
1484 static inline void ntfs_flush_dcache_pages(struct page **pages,
1489 * Warning: Do not do the decrement at the same time as the call to
1490 * flush_dcache_page() because it is a NULL macro on i386 and hence the
1491 * decrement never happens so the loop never terminates.
1495 flush_dcache_page(pages[nr_pages]);
1496 } while (nr_pages > 0);
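/*
 * Illustration of the hazard described above (not code from this file): on
 * architectures where flush_dcache_page() expands to an empty macro, writing
 *
 *	do {
 *		flush_dcache_page(pages[--nr_pages]);
 *	} while (nr_pages > 0);
 *
 * would never evaluate the macro argument, so nr_pages would never be
 * decremented and the loop would spin forever.  Hence the decrement is kept
 * as a separate statement before the call.
 */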
1500 * ntfs_commit_pages_after_non_resident_write - commit the received data
1501 * @pages: array of destination pages
1502 * @nr_pages: number of pages in @pages
1503 * @pos: byte position in file at which the write begins
1504 * @bytes: number of bytes to be written
1506 * See description of ntfs_commit_pages_after_write(), below.
1508 static inline int ntfs_commit_pages_after_non_resident_write(
1509 struct page **pages, const unsigned nr_pages,
1510 s64 pos, size_t bytes)
1512 s64 end, initialized_size;
1514 ntfs_inode *ni, *base_ni;
1515 struct buffer_head *bh, *head;
1516 ntfs_attr_search_ctx *ctx;
1519 unsigned long flags;
1520 unsigned blocksize, u;
1523 vi = pages[0]->mapping->host;
1525 blocksize = vi->i_sb->s_blocksize;
1534 bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
1535 bh = head = page_buffers(page);
1540 bh_end = bh_pos + blocksize;
1541 if (bh_end <= pos || bh_pos >= end) {
1542 if (!buffer_uptodate(bh))
1545 set_buffer_uptodate(bh);
1546 mark_buffer_dirty(bh);
1548 } while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
1550 * If all buffers are now uptodate but the page is not, set the
1553 if (!partial && !PageUptodate(page))
1554 SetPageUptodate(page);
1555 } while (++u < nr_pages);
1557 * Finally, if we do not need to update initialized_size or i_size we
1560 read_lock_irqsave(&ni->size_lock, flags);
1561 initialized_size = ni->initialized_size;
1562 read_unlock_irqrestore(&ni->size_lock, flags);
1563 if (end <= initialized_size) {
1564 ntfs_debug("Done.");
1568 * Update initialized_size/i_size as appropriate, both in the inode and
1574 base_ni = ni->ext.base_ntfs_ino;
1575 /* Map, pin, and lock the mft record. */
1576 m = map_mft_record(base_ni);
1583 BUG_ON(!NInoNonResident(ni));
1584 ctx = ntfs_attr_get_search_ctx(base_ni, m);
1585 if (unlikely(!ctx)) {
1589 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1590 CASE_SENSITIVE, 0, NULL, 0, ctx);
1591 if (unlikely(err)) {
1597 BUG_ON(!a->non_resident);
1598 write_lock_irqsave(&ni->size_lock, flags);
1599 BUG_ON(end > ni->allocated_size);
1600 ni->initialized_size = end;
1601 a->data.non_resident.initialized_size = cpu_to_sle64(end);
1602 if (end > i_size_read(vi)) {
1603 i_size_write(vi, end);
1604 a->data.non_resident.data_size =
1605 a->data.non_resident.initialized_size;
1607 write_unlock_irqrestore(&ni->size_lock, flags);
1608 /* Mark the mft record dirty, so it gets written back. */
1609 flush_dcache_mft_record_page(ctx->ntfs_ino);
1610 mark_mft_record_dirty(ctx->ntfs_ino);
1611 ntfs_attr_put_search_ctx(ctx);
1612 unmap_mft_record(base_ni);
1613 ntfs_debug("Done.");
1617 ntfs_attr_put_search_ctx(ctx);
1619 unmap_mft_record(base_ni);
1620 ntfs_error(vi->i_sb, "Failed to update initialized_size/i_size (error "
1623 NVolSetErrors(ni->vol);
1628 * ntfs_commit_pages_after_write - commit the received data
1629 * @pages: array of destination pages
1630 * @nr_pages: number of pages in @pages
1631 * @pos: byte position in file at which the write begins
1632 * @bytes: number of bytes to be written
1634 * This is called from ntfs_file_buffered_write() with i_mutex held on the inode
1635 * (@pages[0]->mapping->host). There are @nr_pages pages in @pages which are
1636 * locked but not kmap()ped. The source data has already been copied into the
1637 * @page. ntfs_prepare_pages_for_non_resident_write() has been called before
1638 * the data was copied (for non-resident attributes only) and it returned
1641 * Need to set uptodate and mark dirty all buffers within the boundary of the
1642 * write. If all buffers in a page are uptodate we set the page uptodate, too.
1644 * Setting the buffers dirty ensures that they get written out later when
1645 * ntfs_writepage() is invoked by the VM.
1647 * Finally, we need to update i_size and initialized_size as appropriate both
1648 * in the inode and the mft record.
1650 * This is modelled after fs/buffer.c::generic_commit_write(), which marks
1651 * buffers uptodate and dirty, sets the page uptodate if all buffers in the
1652 * page are uptodate, and updates i_size if the end of io is beyond i_size. In
1653 * that case, it also marks the inode dirty.
1655 * If things have gone as outlined in
1656 * ntfs_prepare_pages_for_non_resident_write(), we do not need to do any page
1657 * content modifications here for non-resident attributes. For resident
1658 * attributes we need to do the uptodate bringing here which we combine with
1659 * the copying into the mft record which means we save one atomic kmap.
1661 * Return 0 on success or -errno on error.
1663 static int ntfs_commit_pages_after_write(struct page **pages,
1664 const unsigned nr_pages, s64 pos, size_t bytes)
1666 s64 end, initialized_size;
1669 ntfs_inode *ni, *base_ni;
1671 ntfs_attr_search_ctx *ctx;
1674 char *kattr, *kaddr;
1675 unsigned long flags;
1683 vi = page->mapping->host;
1685 ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
1686 "index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
1687 vi->i_ino, ni->type, page->index, nr_pages,
1688 (long long)pos, bytes);
1689 if (NInoNonResident(ni))
1690 return ntfs_commit_pages_after_non_resident_write(pages,
1691 nr_pages, pos, bytes);
1692 BUG_ON(nr_pages > 1);
1694 * Attribute is resident, implying it is not compressed, encrypted, or
1700 base_ni = ni->ext.base_ntfs_ino;
1701 BUG_ON(NInoNonResident(ni));
1702 /* Map, pin, and lock the mft record. */
1703 m = map_mft_record(base_ni);
1710 ctx = ntfs_attr_get_search_ctx(base_ni, m);
1711 if (unlikely(!ctx)) {
1715 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1716 CASE_SENSITIVE, 0, NULL, 0, ctx);
1717 if (unlikely(err)) {
1723 BUG_ON(a->non_resident);
1724 /* The total length of the attribute value. */
1725 attr_len = le32_to_cpu(a->data.resident.value_length);
1726 i_size = i_size_read(vi);
1727 BUG_ON(attr_len != i_size);
1728 BUG_ON(pos > attr_len);
1730 BUG_ON(end > le32_to_cpu(a->length) -
1731 le16_to_cpu(a->data.resident.value_offset));
1732 kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
1733 kaddr = kmap_atomic(page, KM_USER0);
1734 /* Copy the received data from the page to the mft record. */
1735 memcpy(kattr + pos, kaddr + pos, bytes);
1736 /* Update the attribute length if necessary. */
1737 if (end > attr_len) {
1739 a->data.resident.value_length = cpu_to_le32(attr_len);
1742 * If the page is not uptodate, bring the out of bounds area(s)
1743 * uptodate by copying data from the mft record to the page.
1745 if (!PageUptodate(page)) {
1747 memcpy(kaddr, kattr, pos);
1749 memcpy(kaddr + end, kattr + end, attr_len - end);
1750 /* Zero the region outside the end of the attribute value. */
1751 memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
1752 flush_dcache_page(page);
1753 SetPageUptodate(page);
1755 kunmap_atomic(kaddr, KM_USER0);
1756 /* Update initialized_size/i_size if necessary. */
1757 read_lock_irqsave(&ni->size_lock, flags);
1758 initialized_size = ni->initialized_size;
1759 BUG_ON(end > ni->allocated_size);
1760 read_unlock_irqrestore(&ni->size_lock, flags);
1761 BUG_ON(initialized_size != i_size);
1762 if (end > initialized_size) {
1763 unsigned long flags;
1765 write_lock_irqsave(&ni->size_lock, flags);
1766 ni->initialized_size = end;
1767 i_size_write(vi, end);
1768 write_unlock_irqrestore(&ni->size_lock, flags);
1770 /* Mark the mft record dirty, so it gets written back. */
1771 flush_dcache_mft_record_page(ctx->ntfs_ino);
1772 mark_mft_record_dirty(ctx->ntfs_ino);
1773 ntfs_attr_put_search_ctx(ctx);
1774 unmap_mft_record(base_ni);
1775 ntfs_debug("Done.");
1778 if (err == -ENOMEM) {
1779 ntfs_warning(vi->i_sb, "Error allocating memory required to "
1780 "commit the write.");
1781 if (PageUptodate(page)) {
1782 ntfs_warning(vi->i_sb, "Page is uptodate, setting "
1783 "dirty so the write will be retried "
1784 "later on by the VM.");
1786 * Put the page on mapping->dirty_pages, but leave its
1787 * buffers' dirty state as-is.
1789 __set_page_dirty_nobuffers(page);
1792 ntfs_error(vi->i_sb, "Page is not uptodate. Written "
1793 "data has been lost.");
1795 ntfs_error(vi->i_sb, "Resident attribute commit write failed "
1796 "with error %i.", err);
1797 NVolSetErrors(ni->vol);
1800 ntfs_attr_put_search_ctx(ctx);
1802 unmap_mft_record(base_ni);
1807 * ntfs_file_buffered_write -
1809 * Locking: The vfs is holding ->i_mutex on the inode.
1811 static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
1812 const struct iovec *iov, unsigned long nr_segs,
1813 loff_t pos, loff_t *ppos, size_t count)
1815 struct file *file = iocb->ki_filp;
1816 struct address_space *mapping = file->f_mapping;
1817 struct inode *vi = mapping->host;
1818 ntfs_inode *ni = NTFS_I(vi);
1819 ntfs_volume *vol = ni->vol;
1820 struct page *pages[NTFS_MAX_PAGES_PER_CLUSTER];
1821 struct page *cached_page = NULL;
1822 char __user *buf = NULL;
1826 unsigned long flags;
1827 size_t bytes, iov_ofs = 0; /* Offset in the current iovec. */
1828 ssize_t status, written;
1831 struct pagevec lru_pvec;
1833 ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
1834 "pos 0x%llx, count 0x%lx.",
1835 vi->i_ino, (unsigned)le32_to_cpu(ni->type),
1836 (unsigned long long)pos, (unsigned long)count);
1837 if (unlikely(!count))
1839 BUG_ON(NInoMstProtected(ni));
1841 * If the attribute is not an index root and it is encrypted or
1842 * compressed, we cannot write to it yet. Note we need to check for
1843 * AT_INDEX_ALLOCATION since this is the type of both directory and
1846 if (ni->type != AT_INDEX_ALLOCATION) {
1847 /* If file is encrypted, deny access, just like NT4. */
1848 if (NInoEncrypted(ni)) {
1850 * Reminder for later: Encrypted files are _always_
1851 * non-resident so that the content can always be
1854 ntfs_debug("Denying write access to encrypted file.");
1857 if (NInoCompressed(ni)) {
1858 /* Only unnamed $DATA attribute can be compressed. */
1859 BUG_ON(ni->type != AT_DATA);
1860 BUG_ON(ni->name_len);
1862 * Reminder for later: If resident, the data is not
1863 * actually compressed. Only on the switch to non-
1864 * resident does compression kick in. This is in
1865 * contrast to encrypted files (see above).
1867 ntfs_error(vi->i_sb, "Writing to compressed files is "
1868 "not implemented yet. Sorry.");
1873 * If a previous ntfs_truncate() failed, repeat it and abort if it
1876 if (unlikely(NInoTruncateFailed(ni))) {
1877 down_write(&vi->i_alloc_sem);
1878 err = ntfs_truncate(vi);
1879 up_write(&vi->i_alloc_sem);
1880 if (err || NInoTruncateFailed(ni)) {
1883 ntfs_error(vol->sb, "Cannot perform write to inode "
1884 "0x%lx, attribute type 0x%x, because "
1885 "ntfs_truncate() failed (error code "
1887 (unsigned)le32_to_cpu(ni->type), err);
1891 /* The first byte after the write. */
1894 * If the write goes beyond the allocated size, extend the allocation
1895 * to cover the whole of the write, rounded up to the nearest cluster.
1897 read_lock_irqsave(&ni->size_lock, flags);
1898 ll = ni->allocated_size;
1899 read_unlock_irqrestore(&ni->size_lock, flags);
1901 /* Extend the allocation without changing the data size. */
1902 ll = ntfs_attr_extend_allocation(ni, end, -1, pos);
1903 if (likely(ll >= 0)) {
1905 /* If the extension was partial, truncate the write. */
1907 ntfs_debug("Truncating write to inode 0x%lx, "
1908 "attribute type 0x%x, because "
1909 "the allocation was only "
1910 "partially extended.",
1911 vi->i_ino, (unsigned)
1912 le32_to_cpu(ni->type));
1918 read_lock_irqsave(&ni->size_lock, flags);
1919 ll = ni->allocated_size;
1920 read_unlock_irqrestore(&ni->size_lock, flags);
1921 /* Perform a partial write if possible or fail. */
1923 ntfs_debug("Truncating write to inode 0x%lx, "
1924 "attribute type 0x%x, because "
1925 "extending the allocation "
1926 "failed (error code %i).",
1927 vi->i_ino, (unsigned)
1928 le32_to_cpu(ni->type), err);
1932 ntfs_error(vol->sb, "Cannot perform write to "
1933 "inode 0x%lx, attribute type "
1934 "0x%x, because extending the "
1935 "allocation failed (error "
1936 "code %i).", vi->i_ino,
1938 le32_to_cpu(ni->type), err);
1943 pagevec_init(&lru_pvec, 0);
1946 * If the write starts beyond the initialized size, extend it up to the
1947 * beginning of the write and initialize all non-sparse space between
1948 * the old initialized size and the new one. This automatically also
1949 * increments the vfs inode->i_size to keep it above or equal to the
1952 read_lock_irqsave(&ni->size_lock, flags);
1953 ll = ni->initialized_size;
1954 read_unlock_irqrestore(&ni->size_lock, flags);
1956 err = ntfs_attr_extend_initialized(ni, pos, &cached_page,
1959 ntfs_error(vol->sb, "Cannot perform write to inode "
1960 "0x%lx, attribute type 0x%x, because "
1961 "extending the initialized size "
1962 "failed (error code %i).", vi->i_ino,
1963 (unsigned)le32_to_cpu(ni->type), err);
1969 * Determine the number of pages per cluster for non-resident
1973 if (vol->cluster_size > PAGE_CACHE_SIZE && NInoNonResident(ni))
1974 nr_pages = vol->cluster_size >> PAGE_CACHE_SHIFT;
1975 /* Finally, perform the actual write. */
1977 if (likely(nr_segs == 1))
1978 buf = iov->iov_base;
1981 pgoff_t idx, start_idx;
1982 unsigned ofs, do_pages, u;
1985 start_idx = idx = pos >> PAGE_CACHE_SHIFT;
1986 ofs = pos & ~PAGE_CACHE_MASK;
1987 bytes = PAGE_CACHE_SIZE - ofs;
1990 vcn = pos >> vol->cluster_size_bits;
1991 if (vcn != last_vcn) {
1994 * Get the lcn of the vcn the write is in. If
1995 * it is a hole, need to lock down all pages in
1998 down_read(&ni->runlist.lock);
1999 lcn = ntfs_attr_vcn_to_lcn_nolock(ni, pos >>
2000 vol->cluster_size_bits, false);
2001 up_read(&ni->runlist.lock);
2002 if (unlikely(lcn < LCN_HOLE)) {
2004 if (lcn == LCN_ENOMEM)
2007 ntfs_error(vol->sb, "Cannot "
2010 "attribute type 0x%x, "
2011 "because the attribute "
2013 vi->i_ino, (unsigned)
2014 le32_to_cpu(ni->type));
2017 if (lcn == LCN_HOLE) {
2018 start_idx = (pos & ~(s64)
2019 vol->cluster_size_mask)
2020 >> PAGE_CACHE_SHIFT;
2021 bytes = vol->cluster_size - (pos &
2022 vol->cluster_size_mask);
2023 do_pages = nr_pages;
2030 * Bring in the user page(s) that we will copy from _first_.
2031 * Otherwise there is a nasty deadlock on copying from the same
2032 * page(s) as we are writing to, without it/them being marked
2033 * up-to-date. Note, at present there is nothing to stop the
2034 * pages being swapped out between us bringing them into memory
2035 * and doing the actual copying.
2037 if (likely(nr_segs == 1))
2038 ntfs_fault_in_pages_readable(buf, bytes);
2040 ntfs_fault_in_pages_readable_iovec(iov, iov_ofs, bytes);
2041 /* Get and lock @do_pages starting at index @start_idx. */
2042 status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages,
2043 pages, &cached_page, &lru_pvec);
2044 if (unlikely(status))
2047 * For non-resident attributes, we need to fill any holes with
2048 * actual clusters and ensure all buffers are mapped. We also
2049 * need to bring uptodate any buffers that are only partially
2052 if (NInoNonResident(ni)) {
2053 status = ntfs_prepare_pages_for_non_resident_write(
2054 pages, do_pages, pos, bytes);
2055 if (unlikely(status)) {
2059 unlock_page(pages[--do_pages]);
2060 page_cache_release(pages[do_pages]);
2063 * The write preparation may have instantiated
2064 * allocated space outside i_size. Trim this
2065 * off again. We can ignore any errors in this
2066 * case as we will just be wasting a bit of
2067 * allocated space, which is not a disaster.
2069 i_size = i_size_read(vi);
2070 if (pos + bytes > i_size)
2071 vmtruncate(vi, i_size);
2075 u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index;
2076 if (likely(nr_segs == 1)) {
2077 copied = ntfs_copy_from_user(pages + u, do_pages - u,
2081 copied = ntfs_copy_from_user_iovec(pages + u,
2082 do_pages - u, ofs, &iov, &iov_ofs,
2084 ntfs_flush_dcache_pages(pages + u, do_pages - u);
2085 status = ntfs_commit_pages_after_write(pages, do_pages, pos,
2087 if (likely(!status)) {
2091 if (unlikely(copied != bytes))
2095 unlock_page(pages[--do_pages]);
2096 mark_page_accessed(pages[do_pages]);
2097 page_cache_release(pages[do_pages]);
2099 if (unlikely(status))
2101 balance_dirty_pages_ratelimited(mapping);
2107 page_cache_release(cached_page);
2108 /* For now, when the user asks for O_SYNC, we actually give O_DSYNC. */
2109 if (likely(!status)) {
2110 if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(vi))) {
2111 if (!mapping->a_ops->writepage || !is_sync_kiocb(iocb))
2112 status = generic_osync_inode(vi, mapping,
2113 OSYNC_METADATA|OSYNC_DATA);
2116 pagevec_lru_add(&lru_pvec);
2117 ntfs_debug("Done. Returning %s (written 0x%lx, status %li).",
2118 written ? "written" : "status", (unsigned long)written,
2120 return written ? written : status;
2124 * ntfs_file_aio_write_nolock -
2126 static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
2127 const struct iovec *iov, unsigned long nr_segs, loff_t *ppos)
2129 struct file *file = iocb->ki_filp;
2130 struct address_space *mapping = file->f_mapping;
2131 struct inode *inode = mapping->host;
2134 size_t count; /* after file limit checks */
2135 ssize_t written, err;
2138 for (seg = 0; seg < nr_segs; seg++) {
2139 const struct iovec *iv = &iov[seg];
2141 * If any segment has a negative length, or the cumulative
2142 * length ever wraps negative then return -EINVAL.
2144 count += iv->iov_len;
2145 if (unlikely((ssize_t)(count|iv->iov_len) < 0))
2147 if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
2152 count -= iv->iov_len; /* This segment is no good */
2156 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
2157 /* We can write back this queue in page reclaim. */
2158 current->backing_dev_info = mapping->backing_dev_info;
2160 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
2165 err = remove_suid(file->f_path.dentry);
2168 file_update_time(file);
2169 written = ntfs_file_buffered_write(iocb, iov, nr_segs, pos, ppos,
2172 current->backing_dev_info = NULL;
2173 return written ? written : err;
2177 * ntfs_file_aio_write -
2179 static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2180 unsigned long nr_segs, loff_t pos)
2182 struct file *file = iocb->ki_filp;
2183 struct address_space *mapping = file->f_mapping;
2184 struct inode *inode = mapping->host;
2187 BUG_ON(iocb->ki_pos != pos);
2189 mutex_lock(&inode->i_mutex);
2190 ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos);
2191 mutex_unlock(&inode->i_mutex);
2192 if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2193 int err = sync_page_range(inode, mapping, pos, ret);
2201 * ntfs_file_writev -
2203 * Basically the same as generic_file_writev() except that it ends up calling
2204 * ntfs_file_aio_write_nolock() instead of __generic_file_aio_write_nolock().
2206 static ssize_t ntfs_file_writev(struct file *file, const struct iovec *iov,
2207 unsigned long nr_segs, loff_t *ppos)
2209 struct address_space *mapping = file->f_mapping;
2210 struct inode *inode = mapping->host;
2214 mutex_lock(&inode->i_mutex);
2215 init_sync_kiocb(&kiocb, file);
2216 ret = ntfs_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
2217 if (ret == -EIOCBQUEUED)
2218 ret = wait_on_sync_kiocb(&kiocb);
2219 mutex_unlock(&inode->i_mutex);
2220 if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
2221 int err = sync_page_range(inode, mapping, *ppos - ret, ret);
2229 * ntfs_file_write - simple wrapper for ntfs_file_writev()
2231 static ssize_t ntfs_file_write(struct file *file, const char __user *buf,
2232 size_t count, loff_t *ppos)
2234 struct iovec local_iov = { .iov_base = (void __user *)buf,
2237 return ntfs_file_writev(file, &local_iov, 1, ppos);
2241 * ntfs_file_fsync - sync a file to disk
2242 * @filp: file to be synced
2243 * @dentry: dentry describing the file to sync
2244 * @datasync: if non-zero only flush user data and not metadata
2246 * Data integrity sync of a file to disk. Used for fsync, fdatasync, and msync
2247 * system calls. This function is inspired by fs/buffer.c::file_fsync().
2249 * If @datasync is false, write the mft record and all associated extent mft
2250 * records as well as the $DATA attribute and then sync the block device.
2252 * If @datasync is true and the attribute is non-resident, we skip the writing
2253 * of the mft record and all associated extent mft records (this might still
2254 * happen due to the write_inode_now() call).
2256 * Also, if @datasync is true, we do not wait on the inode to be written out
2257 * but we always wait on the page cache pages to be written out.
2259 * Note: In the past @filp could be NULL so we ignore it as we don't need it
2262 * Locking: Caller must hold i_mutex on the inode.
2264 * TODO: We should probably also write all attribute/index inodes associated
2265 * with this inode but since we have no simple way of getting to them we ignore
2266 * this problem for now.
2268 static int ntfs_file_fsync(struct file *filp, struct dentry *dentry,
2271 struct inode *vi = dentry->d_inode;
2274 ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);
2275 BUG_ON(S_ISDIR(vi->i_mode));
2276 if (!datasync || !NInoNonResident(NTFS_I(vi)))
2277 ret = ntfs_write_inode(vi, 1);
2278 write_inode_now(vi, !datasync);
2280 * NOTE: If we were to use mapping->private_list (see ext2 and
2281 * fs/buffer.c) for dirty blocks then we could optimize the below to be
2282 * sync_mapping_buffers(vi->i_mapping).
2284 err = sync_blockdev(vi->i_sb->s_bdev);
2285 if (unlikely(err && !ret))
2288 ntfs_debug("Done.");
2290 ntfs_warning(vi->i_sb, "Failed to f%ssync inode 0x%lx. Error "
2291 "%u.", datasync ? "data" : "", vi->i_ino, -ret);
2295 #endif /* NTFS_RW */
2297 const struct file_operations ntfs_file_ops = {
2298 .llseek = generic_file_llseek, /* Seek inside file. */
2299 .read = do_sync_read, /* Read from file. */
2300 .aio_read = generic_file_aio_read, /* Async read from file. */
2302 .write = ntfs_file_write, /* Write to file. */
2303 .aio_write = ntfs_file_aio_write, /* Async write to file. */
2304 /*.release = ,*/ /* Last file is closed. See
2306 ext2_release_file() for
2307 how to use this to discard
2308 preallocated space for
2309 write opened files. */
2310 .fsync = ntfs_file_fsync, /* Sync a file to disk. */
2311 /*.aio_fsync = ,*/ /* Sync all outstanding async
2314 #endif /* NTFS_RW */
2315 /*.ioctl = ,*/ /* Perform function on the
2316 mounted filesystem. */
2317 .mmap = generic_file_mmap, /* Mmap file. */
2318 .open = ntfs_file_open, /* Open file. */
2319 .sendfile = generic_file_sendfile, /* Zero-copy data send with
2320 the data source being on
2321 the ntfs partition. We do
2322 not need to care about the
2323 data destination. */
2324 /*.sendpage = ,*/ /* Zero-copy data send with
2325 the data destination being
2326 on the ntfs partition. We
2327 do not need to care about
2331 const struct inode_operations ntfs_file_inode_ops = {
2333 .truncate = ntfs_truncate_vfs,
2334 .setattr = ntfs_setattr,
2335 #endif /* NTFS_RW */
2338 const struct file_operations ntfs_empty_file_ops = {};
2340 const struct inode_operations ntfs_empty_inode_ops = {};