/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/byteorder.h>

#define MLOG_MASK_PREFIX ML_FILE_IO
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "inode.h"
#include "journal.h"
#include "super.h"
#include "symlink.h"

#include "buffer_head_io.h"
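/*
 * This file implements the address_space operations for ocfs2 inodes:
 * mapping logical file blocks to disk blocks, reading and writing
 * pages, and O_DIRECT support. Because ocfs2 is a cluster filesystem,
 * most entry points must take cluster (DLM) locks before touching
 * either file data or the inode's allocation metadata.
 */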
static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
                                   struct buffer_head *bh_result, int create)
{
        int err = -EIO;
        int status;
        struct ocfs2_dinode *fe = NULL;
        struct buffer_head *bh = NULL;
        struct buffer_head *buffer_cache_bh = NULL;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        void *kaddr;

        mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
                   (unsigned long long)iblock, bh_result, create);

        BUG_ON(ocfs2_inode_is_fast_symlink(inode));

        if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) {
                mlog(ML_ERROR, "block offset > PATH_MAX: %llu",
                     (unsigned long long)iblock);
                goto bail;
        }

        status = ocfs2_read_block(OCFS2_SB(inode->i_sb),
                                  OCFS2_I(inode)->ip_blkno,
                                  &bh, OCFS2_BH_CACHED, inode);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }
        fe = (struct ocfs2_dinode *) bh->b_data;

        if (!OCFS2_IS_VALID_DINODE(fe)) {
                mlog(ML_ERROR, "Invalid dinode #%llu: signature = %.*s\n",
                     (unsigned long long)fe->i_blkno, 7, fe->i_signature);
                goto bail;
        }

        if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
                                                    le32_to_cpu(fe->i_clusters))) {
                mlog(ML_ERROR, "block offset is outside the allocated size: "
                     "%llu\n", (unsigned long long)iblock);
                goto bail;
        }

        /* We don't use the page cache to create symlink data, so if
         * need be, copy it over from the buffer cache. */
        if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) {
                u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) +
                            iblock;
                buffer_cache_bh = sb_getblk(osb->sb, blkno);
                if (!buffer_cache_bh) {
                        mlog(ML_ERROR, "couldn't getblock for symlink!\n");
                        goto bail;
                }

                /* we haven't locked out transactions, so a commit
                 * could've happened. Since we've got a reference on
                 * the bh, even if it commits while we're doing the
                 * copy, the data is still good. */
                if (buffer_jbd(buffer_cache_bh)
                    && ocfs2_inode_is_new(inode)) {
                        kaddr = kmap_atomic(bh_result->b_page, KM_USER0);
                        if (!kaddr) {
                                mlog(ML_ERROR, "couldn't kmap!\n");
                                brelse(buffer_cache_bh);
                                goto bail;
                        }
                        memcpy(kaddr + (bh_result->b_size * iblock),
                               buffer_cache_bh->b_data,
                               bh_result->b_size);
                        kunmap_atomic(kaddr, KM_USER0);
                        set_buffer_uptodate(bh_result);
                }
                brelse(buffer_cache_bh);
        }

        map_bh(bh_result, inode->i_sb,
               le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock);

        err = 0;

bail:
        if (bh)
                brelse(bh);

        mlog_exit(err);
        return err;
}
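/*
 * Translate a logical file block into a disk block for everything but
 * symlinks. ocfs2 doesn't allocate from get_block (files never have
 * holes by the time we get here), so this only consults the extent
 * map; "create" merely controls whether buffer_new is set for blocks
 * at or past the current end of file.
 */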
static int ocfs2_get_block(struct inode *inode, sector_t iblock,
                           struct buffer_head *bh_result, int create)
{
        int err = 0;
        u64 p_blkno, past_eof;

        mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
                   (unsigned long long)iblock, bh_result, create);

        if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
                mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
                     inode, inode->i_ino);

        if (S_ISLNK(inode->i_mode)) {
                /* this always does I/O for some reason. */
                err = ocfs2_symlink_get_block(inode, iblock, bh_result, create);
                goto bail;
        }

        /* this can happen if another node truncates after our extend! */
        spin_lock(&OCFS2_I(inode)->ip_lock);
        if (iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
                                               OCFS2_I(inode)->ip_clusters))
                err = -EIO;
        spin_unlock(&OCFS2_I(inode)->ip_lock);
        if (err)
                goto bail;

        err = ocfs2_extent_map_get_blocks(inode, iblock, 1, &p_blkno,
                                          NULL);
        if (err) {
                mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
                     "%llu, NULL)\n", err, inode, (unsigned long long)iblock,
                     (unsigned long long)p_blkno);
                goto bail;
        }

        map_bh(bh_result, inode->i_sb, p_blkno);

        if (bh_result->b_blocknr == 0) {
                err = -EIO;
                mlog(ML_ERROR, "iblock = %llu p_blkno = %llu blkno=(%llu)\n",
                     (unsigned long long)iblock,
                     (unsigned long long)p_blkno,
                     (unsigned long long)OCFS2_I(inode)->ip_blkno);
        }

        past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
        mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino,
             (unsigned long long)past_eof);

        if (create && (iblock >= past_eof))
                set_buffer_new(bh_result);

bail:
        if (err < 0)
                err = -EIO;

        mlog_exit(err);
        return err;
}
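/*
 * Lock ordering in ocfs2_readpage below is meta lock, then
 * ip_alloc_sem, then data lock. The _with_page lock variants may have
 * to drop the page lock to avoid deadlocking against requests from
 * other nodes; they return AOP_TRUNCATED_PAGE so the VFS re-locks the
 * page and retries.
 */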
static int ocfs2_readpage(struct file *file, struct page *page)
{
        int ret, unlock = 1;
        struct inode *inode = page->mapping->host;
        loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;

        mlog_entry("(0x%p, %lu)\n", file, (page ? page->index : 0));

        ret = ocfs2_meta_lock_with_page(inode, NULL, NULL, 0, page);
        if (ret != 0) {
                if (ret == AOP_TRUNCATED_PAGE)
                        unlock = 0;
                mlog_errno(ret);
                goto out;
        }

        down_read(&OCFS2_I(inode)->ip_alloc_sem);

        /*
         * i_size might have just been updated as we grabbed the meta lock.  We
         * might now be discovering a truncate that hit on another node.
         * block_read_full_page->get_block freaks out if it is asked to read
         * beyond the end of a file, so we check here.  Callers
         * (generic_file_read, fault->nopage) are clever enough to check i_size
         * and notice that the page they just read isn't needed.
         *
         * XXX sys_readahead() seems to get that wrong?
         */
        if (start >= i_size_read(inode)) {
                char *addr = kmap(page);
                memset(addr, 0, PAGE_SIZE);
                flush_dcache_page(page);
                kunmap(page);
                SetPageUptodate(page);
                ret = 0;
                goto out_alloc;
        }

        ret = ocfs2_data_lock_with_page(inode, 0, page);
        if (ret != 0) {
                if (ret == AOP_TRUNCATED_PAGE)
                        unlock = 0;
                mlog_errno(ret);
                goto out_alloc;
        }

        ret = block_read_full_page(page, ocfs2_get_block);
        unlock = 0;

        ocfs2_data_unlock(inode, 0);
out_alloc:
        up_read(&OCFS2_I(inode)->ip_alloc_sem);
        ocfs2_meta_unlock(inode, 0);
out:
        if (unlock)
                unlock_page(page);
        mlog_exit(ret);
        return ret;
}
/* Note: Because we don't support holes, our allocation has
 * already happened (allocation writes zeros to the file data)
 * so we don't have to worry about ordered writes in
 * ocfs2_writepage.
 *
 * ->writepage is called during the process of invalidating the page cache
 * during blocked lock processing.  It can't block on any cluster locks
 * during block mapping.  It's relying on the fact that the block
 * mapping can't have disappeared under the dirty pages that it is
 * being asked to write back.
 */
static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
{
        int ret;

        mlog_entry("(0x%p)\n", page);

        ret = block_write_full_page(page, ocfs2_get_block, wbc);

        mlog_exit(ret);

        return ret;
}
/* This can also be called from ocfs2_write_zero_page() which has done
 * its own cluster locking. */
int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
                               unsigned from, unsigned to)
{
        int ret;

        down_read(&OCFS2_I(inode)->ip_alloc_sem);

        ret = block_prepare_write(page, from, to, ocfs2_get_block);

        up_read(&OCFS2_I(inode)->ip_alloc_sem);

        return ret;
}
/*
 * ocfs2_prepare_write() can be an outer-most ocfs2 call when it is called
 * from loopback.  It must be able to perform its own locking around
 * ocfs2_get_block().
 */
static int ocfs2_prepare_write(struct file *file, struct page *page,
                               unsigned from, unsigned to)
{
        int ret;
        struct inode *inode = page->mapping->host;

        mlog_entry("(0x%p, 0x%p, %u, %u)\n", file, page, from, to);

        ret = ocfs2_meta_lock_with_page(inode, NULL, NULL, 0, page);
        if (ret != 0) {
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_prepare_write_nolock(inode, page, from, to);

        ocfs2_meta_unlock(inode, 0);
out:
        mlog_exit(ret);
        return ret;
}
/* Taken from ext3. We don't necessarily need the full blown
 * functionality yet, but IMHO it's better to cut and paste the whole
 * thing so we can avoid introducing our own bugs (and easily pick up
 * their fixes when they happen) --Mark */
static int walk_page_buffers(handle_t *handle,
                             struct buffer_head *head,
                             unsigned from,
                             unsigned to,
                             int *partial,
                             int (*fn)(handle_t *handle,
                                       struct buffer_head *bh))
{
        struct buffer_head *bh;
        unsigned block_start, block_end;
        unsigned blocksize = head->b_size;
        int err, ret = 0;
        struct buffer_head *next;

        for (bh = head, block_start = 0;
             ret == 0 && (bh != head || !block_start);
             block_start = block_end, bh = next)
        {
                next = bh->b_this_page;
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
                        if (partial && !buffer_uptodate(bh))
                                *partial = 1;
                        continue;
                }
                err = (*fn)(handle, bh);
                if (!ret)
                        ret = err;
        }
        return ret;
}
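/*
 * Start a transaction for a page write and, when the inode requires
 * ordered data mode, walk the page's buffers so the data is ordered
 * against the journal commit. Returns an ERR_PTR on failure.
 */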
struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode,
                                                         struct page *page,
                                                         unsigned from,
                                                         unsigned to)
{
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_journal_handle *handle = NULL;
        int ret = 0;

        handle = ocfs2_start_trans(osb, NULL, OCFS2_INODE_UPDATE_CREDITS);
        if (!handle) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out;
        }

        if (ocfs2_should_order_data(inode)) {
                ret = walk_page_buffers(handle->k_handle,
                                        page_buffers(page),
                                        from, to, NULL,
                                        ocfs2_journal_dirty_data);
                if (ret < 0)
                        mlog_errno(ret);
        }
out:
        if (ret) {
                if (handle)
                        ocfs2_commit_trans(handle);
                handle = ERR_PTR(ret);
        }
        return handle;
}
static int ocfs2_commit_write(struct file *file, struct page *page,
                              unsigned from, unsigned to)
{
        int ret;
        struct buffer_head *di_bh = NULL;
        struct inode *inode = page->mapping->host;
        struct ocfs2_journal_handle *handle = NULL;
        struct ocfs2_dinode *di;

        mlog_entry("(0x%p, 0x%p, %u, %u)\n", file, page, from, to);

        /* NOTE: ocfs2_file_aio_write has ensured that it's safe for
         * us to continue here without rechecking the I/O against
         * changed inode values.
         *
         * 1) We're currently holding the inode alloc lock, so no
         *    nodes can change it underneath us.
         *
         * 2) We've had to take the metadata lock at least once
         *    already to check for extending writes, suid removal, etc.
         *    The meta data update code then ensures that we don't get a
         *    stale inode allocation image (i_size, i_clusters, etc).
         */

        ret = ocfs2_meta_lock_with_page(inode, NULL, &di_bh, 1, page);
        if (ret != 0) {
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_data_lock_with_page(inode, 1, page);
        if (ret != 0) {
                mlog_errno(ret);
                goto out_unlock_meta;
        }

        handle = ocfs2_start_walk_page_trans(inode, page, from, to);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto out_unlock_data;
        }

        /* Mark our buffer early. We'd rather catch this error up here
         * as opposed to after a successful commit_write which would
         * require us to set back inode->i_size. */
        ret = ocfs2_journal_access(handle, inode, di_bh,
                                   OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret < 0) {
                mlog_errno(ret);
                goto out_commit;
        }

        /* might update i_size */
        ret = generic_commit_write(file, page, from, to);
        if (ret < 0) {
                mlog_errno(ret);
                goto out_commit;
        }

        di = (struct ocfs2_dinode *)di_bh->b_data;

        /* ocfs2_mark_inode_dirty() is too heavy to use here. */
        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
        di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);

        inode->i_blocks = ocfs2_align_bytes_to_sectors((u64)(i_size_read(inode)));
        di->i_size = cpu_to_le64((u64)i_size_read(inode));

        ret = ocfs2_journal_dirty(handle, di_bh);
        if (ret < 0) {
                mlog_errno(ret);
                goto out_commit;
        }

out_commit:
        ocfs2_commit_trans(handle);
out_unlock_data:
        ocfs2_data_unlock(inode, 1);
out_unlock_meta:
        ocfs2_meta_unlock(inode, 1);
out:
        if (di_bh)
                brelse(di_bh);

        mlog_exit(ret);
        return ret;
}
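/*
 * ->bmap translates a logical block into a physical block number for
 * user space (e.g. the FIBMAP ioctl). A return of zero means the
 * lookup failed; sector_t has no way to carry an error code back.
 */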
static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
{
        sector_t status;
        u64 p_blkno = 0;
        int err = 0;
        struct inode *inode = mapping->host;

        mlog_entry("(block = %llu)\n", (unsigned long long)block);

        /* We don't need to lock journal system files, since they aren't
         * accessed concurrently from multiple nodes.
         */
        if (!INODE_JOURNAL(inode)) {
                err = ocfs2_meta_lock(inode, NULL, NULL, 0);
                if (err) {
                        if (err != -ENOENT)
                                mlog_errno(err);
                        goto bail;
                }
                down_read(&OCFS2_I(inode)->ip_alloc_sem);
        }

        err = ocfs2_extent_map_get_blocks(inode, block, 1, &p_blkno,
                                          NULL);

        if (!INODE_JOURNAL(inode)) {
                up_read(&OCFS2_I(inode)->ip_alloc_sem);
                ocfs2_meta_unlock(inode, 0);
        }

        if (err) {
                mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
                     (unsigned long long)block);
                mlog_errno(err);
        }

bail:
        status = err ? 0 : p_blkno;

        mlog_exit((int)status);

        return status;
}
/*
 * TODO: Make this into a generic get_blocks function.
 *
 * From do_direct_io in direct-io.c:
 *  "So what we do is to permit the ->get_blocks function to populate
 *   bh.b_size with the size of IO which is permitted at this offset and
 *   this i_blkbits."
 *
 * This function is called directly from get_more_blocks in direct-io.c.
 *
 * called like this: dio->get_blocks(dio->inode, fs_startblk,
 *                                   fs_count, map_bh, dio->rw == WRITE);
 */
static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
                                      struct buffer_head *bh_result, int create)
{
        int ret;
        u64 vbo_max; /* file offset, max_blocks from iblock */
        u64 p_blkno;
        int contig_blocks;
        unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
        unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;

        /* This function won't even be called if the request isn't all
         * nicely aligned and of the right size, so there's no need
         * for us to check any of that. */

        vbo_max = ((u64)iblock + max_blocks) << blocksize_bits;

        spin_lock(&OCFS2_I(inode)->ip_lock);
        if ((iblock + max_blocks) >
            ocfs2_clusters_to_blocks(inode->i_sb,
                                     OCFS2_I(inode)->ip_clusters)) {
                spin_unlock(&OCFS2_I(inode)->ip_lock);
                ret = -EIO;
                goto bail;
        }
        spin_unlock(&OCFS2_I(inode)->ip_lock);

        /* This figures out the size of the next contiguous block, and
         * our logical offset */
        ret = ocfs2_extent_map_get_blocks(inode, iblock, 1, &p_blkno,
                                          &contig_blocks);
        if (ret) {
                mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n",
                     (unsigned long long)iblock);
                ret = -EIO;
                goto bail;
        }

        map_bh(bh_result, inode->i_sb, p_blkno);

        /* make sure we don't map more than max_blocks blocks here as
         * that's all the kernel will handle at this point. */
        if (max_blocks < contig_blocks)
                contig_blocks = max_blocks;
        bh_result->b_size = contig_blocks << blocksize_bits;
bail:
        return ret;
}
/*
 * ocfs2_dio_end_io is called by the dio core when a dio is finished.  We're
 * particularly interested in the aio/dio case.  Like the core uses
 * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from
 * truncation on another.
 */
static void ocfs2_dio_end_io(struct kiocb *iocb,
                             loff_t offset,
                             ssize_t bytes,
                             void *private)
{
        struct inode *inode = iocb->ki_filp->f_dentry->d_inode;

        /* this io's submitter should not have unlocked this before we could */
        BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));

        ocfs2_iocb_clear_rw_locked(iocb);
        up_read(&inode->i_alloc_sem);
        ocfs2_rw_unlock(inode, 0);
}
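/*
 * ocfs2_direct_IO briefly takes and drops a PR data lock before
 * submitting I/O; see the comment in the function body. Actual
 * submission is handed to blockdev_direct_IO_no_locking(), since the
 * rw_lock taken by the caller (and released in ocfs2_dio_end_io above)
 * already protects against truncation from other nodes.
 */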
static ssize_t ocfs2_direct_IO(int rw,
                               struct kiocb *iocb,
                               const struct iovec *iov,
                               loff_t offset,
                               unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
        int ret;

        mlog_entry_void();

        /*
         * We get PR data locks even for O_DIRECT.  This allows
         * concurrent O_DIRECT I/O but doesn't let O_DIRECT with
         * extending and buffered zeroing writes race.  If they did
         * race then the buffered zeroing could be written back after
         * the O_DIRECT I/O.  It's one thing to tell people not to mix
         * buffered and O_DIRECT writes, but expecting them to
         * understand that file extension is also an implicit buffered
         * write is too much.  By getting the PR we force writeback of
         * the buffered zeroing before proceeding.
         */
        ret = ocfs2_data_lock(inode, 0);
        if (ret < 0) {
                mlog_errno(ret);
                goto out;
        }
        ocfs2_data_unlock(inode, 0);

        ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
                                            inode->i_sb->s_bdev, iov, offset,
                                            nr_segs,
                                            ocfs2_direct_IO_get_blocks,
                                            ocfs2_dio_end_io);
out:
        mlog_exit(ret);
        return ret;
}
const struct address_space_operations ocfs2_aops = {
        .readpage       = ocfs2_readpage,
        .writepage      = ocfs2_writepage,
        .prepare_write  = ocfs2_prepare_write,
        .commit_write   = ocfs2_commit_write,
        .bmap           = ocfs2_bmap,
        .sync_page      = block_sync_page,
        .direct_IO      = ocfs2_direct_IO
};
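/*
 * A minimal usage sketch: ocfs2 points each regular inode's mapping at
 * this table when the inode is initialized, e.g.
 *
 *      inode->i_mapping->a_ops = &ocfs2_aops;
 *
 * after which the generic VFS paths (generic_file_read() and friends)
 * dispatch through these operations.
 */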