 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 * linux/fs/ext2/inode.c
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 * linux/fs/minix/inode.c
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/errno.h>
#include <linux/ufs_fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
static u64 ufs_frag_map(struct inode *inode, sector_t frag);

static int ufs_block_to_path(struct inode *inode, sector_t i_block, sector_t offsets[4])
        struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
        int ptrs = uspi->s_apb;
        int ptrs_bits = uspi->s_apbshift;
        const long direct_blocks = UFS_NDADDR,
                indirect_blocks = ptrs,
                double_blocks = (1 << (ptrs_bits * 2));
        UFSD("ptrs=uspi->s_apb = %d, double_blocks=%ld\n", ptrs, double_blocks);
                ufs_warning(inode->i_sb, "ufs_block_to_path", "block < 0");
        } else if (i_block < direct_blocks) {
                offsets[n++] = i_block;
        } else if ((i_block -= direct_blocks) < indirect_blocks) {
                offsets[n++] = UFS_IND_BLOCK;
                offsets[n++] = i_block;
        } else if ((i_block -= indirect_blocks) < double_blocks) {
                offsets[n++] = UFS_DIND_BLOCK;
                offsets[n++] = i_block >> ptrs_bits;
                offsets[n++] = i_block & (ptrs - 1);
        } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
                offsets[n++] = UFS_TIND_BLOCK;
                offsets[n++] = i_block >> (ptrs_bits * 2);
                offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
                offsets[n++] = i_block & (ptrs - 1);
                ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
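        /*
         * Worked example (illustrative values, not taken from this file):
         * with an 8 KiB block size and 4-byte UFS1 block pointers,
         * s_apb = 2048 and UFS_NDADDR = 12, so file blocks 0-11 are reached
         * through the direct pointers, blocks 12-2059 through the single
         * indirect block, the next 2048*2048 blocks through the double
         * indirect block, and anything beyond that through the triple
         * indirect block.
         */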
 * Returns the location of the fragment from
 * the beginning of the filesystem.
static u64 ufs_frag_map(struct inode *inode, sector_t frag)
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        u64 mask = (u64) uspi->s_apbmask >> uspi->s_fpbshift;
        int shift = uspi->s_apbshift - uspi->s_fpbshift;
        sector_t offsets[4], *p;
        int depth = ufs_block_to_path(inode, frag >> uspi->s_fpbshift, offsets);
        unsigned flags = UFS_SB(sb)->s_flags;
        UFSD(": frag = %llu depth = %d\n", (unsigned long long)frag, depth);
        UFSD(": uspi->s_fpbshift = %d, uspi->s_apbmask = %x, mask=%llx\n",
             uspi->s_fpbshift, uspi->s_apbmask,
             (unsigned long long)mask);
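        /*
         * Note on mask/shift: on-disk block pointers are fragment numbers,
         * and the buffer size used by sb_bread() here is the UFS fragment
         * size, so one block's worth of pointers spans several buffers.
         * For a pointer index n, n >> shift selects the fragment of the
         * indirect block that holds it and n & mask is its slot within
         * that fragment.
         */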
        if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
        block = ufsi->i_u1.i_data[*p++];
                struct buffer_head *bh;
                bh = sb_bread(sb, uspi->s_sbbase + fs32_to_cpu(sb, block) + (n >> shift));
                block = ((__fs32 *) bh->b_data)[n & mask];
        ret = (u64) (uspi->s_sbbase + fs32_to_cpu(sb, block) + (frag & uspi->s_fpbmask));
        u2_block = ufsi->i_u1.u2_i_data[*p++];
                struct buffer_head *bh;
                temp = (u64)(uspi->s_sbbase) + fs64_to_cpu(sb, u2_block);
                bh = sb_bread(sb, temp + (u64)(n >> shift));
                u2_block = ((__fs64 *)bh->b_data)[n & mask];
        temp = (u64)uspi->s_sbbase + fs64_to_cpu(sb, u2_block);
        ret = temp + (u64) (frag & uspi->s_fpbmask);
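        /*
         * In both the UFS1 and UFS2 paths the value computed here is a
         * fragment number counted from the start of the device (s_sbbase
         * included); frag & s_fpbmask adds back the offset of the wanted
         * fragment within its block.  ufs_getfrag_block() passes the result
         * straight to map_bh().
         */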
 * ufs_inode_getfrag() - allocate new fragment(s)
 * @inode: pointer to inode
 * @fragment: number of the `fragment' which holds the pointer
 *	to the newly allocated fragment(s)
 * @new_fragment: number of newly allocated fragment(s)
 * @required: how many fragment(s) we require
 * @err: we set it if something goes wrong
 * @phys: pointer to where we save the physical number of the newly allocated
 *	fragments; NULL if we are not allocating data (e.g. indirect blocks)
 * @new: we set it if we allocate a new block
 * @locked_page: for ufs_new_fragments()
static struct buffer_head *
ufs_inode_getfrag(struct inode *inode, u64 fragment,
                  sector_t new_fragment, unsigned int required, int *err,
                  long *phys, int *new, struct page *locked_page)
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        struct buffer_head *result;
        unsigned blockoff, lastblockoff;
        u64 tmp, goal, lastfrag, block, lastblock;
        UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, required %u, "
             "metadata %d\n", inode->i_ino, (unsigned long long)fragment,
             (unsigned long long)new_fragment, required, !phys);
        /* TODO: to be done for write support
        if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
        block = ufs_fragstoblks (fragment);
        blockoff = ufs_fragnum (fragment);
        p = ufs_get_direct_data_ptr(uspi, ufsi, block);
        tmp = ufs_data_ptr_to_cpu(sb, p);
        lastfrag = ufsi->i_lastfrag;
        if (tmp && fragment < lastfrag) {
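                /*
                 * Fast path: the fragment already has backing store.  The
                 * block pointer is re-read after sb_getblk(), which may
                 * sleep; if it changed underneath us (for example because of
                 * a concurrent truncate) the lookup is retried.
                 */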
                        result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
                        if (tmp == ufs_data_ptr_to_cpu(sb, p)) {
                                UFSD("EXIT, result %llu\n",
                                     (unsigned long long)tmp + blockoff);
                        *phys = uspi->s_sbbase + tmp + blockoff;
        lastblock = ufs_fragstoblks (lastfrag);
        lastblockoff = ufs_fragnum (lastfrag);
         * We will extend the file into a new block beyond the last allocated block
        if (lastblock < block) {
                 * We must reallocate the last allocated block
                        p2 = ufs_get_direct_data_ptr(uspi, ufsi, lastblock);
                        tmp = ufs_new_fragments(inode, p2, lastfrag,
                                                ufs_data_ptr_to_cpu(sb, p2),
                                                uspi->s_fpb - lastblockoff,
                                if (lastfrag != ufsi->i_lastfrag)
                        lastfrag = ufsi->i_lastfrag;
                tmp = ufs_data_ptr_to_cpu(sb,
                                          ufs_get_direct_data_ptr(uspi, ufsi,
                        goal = tmp + uspi->s_fpb;
                tmp = ufs_new_fragments (inode, p, fragment - blockoff,
                                         goal, required + blockoff,
                                         phys != NULL ? locked_page : NULL);
        } else if (lastblock == block) {
                 * We will extend the last allocated block
                tmp = ufs_new_fragments(inode, p, fragment -
                                        (blockoff - lastblockoff),
                                        ufs_data_ptr_to_cpu(sb, p),
                                        required + (blockoff - lastblockoff),
                                        err, phys != NULL ? locked_page : NULL);
        } else /* (lastblock > block) */ {
                 * We will allocate a new block before the last allocated block
                tmp = ufs_data_ptr_to_cpu(sb,
                                          ufs_get_direct_data_ptr(uspi, ufsi, block - 1));
                        goal = tmp + uspi->s_fpb;
                tmp = ufs_new_fragments(inode, p, fragment - blockoff,
                                        goal, uspi->s_fpb, err,
                                        phys != NULL ? locked_page : NULL);
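        /*
         * When the allocation above fails, the check that follows tells a
         * genuine out-of-space condition apart from a race: if another task
         * changed the block pointer or i_lastfrag while we slept in
         * ufs_new_fragments(), the whole lookup is retried instead of
         * failing.
         */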
                if ((!blockoff && ufs_data_ptr_to_cpu(sb, p)) ||
                    (blockoff && lastfrag != ufsi->i_lastfrag))
                result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
                *phys = uspi->s_sbbase + tmp + blockoff;
        inode->i_ctime = CURRENT_TIME_SEC;
                ufs_sync_inode (inode);
        mark_inode_dirty(inode);
        UFSD("EXIT, result %llu\n", (unsigned long long)tmp + blockoff);
        /* This part: to be implemented ....
           Required only for writing, not required for READ-ONLY.
        u2_block = ufs_fragstoblks(fragment);
        u2_blockoff = ufs_fragnum(fragment);
        p = ufsi->i_u1.u2_i_data + block;
        tmp = fs32_to_cpu(sb, *p);
        lastfrag = ufsi->i_lastfrag;
 * ufs_inode_getblock() - allocate new block
 * @inode: pointer to inode
 * @bh: pointer to the block which holds the "pointer" to the newly allocated block
 * @fragment: number of the `fragment' which holds the pointer
 *	to the newly allocated block
 * @new_fragment: number of the newly allocated fragment
 *	(the block will hold this fragment plus the remaining uspi->s_fpb-1 fragments)
 * @err: see ufs_inode_getfrag()
 * @phys: see ufs_inode_getfrag()
 * @new: see ufs_inode_getfrag()
 * @locked_page: see ufs_inode_getfrag()
static struct buffer_head *
ufs_inode_getblock(struct inode *inode, struct buffer_head *bh,
                   u64 fragment, sector_t new_fragment, int *err,
                   long *phys, int *new, struct page *locked_page)
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        struct buffer_head *result;
        u64 tmp, goal, block;
        block = ufs_fragstoblks (fragment);
        blockoff = ufs_fragnum (fragment);
        UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, metadata %d\n",
             inode->i_ino, (unsigned long long)fragment,
             (unsigned long long)new_fragment, !phys);
        if (!buffer_uptodate(bh)) {
                ll_rw_block (READ, 1, &bh);
                if (!buffer_uptodate(bh))
        if (uspi->fs_magic == UFS2_MAGIC)
                p = (__fs64 *)bh->b_data + block;
                p = (__fs32 *)bh->b_data + block;
        tmp = ufs_data_ptr_to_cpu(sb, p);
                        result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
                        if (tmp == ufs_data_ptr_to_cpu(sb, p))
                        *phys = uspi->s_sbbase + tmp + blockoff;
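        /*
         * Not allocated yet: pick an allocation goal.  Prefer the block
         * right after the one named by the previous pointer in this
         * indirect block; failing that, a block just past the indirect
         * block itself (bh->b_blocknr).
         */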
        if (block && (uspi->fs_magic == UFS2_MAGIC ?
                      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[block-1])) :
                      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[block-1]))))
                goal = tmp + uspi->s_fpb;
                goal = bh->b_blocknr + uspi->s_fpb;
        tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
                                uspi->s_fpb, err, locked_page);
                if (ufs_data_ptr_to_cpu(sb, p))
                result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
                *phys = uspi->s_sbbase + tmp + blockoff;
        mark_buffer_dirty(bh);
                sync_dirty_buffer(bh);
        inode->i_ctime = CURRENT_TIME_SEC;
        mark_inode_dirty(inode);
        UFSD("result %llu\n", (unsigned long long)tmp + blockoff);
 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
 * readpage, writepage and so on
int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        struct buffer_head *bh;
        unsigned long ptr, phys;
                phys64 = ufs_frag_map(inode, fragment);
                UFSD("phys64 = %llu\n", (unsigned long long)phys64);
                        map_bh(bh_result, sb, phys64);
        /* This code is entered only when writing ....? */
        UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
            ((UFS_NDADDR + uspi->s_apb + uspi->s_2apb + uspi->s_3apb)
             << uspi->s_fpbshift))
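        /*
         * The check above rejects fragments beyond the largest file the
         * pointer scheme can address: UFS_NDADDR direct blocks plus one,
         * two and three levels of indirection (s_apb, s_2apb and s_3apb
         * blocks respectively), converted to fragments by the s_fpbshift
         * shift.
         */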
         * OK, these macros clean the logic up a bit and make
         * it much more readable:
#define GET_INODE_DATABLOCK(x) \
        ufs_inode_getfrag(inode, x, fragment, 1, &err, &phys, &new,\
#define GET_INODE_PTR(x) \
        ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, NULL, NULL,\
#define GET_INDIRECT_DATABLOCK(x) \
        ufs_inode_getblock(inode, bh, x, fragment, \
                           &err, &phys, &new, bh_result->b_page)
#define GET_INDIRECT_PTR(x) \
        ufs_inode_getblock(inode, bh, x, fragment, \
                           &err, NULL, NULL, NULL)
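        /*
         * The chain below peels off one level of indirection at a time:
         * either the inode maps the data fragment directly, or it yields a
         * pointer block and each deeper case continues through the
         * shallower lookups until GET_INDIRECT_DATABLOCK finally maps the
         * data fragment itself.
         */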
        if (ptr < UFS_NDIR_FRAGMENT) {
                bh = GET_INODE_DATABLOCK(ptr);
        ptr -= UFS_NDIR_FRAGMENT;
        if (ptr < (1 << (uspi->s_apbshift + uspi->s_fpbshift))) {
                bh = GET_INODE_PTR(UFS_IND_FRAGMENT + (ptr >> uspi->s_apbshift));
        ptr -= 1 << (uspi->s_apbshift + uspi->s_fpbshift);
        if (ptr < (1 << (uspi->s_2apbshift + uspi->s_fpbshift))) {
                bh = GET_INODE_PTR(UFS_DIND_FRAGMENT + (ptr >> uspi->s_2apbshift));
        ptr -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift);
        bh = GET_INODE_PTR(UFS_TIND_FRAGMENT + (ptr >> uspi->s_3apbshift));
        bh = GET_INDIRECT_PTR((ptr >> uspi->s_2apbshift) & uspi->s_apbmask);
        bh = GET_INDIRECT_PTR((ptr >> uspi->s_apbshift) & uspi->s_apbmask);
        bh = GET_INDIRECT_DATABLOCK(ptr & uspi->s_apbmask);
#undef GET_INODE_DATABLOCK
#undef GET_INDIRECT_DATABLOCK
#undef GET_INDIRECT_PTR
                set_buffer_new(bh_result);
        map_bh(bh_result, sb, phys);
        ufs_warning(sb, "ufs_get_block", "block < 0");
        ufs_warning(sb, "ufs_get_block", "block > big");
static struct buffer_head *ufs_getfrag(struct inode *inode,
                                       unsigned int fragment,
                                       int create, int *err)
        struct buffer_head dummy;
        dummy.b_blocknr = -1000;
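        /*
         * The on-stack dummy buffer_head only exists so that
         * ufs_getfrag_block() has somewhere to report the mapping;
         * -1000 is an obviously invalid block number, which makes an
         * unmapped result easy to spot.
         */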
        error = ufs_getfrag_block(inode, fragment, &dummy, create);
        if (!error && buffer_mapped(&dummy)) {
                struct buffer_head *bh;
                bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
                if (buffer_new(&dummy)) {
                        memset(bh->b_data, 0, inode->i_sb->s_blocksize);
                        set_buffer_uptodate(bh);
                        mark_buffer_dirty(bh);
struct buffer_head *ufs_bread(struct inode *inode, unsigned fragment,
                              int create, int *err)
        struct buffer_head *bh;
        UFSD("ENTER, ino %lu, fragment %u\n", inode->i_ino, fragment);
        bh = ufs_getfrag(inode, fragment, create, err);
        if (!bh || buffer_uptodate(bh))
        ll_rw_block(READ, 1, &bh);
        if (buffer_uptodate(bh))
static int ufs_writepage(struct page *page, struct writeback_control *wbc)
        return block_write_full_page(page, ufs_getfrag_block, wbc);
static int ufs_readpage(struct file *file, struct page *page)
        return block_read_full_page(page, ufs_getfrag_block);
static int ufs_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
        return block_prepare_write(page, from, to, ufs_getfrag_block);
static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
        return generic_block_bmap(mapping, block, ufs_getfrag_block);
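/*
 * Address-space operations: the read, write and prepare/commit paths are all
 * provided by the generic buffer-layer helpers, parameterized with
 * ufs_getfrag_block() as the get_block callback.
 */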
const struct address_space_operations ufs_aops = {
        .readpage = ufs_readpage,
        .writepage = ufs_writepage,
        .sync_page = block_sync_page,
        .prepare_write = ufs_prepare_write,
        .commit_write = generic_commit_write,
static void ufs_set_inode_ops(struct inode *inode)
        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &ufs_file_inode_operations;
                inode->i_fop = &ufs_file_operations;
                inode->i_mapping->a_ops = &ufs_aops;
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &ufs_dir_inode_operations;
                inode->i_fop = &ufs_dir_operations;
                inode->i_mapping->a_ops = &ufs_aops;
        } else if (S_ISLNK(inode->i_mode)) {
                if (!inode->i_blocks)
                        inode->i_op = &ufs_fast_symlink_inode_operations;
                        inode->i_op = &page_symlink_inode_operations;
                        inode->i_mapping->a_ops = &ufs_aops;
                init_special_inode(inode, inode->i_mode,
                                   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
         * Copy data to the in-core inode.
        inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
        inode->i_nlink = fs16_to_cpu(sb, ufs_inode->ui_nlink);
        if (inode->i_nlink == 0) {
                ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
         * Linux now has 32-bit uid and gid, so we can support EFT.
        inode->i_uid = ufs_get_inode_uid(sb, ufs_inode);
        inode->i_gid = ufs_get_inode_gid(sb, ufs_inode);
        inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
        inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
        inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
        inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
        inode->i_mtime.tv_nsec = 0;
        inode->i_atime.tv_nsec = 0;
        inode->i_ctime.tv_nsec = 0;
        inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
        inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
        ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
        ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
        ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
        if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
                for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
                        ufsi->i_u1.i_data[i] = ufs_inode->ui_u2.ui_addr.ui_db[i];
                for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
                        ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i];
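        /*
         * Inodes with no data blocks here are typically fast symlinks: the
         * link target is stored directly in the block-pointer area of the
         * on-disk inode, hence the byte-wise copy into i_symlink.
         */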
static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
        UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
         * Copy data to the in-core inode.
        inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
        inode->i_nlink = fs16_to_cpu(sb, ufs2_inode->ui_nlink);
        if (inode->i_nlink == 0) {
                ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
         * Linux now has 32-bit uid and gid, so we can support EFT.
        inode->i_uid = fs32_to_cpu(sb, ufs2_inode->ui_uid);
        inode->i_gid = fs32_to_cpu(sb, ufs2_inode->ui_gid);
        inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
        inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
        inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime);
        inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
        inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
        inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec);
        inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
        inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
        inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
        ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
        /* UFS1-only Sun fields; there is no ui_u3.ui_sun in struct ufs2_inode:
        ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
        ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
        */
        if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
                for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
                        ufsi->i_u1.u2_i_data[i] =
                                ufs2_inode->ui_u2.ui_addr.ui_db[i];
                for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
                        ufsi->i_u1.i_symlink[i] = ufs2_inode->ui_u2.ui_symlink[i];
void ufs_read_inode(struct inode *inode)
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb;
        struct ufs_sb_private_info *uspi;
        struct buffer_head *bh;
        UFSD("ENTER, ino %lu\n", inode->i_ino);
        uspi = UFS_SB(sb)->s_uspi;
        if (inode->i_ino < UFS_ROOTINO ||
            inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
                ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
        bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
                ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
        if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
                struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;
                err = ufs2_read_inode(inode,
                                      ufs2_inode + ufs_inotofsbo(inode->i_ino));
                struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;
                err = ufs1_read_inode(inode,
                                      ufs_inode + ufs_inotofsbo(inode->i_ino));
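        /*
         * Several on-disk inodes share one filesystem block:
         * ufs_inotofsba() names the block that holds this inode and
         * ufs_inotofsbo() gives its index within that block.
         */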
                (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
        ufsi->i_dir_start_lookup = 0;
        ufs_set_inode_ops(inode);
        make_bad_inode(inode);
static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
        struct super_block *sb = inode->i_sb;
        struct ufs_inode_info *ufsi = UFS_I(inode);
        ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
        ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
        ufs_set_inode_uid(sb, ufs_inode, inode->i_uid);
        ufs_set_inode_gid(sb, ufs_inode, inode->i_gid);
        ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
        ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
        ufs_inode->ui_atime.tv_usec = 0;
        ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
        ufs_inode->ui_ctime.tv_usec = 0;
        ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
        ufs_inode->ui_mtime.tv_usec = 0;
        ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
        ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
        ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);
        if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
                ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
                ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
                ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
        } else if (inode->i_blocks) {
                for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
                        ufs_inode->ui_u2.ui_addr.ui_db[i] = ufsi->i_u1.i_data[i];
                for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
                        ufs_inode->ui_u2.ui_symlink[i] = ufsi->i_u1.i_symlink[i];
                memset (ufs_inode, 0, sizeof(struct ufs_inode));
static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
        struct super_block *sb = inode->i_sb;
        struct ufs_inode_info *ufsi = UFS_I(inode);
        ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
        ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);
        ufs_inode->ui_uid = cpu_to_fs32(sb, inode->i_uid);
        ufs_inode->ui_gid = cpu_to_fs32(sb, inode->i_gid);
        ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
        ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
        ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
        ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec);
        ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec);
        ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
        ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);
        ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
        ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
        ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);
        if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
                /* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
                ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
        } else if (inode->i_blocks) {
                for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
                        ufs_inode->ui_u2.ui_addr.ui_db[i] = ufsi->i_u1.u2_i_data[i];
                for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
                        ufs_inode->ui_u2.ui_symlink[i] = ufsi->i_u1.i_symlink[i];
                memset (ufs_inode, 0, sizeof(struct ufs2_inode));
static int ufs_update_inode(struct inode *inode, int do_sync)
        struct super_block *sb = inode->i_sb;
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        struct buffer_head *bh;
        UFSD("ENTER, ino %lu\n", inode->i_ino);
        if (inode->i_ino < UFS_ROOTINO ||
            inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
                ufs_warning (sb, "ufs_update_inode", "bad inode number (%lu)\n", inode->i_ino);
        bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
                ufs_warning (sb, "ufs_update_inode", "unable to read inode %lu\n", inode->i_ino);
        if (uspi->fs_magic == UFS2_MAGIC) {
                struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;
                ufs2_update_inode(inode,
                                  ufs2_inode + ufs_inotofsbo(inode->i_ino));
                struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data;
                ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
        mark_buffer_dirty(bh);
                sync_dirty_buffer(bh);
int ufs_write_inode(struct inode *inode, int wait)
        ret = ufs_update_inode(inode, wait);
int ufs_sync_inode(struct inode *inode)
        return ufs_update_inode(inode, 1);
void ufs_delete_inode(struct inode *inode)
        truncate_inode_pages(&inode->i_data, 0);
        if (is_bad_inode(inode))
        /*UFS_I(inode)->i_dtime = CURRENT_TIME;*/
        mark_inode_dirty(inode);
        ufs_update_inode(inode, IS_SYNC(inode));
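        /*
         * The inode is now on disk; next release its data blocks via
         * ufs_truncate() and finally return the inode itself to the free
         * list.
         */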
        old_i_size = inode->i_size;
        if (inode->i_blocks && ufs_truncate(inode, old_i_size))
                ufs_warning(inode->i_sb, __FUNCTION__, "ufs_truncate failed\n");
        ufs_free_inode(inode);
        clear_inode(inode); /* We must guarantee clearing of inode... */