/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs_types.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_error.h"
#include "xfs_utils.h"
#include "xfs_dir2_trace.h"
#include "xfs_quota.h"

#include <linux/log2.h>
kmem_zone_t *xfs_ifork_zone;
kmem_zone_t *xfs_inode_zone;
kmem_zone_t *xfs_chashlist_zone;

/*
 * Used in xfs_itruncate().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);
#ifdef DEBUG
/*
 * Make sure that the extents in the given memory buffer
 * are valid.
 */
STATIC void
xfs_validate_extents(
	xfs_ifork_t		*ifp,
	int			nrecs,
	int			disk,
	xfs_exntfmt_t		fmt)
{
	xfs_bmbt_rec_t		*ep;
	xfs_bmbt_irec_t		irec;
	xfs_bmbt_rec_t		rec;
	int			i;

	for (i = 0; i < nrecs; i++) {
		ep = xfs_iext_get_ext(ifp, i);
		rec.l0 = get_unaligned((__uint64_t*)&ep->l0);
		rec.l1 = get_unaligned((__uint64_t*)&ep->l1);
		if (disk)
			xfs_bmbt_disk_get_all(&rec, &irec);
		else
			xfs_bmbt_get_all(&rec, &irec);
		if (fmt == XFS_EXTFMT_NOSTATE)
			ASSERT(irec.br_state == XFS_EXT_NORM);
	}
}
#else /* DEBUG */
#define xfs_validate_extents(ifp, nrecs, disk, fmt)
#endif /* DEBUG */
/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int		i;
	int		j;
	xfs_dinode_t	*dip;

	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked) {
			xfs_fs_cmn_err(CE_ALERT, mp,
				"Detected a bogus zero next_unlinked field in incore inode buffer 0x%p.  About to pop an ASSERT.",
				bp);
			ASSERT(dip->di_next_unlinked);
		}
	}
}
#endif
/*
 * This routine is called to map an inode number within a file
 * system to the buffer containing the on-disk version of the
 * inode.  It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dip parameter
 * it returns a pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and
 * dipp are undefined.
 *
 * Use xfs_imap() to determine the size and location of the
 * buffer to read from disk.
 */
STATIC int
xfs_inotobp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	xfs_dinode_t	**dipp,
	xfs_buf_t	**bpp,
	int		*offset)
{
	int		di_ok;
	xfs_imap_t	imap;
	xfs_buf_t	*bp;
	int		error;
	xfs_dinode_t	*dip;

	/*
	 * Call the space management code to find the location of the
	 * inode on disk.
	 */
	imap.im_blkno = 0;
	error = xfs_imap(mp, tp, ino, &imap, XFS_IMAP_LOOKUP);
	if (error != 0) {
		cmn_err(CE_WARN,
			"xfs_inotobp: xfs_imap() returned an "
			"error %d on %s.  Returning error.", error, mp->m_fsname);
		return error;
	}

	/*
	 * If the inode number maps to a block outside the bounds of the
	 * file system then return NULL rather than calling read_buf
	 * and panicking when we get an error from the driver.
	 */
	if ((imap.im_blkno + imap.im_len) >
	    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
		cmn_err(CE_WARN,
			"xfs_inotobp: inode number (%llu + %d) maps to a block outside the bounds "
			"of the file system %s.  Returning EINVAL.",
			(unsigned long long)imap.im_blkno,
			imap.im_len, mp->m_fsname);
		return XFS_ERROR(EINVAL);
	}

	/*
	 * Read in the buffer.  If tp is NULL, xfs_trans_read_buf() will
	 * default to just a read_buf() call.
	 */
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno,
				   (int)imap.im_len, XFS_BUF_LOCK, &bp);
	if (error) {
		cmn_err(CE_WARN,
			"xfs_inotobp: xfs_trans_read_buf() returned an "
			"error %d on %s.  Returning error.", error, mp->m_fsname);
		return error;
	}
	dip = (xfs_dinode_t *)xfs_buf_offset(bp, 0);
	di_ok =
		INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC &&
		XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT));
	if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP,
			XFS_RANDOM_ITOBP_INOTOBP))) {
		XFS_CORRUPTION_ERROR("xfs_inotobp", XFS_ERRLEVEL_LOW, mp, dip);
		xfs_trans_brelse(tp, bp);
		cmn_err(CE_WARN,
			"xfs_inotobp: XFS_TEST_ERROR() returned an "
			"error on %s.  Returning EFSCORRUPTED.", mp->m_fsname);
		return XFS_ERROR(EFSCORRUPTED);
	}

	xfs_inobp_check(mp, bp);

	/*
	 * Set *dipp to point to the on-disk inode in the buffer.
	 */
	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
	*bpp = bp;
	*offset = imap.im_boffset;
	return 0;
}
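
/*
 * Example (editor's sketch, not part of the original file): a typical
 * caller pairs xfs_inotobp() with xfs_trans_brelse() once it is done
 * looking at the on-disk inode.  The helper name and the use of a NULL
 * transaction are illustrative assumptions; the block is guarded by a
 * hypothetical EXAMPLE_ONLY symbol so it never builds.
 */
#ifdef EXAMPLE_ONLY
STATIC int
example_read_dinode_version(xfs_mount_t *mp, xfs_ino_t ino, int *version)
{
	xfs_dinode_t	*dip;
	xfs_buf_t	*bp;
	int		offset;
	int		error;

	/* Map the inode number to its buffer; tp == NULL means a plain read. */
	error = xfs_inotobp(mp, NULL, ino, &dip, &bp, &offset);
	if (error)
		return error;

	/* On-disk fields are endian-neutral; convert before use. */
	*version = INT_GET(dip->di_core.di_version, ARCH_CONVERT);

	/* Release the buffer; with tp == NULL this is a normal brelse(). */
	xfs_trans_brelse(NULL, bp);
	return 0;
}
#endif /* EXAMPLE_ONLY */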
/*
 * This routine is called to map an inode to the buffer containing
 * the on-disk version of the inode.  It returns a pointer to the
 * buffer containing the on-disk inode in the bpp parameter, and in
 * the dip parameter it returns a pointer to the on-disk inode within
 * that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and
 * dipp are undefined.
 *
 * If the inode is new and has not yet been initialized, use xfs_imap()
 * to determine the size and location of the buffer to read from disk.
 * If the inode has already been mapped to its buffer and read in once,
 * then use the mapping information stored in the inode rather than
 * calling xfs_imap().  This allows us to avoid the overhead of looking
 * at the inode btree for small block file systems (see xfs_dilocate()).
 * We can tell whether the inode has been mapped in before by comparing
 * its disk block address to 0.  Only uninitialized inodes will have
 * 0 for the disk block address.
 */
int
xfs_itobp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dinode_t	**dipp,
	xfs_buf_t	**bpp,
	xfs_daddr_t	bno,
	uint		imap_flags)
{
	xfs_imap_t	imap;
	xfs_buf_t	*bp;
	int		error;
	int		i;
	int		ni;

	if (ip->i_blkno == (xfs_daddr_t)0) {
		/*
		 * Call the space management code to find the location of the
		 * inode on disk.
		 */
		if ((error = xfs_imap(mp, tp, ip->i_ino, &imap,
					XFS_IMAP_LOOKUP | imap_flags)))
			return error;

		/*
		 * If the inode number maps to a block outside the bounds
		 * of the file system then return NULL rather than calling
		 * read_buf and panicking when we get an error from the
		 * driver.
		 */
		if ((imap.im_blkno + imap.im_len) >
		    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_itobp: "
					"(imap.im_blkno (0x%llx) "
					"+ imap.im_len (0x%llx)) > "
					" XFS_FSB_TO_BB(mp, "
					"mp->m_sb.sb_dblocks) (0x%llx)",
					(unsigned long long) imap.im_blkno,
					(unsigned long long) imap.im_len,
					XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
			return XFS_ERROR(EINVAL);
		}

		/*
		 * Fill in the fields in the inode that will be used to
		 * map the inode to its buffer from now on.
		 */
		ip->i_blkno = imap.im_blkno;
		ip->i_len = imap.im_len;
		ip->i_boffset = imap.im_boffset;
	} else {
		/*
		 * We've already mapped the inode once, so just use the
		 * mapping that we saved the first time.
		 */
		imap.im_blkno = ip->i_blkno;
		imap.im_len = ip->i_len;
		imap.im_boffset = ip->i_boffset;
	}
	ASSERT(bno == 0 || bno == imap.im_blkno);

	/*
	 * Read in the buffer.  If tp is NULL, xfs_trans_read_buf() will
	 * default to just a read_buf() call.
	 */
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno,
				   (int)imap.im_len, XFS_BUF_LOCK, &bp);
	if (error) {
		xfs_fs_cmn_err(CE_ALERT, mp, "xfs_itobp: "
				"xfs_trans_read_buf() returned error %d, "
				"imap.im_blkno 0x%llx, imap.im_len 0x%llx",
				error, (unsigned long long) imap.im_blkno,
				(unsigned long long) imap.im_len);
		return error;
	}
	/*
	 * Validate the magic number and version of every inode in the buffer
	 * (if DEBUG kernel) or the first inode in the buffer, otherwise.
	 * No validation is done here in userspace (xfs_repair).
	 */
#if !defined(__KERNEL__)
	ni = 0;
#elif defined(DEBUG)
	ni = BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog;
#else	/* usual case */
	ni = 1;
#endif

	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;

		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					(i << mp->m_sb.sb_inodelog));
		di_ok = INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC &&
			XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT));
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP,
						XFS_RANDOM_ITOBP_INOTOBP))) {
			if (imap_flags & XFS_IMAP_BULKSTAT) {
				xfs_trans_brelse(tp, bp);
				return XFS_ERROR(EINVAL);
			}
			cmn_err(CE_ALERT,
					"Device %s - bad inode magic/vsn "
					"daddr %lld #%d (magic=%x)",
				XFS_BUFTARG_NAME(mp->m_ddev_targp),
				(unsigned long long)imap.im_blkno, i,
				INT_GET(dip->di_core.di_magic, ARCH_CONVERT));
			XFS_CORRUPTION_ERROR("xfs_itobp", XFS_ERRLEVEL_HIGH,
					     mp, dip);
			xfs_trans_brelse(tp, bp);
			return XFS_ERROR(EFSCORRUPTED);
		}
	}

	xfs_inobp_check(mp, bp);

	/*
	 * Mark the buffer as an inode buffer now that it looks good
	 */
	XFS_BUF_SET_VTYPE(bp, B_FS_INO);

	/*
	 * Set *dipp to point to the on-disk inode in the buffer.
	 */
	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
	*bpp = bp;
	return 0;
}
/*
 * Move inode type and inode format specific information from the
 * on-disk inode to the in-core inode.  For fifos, devs, and sockets
 * this means set if_rdev to the proper value.  For files, directories,
 * and symlinks this means to bring in the in-line data or extent
 * pointers.  For a file in B-tree format, only the root is immediately
 * brought in-core.  The rest will be in-lined in if_extents when it
 * is first referenced (see xfs_iread_extents()).
 */
STATIC int
xfs_iformat(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip)
{
	xfs_attr_shortform_t	*atp;
	int			size;
	int			error;
	xfs_fsize_t		di_size;

	ip->i_df.if_ext_max =
		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	error = 0;

	if (unlikely(
	    INT_GET(dip->di_core.di_nextents, ARCH_CONVERT) +
		INT_GET(dip->di_core.di_anextents, ARCH_CONVERT) >
	    INT_GET(dip->di_core.di_nblocks, ARCH_CONVERT))) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
			(unsigned long long)ip->i_ino,
			(int)(INT_GET(dip->di_core.di_nextents, ARCH_CONVERT)
			    + INT_GET(dip->di_core.di_anextents, ARCH_CONVERT)),
			(unsigned long long)
			INT_GET(dip->di_core.di_nblocks, ARCH_CONVERT));
		XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	if (unlikely(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT) > ip->i_mount->m_sb.sb_inodesize)) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt dinode %Lu, forkoff = 0x%x.",
			(unsigned long long)ip->i_ino,
			(int)(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT)));
		XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		if (unlikely(INT_GET(dip->di_core.di_format, ARCH_CONVERT) != XFS_DINODE_FMT_DEV)) {
			XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
					     ip->i_mount, dip);
			return XFS_ERROR(EFSCORRUPTED);
		}
		ip->i_d.di_size = 0;
		ip->i_size = 0;
		ip->i_df.if_u2.if_rdev = INT_GET(dip->di_u.di_dev, ARCH_CONVERT);
		break;

	case S_IFREG:
	case S_IFLNK:
	case S_IFDIR:
		switch (INT_GET(dip->di_core.di_format, ARCH_CONVERT)) {
		case XFS_DINODE_FMT_LOCAL:
			/*
			 * no local regular files yet
			 */
			if (unlikely((INT_GET(dip->di_core.di_mode, ARCH_CONVERT) & S_IFMT) == S_IFREG)) {
				xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
					"corrupt inode %Lu "
					"(local format for regular file).",
					(unsigned long long) ip->i_ino);
				XFS_CORRUPTION_ERROR("xfs_iformat(4)",
						     XFS_ERRLEVEL_LOW,
						     ip->i_mount, dip);
				return XFS_ERROR(EFSCORRUPTED);
			}

			di_size = INT_GET(dip->di_core.di_size, ARCH_CONVERT);
			if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
				xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
					"corrupt inode %Lu "
					"(bad size %Ld for local inode).",
					(unsigned long long) ip->i_ino,
					(long long) di_size);
				XFS_CORRUPTION_ERROR("xfs_iformat(5)",
						     XFS_ERRLEVEL_LOW,
						     ip->i_mount, dip);
				return XFS_ERROR(EFSCORRUPTED);
			}

			size = (int)di_size;
			error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
			break;
		case XFS_DINODE_FMT_EXTENTS:
			error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
			break;
		case XFS_DINODE_FMT_BTREE:
			error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
			break;
		default:
			XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
					 ip->i_mount);
			return XFS_ERROR(EFSCORRUPTED);
		}
		break;

	default:
		XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}
	if (error) {
		return error;
	}
	if (!XFS_DFORK_Q(dip))
		return 0;
	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
	ip->i_afp->if_ext_max =
		XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	switch (INT_GET(dip->di_core.di_aformat, ARCH_CONVERT)) {
	case XFS_DINODE_FMT_LOCAL:
		atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
		size = be16_to_cpu(atp->hdr.totsize);
		error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
		break;
	default:
		error = XFS_ERROR(EFSCORRUPTED);
		break;
	}
	if (error) {
		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
		ip->i_afp = NULL;
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
	}
	return error;
}
/*
 * The file is in-lined in the on-disk inode.
 * If it fits into if_inline_data, then copy
 * it there, otherwise allocate a buffer for it
 * and copy the data there.  Either way, set
 * if_data to point at the data.
 * If we allocate a buffer for the data, make
 * sure that its size is a multiple of 4 and
 * record the real size in i_real_bytes.
 */
STATIC int
xfs_iformat_local(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork,
	int		size)
{
	xfs_ifork_t	*ifp;
	int		real_size;

	/*
	 * If the size is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt inode %Lu "
			"(bad size %d for local fork, size = %d).",
			(unsigned long long) ip->i_ino, size,
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
		XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}
	ifp = XFS_IFORK_PTR(ip, whichfork);
	real_size = 0;
	if (size == 0)
		ifp->if_u1.if_data = NULL;
	else if (size <= sizeof(ifp->if_u2.if_inline_data))
		ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
	else {
		real_size = roundup(size, 4);
		ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
	}
	ifp->if_bytes = size;
	ifp->if_real_bytes = real_size;
	if (size)
		memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFINLINE;
	return 0;
}
/*
 * The file consists of a set of extents all
 * of which fit into the on-disk inode.
 * If there are few enough extents to fit into
 * the if_inline_ext, then copy them there.
 * Otherwise allocate a buffer for them and copy
 * them into it.  Either way, set if_extents
 * to point at the extents.
 */
STATIC int
xfs_iformat_extents(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork)
{
	xfs_bmbt_rec_t	*ep, *dp;
	xfs_ifork_t	*ifp;
	int		nex;
	int		size;
	int		i;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	nex = XFS_DFORK_NEXTENTS(dip, whichfork);
	size = nex * (uint)sizeof(xfs_bmbt_rec_t);

	/*
	 * If the number of extents is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt inode %Lu ((a)extents = %d).",
			(unsigned long long) ip->i_ino, nex);
		XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_real_bytes = 0;
	if (nex == 0)
		ifp->if_u1.if_extents = NULL;
	else if (nex <= XFS_INLINE_EXTS)
		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
	else
		xfs_iext_add(ifp, 0, nex);

	ifp->if_bytes = size;
	if (size) {
		dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
		xfs_validate_extents(ifp, nex, 1, XFS_EXTFMT_INODE(ip));
		for (i = 0; i < nex; i++, dp++) {
			ep = xfs_iext_get_ext(ifp, i);
			ep->l0 = INT_GET(get_unaligned((__uint64_t*)&dp->l0),
					 ARCH_CONVERT);
			ep->l1 = INT_GET(get_unaligned((__uint64_t*)&dp->l1),
					 ARCH_CONVERT);
		}
		xfs_bmap_trace_exlist("xfs_iformat_extents", ip, nex,
				      whichfork);
		if (whichfork != XFS_DATA_FORK ||
		    XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
			if (unlikely(xfs_check_nostate_extents(
					ifp, 0, nex))) {
				XFS_ERROR_REPORT("xfs_iformat_extents(2)",
						 XFS_ERRLEVEL_LOW,
						 ip->i_mount);
				return XFS_ERROR(EFSCORRUPTED);
			}
	}
	ifp->if_flags |= XFS_IFEXTENTS;
	return 0;
}
/*
 * The file has too many extents to fit into
 * the inode, so they are in B-tree format.
 * Allocate a buffer for the root of the B-tree
 * and copy the root into it.  The i_extents
 * field will remain NULL until all of the
 * extents are read in (when they are needed).
 */
STATIC int
xfs_iformat_btree(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip,
	int			whichfork)
{
	xfs_bmdr_block_t	*dfp;
	xfs_ifork_t		*ifp;
	int			nrecs;
	int			size;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
	size = XFS_BMAP_BROOT_SPACE(dfp);
	nrecs = XFS_BMAP_BROOT_NUMRECS(dfp);

	/*
	 * blow out if -- fork has fewer extents than can fit in
	 * fork (fork shouldn't be a btree format), root btree
	 * block has more records than can fit into the fork,
	 * or the number of extents is greater than the number of
	 * blocks.
	 */
	if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max
	    || XFS_BMDR_SPACE_CALC(nrecs) >
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)
	    || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt inode %Lu (btree).",
			(unsigned long long) ip->i_ino);
		XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
				 ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_broot_bytes = size;
	ifp->if_broot = kmem_alloc(size, KM_SLEEP);
	ASSERT(ifp->if_broot != NULL);
	/*
	 * Copy and convert from the on-disk structure
	 * to the in-memory structure.
	 */
	xfs_bmdr_to_bmbt(dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
		ifp->if_broot, size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFBROOT;

	return 0;
}
/*
 * xfs_xlate_dinode_core - translate an xfs_dinode_core_t between on-disk
 *                         and native format
 *
 * buf  = on-disk representation
 * dip  = native representation
 * dir  = direction - +ve -> disk to native
 *                    -ve -> native to disk
 */
void
xfs_xlate_dinode_core(
	xfs_caddr_t		buf,
	xfs_dinode_core_t	*dip,
	int			dir)
{
	xfs_dinode_core_t	*buf_core = (xfs_dinode_core_t *)buf;
	xfs_dinode_core_t	*mem_core = (xfs_dinode_core_t *)dip;
	xfs_arch_t		arch = ARCH_CONVERT;

	ASSERT(dir);

	INT_XLATE(buf_core->di_magic, mem_core->di_magic, dir, arch);
	INT_XLATE(buf_core->di_mode, mem_core->di_mode, dir, arch);
	INT_XLATE(buf_core->di_version, mem_core->di_version, dir, arch);
	INT_XLATE(buf_core->di_format, mem_core->di_format, dir, arch);
	INT_XLATE(buf_core->di_onlink, mem_core->di_onlink, dir, arch);
	INT_XLATE(buf_core->di_uid, mem_core->di_uid, dir, arch);
	INT_XLATE(buf_core->di_gid, mem_core->di_gid, dir, arch);
	INT_XLATE(buf_core->di_nlink, mem_core->di_nlink, dir, arch);
	INT_XLATE(buf_core->di_projid, mem_core->di_projid, dir, arch);

	if (dir > 0) {
		memcpy(mem_core->di_pad, buf_core->di_pad,
		       sizeof(buf_core->di_pad));
	} else {
		memcpy(buf_core->di_pad, mem_core->di_pad,
		       sizeof(buf_core->di_pad));
	}

	INT_XLATE(buf_core->di_flushiter, mem_core->di_flushiter, dir, arch);

	INT_XLATE(buf_core->di_atime.t_sec, mem_core->di_atime.t_sec,
		  dir, arch);
	INT_XLATE(buf_core->di_atime.t_nsec, mem_core->di_atime.t_nsec,
		  dir, arch);
	INT_XLATE(buf_core->di_mtime.t_sec, mem_core->di_mtime.t_sec,
		  dir, arch);
	INT_XLATE(buf_core->di_mtime.t_nsec, mem_core->di_mtime.t_nsec,
		  dir, arch);
	INT_XLATE(buf_core->di_ctime.t_sec, mem_core->di_ctime.t_sec,
		  dir, arch);
	INT_XLATE(buf_core->di_ctime.t_nsec, mem_core->di_ctime.t_nsec,
		  dir, arch);
	INT_XLATE(buf_core->di_size, mem_core->di_size, dir, arch);
	INT_XLATE(buf_core->di_nblocks, mem_core->di_nblocks, dir, arch);
	INT_XLATE(buf_core->di_extsize, mem_core->di_extsize, dir, arch);
	INT_XLATE(buf_core->di_nextents, mem_core->di_nextents, dir, arch);
	INT_XLATE(buf_core->di_anextents, mem_core->di_anextents, dir, arch);
	INT_XLATE(buf_core->di_forkoff, mem_core->di_forkoff, dir, arch);
	INT_XLATE(buf_core->di_aformat, mem_core->di_aformat, dir, arch);
	INT_XLATE(buf_core->di_dmevmask, mem_core->di_dmevmask, dir, arch);
	INT_XLATE(buf_core->di_dmstate, mem_core->di_dmstate, dir, arch);
	INT_XLATE(buf_core->di_flags, mem_core->di_flags, dir, arch);
	INT_XLATE(buf_core->di_gen, mem_core->di_gen, dir, arch);
}
STATIC uint
_xfs_dic2xflags(
	__uint16_t		di_flags)
{
	uint			flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= XFS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= XFS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= XFS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= XFS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= XFS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= XFS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= XFS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= XFS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= XFS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= XFS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= XFS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= XFS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= XFS_XFLAG_NODEFRAG;
	}

	return flags;
}

uint
xfs_ip2xflags(
	xfs_inode_t		*ip)
{
	xfs_dinode_core_t	*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags) |
				(XFS_CFORK_Q(dic) ? XFS_XFLAG_HASATTR : 0);
}

uint
xfs_dic2xflags(
	xfs_dinode_core_t	*dic)
{
	return _xfs_dic2xflags(INT_GET(dic->di_flags, ARCH_CONVERT)) |
				(XFS_CFORK_Q_DISK(dic) ? XFS_XFLAG_HASATTR : 0);
}
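
/*
 * Example (editor's sketch, not part of the original file): the xflags
 * produced above are the values that eventually surface through
 * interfaces such as XFS_IOC_FSGETXATTR.  A hypothetical helper that
 * tests a single flag, guarded so it never builds:
 */
#ifdef EXAMPLE_ONLY
STATIC int
example_inode_is_realtime(xfs_inode_t *ip)
{
	/* xfs_ip2xflags() folds di_flags and the attr-fork bit together. */
	return (xfs_ip2xflags(ip) & XFS_XFLAG_REALTIME) != 0;
}
#endif /* EXAMPLE_ONLY */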
/*
 * Given a mount structure and an inode number, return a pointer
 * to a newly allocated in-core inode corresponding to the given
 * inode number.
 *
 * Initialize the inode's attributes and extent pointers if it
 * already has them (it will not if the inode has no links).
 */
int
xfs_iread(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno,
	uint		imap_flags)
{
	xfs_buf_t	*bp;
	xfs_dinode_t	*dip;
	xfs_inode_t	*ip;
	int		error;

	ASSERT(xfs_inode_zone != NULL);

	ip = kmem_zone_zalloc(xfs_inode_zone, KM_SLEEP);
	ip->i_ino = ino;
	ip->i_mount = mp;
	spin_lock_init(&ip->i_flags_lock);

	/*
	 * Get pointers to the on-disk inode and the buffer containing it.
	 * If the inode number refers to a block outside the file system
	 * then xfs_itobp() will return NULL.  In this case we should
	 * return NULL as well.  Set i_blkno to 0 so that xfs_itobp() will
	 * know that this is a new incore inode.
	 */
	error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, imap_flags);
	if (error) {
		kmem_zone_free(xfs_inode_zone, ip);
		return error;
	}

	/*
	 * Initialize inode's trace buffers.
	 * Do this before xfs_iformat in case it adds entries.
	 */
#ifdef XFS_BMAP_TRACE
	ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_BMBT_TRACE
	ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_RW_TRACE
	ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_ILOCK_TRACE
	ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_DIR2_TRACE
	ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_SLEEP);
#endif

	/*
	 * If we got something that isn't an inode it means someone
	 * (nfs or dmi) has a stale handle.
	 */
	if (INT_GET(dip->di_core.di_magic, ARCH_CONVERT) != XFS_DINODE_MAGIC) {
		kmem_zone_free(xfs_inode_zone, ip);
		xfs_trans_brelse(tp, bp);
		xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
				"dip->di_core.di_magic (0x%x) != "
				"XFS_DINODE_MAGIC (0x%x)",
				INT_GET(dip->di_core.di_magic, ARCH_CONVERT),
				XFS_DINODE_MAGIC);
		return XFS_ERROR(EINVAL);
	}

	/*
	 * If the on-disk inode is already linked to a directory
	 * entry, copy all of the inode into the in-core inode.
	 * xfs_iformat() handles copying in the inode format
	 * specific information.
	 * Otherwise, just get the truly permanent information.
	 */
	if (dip->di_core.di_mode) {
		xfs_xlate_dinode_core((xfs_caddr_t)&dip->di_core,
		     &(ip->i_d), 1);
		error = xfs_iformat(ip, dip);
		if (error) {
			kmem_zone_free(xfs_inode_zone, ip);
			xfs_trans_brelse(tp, bp);
			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
					"xfs_iformat() returned error %d",
					error);
			return error;
		}
	} else {
		ip->i_d.di_magic = INT_GET(dip->di_core.di_magic, ARCH_CONVERT);
		ip->i_d.di_version = INT_GET(dip->di_core.di_version, ARCH_CONVERT);
		ip->i_d.di_gen = INT_GET(dip->di_core.di_gen, ARCH_CONVERT);
		ip->i_d.di_flushiter = INT_GET(dip->di_core.di_flushiter, ARCH_CONVERT);
		/*
		 * Make sure to pull in the mode here as well in
		 * case the inode is released without being used.
		 * This ensures that xfs_inactive() will see that
		 * the inode is already free and not try to mess
		 * with the uninitialized part of it.
		 */
		ip->i_d.di_mode = 0;
		/*
		 * Initialize the per-fork minima and maxima for a new
		 * inode here.  xfs_iformat will do it for old inodes.
		 */
		ip->i_df.if_ext_max =
			XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	}

	INIT_LIST_HEAD(&ip->i_reclaim);

	/*
	 * The inode format changed when we moved the link count and
	 * made it 32 bits long.  If this is an old format inode,
	 * convert it in memory to look like a new one.  If it gets
	 * flushed to disk we will convert back before flushing or
	 * logging it.  We zero out the new projid field and the old link
	 * count field.  We'll handle clearing the pad field (the remains
	 * of the old uuid field) when we actually convert the inode to
	 * the new format. We don't change the version number so that we
	 * can distinguish this from a real new format inode.
	 */
	if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
		ip->i_d.di_nlink = ip->i_d.di_onlink;
		ip->i_d.di_onlink = 0;
		ip->i_d.di_projid = 0;
	}

	ip->i_delayed_blks = 0;
	ip->i_size = ip->i_d.di_size;

	/*
	 * Mark the buffer containing the inode as something to keep
	 * around for a while.  This helps to keep recently accessed
	 * meta-data in-core longer.
	 */
	XFS_BUF_SET_REF(bp, XFS_INO_REF);

	/*
	 * Use xfs_trans_brelse() to release the buffer containing the
	 * on-disk inode, because it was acquired with xfs_trans_read_buf()
	 * in xfs_itobp() above.  If tp is NULL, this is just a normal
	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
	 * will only release the buffer if it is not dirty within the
	 * transaction.  It will be OK to release the buffer in this case,
	 * because inodes on disk are never destroyed and we will be
	 * locking the new in-core inode before putting it in the hash
	 * table where other processes can find it.  Thus we don't have
	 * to worry about the inode being changed just because we released
	 * the buffer.
	 */
	xfs_trans_brelse(tp, bp);
	*ipp = ip;
	return 0;
}
/*
 * Read in extents from a btree-format inode.
 * Allocate and fill in if_extents.  Real work is done in xfs_bmap.c.
 */
int
xfs_iread_extents(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	int		whichfork)
{
	int		error;
	xfs_ifork_t	*ifp;
	xfs_extnum_t	nextents;
	size_t		size;

	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
		XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
				 ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}
	nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
	size = nextents * sizeof(xfs_bmbt_rec_t);
	ifp = XFS_IFORK_PTR(ip, whichfork);

	/*
	 * We know that the size is valid (it's checked in iformat_btree)
	 */
	ifp->if_lastex = NULLEXTNUM;
	ifp->if_bytes = ifp->if_real_bytes = 0;
	ifp->if_flags |= XFS_IFEXTENTS;
	xfs_iext_add(ifp, 0, nextents);
	error = xfs_bmap_read_extents(tp, ip, whichfork);
	if (error) {
		xfs_iext_destroy(ifp);
		ifp->if_flags &= ~XFS_IFEXTENTS;
		return error;
	}
	xfs_validate_extents(ifp, nextents, 0, XFS_EXTFMT_INODE(ip));
	return 0;
}
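
/*
 * Example (editor's sketch, not part of the original file): callers that
 * need the extent list typically test XFS_IFEXTENTS first and only fall
 * back to xfs_iread_extents() on a cache miss.  The helper below is a
 * hypothetical illustration of that pattern; the inode locking done by
 * real callers is omitted.
 */
#ifdef EXAMPLE_ONLY
STATIC int
example_ensure_extents(xfs_trans_t *tp, xfs_inode_t *ip, int whichfork)
{
	xfs_ifork_t	*ifp = XFS_IFORK_PTR(ip, whichfork);

	if (ifp->if_flags & XFS_IFEXTENTS)
		return 0;	/* extent list already in-core */
	/* Btree-format fork: pull the extents in from the bmap btree. */
	return xfs_iread_extents(tp, ip, whichfork);
}
#endif /* EXAMPLE_ONLY */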
/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
 * has a free inode available, call xfs_iget()
 * to obtain the in-core version of the allocated inode.  Finally,
 * fill in the inode and log its initial contents.  In this case,
 * ialloc_context would be set to NULL and call_again set to false.
 *
 * If xfs_dialloc() does not have an available inode,
 * it will replenish its supply by doing an allocation. Since we can
 * only do one allocation within a transaction without deadlocks, we
 * must commit the current transaction before returning the inode itself.
 * In this case, therefore, we will set call_again to true and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 */
int
xfs_ialloc(
	xfs_trans_t	*tp,
	xfs_inode_t	*pip,
	mode_t		mode,
	xfs_nlink_t	nlink,
	xfs_dev_t	rdev,
	cred_t		*cr,
	xfs_prid_t	prid,
	int		okalloc,
	xfs_buf_t	**ialloc_context,
	boolean_t	*call_again,
	xfs_inode_t	**ipp)
{
	xfs_ino_t	ino;
	xfs_inode_t	*ip;
	bhv_vnode_t	*vp;
	uint		flags;
	int		error;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip->i_ino, mode, okalloc,
			    ialloc_context, call_again, &ino);
	if (error != 0) {
		return error;
	}
	if (*call_again || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_trans_iget(tp->t_mountp, tp, ino,
				XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
	if (error != 0) {
		return error;
	}
	ASSERT(ip != NULL);

	vp = XFS_ITOV(ip);
	ip->i_d.di_mode = (__uint16_t)mode;
	ip->i_d.di_onlink = 0;
	ip->i_d.di_nlink = nlink;
	ASSERT(ip->i_d.di_nlink == nlink);
	ip->i_d.di_uid = current_fsuid(cr);
	ip->i_d.di_gid = current_fsgid(cr);
	ip->i_d.di_projid = prid;
	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));

	/*
	 * If the superblock version is up to where we support new format
	 * inodes and this is currently an old format inode, then change
	 * the inode version number now.  This way we only do the conversion
	 * here rather than here and in the flush/logging code.
	 */
	if (XFS_SB_VERSION_HASNLINK(&tp->t_mountp->m_sb) &&
	    ip->i_d.di_version == XFS_DINODE_VERSION_1) {
		ip->i_d.di_version = XFS_DINODE_VERSION_2;
		/*
		 * We've already zeroed the old link count, the projid field,
		 * and the pad field.
		 */
	}

	/*
	 * Project ids won't be stored on disk if we are using a version 1 inode.
	 */
	if ((prid != 0) && (ip->i_d.di_version == XFS_DINODE_VERSION_1))
		xfs_bump_ino_vers2(tp, ip);

	if (XFS_INHERIT_GID(pip, vp->v_vfsp)) {
		ip->i_d.di_gid = pip->i_d.di_gid;
		if ((pip->i_d.di_mode & S_ISGID) && (mode & S_IFMT) == S_IFDIR) {
			ip->i_d.di_mode |= S_ISGID;
		}
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if ((irix_sgid_inherit) &&
	    (ip->i_d.di_mode & S_ISGID) &&
	    (!in_group_p((gid_t)ip->i_d.di_gid))) {
		ip->i_d.di_mode &= ~S_ISGID;
	}

	ip->i_d.di_size = 0;
	ip->i_size = 0;
	ip->i_d.di_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);
	xfs_ichgtime(ip, XFS_ICHGTIME_CHG|XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD);
	/*
	 * di_gen will have been taken care of in xfs_iread.
	 */
	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;
	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_u2.if_rdev = rdev;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
	case S_IFDIR:
		if (unlikely(pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			uint	di_flags = 0;

			if ((mode & S_IFMT) == S_IFDIR) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			} else if ((mode & S_IFMT) == S_IFREG) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) {
					di_flags |= XFS_DIFLAG_REALTIME;
					ip->i_iocore.io_flags |= XFS_IOCORE_RT;
				}
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			}
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
			    xfs_inherit_noatime)
				di_flags |= XFS_DIFLAG_NOATIME;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
			    xfs_inherit_nodump)
				di_flags |= XFS_DIFLAG_NODUMP;
			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
			    xfs_inherit_sync)
				di_flags |= XFS_DIFLAG_SYNC;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
			    xfs_inherit_nosymlinks)
				di_flags |= XFS_DIFLAG_NOSYMLINKS;
			if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
				di_flags |= XFS_DIFLAG_PROJINHERIT;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
			    xfs_inherit_nodefrag)
				di_flags |= XFS_DIFLAG_NODEFRAG;
			ip->i_d.di_flags |= di_flags;
		}
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
		ip->i_df.if_u1.if_extents = NULL;
		break;
	default:
		ASSERT(0);
	}
	/*
	 * Attribute fork settings for new inode.
	 */
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_anextents = 0;

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup inode ops and unlock */
	bhv_vfs_init_vnode(XFS_MTOVFS(tp->t_mountp), vp, XFS_ITOBHV(ip), 1);

	*ipp = ip;
	return 0;
}
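
/*
 * Example (editor's sketch, not part of the original file): the
 * call_again/ialloc_context protocol described above obliges callers to
 * commit and retry.  This is a simplified, hypothetical caller loop; the
 * real logic (see xfs_dir_ialloc() in xfs_utils.c) also duplicates the
 * transaction and re-reserves log space, which is elided here.
 */
#ifdef EXAMPLE_ONLY
STATIC int
example_alloc_inode(xfs_trans_t **tpp, xfs_inode_t *pip, mode_t mode,
		    cred_t *credp, xfs_prid_t prid, xfs_inode_t **ipp)
{
	xfs_buf_t	*ialloc_context = NULL;
	boolean_t	call_again = B_FALSE;
	int		error;

	/* First attempt: may hand back the AGI buffer instead of an inode. */
	error = xfs_ialloc(*tpp, pip, mode, 1, 0, credp, prid, 1,
			   &ialloc_context, &call_again, ipp);
	if (error || !call_again)
		return error;

	/*
	 * xfs_dialloc() had to allocate a new inode chunk.  Keep the AGI
	 * freelist buffer locked across the commit, roll to a fresh
	 * transaction (elided), rejoin the buffer, and call again; the
	 * second call is guaranteed not to ask for another commit.
	 */
	xfs_trans_bhold(*tpp, ialloc_context);
	/* ... commit *tpp and start/reserve a new transaction in *tpp ... */
	xfs_trans_bjoin(*tpp, ialloc_context);
	return xfs_ialloc(*tpp, pip, mode, 1, 0, credp, prid, 1,
			  &ialloc_context, &call_again, ipp);
}
#endif /* EXAMPLE_ONLY */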
#ifdef DEBUG
/*
 * Check to make sure that there are no blocks allocated to the
 * file beyond the size of the file.  We don't check this for
 * files with fixed size extents or real time extents, but we
 * at least do it for regular files.
 */
STATIC void
xfs_isize_check(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_fsize_t	isize)
{
	xfs_fileoff_t	map_first;
	int		nimaps;
	xfs_bmbt_irec_t	imaps[2];

	if ((ip->i_d.di_mode & S_IFMT) != S_IFREG)
		return;

	if (ip->i_d.di_flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_EXTSIZE))
		return;

	nimaps = 2;
	map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	/*
	 * The filesystem could be shutting down, so bmapi may return
	 * an error.
	 */
	if (xfs_bmapi(NULL, ip, map_first,
			 (xfs_filblks_t)(XFS_B_TO_FSB(mp,
					 (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) -
					  map_first),
			 XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps,
			 NULL, NULL))
		return;
	ASSERT(nimaps == 1);
	ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
}
#endif	/* DEBUG */
/*
 * Calculate the last possible buffered byte in a file.  This must
 * include data that was buffered beyond the EOF by the write code.
 * This also needs to deal with overflowing the xfs_fsize_t type
 * which can happen for sizes near the limit.
 *
 * We also need to take into account any blocks beyond the EOF.  It
 * may be the case that they were buffered by a write which failed.
 * In that case the pages will still be in memory, but the inode size
 * will never have been updated.
 */
xfs_fsize_t
xfs_file_last_byte(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp;
	xfs_fsize_t	last_byte;
	xfs_fileoff_t	last_block;
	xfs_fileoff_t	size_last_block;
	int		error;

	ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE | MR_ACCESS));

	mp = ip->i_mount;
	/*
	 * Only check for blocks beyond the EOF if the extents have
	 * been read in.  This eliminates the need for the inode lock,
	 * and it also saves us from looking when it really isn't
	 * needed.
	 */
	if (ip->i_df.if_flags & XFS_IFEXTENTS) {
		error = xfs_bmap_last_offset(NULL, ip, &last_block,
			XFS_DATA_FORK);
		if (error) {
			last_block = 0;
		}
	} else {
		last_block = 0;
	}
	size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_size);
	last_block = XFS_FILEOFF_MAX(last_block, size_last_block);

	last_byte = XFS_FSB_TO_B(mp, last_block);
	if (last_byte < 0) {
		return XFS_MAXIOFFSET(mp);
	}
	last_byte += (1 << mp->m_writeio_log);
	if (last_byte < 0) {
		return XFS_MAXIOFFSET(mp);
	}
	return last_byte;
}
#if defined(XFS_RW_TRACE)
STATIC void
xfs_itrunc_trace(
	int		tag,
	xfs_inode_t	*ip,
	int		flag,
	xfs_fsize_t	new_size,
	xfs_off_t	toss_start,
	xfs_off_t	toss_finish)
{
	if (ip->i_rwtrace == NULL) {
		return;
	}

	ktrace_enter(ip->i_rwtrace,
		     (void*)((long)tag),
		     (void*)ip,
		     (void*)(unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff),
		     (void*)(unsigned long)(ip->i_d.di_size & 0xffffffff),
		     (void*)((long)flag),
		     (void*)(unsigned long)((new_size >> 32) & 0xffffffff),
		     (void*)(unsigned long)(new_size & 0xffffffff),
		     (void*)(unsigned long)((toss_start >> 32) & 0xffffffff),
		     (void*)(unsigned long)(toss_start & 0xffffffff),
		     (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff),
		     (void*)(unsigned long)(toss_finish & 0xffffffff),
		     (void*)(unsigned long)current_cpu(),
		     (void*)(unsigned long)current_pid(),
		     (void*)NULL,
		     (void*)NULL,
		     (void*)NULL);
}
#else
#define	xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish)
#endif
/*
 * Start the truncation of the file to new_size.  The new size
 * must be smaller than the current size.  This routine will
 * clear the buffer and page caches of file data in the removed
 * range, and xfs_itruncate_finish() will remove the underlying
 * blocks.
 *
 * The inode must have its I/O lock locked EXCLUSIVELY, and it
 * must NOT have the inode lock held at all.  This is because we're
 * calling into the buffer/page cache code and we can't hold the
 * inode lock when we do so.
 *
 * We need to wait for any direct I/Os in flight to complete before we
 * proceed with the truncate. This is needed to prevent the extents
 * being read or written by the direct I/Os from being removed while the
 * I/O is in flight as there is no other method of synchronising
 * direct I/O with the truncate operation.  Also, because we hold
 * the IOLOCK in exclusive mode, we prevent new direct I/Os from being
 * started until the truncate completes and drops the lock. Essentially,
 * the vn_iowait() call forms an I/O barrier that provides strict ordering
 * between direct I/Os and the truncate operation.
 *
 * The flags parameter can have either the value XFS_ITRUNC_DEFINITE
 * or XFS_ITRUNC_MAYBE.  The XFS_ITRUNC_MAYBE value should be used
 * in the case that the caller is locking things out of order and
 * may not be able to call xfs_itruncate_finish() with the inode lock
 * held without dropping the I/O lock.  If the caller must drop the
 * I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start()
 * must be called again with all the same restrictions as the initial
 * call.
 */
int
xfs_itruncate_start(
	xfs_inode_t	*ip,
	uint		flags,
	xfs_fsize_t	new_size)
{
	xfs_fsize_t	last_byte;
	xfs_off_t	toss_start;
	xfs_mount_t	*mp;
	bhv_vnode_t	*vp;
	int		error = 0;

	ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
	ASSERT((new_size == 0) || (new_size <= ip->i_size));
	ASSERT((flags == XFS_ITRUNC_DEFINITE) ||
	       (flags == XFS_ITRUNC_MAYBE));

	mp = ip->i_mount;
	vp = XFS_ITOV(ip);

	vn_iowait(vp);  /* wait for the completion of any pending DIOs */

	/*
	 * Call toss_pages or flushinval_pages to get rid of pages
	 * overlapping the region being removed.  We have to use
	 * the less efficient flushinval_pages in the case that the
	 * caller may not be able to finish the truncate without
	 * dropping the inode's I/O lock.  Make sure
	 * to catch any pages brought in by buffers overlapping
	 * the EOF by searching out beyond the isize by our
	 * block size. We round new_size up to a block boundary
	 * so that we don't toss things on the same block as
	 * new_size but before it.
	 *
	 * Before calling toss_pages or flushinval_pages, make sure to
	 * call remapf() over the same region if the file is mapped.
	 * This frees up mapped file references to the pages in the
	 * given range and for the flushinval_pages case it ensures
	 * that we get the latest mapped changes flushed out.
	 */
	toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	toss_start = XFS_FSB_TO_B(mp, toss_start);
	if (toss_start < 0) {
		/*
		 * The place to start tossing is beyond our maximum
		 * file size, so there is no way that the data extended
		 * all the way to the end of our max file size.
		 */
		return 0;
	}
	last_byte = xfs_file_last_byte(ip);
	xfs_itrunc_trace(XFS_ITRUNC_START, ip, flags, new_size, toss_start,
			 last_byte);
	if (last_byte > toss_start) {
		if (flags & XFS_ITRUNC_DEFINITE) {
			bhv_vop_toss_pages(vp, toss_start, -1, FI_REMAPF_LOCKED);
		} else {
			error = bhv_vop_flushinval_pages(vp, toss_start, -1, FI_REMAPF_LOCKED);
		}
	}

#ifdef DEBUG
	if (new_size == 0) {
		ASSERT(VN_CACHED(vp) == 0);
	}
#endif
	return error;
}
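
/*
 * Example (editor's sketch, not part of the original file): the
 * start/finish protocol described above, in the shape used by callers
 * such as xfs_inactive().  Error handling and the wsync/sync decision
 * are simplified; treat this only as an illustration of the lock and
 * transaction ordering.
 */
#ifdef EXAMPLE_ONLY
STATIC int
example_truncate_to_zero(xfs_inode_t *ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_trans_t	*tp;
	int		error;

	/* 1. I/O lock only; the page cache work happens without the ilock. */
	xfs_ilock(ip, XFS_IOLOCK_EXCL);
	error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, 0);
	if (error) {
		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
		return error;
	}

	/* 2. Permanent log reservation, as xfs_itruncate_finish() requires. */
	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
				  XFS_TRANS_PERM_LOG_RES,
				  XFS_ITRUNCATE_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
		return error;
	}

	/* 3. Now take the ilock, join and hold the inode, and finish. */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	xfs_trans_ihold(tp, ip);
	error = xfs_itruncate_finish(&tp, ip, 0, XFS_DATA_FORK, 1);
	if (error) {
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	} else {
		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	}
	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	return error;
}
#endif /* EXAMPLE_ONLY */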
/*
 * Shrink the file to the given new_size.  The new
 * size must be smaller than the current size.
 * This will free up the underlying blocks
 * in the removed range after a call to xfs_itruncate_start()
 * or xfs_atruncate_start().
 *
 * The transaction passed to this routine must have made
 * a permanent log reservation of at least XFS_ITRUNCATE_LOG_RES.
 * This routine may commit the given transaction and
 * start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.
 * Some transaction will be returned to the caller to be
 * committed.  The incoming transaction must already include
 * the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On
 * return the inode will be "held" within the returned transaction.
 * This routine does NOT require any disk space to be reserved
 * for it within the transaction.
 *
 * The fork parameter must be either xfs_attr_fork or xfs_data_fork,
 * and it indicates the fork which is to be truncated.  For the
 * attribute fork we only support truncation to size 0.
 *
 * We use the sync parameter to indicate whether or not the first
 * transaction we perform might have to be synchronous.  For the attr fork,
 * it needs to be so if the unlink of the inode is not yet known to be
 * permanent in the log.  This keeps us from freeing and reusing the
 * blocks of the attribute fork before the unlink of the inode becomes
 * permanent.
 *
 * For the data fork, we normally have to run synchronously if we're
 * being called out of the inactive path or we're being called
 * out of the create path where we're truncating an existing file.
 * Either way, the truncate needs to be sync so blocks don't reappear
 * in the file with altered data in case of a crash.  wsync filesystems
 * can run the first case async because anything that shrinks the inode
 * has to run sync so by the time we're called here from inactive, the
 * inode size is permanently set to 0.
 *
 * Calls from the truncate path always need to be sync unless we're
 * in a wsync filesystem and the file has already been unlinked.
 *
 * The caller is responsible for correctly setting the sync parameter.
 * It gets too hard for us to guess here which path we're being called
 * out of just based on inode state.
 */
int
xfs_itruncate_finish(
	xfs_trans_t	**tp,
	xfs_inode_t	*ip,
	xfs_fsize_t	new_size,
	int		fork,
	int		sync)
{
	xfs_fsblock_t	first_block;
	xfs_fileoff_t	first_unmap_block;
	xfs_fileoff_t	last_block;
	xfs_filblks_t	unmap_len=0;
	xfs_mount_t	*mp;
	xfs_trans_t	*ntp;
	int		done;
	int		committed;
	xfs_bmap_free_t	free_list;
	int		error;

	ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
	ASSERT((new_size == 0) || (new_size <= ip->i_size));
	ASSERT(*tp != NULL);
	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_transp == *tp);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD);

	ntp = *tp;
	mp = (ntp)->t_mountp;
	ASSERT(! XFS_NOT_DQATTACHED(mp, ip));

	/*
	 * We only support truncating the entire attribute fork.
	 */
	if (fork == XFS_ATTR_FORK) {
		new_size = 0LL;
	}
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	xfs_itrunc_trace(XFS_ITRUNC_FINISH1, ip, 0, new_size, 0, 0);
	/*
	 * The first thing we do is set the size to new_size permanently
	 * on disk.  This way we don't have to worry about anyone ever
	 * being able to look at the data being freed even in the face
	 * of a crash.  What we're getting around here is the case where
	 * we free a block, it is allocated to another file, it is written
	 * to, and then we crash.  If the new data gets written to the
	 * file but the log buffers containing the free and reallocation
	 * don't, then we'd end up with garbage in the blocks being freed.
	 * As long as we make the new_size permanent before actually
	 * freeing any blocks it doesn't matter if they get written to.
	 *
	 * The callers must signal into us whether or not the size
	 * setting here must be synchronous.  There are a few cases
	 * where it doesn't have to be synchronous.  Those cases
	 * occur if the file is unlinked and we know the unlink is
	 * permanent or if the blocks being truncated are guaranteed
	 * to be beyond the inode eof (regardless of the link count)
	 * and the eof value is permanent.  Both of these cases occur
	 * only on wsync-mounted filesystems.  In those cases, we're
	 * guaranteed that no user will ever see the data in the blocks
	 * that are being truncated so the truncate can run async.
	 * In the free beyond eof case, the file may wind up with
	 * more blocks allocated to it than it needs if we crash
	 * and that won't get fixed until the next time the file
	 * is re-opened and closed but that's ok as that shouldn't
	 * be too many blocks.
	 *
	 * However, we can't just make all wsync xactions run async
	 * because there's one call out of the create path that needs
	 * to run sync where it's truncating an existing file to size
	 * 0 whose size is > 0.
	 *
	 * It's probably possible to come up with a test in this
	 * routine that would correctly distinguish all the above
	 * cases from the values of the function parameters and the
	 * inode state but for sanity's sake, I've decided to let the
	 * layers above just tell us.  It's simpler to correctly figure
	 * out in the layer above exactly under what conditions we
	 * can run async and I think it's easier for others to read and
	 * follow the logic in case something has to be changed.
	 * cscope is your friend -- rcc.
	 *
	 * The attribute fork is much simpler.
	 *
	 * For the attribute fork we allow the caller to tell us whether
	 * the unlink of the inode that led to this call is yet permanent
	 * in the on disk log.  If it is not and we will be freeing extents
	 * in this inode then we make the first transaction synchronous
	 * to make sure that the unlink is permanent by the time we free
	 * the blocks.
	 */
	if (fork == XFS_DATA_FORK) {
		if (ip->i_d.di_nextents > 0) {
			/*
			 * If we are not changing the file size then do
			 * not update the on-disk file size - we may be
			 * called from xfs_inactive_free_eofblocks().  If we
			 * update the on-disk file size and then the system
			 * crashes before the contents of the file are
			 * flushed to disk then the files may be full of
			 * holes (ie NULL files bug).
			 */
			if (ip->i_size != new_size) {
				ip->i_d.di_size = new_size;
				ip->i_size = new_size;
				xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
			}
		}
	} else if (sync) {
		ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC));
		if (ip->i_d.di_anextents > 0)
			xfs_trans_set_sync(ntp);
	}
	ASSERT(fork == XFS_DATA_FORK ||
		(fork == XFS_ATTR_FORK &&
			((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) ||
			 (sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC)))));

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.  If the first block to be removed is
	 * beyond the maximum file size (ie it is the same as last_block),
	 * then there is nothing to do.
	 */
	last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
	ASSERT(first_unmap_block <= last_block);
	done = 0;
	if (last_block == first_unmap_block) {
		done = 1;
	} else {
		unmap_len = last_block - first_unmap_block + 1;
	}
	while (!done) {
		/*
		 * Free up to XFS_ITRUNC_MAX_EXTENTS.  xfs_bunmapi()
		 * will tell us whether it freed the entire range or
		 * not.  If this is a synchronous mount (wsync),
		 * then we can tell bunmapi to keep all the
		 * transactions asynchronous since the unlink
		 * transaction that made this inode inactive has
		 * already hit the disk.  There's no danger of
		 * the freed blocks being reused, there being a
		 * crash, and the reused blocks suddenly reappearing
		 * in this file with garbage in them once recovery
		 * runs.
		 */
		XFS_BMAP_INIT(&free_list, &first_block);
		error = XFS_BUNMAPI(mp, ntp, &ip->i_iocore,
				    first_unmap_block, unmap_len,
				    XFS_BMAPI_AFLAG(fork) |
				      (sync ? 0 : XFS_BMAPI_ASYNC),
				    XFS_ITRUNC_MAX_EXTENTS,
				    &first_block, &free_list,
				    NULL, &done);
		if (error) {
			/*
			 * If the bunmapi call encounters an error,
			 * return to the caller where the transaction
			 * can be properly aborted.  We just need to
			 * make sure we're not holding any resources
			 * that we were not when we came in.
			 */
			xfs_bmap_cancel(&free_list);
			return error;
		}

		/*
		 * Duplicate the transaction that has the permanent
		 * reservation and commit the old transaction.
		 */
		error = xfs_bmap_finish(tp, &free_list, &committed);
		ntp = *tp;
		if (error) {
			/*
			 * If the bmap finish call encounters an error,
			 * return to the caller where the transaction
			 * can be properly aborted.  We just need to
			 * make sure we're not holding any resources
			 * that we were not when we came in.
			 *
			 * Aborting from this point might lose some
			 * blocks in the file system, but oh well.
			 */
			xfs_bmap_cancel(&free_list);
			if (committed) {
				/*
				 * If the passed in transaction committed
				 * in xfs_bmap_finish(), then we want to
				 * add the inode to this one before returning.
				 * This keeps things simple for the higher
				 * level code, because it always knows that
				 * the inode is locked and held in the
				 * transaction that returns to it whether
				 * errors occur or not.  We don't mark the
				 * inode dirty so that this transaction can
				 * be easily aborted if possible.
				 */
				xfs_trans_ijoin(ntp, ip,
					XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
				xfs_trans_ihold(ntp, ip);
			}
			return error;
		}

		if (committed) {
			/*
			 * The first xact was committed,
			 * so add the inode to the new one.
			 * Mark it dirty so it will be logged
			 * and moved forward in the log as
			 * part of every commit.
			 */
			xfs_trans_ijoin(ntp, ip,
					XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
			xfs_trans_ihold(ntp, ip);
			xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
		}
		ntp = xfs_trans_dup(ntp);
		(void) xfs_trans_commit(*tp, 0);
		*tp = ntp;
		error = xfs_trans_reserve(ntp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
					  XFS_TRANS_PERM_LOG_RES,
					  XFS_ITRUNCATE_LOG_COUNT);
		/*
		 * Add the inode being truncated to the next chained
		 * transaction.
		 */
		xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
		xfs_trans_ihold(ntp, ip);
		if (error)
			return error;
	}
	/*
	 * Only update the size in the case of the data fork, but
	 * always re-log the inode so that our permanent transaction
	 * can keep on rolling it forward in the log.
	 */
	if (fork == XFS_DATA_FORK) {
		xfs_isize_check(mp, ip, new_size);
		/*
		 * If we are not changing the file size then do
		 * not update the on-disk file size - we may be
		 * called from xfs_inactive_free_eofblocks().  If we
		 * update the on-disk file size and then the system
		 * crashes before the contents of the file are
		 * flushed to disk then the files may be full of
		 * holes (ie NULL files bug).
		 */
		if (ip->i_size != new_size) {
			ip->i_d.di_size = new_size;
			ip->i_size = new_size;
		}
	}
	xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
	ASSERT((new_size != 0) ||
	       (fork == XFS_ATTR_FORK) ||
	       (ip->i_delayed_blks == 0));
	ASSERT((new_size != 0) ||
	       (fork == XFS_ATTR_FORK) ||
	       (ip->i_d.di_nextents == 0));
	xfs_itrunc_trace(XFS_ITRUNC_FINISH2, ip, 0, new_size, 0, 0);
	return 0;
}
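
/*
 * Example (editor's sketch, not part of the original file): the
 * dup/commit/reserve/rejoin sequence inside the loop above is the
 * classic way to "roll" a transaction that holds a permanent log
 * reservation.  Distilled into a hypothetical helper for clarity:
 */
#ifdef EXAMPLE_ONLY
STATIC int
example_trans_roll(xfs_trans_t **tpp, xfs_inode_t *ip)
{
	xfs_mount_t	*mp = (*tpp)->t_mountp;
	xfs_trans_t	*ntp;
	int		error;

	/* Duplicate first: the dup inherits the permanent reservation. */
	ntp = xfs_trans_dup(*tpp);
	(void) xfs_trans_commit(*tpp, 0);
	*tpp = ntp;
	error = xfs_trans_reserve(ntp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
				  XFS_TRANS_PERM_LOG_RES,
				  XFS_ITRUNCATE_LOG_COUNT);
	/* Rejoin the inode so the caller still owns it, locked and held. */
	xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_trans_ihold(ntp, ip);
	return error;
}
#endif /* EXAMPLE_ONLY */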
/*
 * Do the first part of growing a file: zero any data in the last
 * block that is beyond the old EOF.  We need to do this before
 * the inode is joined to the transaction to modify the i_size.
 * That way we can drop the inode lock and call into the buffer
 * cache to get the buffer mapping the EOF.
 */
int
xfs_igrow_start(
	xfs_inode_t	*ip,
	xfs_fsize_t	new_size,
	cred_t		*credp)
{
	int		error;

	ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0);
	ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0);
	ASSERT(new_size > ip->i_size);

	/*
	 * Zero any pages that may have been created by
	 * xfs_write_file() beyond the end of the file
	 * and any blocks between the old and new file sizes.
	 */
	error = xfs_zero_eof(XFS_ITOV(ip), &ip->i_iocore, new_size,
			     ip->i_size);
	return error;
}
/*
 * This routine is called to extend the size of a file.
 * The inode must have both the iolock and the ilock locked
 * for update and it must be a part of the current transaction.
 * The xfs_igrow_start() function must have been called previously.
 * If the change_flag is not zero, the inode change timestamp will
 * be updated.
 */
void
xfs_igrow_finish(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_fsize_t	new_size,
	int		change_flag)
{
	ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0);
	ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0);
	ASSERT(ip->i_transp == tp);
	ASSERT(new_size > ip->i_size);

	/*
	 * Update the file size.  Update the inode change timestamp
	 * if change_flag set.
	 */
	ip->i_d.di_size = new_size;
	ip->i_size = new_size;
	if (change_flag)
		xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
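
/*
 * Example (editor's sketch, not part of the original file): the intended
 * ordering of the two grow routines, loosely mirroring callers such as
 * xfs_setattr().  xfs_igrow_start() runs before the transaction exists
 * (it may sleep in the buffer cache), xfs_igrow_finish() runs inside it.
 * The transaction type and reservation below are illustrative assumptions.
 */
#ifdef EXAMPLE_ONLY
STATIC int
example_grow_file(xfs_inode_t *ip, xfs_fsize_t new_size, cred_t *credp)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_trans_t	*tp;
	int		error;

	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	error = xfs_igrow_start(ip, new_size, credp);	/* zero old EOF block */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);	/* drop ilock before reserving */
	if (error)
		goto out_iolock;

	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
				  XFS_TRANS_PERM_LOG_RES,
				  XFS_ITRUNCATE_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		goto out_iolock;
	}
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	xfs_trans_ihold(tp, ip);
	xfs_igrow_finish(tp, ip, new_size, 1);	/* size + ctime update */
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}
#endif /* EXAMPLE_ONLY */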
/*
 * This is called when the inode's link count goes to 0.
 * We place the on-disk inode on a list in the AGI.  It
 * will be pulled from this list when the inode is freed.
 */
int
xfs_iunlink(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp;
	xfs_agi_t	*agi;
	xfs_dinode_t	*dip;
	xfs_buf_t	*agibp;
	xfs_buf_t	*ibp;
	xfs_agnumber_t	agno;
	xfs_daddr_t	agdaddr;
	xfs_agino_t	agino;
	short		bucket_index;
	int		offset;
	int		error;
	int		agi_ok;

	ASSERT(ip->i_d.di_nlink == 0);
	ASSERT(ip->i_d.di_mode != 0);
	ASSERT(ip->i_transp == tp);

	mp = tp->t_mountp;

	agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
	agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));

	/*
	 * Get the agi buffer first.  It ensures lock ordering
	 * on the list.
	 */
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr,
				   XFS_FSS_TO_BB(mp, 1), 0, &agibp);
	if (error) {
		return error;
	}
	/*
	 * Validate the magic number of the agi block.
	 */
	agi = XFS_BUF_TO_AGI(agibp);
	agi_ok =
		be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC &&
		XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum));
	if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK,
			XFS_RANDOM_IUNLINK))) {
		XFS_CORRUPTION_ERROR("xfs_iunlink", XFS_ERRLEVEL_LOW, mp, agi);
		xfs_trans_brelse(tp, agibp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	/*
	 * Get the index into the agi hash table for the
	 * list this inode will go on.
	 */
	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	ASSERT(agino != 0);
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	ASSERT(agi->agi_unlinked[bucket_index]);
	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);

	if (be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO) {
		/*
		 * There is already another inode in the bucket we need
		 * to add ourselves to.  Add us at the front of the list.
		 * Here we put the head pointer into our next pointer,
		 * and then we fall through to point the head at us.
		 */
		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
		if (error) {
			return error;
		}
		ASSERT(INT_GET(dip->di_next_unlinked, ARCH_CONVERT) == NULLAGINO);
		ASSERT(dip->di_next_unlinked);
		/* both on-disk, don't endian flip twice */
		dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
		offset = ip->i_boffset +
			offsetof(xfs_dinode_t, di_next_unlinked);
		xfs_trans_inode_buf(tp, ibp);
		xfs_trans_log_buf(tp, ibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		xfs_inobp_check(mp, ibp);
	}

	/*
	 * Point the bucket head pointer at the inode being inserted.
	 */
	ASSERT(agino != 0);
	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		(sizeof(xfs_agino_t) * bucket_index);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));
	return 0;
}
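
/*
 * Example (editor's sketch, not part of the original file): stripped of
 * buffer and endian handling, the AGI unlinked bucket is a singly linked
 * list threaded through each on-disk inode's di_next_unlinked field.
 * The types below are hypothetical simplifications used only to show the
 * shape of the head-insert performed above.
 */
#ifdef EXAMPLE_ONLY
struct example_dinode {
	xfs_agino_t	next_unlinked;	/* NULLAGINO terminates the chain */
};

STATIC void
example_bucket_insert(xfs_agino_t *bucket_head, struct example_dinode *dip,
		      xfs_agino_t agino)
{
	/* The new inode's next pointer takes the old head... */
	if (*bucket_head != NULLAGINO)
		dip->next_unlinked = *bucket_head;
	/* ...and the bucket head now points at the new inode. */
	*bucket_head = agino;
}
#endif /* EXAMPLE_ONLY */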
1980 * Pull the on-disk inode from the AGI unlinked list.
1993 xfs_agnumber_t agno;
1994 xfs_daddr_t agdaddr;
1996 xfs_agino_t next_agino;
1997 xfs_buf_t *last_ibp;
1998 xfs_dinode_t *last_dip = NULL;
2000 int offset, last_offset = 0;
2005 * First pull the on-disk inode from the AGI unlinked list.
2009 agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2010 agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
2013 * Get the agi buffer first. It ensures lock ordering
2016 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr,
2017 XFS_FSS_TO_BB(mp, 1), 0, &agibp);
2020 "xfs_iunlink_remove: xfs_trans_read_buf() returned an error %d on %s. Returning error.",
2021 error, mp->m_fsname);
2025 * Validate the magic number of the agi block.
2027 agi = XFS_BUF_TO_AGI(agibp);
2029 be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC &&
2030 XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum));
2031 if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK_REMOVE,
2032 XFS_RANDOM_IUNLINK_REMOVE))) {
2033 XFS_CORRUPTION_ERROR("xfs_iunlink_remove", XFS_ERRLEVEL_LOW,
2035 xfs_trans_brelse(tp, agibp);
2037 "xfs_iunlink_remove: XFS_TEST_ERROR() returned an error on %s. Returning EFSCORRUPTED.",
2039 return XFS_ERROR(EFSCORRUPTED);
2042 * Get the index into the agi hash table for the
2043 * list this inode will go on.
2045 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2047 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2048 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO);
2049 ASSERT(agi->agi_unlinked[bucket_index]);
2051 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
2053 * We're at the head of the list. Get the inode's
2054 * on-disk buffer to see if there is anyone after us
2055 * on the list. Only modify our next pointer if it
2056 * is not already NULLAGINO. This saves us the overhead
2057 * of dealing with the buffer when there is no need to
2060 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
2063 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
2064 error, mp->m_fsname);
2067 next_agino = INT_GET(dip->di_next_unlinked, ARCH_CONVERT);
2068 ASSERT(next_agino != 0);
2069 if (next_agino != NULLAGINO) {
2070 INT_SET(dip->di_next_unlinked, ARCH_CONVERT, NULLAGINO);
2071 offset = ip->i_boffset +
2072 offsetof(xfs_dinode_t, di_next_unlinked);
2073 xfs_trans_inode_buf(tp, ibp);
2074 xfs_trans_log_buf(tp, ibp, offset,
2075 (offset + sizeof(xfs_agino_t) - 1));
2076 xfs_inobp_check(mp, ibp);
2078 xfs_trans_brelse(tp, ibp);
2081 * Point the bucket head pointer at the next inode.
2083 ASSERT(next_agino != 0);
2084 ASSERT(next_agino != agino);
2085 agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
2086 offset = offsetof(xfs_agi_t, agi_unlinked) +
2087 (sizeof(xfs_agino_t) * bucket_index);
2088 xfs_trans_log_buf(tp, agibp, offset,
2089 (offset + sizeof(xfs_agino_t) - 1));
2092 * We need to search the list for the inode being freed.
2094 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2096 while (next_agino != agino) {
2098 * If the last inode wasn't the one pointing to
2099 * us, then release its buffer since we're not
2100 * going to do anything with it.
2102 if (last_ibp != NULL) {
2103 xfs_trans_brelse(tp, last_ibp);
2105 next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
2106 error = xfs_inotobp(mp, tp, next_ino, &last_dip,
2107 &last_ibp, &last_offset);
2110 "xfs_iunlink_remove: xfs_inotobp() returned an error %d on %s. Returning error.",
2111 error, mp->m_fsname);
2114 next_agino = INT_GET(last_dip->di_next_unlinked, ARCH_CONVERT);
2115 ASSERT(next_agino != NULLAGINO);
2116 ASSERT(next_agino != 0);
2119 * Now last_ibp points to the buffer previous to us on
2120 * the unlinked list. Pull us from the list.
2122 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
2125 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
2126 error, mp->m_fsname);
2129 next_agino = INT_GET(dip->di_next_unlinked, ARCH_CONVERT);
2130 ASSERT(next_agino != 0);
2131 ASSERT(next_agino != agino);
2132 if (next_agino != NULLAGINO) {
2133 INT_SET(dip->di_next_unlinked, ARCH_CONVERT, NULLAGINO);
2134 offset = ip->i_boffset +
2135 offsetof(xfs_dinode_t, di_next_unlinked);
2136 xfs_trans_inode_buf(tp, ibp);
2137 xfs_trans_log_buf(tp, ibp, offset,
2138 (offset + sizeof(xfs_agino_t) - 1));
2139 xfs_inobp_check(mp, ibp);
2141 xfs_trans_brelse(tp, ibp);
2144 * Point the previous inode on the list to the next inode.
2146 INT_SET(last_dip->di_next_unlinked, ARCH_CONVERT, next_agino);
2147 ASSERT(next_agino != 0);
2148 offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
2149 xfs_trans_inode_buf(tp, last_ibp);
2150 xfs_trans_log_buf(tp, last_ibp, offset,
2151 (offset + sizeof(xfs_agino_t) - 1));
2152 xfs_inobp_check(mp, last_ibp);
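/*
 * Illustrative sketch (simplified, not part of this file): the removal
 * above is the usual trailing-pointer walk over a singly linked list,
 *
 *	prev = head;
 *	while (prev->next != target)
 *		prev = prev->next;
 *	prev->next = target->next;
 *	target->next = NULL;
 *
 * except that every "pointer chase" is an on-disk inode buffer lookup
 * (xfs_inotobp) and every store must be logged in the transaction.
 */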
2157 STATIC_INLINE int xfs_inode_clean(xfs_inode_t *ip)
2159 return (((ip->i_itemp == NULL) ||
2160 !(ip->i_itemp->ili_format.ilf_fields & XFS_ILOG_ALL)) &&
2161 (ip->i_update_core == 0));
2166 xfs_inode_t *free_ip,
2170 xfs_mount_t *mp = free_ip->i_mount;
2171 int blks_per_cluster;
2174 int i, j, found, pre_flushed;
2178 xfs_inode_t *ip, **ip_found;
2179 xfs_inode_log_item_t *iip;
2180 xfs_log_item_t *lip;
2183 if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
2184 blks_per_cluster = 1;
2185 ninodes = mp->m_sb.sb_inopblock;
2186 nbufs = XFS_IALLOC_BLOCKS(mp);
2188 blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
2189 mp->m_sb.sb_blocksize;
2190 ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
2191 nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
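	/*
	 * Worked example (hypothetical geometry, not part of this file):
	 * with 4k filesystem blocks, 8k inode clusters and 256-byte
	 * inodes, blks_per_cluster = 8192 / 4096 = 2, ninodes = 2 * 16 =
	 * 32 inodes per cluster buffer, and nbufs =
	 * XFS_IALLOC_BLOCKS(mp) / 2 buffers cover the chunk being freed.
	 */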
2194 ip_found = kmem_alloc(ninodes * sizeof(xfs_inode_t *), KM_NOFS);
2196 for (j = 0; j < nbufs; j++, inum += ninodes) {
2197 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2198 XFS_INO_TO_AGBNO(mp, inum));
2202		 * Look for each inode in memory and attempt to lock it;
2203		 * we can be racing with flush and tail pushing here.
2204		 * Any inode we get the locks on, add to an array of
2205		 * inode items to process later.
2207		 * If we get the buffer lock, we could beat a flush
2208		 * or tail pushing thread to the lock here, in which
2209		 * case it will go looking for the inode buffer
2210		 * and fail, so we need some other form of interlock
2214 for (i = 0; i < ninodes; i++) {
2215 ih = XFS_IHASH(mp, inum + i);
2216 read_lock(&ih->ih_lock);
2217 for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) {
2218 if (ip->i_ino == inum + i)
2222			/* Inode not in memory or we found it already, nothing to do */
2225 if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) {
2226 read_unlock(&ih->ih_lock);
2230 if (xfs_inode_clean(ip)) {
2231 read_unlock(&ih->ih_lock);
2235 /* If we can get the locks then add it to the
2236 * list, otherwise by the time we get the bp lock
2237			 * below it will already be attached to the inode buffer.
2241			/* This inode will already be locked - by us; let's keep it that way.
2245 if (ip == free_ip) {
2246 if (xfs_iflock_nowait(ip)) {
2247 xfs_iflags_set(ip, XFS_ISTALE);
2248 if (xfs_inode_clean(ip)) {
2251 ip_found[found++] = ip;
2254 read_unlock(&ih->ih_lock);
2258 if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2259 if (xfs_iflock_nowait(ip)) {
2260 xfs_iflags_set(ip, XFS_ISTALE);
2262 if (xfs_inode_clean(ip)) {
2264 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2266 ip_found[found++] = ip;
2269 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2273 read_unlock(&ih->ih_lock);
2276 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2277 mp->m_bsize * blks_per_cluster,
2281 lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
2283 if (lip->li_type == XFS_LI_INODE) {
2284 iip = (xfs_inode_log_item_t *)lip;
2285 ASSERT(iip->ili_logged == 1);
2286 lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done;
2288 iip->ili_flush_lsn = iip->ili_item.li_lsn;
2290 xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
2293 lip = lip->li_bio_list;
2296 for (i = 0; i < found; i++) {
2301 ip->i_update_core = 0;
2303 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2307 iip->ili_last_fields = iip->ili_format.ilf_fields;
2308 iip->ili_format.ilf_fields = 0;
2309 iip->ili_logged = 1;
2311 iip->ili_flush_lsn = iip->ili_item.li_lsn;
2314 xfs_buf_attach_iodone(bp,
2315 (void(*)(xfs_buf_t*,xfs_log_item_t*))
2316 xfs_istale_done, (xfs_log_item_t *)iip);
2317 if (ip != free_ip) {
2318 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2322 if (found || pre_flushed)
2323 xfs_trans_stale_inode_buf(tp, bp);
2324 xfs_trans_binval(tp, bp);
2327 kmem_free(ip_found, ninodes * sizeof(xfs_inode_t *));
2331 * This is called to return an inode to the inode free list.
2332 * The inode should already be truncated to 0 length and have
2333 * no pages associated with it. This routine also assumes that
2334 * the inode is already a part of the transaction.
2336 * The on-disk copy of the inode will have been added to the list
2337 * of unlinked inodes in the AGI. We need to remove the inode from
2338 * that list atomically with respect to freeing it here.
2344 xfs_bmap_free_t *flist)
2348 xfs_ino_t first_ino;
2350 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
2351 ASSERT(ip->i_transp == tp);
2352 ASSERT(ip->i_d.di_nlink == 0);
2353 ASSERT(ip->i_d.di_nextents == 0);
2354 ASSERT(ip->i_d.di_anextents == 0);
2355 ASSERT((ip->i_d.di_size == 0 && ip->i_size == 0) ||
2356 ((ip->i_d.di_mode & S_IFMT) != S_IFREG));
2357 ASSERT(ip->i_d.di_nblocks == 0);
2360 * Pull the on-disk inode from the AGI unlinked list.
2362 error = xfs_iunlink_remove(tp, ip);
2367 error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
2371 ip->i_d.di_mode = 0; /* mark incore inode as free */
2372 ip->i_d.di_flags = 0;
2373 ip->i_d.di_dmevmask = 0;
2374 ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
2375 ip->i_df.if_ext_max =
2376 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
2377 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
2378 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
2380 * Bump the generation count so no one will be confused
2381 * by reincarnations of this inode.
2384 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2387 xfs_ifree_cluster(ip, tp, first_ino);
2394 * Reallocate the space for if_broot based on the number of records
2395 * being added or deleted as indicated in rec_diff. Move the records
2396 * and pointers in if_broot to fit the new size. When shrinking this
2397 * will eliminate holes between the records and pointers created by
2398 * the caller. When growing this will create holes to be filled in by the caller.
2401 * The caller must not request to add more records than would fit in
2402 * the on-disk inode root. If the if_broot is currently NULL, then
2403 * if we are adding records, one will be allocated. The caller must also
2404 * not request that the number of records go below zero, although
2405 * it can go to zero.
2407 * ip -- the inode whose if_broot area is changing
2408 * ext_diff -- the change in the number of records, positive or negative,
2409 * requested for the if_broot array.
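/*
 * Layout sketch (illustrative, not part of this file): if_broot holds
 * a btree block header followed by the records and then the pointers,
 *
 *	[ hdr | rec[0] .. rec[n-1] | ptr[0] .. ptr[n-1] ]
 *
 * so growing by rec_diff leaves the records in place (they stay butted
 * up against the header) and only the pointer array is memmove()d to
 * its new offset; shrinking copies both arrays into a freshly
 * allocated, smaller buffer.
 */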
2419 xfs_bmbt_block_t *new_broot;
2426 * Handle the degenerate case quietly.
2428 if (rec_diff == 0) {
2432 ifp = XFS_IFORK_PTR(ip, whichfork);
2435 * If there wasn't any memory allocated before, just
2436 * allocate it now and get out.
2438 if (ifp->if_broot_bytes == 0) {
2439 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff);
2440 ifp->if_broot = (xfs_bmbt_block_t*)kmem_alloc(new_size,
2442 ifp->if_broot_bytes = (int)new_size;
2447 * If there is already an existing if_broot, then we need
2448 * to realloc() it and shift the pointers to their new
2449 * location. The records don't change location because
2450 * they are kept butted up against the btree block header.
2452 cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes);
2453 new_max = cur_max + rec_diff;
2454 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
2455 ifp->if_broot = (xfs_bmbt_block_t *)
2456 kmem_realloc(ifp->if_broot,
2458 (size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */
2460 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
2461 ifp->if_broot_bytes);
2462 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
2464 ifp->if_broot_bytes = (int)new_size;
2465 ASSERT(ifp->if_broot_bytes <=
2466 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
2467 memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t));
2472 * rec_diff is less than 0. In this case, we are shrinking the
2473 * if_broot buffer. It must already exist. If we go to zero
2474 * records, just get rid of the root and clear the status bit.
2476 ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
2477 cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes);
2478 new_max = cur_max + rec_diff;
2479 ASSERT(new_max >= 0);
2481 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
2485 new_broot = (xfs_bmbt_block_t *)kmem_alloc(new_size, KM_SLEEP);
2487 * First copy over the btree block header.
2489 memcpy(new_broot, ifp->if_broot, sizeof(xfs_bmbt_block_t));
2492 ifp->if_flags &= ~XFS_IFBROOT;
2496 * Only copy the records and pointers if there are any.
2500 * First copy the records.
2502 op = (char *)XFS_BMAP_BROOT_REC_ADDR(ifp->if_broot, 1,
2503 ifp->if_broot_bytes);
2504 np = (char *)XFS_BMAP_BROOT_REC_ADDR(new_broot, 1,
2506 memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));
2509 * Then copy the pointers.
2511 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
2512 ifp->if_broot_bytes);
2513 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(new_broot, 1,
2515 memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
2517 kmem_free(ifp->if_broot, ifp->if_broot_bytes);
2518 ifp->if_broot = new_broot;
2519 ifp->if_broot_bytes = (int)new_size;
2520 ASSERT(ifp->if_broot_bytes <=
2521 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
2527 * This is called when the amount of space needed for if_data
2528 * is increased or decreased. The change in size is indicated by
2529 * the number of bytes that need to be added or deleted in the
2530 * byte_diff parameter.
2532 * If the amount of space needed has decreased below the size of the
2533 * inline buffer, then switch to using the inline buffer. Otherwise,
2534 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
2535 * to what is needed.
2537 * ip -- the inode whose if_data area is changing
2538 * byte_diff -- the change in the number of bytes, positive or negative,
2539 * requested for the if_data array.
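/*
 * Worked example (hypothetical sizes, not part of this file): growing
 * a 6-byte local fork by byte_diff = 4 gives
 *
 *	new_size  = 6 + 4 = 10		-- valid bytes, kept in if_bytes
 *	real_size = roundup(10, 4) = 12	-- bytes allocated, in if_real_bytes
 *
 * so the logged region of the fork always stays word aligned.
 */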
2551 if (byte_diff == 0) {
2555 ifp = XFS_IFORK_PTR(ip, whichfork);
2556 new_size = (int)ifp->if_bytes + byte_diff;
2557 ASSERT(new_size >= 0);
2559 if (new_size == 0) {
2560 if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2561 kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
2563 ifp->if_u1.if_data = NULL;
2565 } else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) {
2567 * If the valid extents/data can fit in if_inline_ext/data,
2568 * copy them from the malloc'd vector and free it.
2570 if (ifp->if_u1.if_data == NULL) {
2571 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2572 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2573 ASSERT(ifp->if_real_bytes != 0);
2574 memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
2576 kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
2577 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2582 * Stuck with malloc/realloc.
2583 * For inline data, the underlying buffer must be
2584 * a multiple of 4 bytes in size so that it can be
2585	 * logged and stay on word boundaries. We enforce that here.
2588 real_size = roundup(new_size, 4);
2589 if (ifp->if_u1.if_data == NULL) {
2590 ASSERT(ifp->if_real_bytes == 0);
2591 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
2592 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2594 * Only do the realloc if the underlying size
2595 * is really changing.
2597 if (ifp->if_real_bytes != real_size) {
2598 ifp->if_u1.if_data =
2599 kmem_realloc(ifp->if_u1.if_data,
2605 ASSERT(ifp->if_real_bytes == 0);
2606 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
2607 memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
2611 ifp->if_real_bytes = real_size;
2612 ifp->if_bytes = new_size;
2613 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2620 * Map inode to disk block and offset.
2622 * mp -- the mount point structure for the current file system
2623 * tp -- the current transaction
2624 * ino -- the inode number of the inode to be located
2625 * imap -- this structure is filled in with the information necessary
2626 * to retrieve the given inode from disk
2627 * flags -- flags to pass to xfs_dilocate indicating whether or not
2628 * lookups in the inode btree were OK or not
2638 xfs_fsblock_t fsbno;
2643 fsbno = imap->im_blkno ?
2644 XFS_DADDR_TO_FSB(mp, imap->im_blkno) : NULLFSBLOCK;
2645 error = xfs_dilocate(mp, tp, ino, &fsbno, &len, &off, flags);
2649 imap->im_blkno = XFS_FSB_TO_DADDR(mp, fsbno);
2650 imap->im_len = XFS_FSB_TO_BB(mp, len);
2651 imap->im_agblkno = XFS_FSB_TO_AGBNO(mp, fsbno);
2652 imap->im_ioffset = (ushort)off;
2653 imap->im_boffset = (ushort)(off << mp->m_sb.sb_inodelog);
2664 ifp = XFS_IFORK_PTR(ip, whichfork);
2665 if (ifp->if_broot != NULL) {
2666 kmem_free(ifp->if_broot, ifp->if_broot_bytes);
2667 ifp->if_broot = NULL;
2671 * If the format is local, then we can't have an extents
2672 * array so just look for an inline data array. If we're
2673 * not local then we may or may not have an extents list,
2674 * so check and free it up if we do.
2676 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
2677 if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
2678 (ifp->if_u1.if_data != NULL)) {
2679 ASSERT(ifp->if_real_bytes != 0);
2680 kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
2681 ifp->if_u1.if_data = NULL;
2682 ifp->if_real_bytes = 0;
2684 } else if ((ifp->if_flags & XFS_IFEXTENTS) &&
2685 ((ifp->if_flags & XFS_IFEXTIREC) ||
2686 ((ifp->if_u1.if_extents != NULL) &&
2687 (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) {
2688 ASSERT(ifp->if_real_bytes != 0);
2689 xfs_iext_destroy(ifp);
2691 ASSERT(ifp->if_u1.if_extents == NULL ||
2692 ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
2693 ASSERT(ifp->if_real_bytes == 0);
2694 if (whichfork == XFS_ATTR_FORK) {
2695 kmem_zone_free(xfs_ifork_zone, ip->i_afp);
2701 * This is called to free all the memory associated with an inode.
2702 * It must free the inode itself and any buffers allocated for
2703 * if_extents/if_data and if_broot. It must also free the lock
2704 * associated with the inode.
2711 switch (ip->i_d.di_mode & S_IFMT) {
2715 xfs_idestroy_fork(ip, XFS_DATA_FORK);
2719 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
2720 mrfree(&ip->i_lock);
2721 mrfree(&ip->i_iolock);
2722 freesema(&ip->i_flock);
2723 #ifdef XFS_BMAP_TRACE
2724 ktrace_free(ip->i_xtrace);
2726 #ifdef XFS_BMBT_TRACE
2727 ktrace_free(ip->i_btrace);
2730 ktrace_free(ip->i_rwtrace);
2732 #ifdef XFS_ILOCK_TRACE
2733 ktrace_free(ip->i_lock_trace);
2735 #ifdef XFS_DIR2_TRACE
2736 ktrace_free(ip->i_dir_trace);
2740 * Only if we are shutting down the fs will we see an
2741 * inode still in the AIL. If it is there, we should remove
2742 * it to prevent a use-after-free from occurring.
2744 xfs_mount_t *mp = ip->i_mount;
2745 xfs_log_item_t *lip = &ip->i_itemp->ili_item;
2748 ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
2749 XFS_FORCED_SHUTDOWN(ip->i_mount));
2750 if (lip->li_flags & XFS_LI_IN_AIL) {
2752 if (lip->li_flags & XFS_LI_IN_AIL)
2753 xfs_trans_delete_ail(mp, lip, s);
2757 xfs_inode_item_destroy(ip);
2759 kmem_zone_free(xfs_inode_zone, ip);
2764 * Increment the pin count of the given inode.
2765 * This value is protected by the ipinlock spinlock in the mount structure.
2771 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
2773 atomic_inc(&ip->i_pincount);
2777 * Decrement the pin count of the given inode, and wake up
2778 * anyone in xfs_iunpin_wait() if the count goes to 0. The
2779 * inode must have been previously pinned with a call to xfs_ipin().
2785 ASSERT(atomic_read(&ip->i_pincount) > 0);
2787 if (atomic_dec_and_lock(&ip->i_pincount, &ip->i_flags_lock)) {
2790 * If the inode is currently being reclaimed, the link between
2791 * the bhv_vnode and the xfs_inode will be broken after the
2792 * XFS_IRECLAIM* flag is set. Hence, if these flags are not
2793 * set, then we can move forward and mark the linux inode dirty
2794		 * knowing that it is still valid as it won't be freed until after
2795 * the bhv_vnode<->xfs_inode link is broken in xfs_reclaim. The
2796 * i_flags_lock is used to synchronise the setting of the
2797 * XFS_IRECLAIM* flags and the breaking of the link, and so we
2798		 * can execute atomically w.r.t. reclaim by holding this lock
2801 * However, we still need to issue the unpin wakeup call as the
2802 * inode reclaim may be blocked waiting for the inode to become
2806 if (!__xfs_iflags_test(ip, XFS_IRECLAIM|XFS_IRECLAIMABLE)) {
2807 bhv_vnode_t *vp = XFS_ITOV_NULL(ip);
2808 struct inode *inode = NULL;
2811 inode = vn_to_inode(vp);
2812 BUG_ON(inode->i_state & I_CLEAR);
2814 /* make sync come back and flush this inode */
2815 if (!(inode->i_state & (I_NEW|I_FREEING)))
2816 mark_inode_dirty_sync(inode);
2818 spin_unlock(&ip->i_flags_lock);
2819 wake_up(&ip->i_ipin_wait);
2824 * This is called to wait for the given inode to be unpinned.
2825 * It will sleep until this happens. The caller must have the
2826 * inode locked in at least shared mode so that the inode cannot
2827 * be subsequently pinned once someone is waiting for it to be unpinned.
2834 xfs_inode_log_item_t *iip;
2837 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE | MR_ACCESS));
2839 if (atomic_read(&ip->i_pincount) == 0) {
2844 if (iip && iip->ili_last_lsn) {
2845 lsn = iip->ili_last_lsn;
2851 * Give the log a push so we don't wait here too long.
2853 xfs_log_force(ip->i_mount, lsn, XFS_LOG_FORCE);
2855 wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0));
2860 * xfs_iextents_copy()
2862 * This is called to copy the REAL extents (as opposed to the delayed
2863 * allocation extents) from the inode into the given buffer. It
2864 * returns the number of bytes copied into the buffer.
2866 * If there are no delayed allocation extents, then we can just
2867 * memcpy() the extents into the buffer. Otherwise, we need to
2868 * examine each extent in turn and skip those which are delayed.
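/*
 * Illustrative sketch (simplified, not part of this file): with
 * delayed allocation extents marked by a null startblock, the loop
 * below is effectively a filter,
 *
 *	for (i = 0; i < nrecs; i++) {
 *		ep = xfs_iext_get_ext(ifp, i);
 *		if (!ISNULLSTARTBLOCK(xfs_bmbt_get_startblock(ep)))
 *			dest_ep[copied++] = *ep;	-- endian-converted
 *	}
 *
 * so only real extents ever reach the on-disk fork.
 */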
2873 xfs_bmbt_rec_t *buffer,
2877 xfs_bmbt_rec_t *dest_ep;
2879 #ifdef XFS_BMAP_TRACE
2880 static char fname[] = "xfs_iextents_copy";
2885 xfs_fsblock_t start_block;
2887 ifp = XFS_IFORK_PTR(ip, whichfork);
2888 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
2889 ASSERT(ifp->if_bytes > 0);
2891 nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
2892 xfs_bmap_trace_exlist(fname, ip, nrecs, whichfork);
2896 * There are some delayed allocation extents in the
2897 * inode, so copy the extents one at a time and skip
2898 * the delayed ones. There must be at least one
2899 * non-delayed extent.
2903 for (i = 0; i < nrecs; i++) {
2904 ep = xfs_iext_get_ext(ifp, i);
2905 start_block = xfs_bmbt_get_startblock(ep);
2906 if (ISNULLSTARTBLOCK(start_block)) {
2908 * It's a delayed allocation extent, so skip it.
2913 /* Translate to on disk format */
2914 put_unaligned(INT_GET(ep->l0, ARCH_CONVERT),
2915 (__uint64_t*)&dest_ep->l0);
2916 put_unaligned(INT_GET(ep->l1, ARCH_CONVERT),
2917 (__uint64_t*)&dest_ep->l1);
2921 ASSERT(copied != 0);
2922 xfs_validate_extents(ifp, copied, 1, XFS_EXTFMT_INODE(ip));
2924 return (copied * (uint)sizeof(xfs_bmbt_rec_t));
2928 * Each of the following cases stores data into the same region
2929 * of the on-disk inode, so only one of them can be valid at
2930 * any given time. While it is possible to have conflicting formats
2931 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
2932 * in EXTENTS format, this can only happen when the fork has
2933 * changed formats after being modified but before being flushed.
2934 * In these cases, the format always takes precedence, because the
2935 * format indicates the current state of the fork.
2942 xfs_inode_log_item_t *iip,
2949 #ifdef XFS_TRANS_DEBUG
2952 static const short brootflag[2] =
2953 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
2954 static const short dataflag[2] =
2955 { XFS_ILOG_DDATA, XFS_ILOG_ADATA };
2956 static const short extflag[2] =
2957 { XFS_ILOG_DEXT, XFS_ILOG_AEXT };
2961 ifp = XFS_IFORK_PTR(ip, whichfork);
2963 * This can happen if we gave up in iformat in an error path,
2964 * for the attribute fork.
2967 ASSERT(whichfork == XFS_ATTR_FORK);
2970 cp = XFS_DFORK_PTR(dip, whichfork);
2972 switch (XFS_IFORK_FORMAT(ip, whichfork)) {
2973 case XFS_DINODE_FMT_LOCAL:
2974 if ((iip->ili_format.ilf_fields & dataflag[whichfork]) &&
2975 (ifp->if_bytes > 0)) {
2976 ASSERT(ifp->if_u1.if_data != NULL);
2977 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2978 memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes);
2982 case XFS_DINODE_FMT_EXTENTS:
2983 ASSERT((ifp->if_flags & XFS_IFEXTENTS) ||
2984 !(iip->ili_format.ilf_fields & extflag[whichfork]));
2985 ASSERT((xfs_iext_get_ext(ifp, 0) != NULL) ||
2986 (ifp->if_bytes == 0));
2987 ASSERT((xfs_iext_get_ext(ifp, 0) == NULL) ||
2988 (ifp->if_bytes > 0));
2989 if ((iip->ili_format.ilf_fields & extflag[whichfork]) &&
2990 (ifp->if_bytes > 0)) {
2991 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
2992 (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
2997 case XFS_DINODE_FMT_BTREE:
2998 if ((iip->ili_format.ilf_fields & brootflag[whichfork]) &&
2999 (ifp->if_broot_bytes > 0)) {
3000 ASSERT(ifp->if_broot != NULL);
3001 ASSERT(ifp->if_broot_bytes <=
3002 (XFS_IFORK_SIZE(ip, whichfork) +
3003 XFS_BROOT_SIZE_ADJ));
3004 xfs_bmbt_to_bmdr(ifp->if_broot, ifp->if_broot_bytes,
3005 (xfs_bmdr_block_t *)cp,
3006 XFS_DFORK_SIZE(dip, mp, whichfork));
3010 case XFS_DINODE_FMT_DEV:
3011 if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) {
3012 ASSERT(whichfork == XFS_DATA_FORK);
3013 INT_SET(dip->di_u.di_dev, ARCH_CONVERT, ip->i_df.if_u2.if_rdev);
3017 case XFS_DINODE_FMT_UUID:
3018 if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) {
3019 ASSERT(whichfork == XFS_DATA_FORK);
3020 memcpy(&dip->di_u.di_muuid, &ip->i_df.if_u2.if_uuid,
3034 * xfs_iflush() will write a modified inode's changes out to the
3035 * inode's on disk home. The caller must have the inode lock held
3036 * in at least shared mode and the inode flush semaphore must be
3037 * held as well. The inode lock will still be held upon return from
3038 * the call and the caller is free to unlock it.
3039 * The inode flush lock will be unlocked when the inode reaches the disk.
3040 * The flags indicate how the inode's buffer should be written out.
3047 xfs_inode_log_item_t *iip;
3055 int clcount; /* count of inodes clustered */
3057 enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) };
3060 XFS_STATS_INC(xs_iflush_count);
3062 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
3063 ASSERT(issemalocked(&(ip->i_flock)));
3064 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3065 ip->i_d.di_nextents > ip->i_df.if_ext_max);
3071 * If the inode isn't dirty, then just release the inode
3072 * flush lock and do nothing.
3074 if ((ip->i_update_core == 0) &&
3075 ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
3076 ASSERT((iip != NULL) ?
3077 !(iip->ili_item.li_flags & XFS_LI_IN_AIL) : 1);
3083 * We can't flush the inode until it is unpinned, so
3084	 * wait for it. We know no one new can pin it, because
3085 * we are holding the inode lock shared and you need
3086 * to hold it exclusively to pin the inode.
3088 xfs_iunpin_wait(ip);
3091 * This may have been unpinned because the filesystem is shutting
3092 * down forcibly. If that's the case we must not write this inode
3093 * to disk, because the log record didn't make it to disk!
3095 if (XFS_FORCED_SHUTDOWN(mp)) {
3096 ip->i_update_core = 0;
3098 iip->ili_format.ilf_fields = 0;
3100 return XFS_ERROR(EIO);
3104 * Get the buffer containing the on-disk inode.
3106 error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0, 0);
3113	 * Decide how the buffer will be flushed out. This is done before
3114 * the call to xfs_iflush_int because this field is zeroed by it.
3116 if (iip != NULL && iip->ili_format.ilf_fields != 0) {
3118 * Flush out the inode buffer according to the directions
3119 * of the caller. In the cases where the caller has given
3120		 * us a choice, choose the non-delwri case. This is because
3121 * the inode is in the AIL and we need to get it out soon.
3124 case XFS_IFLUSH_SYNC:
3125 case XFS_IFLUSH_DELWRI_ELSE_SYNC:
3128 case XFS_IFLUSH_ASYNC:
3129 case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
3132 case XFS_IFLUSH_DELWRI:
3142 case XFS_IFLUSH_DELWRI_ELSE_SYNC:
3143 case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
3144 case XFS_IFLUSH_DELWRI:
3147 case XFS_IFLUSH_ASYNC:
3150 case XFS_IFLUSH_SYNC:
3161 * First flush out the inode that xfs_iflush was called with.
3163 error = xfs_iflush_int(ip, bp);
3170 * see if other inodes can be gathered into this write
3173 ip->i_chash->chl_buf = bp;
3175 ch = XFS_CHASH(mp, ip->i_blkno);
3176 s = mutex_spinlock(&ch->ch_lock);
3179 for (iq = ip->i_cnext; iq != ip; iq = iq->i_cnext) {
3181 * Do an un-protected check to see if the inode is dirty and
3182 * is a candidate for flushing. These checks will be repeated
3183 * later after the appropriate locks are acquired.
3186 if ((iq->i_update_core == 0) &&
3188 !(iip->ili_format.ilf_fields & XFS_ILOG_ALL)) &&
3189 xfs_ipincount(iq) == 0) {
3194 * Try to get locks. If any are unavailable,
3195 * then this inode cannot be flushed and is skipped.
3198 /* get inode locks (just i_lock) */
3199 if (xfs_ilock_nowait(iq, XFS_ILOCK_SHARED)) {
3200 /* get inode flush lock */
3201 if (xfs_iflock_nowait(iq)) {
3202 /* check if pinned */
3203 if (xfs_ipincount(iq) == 0) {
3204				/* Arriving here means that
3205				 * this inode can be flushed.
3206				 * First re-check that it's dirty.
3210				if ((iq->i_update_core != 0) ||
3212 (iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
3214 error = xfs_iflush_int(iq, bp);
3218 goto cluster_corrupt_out;
3227 xfs_iunlock(iq, XFS_ILOCK_SHARED);
3230 mutex_spinunlock(&ch->ch_lock, s);
3233 XFS_STATS_INC(xs_icluster_flushcnt);
3234 XFS_STATS_ADD(xs_icluster_flushinode, clcount);
3238 * If the buffer is pinned then push on the log so we won't
3239 * get stuck waiting in the write for too long.
3241	if (XFS_BUF_ISPINNED(bp)) {
3242 xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
3245 if (flags & INT_DELWRI) {
3246 xfs_bdwrite(mp, bp);
3247 } else if (flags & INT_ASYNC) {
3248 xfs_bawrite(mp, bp);
3250 error = xfs_bwrite(mp, bp);
3256 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3257 xfs_iflush_abort(ip);
3259 * Unlocks the flush lock
3261 return XFS_ERROR(EFSCORRUPTED);
3263 cluster_corrupt_out:
3264 /* Corruption detected in the clustering loop. Invalidate the
3265 * inode buffer and shut down the filesystem.
3267 mutex_spinunlock(&ch->ch_lock, s);
3270 * Clean up the buffer. If it was B_DELWRI, just release it --
3271 * brelse can handle it with no problems. If not, shut down the
3272 * filesystem before releasing the buffer.
3274	if ((bufwasdelwri = XFS_BUF_ISDELAYWRITE(bp))) {
3278 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3282 * Just like incore_relse: if we have b_iodone functions,
3283 * mark the buffer as an error and call them. Otherwise
3284 * mark it as stale and brelse.
3286 if (XFS_BUF_IODONE_FUNC(bp)) {
3287 XFS_BUF_CLR_BDSTRAT_FUNC(bp);
3291 XFS_BUF_ERROR(bp,EIO);
3299 xfs_iflush_abort(iq);
3301 * Unlocks the flush lock
3303 return XFS_ERROR(EFSCORRUPTED);
3312 xfs_inode_log_item_t *iip;
3315 #ifdef XFS_TRANS_DEBUG
3320 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
3321 ASSERT(issemalocked(&(ip->i_flock)));
3322 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3323 ip->i_d.di_nextents > ip->i_df.if_ext_max);
3330 * If the inode isn't dirty, then just release the inode
3331 * flush lock and do nothing.
3333 if ((ip->i_update_core == 0) &&
3334 ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
3339 /* set *dip = inode's place in the buffer */
3340 dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_boffset);
3343 * Clear i_update_core before copying out the data.
3344 * This is for coordination with our timestamp updates
3345 * that don't hold the inode lock. They will always
3346 * update the timestamps BEFORE setting i_update_core,
3347 * so if we clear i_update_core after they set it we
3348 * are guaranteed to see their updates to the timestamps.
3349 * I believe that this depends on strongly ordered memory
3350 * semantics, but we have that. We use the SYNCHRONIZE
3351 * macro to make sure that the compiler does not reorder
3352	 * the i_update_core access below the data copy that follows.
3354 ip->i_update_core = 0;
3358 * Make sure to get the latest atime from the Linux inode.
3360 xfs_synchronize_atime(ip);
3362 if (XFS_TEST_ERROR(INT_GET(dip->di_core.di_magic,ARCH_CONVERT) != XFS_DINODE_MAGIC,
3363 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
3364 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3365 "xfs_iflush: Bad inode %Lu magic number 0x%x, ptr 0x%p",
3366 ip->i_ino, (int) INT_GET(dip->di_core.di_magic, ARCH_CONVERT), dip);
3369 if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
3370 mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
3371 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3372 "xfs_iflush: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
3373 ip->i_ino, ip, ip->i_d.di_magic);
3376 if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) {
3378 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3379 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
3380 mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
3381 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3382 "xfs_iflush: Bad regular inode %Lu, ptr 0x%p",
3386 } else if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
3388 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3389 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
3390 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
3391 mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
3392 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3393 "xfs_iflush: Bad directory inode %Lu, ptr 0x%p",
3398 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
3399 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
3400 XFS_RANDOM_IFLUSH_5)) {
3401 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3402 "xfs_iflush: detected corrupt incore inode %Lu, total extents = %d, nblocks = %Ld, ptr 0x%p",
3404 ip->i_d.di_nextents + ip->i_d.di_anextents,
3409 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
3410 mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
3411 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3412 "xfs_iflush: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
3413 ip->i_ino, ip->i_d.di_forkoff, ip);
3417 * bump the flush iteration count, used to detect flushes which
3418 * postdate a log record during recovery.
3421 ip->i_d.di_flushiter++;
3424 * Copy the dirty parts of the inode into the on-disk
3425 * inode. We always copy out the core of the inode,
3426	 * because if the inode is dirty at all the core must be.
3429 xfs_xlate_dinode_core((xfs_caddr_t)&(dip->di_core), &(ip->i_d), -1);
3431 /* Wrap, we never let the log put out DI_MAX_FLUSH */
3432 if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3433 ip->i_d.di_flushiter = 0;
3436 * If this is really an old format inode and the superblock version
3437 * has not been updated to support only new format inodes, then
3438 * convert back to the old inode format. If the superblock version
3439 * has been updated, then make the conversion permanent.
3441 ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1 ||
3442 XFS_SB_VERSION_HASNLINK(&mp->m_sb));
3443 if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
3444 if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) {
3448 ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
3449 INT_SET(dip->di_core.di_onlink, ARCH_CONVERT, ip->i_d.di_nlink);
3452 * The superblock version has already been bumped,
3453		 * so just make the conversion to the new inode format permanent.
3456 ip->i_d.di_version = XFS_DINODE_VERSION_2;
3457 INT_SET(dip->di_core.di_version, ARCH_CONVERT, XFS_DINODE_VERSION_2);
3458 ip->i_d.di_onlink = 0;
3459 dip->di_core.di_onlink = 0;
3460 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
3461 memset(&(dip->di_core.di_pad[0]), 0,
3462 sizeof(dip->di_core.di_pad));
3463 ASSERT(ip->i_d.di_projid == 0);
3467 if (xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp) == EFSCORRUPTED) {
3471 if (XFS_IFORK_Q(ip)) {
3473 * The only error from xfs_iflush_fork is on the data fork.
3475 (void) xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
3477 xfs_inobp_check(mp, bp);
3480 * We've recorded everything logged in the inode, so we'd
3481 * like to clear the ilf_fields bits so we don't log and
3482 * flush things unnecessarily. However, we can't stop
3483 * logging all this information until the data we've copied
3484 * into the disk buffer is written to disk. If we did we might
3485 * overwrite the copy of the inode in the log with all the
3486 * data after re-logging only part of it, and in the face of
3487 * a crash we wouldn't have all the data we need to recover.
3489 * What we do is move the bits to the ili_last_fields field.
3490 * When logging the inode, these bits are moved back to the
3491 * ilf_fields field. In the xfs_iflush_done() routine we
3492 * clear ili_last_fields, since we know that the information
3493 * those bits represent is permanently on disk. As long as
3494 * the flush completes before the inode is logged again, then
3495 * both ilf_fields and ili_last_fields will be cleared.
3497 * We can play with the ilf_fields bits here, because the inode
3498 * lock must be held exclusively in order to set bits there
3499 * and the flush lock protects the ili_last_fields bits.
3500 * Set ili_logged so the flush done
3501 * routine can tell whether or not to look in the AIL.
3502 * Also, store the current LSN of the inode so that we can tell
3503 * whether the item has moved in the AIL from xfs_iflush_done().
3504 * In order to read the lsn we need the AIL lock, because
3505 * it is a 64 bit value that cannot be read atomically.
3507 if (iip != NULL && iip->ili_format.ilf_fields != 0) {
3508 iip->ili_last_fields = iip->ili_format.ilf_fields;
3509 iip->ili_format.ilf_fields = 0;
3510 iip->ili_logged = 1;
3512 ASSERT(sizeof(xfs_lsn_t) == 8); /* don't lock if it shrinks */
3514 iip->ili_flush_lsn = iip->ili_item.li_lsn;
3518 * Attach the function xfs_iflush_done to the inode's
3519 * buffer. This will remove the inode from the AIL
3520 * and unlock the inode's flush lock when the inode is
3521 * completely written to disk.
3523 xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t*,xfs_log_item_t*))
3524 xfs_iflush_done, (xfs_log_item_t *)iip);
3526 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
3527 ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL);
3530 * We're flushing an inode which is not in the AIL and has
3531 * not been logged but has i_update_core set. For this
3532 * case we can use a B_DELWRI flush and immediately drop
3533 * the inode flush lock because we can avoid the whole
3534 * AIL state thing. It's OK to drop the flush lock now,
3535 * because we've already locked the buffer and to do anything
3536 * you really need both.
3539 ASSERT(iip->ili_logged == 0);
3540 ASSERT(iip->ili_last_fields == 0);
3541 ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0);
3549 return XFS_ERROR(EFSCORRUPTED);
3554 * Flush all inactive inodes in mp.
3564 XFS_MOUNT_ILOCK(mp);
3570 /* Make sure we skip markers inserted by sync */
3571 if (ip->i_mount == NULL) {
3576 vp = XFS_ITOV_NULL(ip);
3578 XFS_MOUNT_IUNLOCK(mp);
3579 xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC);
3583 ASSERT(vn_count(vp) == 0);
3586 } while (ip != mp->m_inodes);
3588 XFS_MOUNT_IUNLOCK(mp);
3592 * xfs_iaccess: check accessibility of inode for mode.
3601 mode_t orgmode = mode;
3602 struct inode *inode = vn_to_inode(XFS_ITOV(ip));
3604 if (mode & S_IWUSR) {
3605 umode_t imode = inode->i_mode;
3607 if (IS_RDONLY(inode) &&
3608 (S_ISREG(imode) || S_ISDIR(imode) || S_ISLNK(imode)))
3609 return XFS_ERROR(EROFS);
3611 if (IS_IMMUTABLE(inode))
3612 return XFS_ERROR(EACCES);
3616	 * If there's an Access Control List it's used instead of the mode bits.
3619 if ((error = _ACL_XFS_IACCESS(ip, mode, cr)) != -1)
3620 return error ? XFS_ERROR(error) : 0;
3622 if (current_fsuid(cr) != ip->i_d.di_uid) {
3624 if (!in_group_p((gid_t)ip->i_d.di_gid))
3629 * If the DACs are ok we don't need any capability check.
3631 if ((ip->i_d.di_mode & mode) == mode)
3634 * Read/write DACs are always overridable.
3635 * Executable DACs are overridable if at least one exec bit is set.
3637 if (!(orgmode & S_IXUSR) ||
3638 (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode))
3639 if (capable_cred(cr, CAP_DAC_OVERRIDE))
3642 if ((orgmode == S_IRUSR) ||
3643 (S_ISDIR(inode->i_mode) && (!(orgmode & S_IWUSR)))) {
3644 if (capable_cred(cr, CAP_DAC_READ_SEARCH))
3647 cmn_err(CE_NOTE, "Ick: mode=%o, orgmode=%o", mode, orgmode);
3649 return XFS_ERROR(EACCES);
3651 return XFS_ERROR(EACCES);
3655 * xfs_iroundup: round up argument to next power of two
3664 if ((v & (v - 1)) == 0)
3666 ASSERT((v & 0x80000000) == 0);
3667 if ((v & (v + 1)) == 0)
3669 for (i = 0, m = 1; i < 31; i++, m <<= 1) {
3673 if ((v & (v + 1)) == 0)
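/*
 * Example values (illustrative, not part of this file):
 *
 *	xfs_iroundup(1)  == 1	-- already a power of two
 *	xfs_iroundup(5)  == 8
 *	xfs_iroundup(48) == 64
 */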
3680 #ifdef XFS_ILOCK_TRACE
3681 ktrace_t *xfs_ilock_trace_buf;
3684 xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra)
3686 ktrace_enter(ip->i_lock_trace,
3688 (void *)(unsigned long)lock, /* 1 = LOCK, 3=UNLOCK, etc */
3689 (void *)(unsigned long)lockflags, /* XFS_ILOCK_EXCL etc */
3690 (void *)ra, /* caller of ilock */
3691 (void *)(unsigned long)current_cpu(),
3692 (void *)(unsigned long)current_pid(),
3693 NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
3698 * Return a pointer to the extent record at file index idx.
3702 xfs_ifork_t *ifp, /* inode fork pointer */
3703 xfs_extnum_t idx) /* index of target extent */
3706 if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
3707 return ifp->if_u1.if_ext_irec->er_extbuf;
3708 } else if (ifp->if_flags & XFS_IFEXTIREC) {
3709 xfs_ext_irec_t *erp; /* irec pointer */
3710 int erp_idx = 0; /* irec index */
3711 xfs_extnum_t page_idx = idx; /* ext index in target list */
3713 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
3714 return &erp->er_extbuf[page_idx];
3715 } else if (ifp->if_bytes) {
3716 return &ifp->if_u1.if_extents[idx];
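/*
 * Usage sketch (illustrative, not part of this file):
 *
 *	xfs_bmbt_rec_t	*ep = xfs_iext_get_ext(ifp, idx);
 *	xfs_bmbt_irec_t	irec;
 *
 *	xfs_bmbt_get_all(ep, &irec);
 *
 * Callers never need to know whether the record lives in the inline
 * buffer, the direct list, or an indirection-array page.
 */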
3723 * Insert new item(s) into the extent records for incore inode
3724 * fork 'ifp'. 'count' new items are inserted at index 'idx'.
3728 xfs_ifork_t *ifp, /* inode fork pointer */
3729 xfs_extnum_t idx, /* starting index of new items */
3730 xfs_extnum_t count, /* number of inserted items */
3731 xfs_bmbt_irec_t *new) /* items to insert */
3733 xfs_bmbt_rec_t *ep; /* extent record pointer */
3734 xfs_extnum_t i; /* extent record index */
3736 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3737 xfs_iext_add(ifp, idx, count);
3738 for (i = idx; i < idx + count; i++, new++) {
3739 ep = xfs_iext_get_ext(ifp, i);
3740 xfs_bmbt_set_all(ep, new);
3745 * This is called when the amount of space required for incore file
3746 * extents needs to be increased. The ext_diff parameter stores the
3747 * number of new extents being added and the idx parameter contains
3748 * the extent index where the new extents will be added. If the new
3749 * extents are being appended, then we just need to (re)allocate and
3750 * initialize the space. Otherwise, if the new extents are being
3751 * inserted into the middle of the existing entries, a bit more work
3752 * is required to make room for the new extents to be inserted. The
3753 * caller is responsible for filling in the new extent entries upon return.
3758 xfs_ifork_t *ifp, /* inode fork pointer */
3759 xfs_extnum_t idx, /* index to begin adding exts */
3760 int ext_diff) /* number of extents to add */
3762 int byte_diff; /* new bytes being added */
3763 int new_size; /* size of extents after adding */
3764 xfs_extnum_t nextents; /* number of extents in file */
3766 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3767 ASSERT((idx >= 0) && (idx <= nextents));
3768 byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
3769 new_size = ifp->if_bytes + byte_diff;
3771 * If the new number of extents (nextents + ext_diff)
3772	 * fits inside the inode, then continue to use the inline extent list.
3775 if (nextents + ext_diff <= XFS_INLINE_EXTS) {
3776 if (idx < nextents) {
3777 memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff],
3778 &ifp->if_u2.if_inline_ext[idx],
3779 (nextents - idx) * sizeof(xfs_bmbt_rec_t));
3780 memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff);
3782 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
3783 ifp->if_real_bytes = 0;
3784 ifp->if_lastex = nextents + ext_diff;
3787 * Otherwise use a linear (direct) extent list.
3788 * If the extents are currently inside the inode,
3789 * xfs_iext_realloc_direct will switch us from
3790 * inline to direct extent allocation mode.
3792 else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
3793 xfs_iext_realloc_direct(ifp, new_size);
3794 if (idx < nextents) {
3795 memmove(&ifp->if_u1.if_extents[idx + ext_diff],
3796 &ifp->if_u1.if_extents[idx],
3797 (nextents - idx) * sizeof(xfs_bmbt_rec_t));
3798 memset(&ifp->if_u1.if_extents[idx], 0, byte_diff);
3801 /* Indirection array */
3803 xfs_ext_irec_t *erp;
3807 ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS);
3808 if (ifp->if_flags & XFS_IFEXTIREC) {
3809 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1);
3811 xfs_iext_irec_init(ifp);
3812 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3813 erp = ifp->if_u1.if_ext_irec;
3815 /* Extents fit in target extent page */
3816 if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) {
3817 if (page_idx < erp->er_extcount) {
3818 memmove(&erp->er_extbuf[page_idx + ext_diff],
3819 &erp->er_extbuf[page_idx],
3820 (erp->er_extcount - page_idx) *
3821 sizeof(xfs_bmbt_rec_t));
3822 memset(&erp->er_extbuf[page_idx], 0, byte_diff);
3824 erp->er_extcount += ext_diff;
3825 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3827 /* Insert a new extent page */
3829 xfs_iext_add_indirect_multi(ifp,
3830 erp_idx, page_idx, ext_diff);
3833 * If extent(s) are being appended to the last page in
3834 * the indirection array and the new extent(s) don't fit
3835 * in the page, then erp is NULL and erp_idx is set to
3836 * the next index needed in the indirection array.
3839 int count = ext_diff;
3842 erp = xfs_iext_irec_new(ifp, erp_idx);
3843 erp->er_extcount = count;
3844 count -= MIN(count, (int)XFS_LINEAR_EXTS);
3851 ifp->if_bytes = new_size;
3855 * This is called when incore extents are being added to the indirection
3856 * array and the new extents do not fit in the target extent list. The
3857 * erp_idx parameter contains the irec index for the target extent list
3858 * in the indirection array, and the idx parameter contains the extent
3859 * index within the list. The number of extents being added is stored
3860 * in the count parameter.
3862 * |-------| |-------|
3863 * |       |   |       |    idx   - number of extents before idx
3864 * |  idx  |   | count |
3865 * |       |   |       |    count - number of extents being inserted at idx
3866 * |-------| |-------|
3867 * | count | | nex2 | nex2 - number of extents after idx + count
3868 * |-------| |-------|
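 *
 * Worked example (hypothetical numbers, not part of this file): with
 * idx = 3, count = 5 and er_extcount = 7, nex2 = 7 - 3 = 4 trailing
 * extents are saved aside, the 5 new slots are appended (spilling into
 * freshly allocated irec pages if the target page fills up), and the
 * 4 saved extents are copied back in after them.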
3871 xfs_iext_add_indirect_multi(
3872 xfs_ifork_t *ifp, /* inode fork pointer */
3873 int erp_idx, /* target extent irec index */
3874 xfs_extnum_t idx, /* index within target list */
3875 int count) /* new extents being added */
3877 int byte_diff; /* new bytes being added */
3878 xfs_ext_irec_t *erp; /* pointer to irec entry */
3879 xfs_extnum_t ext_diff; /* number of extents to add */
3880 xfs_extnum_t ext_cnt; /* new extents still needed */
3881 xfs_extnum_t nex2; /* extents after idx + count */
3882 xfs_bmbt_rec_t *nex2_ep = NULL; /* temp list for nex2 extents */
3883 int nlists; /* number of irec's (lists) */
3885 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3886 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3887 nex2 = erp->er_extcount - idx;
3888 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3891 * Save second part of target extent list
3892	 * (all extents past idx) */
3894 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3895 nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_SLEEP);
3896 memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
3897 erp->er_extcount -= nex2;
3898 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
3899 memset(&erp->er_extbuf[idx], 0, byte_diff);
3903 * Add the new extents to the end of the target
3904 * list, then allocate new irec record(s) and
3905 * extent buffer(s) as needed to store the rest
3906 * of the new extents.
3909 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount);
3911 erp->er_extcount += ext_diff;
3912 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3913 ext_cnt -= ext_diff;
3917 erp = xfs_iext_irec_new(ifp, erp_idx);
3918 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
3919 erp->er_extcount = ext_diff;
3920 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3921 ext_cnt -= ext_diff;
3924 /* Add nex2 extents back to indirection array */
3926 xfs_extnum_t ext_avail;
3929 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3930 ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
3933 * If nex2 extents fit in the current page, append
3934 * nex2_ep after the new extents.
3936 if (nex2 <= ext_avail) {
3937 i = erp->er_extcount;
3940		 * Otherwise, check if space is available in the next page.
3943 else if ((erp_idx < nlists - 1) &&
3944 (nex2 <= (ext_avail = XFS_LINEAR_EXTS -
3945 ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) {
3948 /* Create a hole for nex2 extents */
3949 memmove(&erp->er_extbuf[nex2], erp->er_extbuf,
3950 erp->er_extcount * sizeof(xfs_bmbt_rec_t));
3953		 * Final choice, create a new extent page for the nex2 extents.
3958 erp = xfs_iext_irec_new(ifp, erp_idx);
3960 memmove(&erp->er_extbuf[i], nex2_ep, byte_diff);
3961 kmem_free(nex2_ep, byte_diff);
3962 erp->er_extcount += nex2;
3963 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2);
3968 * This is called when the amount of space required for incore file
3969 * extents needs to be decreased. The ext_diff parameter stores the
3970 * number of extents to be removed and the idx parameter contains
3971 * the extent index where the extents will be removed from.
3973 * If the amount of space needed has decreased below the linear
3974 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous
3975 * extent array. Otherwise, use kmem_realloc() to adjust the
3976 * size to what is needed.
3980 xfs_ifork_t *ifp, /* inode fork pointer */
3981 xfs_extnum_t idx, /* index to begin removing exts */
3982 int ext_diff) /* number of extents to remove */
3984 xfs_extnum_t nextents; /* number of extents in file */
3985 int new_size; /* size of extents after removal */
3987 ASSERT(ext_diff > 0);
3988 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3989 new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
3991 if (new_size == 0) {
3992 xfs_iext_destroy(ifp);
3993 } else if (ifp->if_flags & XFS_IFEXTIREC) {
3994 xfs_iext_remove_indirect(ifp, idx, ext_diff);
3995 } else if (ifp->if_real_bytes) {
3996 xfs_iext_remove_direct(ifp, idx, ext_diff);
3998 xfs_iext_remove_inline(ifp, idx, ext_diff);
4000 ifp->if_bytes = new_size;
4004 * This removes ext_diff extents from the inline buffer, beginning
4005 * at extent index idx.
4008 xfs_iext_remove_inline(
4009 xfs_ifork_t *ifp, /* inode fork pointer */
4010 xfs_extnum_t idx, /* index to begin removing exts */
4011 int ext_diff) /* number of extents to remove */
4013 int nextents; /* number of extents in file */
4015 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
4016 ASSERT(idx < XFS_INLINE_EXTS);
4017 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4018 ASSERT(((nextents - ext_diff) > 0) &&
4019 (nextents - ext_diff) < XFS_INLINE_EXTS);
4021 if (idx + ext_diff < nextents) {
4022 memmove(&ifp->if_u2.if_inline_ext[idx],
4023 &ifp->if_u2.if_inline_ext[idx + ext_diff],
4024 (nextents - (idx + ext_diff)) *
4025 sizeof(xfs_bmbt_rec_t));
4026 memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff],
4027 0, ext_diff * sizeof(xfs_bmbt_rec_t));
4029 memset(&ifp->if_u2.if_inline_ext[idx], 0,
4030 ext_diff * sizeof(xfs_bmbt_rec_t));
4035 * This removes ext_diff extents from a linear (direct) extent list,
4036 * beginning at extent index idx. If the extents are being removed
4037 * from the end of the list (i.e. truncate) then we just need to re-
4038 * allocate the list to remove the extra space. Otherwise, if the
4039 * extents are being removed from the middle of the existing extent
4040 * entries, then we first need to move the extent records beginning
4041 * at idx + ext_diff up in the list to overwrite the records being
4042 * removed, then remove the extra space via kmem_realloc.
4045 xfs_iext_remove_direct(
4046 xfs_ifork_t *ifp, /* inode fork pointer */
4047 xfs_extnum_t idx, /* index to begin removing exts */
4048 int ext_diff) /* number of extents to remove */
4050 xfs_extnum_t nextents; /* number of extents in file */
4051 int new_size; /* size of extents after removal */
4053 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
4054 new_size = ifp->if_bytes -
4055 (ext_diff * sizeof(xfs_bmbt_rec_t));
4056 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4058 if (new_size == 0) {
4059 xfs_iext_destroy(ifp);
4062 /* Move extents up in the list (if needed) */
4063 if (idx + ext_diff < nextents) {
4064 memmove(&ifp->if_u1.if_extents[idx],
4065 &ifp->if_u1.if_extents[idx + ext_diff],
4066 (nextents - (idx + ext_diff)) *
4067 sizeof(xfs_bmbt_rec_t));
4069 memset(&ifp->if_u1.if_extents[nextents - ext_diff],
4070 0, ext_diff * sizeof(xfs_bmbt_rec_t));
4072 * Reallocate the direct extent list. If the extents
4073 * will fit inside the inode then xfs_iext_realloc_direct
4074 * will switch from direct to inline extent allocation
4077 xfs_iext_realloc_direct(ifp, new_size);
4078 ifp->if_bytes = new_size;
4082 * This is called when incore extents are being removed from the
4083 * indirection array and the extents being removed span multiple extent
4084 * buffers. The idx parameter contains the file extent index where we
4085 * want to begin removing extents, and the count parameter contains
4086 * how many extents need to be removed.
4088 * |-------| |-------|
4089 * | nex1 | | | nex1 - number of extents before idx
4090 * |-------| | count |
4091 * | | | | count - number of extents being removed at idx
4092 * | count | |-------|
4093 * | | | nex2 | nex2 - number of extents after idx + count
4094 * |-------| |-------|
4097 xfs_iext_remove_indirect(
4098 xfs_ifork_t *ifp, /* inode fork pointer */
4099 xfs_extnum_t idx, /* index to begin removing extents */
4100 int count) /* number of extents to remove */
4102 xfs_ext_irec_t *erp; /* indirection array pointer */
4103 int erp_idx = 0; /* indirection array index */
4104 xfs_extnum_t ext_cnt; /* extents left to remove */
4105 xfs_extnum_t ext_diff; /* extents to remove in current list */
4106 xfs_extnum_t nex1; /* number of extents before idx */
4107 xfs_extnum_t nex2; /* extents after idx + count */
4108 int nlists; /* entries in indirection array */
4109 int page_idx = idx; /* index in target extent list */
4111 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4112 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
4113 ASSERT(erp != NULL);
4114 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4118 nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0);
4119 ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1));
4121 * Check for deletion of entire list;
4122 * xfs_iext_irec_remove() updates extent offsets.
4124 if (ext_diff == erp->er_extcount) {
4125 xfs_iext_irec_remove(ifp, erp_idx);
4126 ext_cnt -= ext_diff;
4129 ASSERT(erp_idx < ifp->if_real_bytes /
4131 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4138 /* Move extents up (if needed) */
4140 memmove(&erp->er_extbuf[nex1],
4141 &erp->er_extbuf[nex1 + ext_diff],
4142 nex2 * sizeof(xfs_bmbt_rec_t));
4144 /* Zero out rest of page */
4145 memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ -
4146 ((nex1 + nex2) * sizeof(xfs_bmbt_rec_t))));
4147 /* Update remaining counters */
4148 erp->er_extcount -= ext_diff;
4149 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff);
4150 ext_cnt -= ext_diff;
4155 ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t);
4156 xfs_iext_irec_compact(ifp);
4160 * Create, destroy, or resize a linear (direct) block of extents.
4163 xfs_iext_realloc_direct(
4164 xfs_ifork_t *ifp, /* inode fork pointer */
4165 int new_size) /* new size of extents */
4167 int rnew_size; /* real new size of extents */
4169 rnew_size = new_size;
4171 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
4172 ((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
4173 (new_size != ifp->if_real_bytes)));
4175 /* Free extent records */
4176 if (new_size == 0) {
4177 xfs_iext_destroy(ifp);
4179 /* Resize direct extent list and zero any new bytes */
4180 else if (ifp->if_real_bytes) {
4181 /* Check if extents will fit inside the inode */
4182 if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
4183 xfs_iext_direct_to_inline(ifp, new_size /
4184 (uint)sizeof(xfs_bmbt_rec_t));
4185 ifp->if_bytes = new_size;
4188		if (!is_power_of_2(new_size)) {
4189 rnew_size = xfs_iroundup(new_size);
4191 if (rnew_size != ifp->if_real_bytes) {
4192 ifp->if_u1.if_extents = (xfs_bmbt_rec_t *)
4193 kmem_realloc(ifp->if_u1.if_extents,
4198 if (rnew_size > ifp->if_real_bytes) {
4199 memset(&ifp->if_u1.if_extents[ifp->if_bytes /
4200 (uint)sizeof(xfs_bmbt_rec_t)], 0,
4201 rnew_size - ifp->if_real_bytes);
4205 * Switch from the inline extent buffer to a direct
4206 * extent list. Be sure to include the inline extent
4207 * bytes in new_size.
4210 new_size += ifp->if_bytes;
4211 if (!is_power_of_2(new_size)) {
4212 rnew_size = xfs_iroundup(new_size);
4214 xfs_iext_inline_to_direct(ifp, rnew_size);
4216 ifp->if_real_bytes = rnew_size;
4217 ifp->if_bytes = new_size;
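/*
 * Growth example (hypothetical sizes, assuming 16-byte extent records;
 * not part of this file): asking for 3 extents, i.e.
 * new_size = 3 * 16 = 48 bytes, allocates
 * rnew_size = xfs_iroundup(48) = 64 bytes, so the next append reuses
 * the slack instead of reallocating; if_bytes = 48 still tracks only
 * the valid portion.
 */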
4221 * Switch from linear (direct) extent records to inline buffer.
4224 xfs_iext_direct_to_inline(
4225 xfs_ifork_t *ifp, /* inode fork pointer */
4226 xfs_extnum_t nextents) /* number of extents in file */
4228 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
4229 ASSERT(nextents <= XFS_INLINE_EXTS);
4231 * The inline buffer was zeroed when we switched
4232 * from inline to direct extent allocation mode,
4233 * so we don't need to clear it here.
4235 memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
4236 nextents * sizeof(xfs_bmbt_rec_t));
4237 kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes);
4238 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
4239 ifp->if_real_bytes = 0;
4243 * Switch from inline buffer to linear (direct) extent records.
4244 * new_size should already be rounded up to the next power of 2
4245 * by the caller (when appropriate), so use new_size as it is.
4246 * However, since new_size may be rounded up, we can't update
4247 * if_bytes here. It is the caller's responsibility to update
4248 * if_bytes upon return.
4251 xfs_iext_inline_to_direct(
4252 xfs_ifork_t *ifp, /* inode fork pointer */
4253 int new_size) /* number of extents in file */
4255 ifp->if_u1.if_extents = (xfs_bmbt_rec_t *)
4256 kmem_alloc(new_size, KM_SLEEP);
4257 memset(ifp->if_u1.if_extents, 0, new_size);
4258 if (ifp->if_bytes) {
4259 memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
4261 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
4262 sizeof(xfs_bmbt_rec_t));
4264 ifp->if_real_bytes = new_size;
/*
 * Resize an extent indirection array to new_size bytes.
 */
void
xfs_iext_realloc_indirect(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		new_size)	/* new indirection array size */
{
	int		nlists;		/* number of irec's (ex lists) */
	int		size;		/* current indirection array size */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	size = nlists * sizeof(xfs_ext_irec_t);
	ASSERT(ifp->if_real_bytes);
	ASSERT((new_size >= 0) && (new_size != size));
	if (new_size == 0) {
		xfs_iext_destroy(ifp);
	} else {
		ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
			kmem_realloc(ifp->if_u1.if_ext_irec,
				new_size, size, KM_SLEEP);
	}
}
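/*
 * Note that new_size here is the byte size of the indirection array
 * itself, while if_real_bytes tracks the extent buffers it points to
 * (nlists * XFS_IEXT_BUFSZ).  For example, xfs_iext_irec_new() grows a
 * two-entry array by passing new_size = 3 * sizeof(xfs_ext_irec_t);
 * the old size is derived from the two existing extent pages.
 */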
/*
 * Switch from indirection array to linear (direct) extent allocations.
 */
void
xfs_iext_indirect_to_direct(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_bmbt_rec_t	*ep;		/* extent record pointer */
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		size;		/* size of file extents */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT(nextents <= XFS_LINEAR_EXTS);
	size = nextents * sizeof(xfs_bmbt_rec_t);

	xfs_iext_irec_compact_full(ifp);
	ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);

	ep = ifp->if_u1.if_ext_irec->er_extbuf;
	kmem_free(ifp->if_u1.if_ext_irec, sizeof(xfs_ext_irec_t));
	ifp->if_flags &= ~XFS_IFEXTIREC;
	ifp->if_u1.if_extents = ep;
	ifp->if_bytes = size;
	if (nextents < XFS_LINEAR_EXTS) {
		xfs_iext_realloc_direct(ifp, size);
	}
}
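/*
 * For example (a sketch, assuming 4K extent pages holding 256 records
 * each): 100 extents spread across three partly-filled pages are first
 * packed into page zero by xfs_iext_irec_compact_full(), the single
 * remaining irec entry is freed, and the page itself becomes the
 * direct list.  The trailing xfs_iext_realloc_direct() call then
 * shrinks it from 4096 bytes to 2048, the next power of 2 above
 * 100 * sizeof(xfs_bmbt_rec_t).
 */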
/*
 * Free incore file extents.
 */
void
xfs_iext_destroy(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	if (ifp->if_flags & XFS_IFEXTIREC) {
		int	erp_idx;
		int	nlists;

		nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
		for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) {
			xfs_iext_irec_remove(ifp, erp_idx);
		}
		ifp->if_flags &= ~XFS_IFEXTIREC;
	} else if (ifp->if_real_bytes) {
		kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes);
	} else if (ifp->if_bytes) {
		memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
			sizeof(xfs_bmbt_rec_t));
	}
	ifp->if_u1.if_extents = NULL;
	ifp->if_real_bytes = 0;
	ifp->if_bytes = 0;
}
/*
 * Return a pointer to the extent record for file system block bno.
 */
xfs_bmbt_rec_t *			/* pointer to found extent record */
xfs_iext_bno_to_ext(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fileoff_t	bno,		/* block number to search for */
	xfs_extnum_t	*idxp)		/* index of target extent */
{
	xfs_bmbt_rec_t	*base;		/* pointer to first extent */
	xfs_filblks_t	blockcount = 0;	/* number of blocks in extent */
	xfs_bmbt_rec_t	*ep = NULL;	/* pointer to target extent */
	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
	int		high;		/* upper boundary in search */
	xfs_extnum_t	idx = 0;	/* index of target extent */
	int		low;		/* lower boundary in search */
	xfs_extnum_t	nextents;	/* number of file extents */
	xfs_fileoff_t	startoff = 0;	/* start offset of extent */

	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	if (nextents == 0) {
		*idxp = 0;
		return NULL;
	}
	low = 0;
	if (ifp->if_flags & XFS_IFEXTIREC) {
		/* Find target extent list */
		int	erp_idx = 0;
		erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
		base = erp->er_extbuf;
		high = erp->er_extcount - 1;
	} else {
		base = ifp->if_u1.if_extents;
		high = nextents - 1;
	}
	/* Binary search extent records */
	while (low <= high) {
		idx = (low + high) >> 1;
		ep = base + idx;
		startoff = xfs_bmbt_get_startoff(ep);
		blockcount = xfs_bmbt_get_blockcount(ep);
		if (bno < startoff) {
			high = idx - 1;
		} else if (bno >= startoff + blockcount) {
			low = idx + 1;
		} else {
			/* Convert back to file-based extent index */
			if (ifp->if_flags & XFS_IFEXTIREC) {
				idx += erp->er_extoff;
			}
			*idxp = idx;
			return ep;
		}
	}
	/* Convert back to file-based extent index */
	if (ifp->if_flags & XFS_IFEXTIREC) {
		idx += erp->er_extoff;
	}
	if (bno >= startoff + blockcount) {
		if (++idx == nextents) {
			ep = NULL;
		} else {
			ep = xfs_iext_get_ext(ifp, idx);
		}
	}
	*idxp = idx;
	return ep;
}
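/*
 * Note the fall-through semantics above: if bno lands in a hole, the
 * binary search fails and the function returns the first extent after
 * the hole, with *idxp set to its index.  For example, given extents
 * covering blocks [0,9] and [20,29]:
 *
 *	xfs_extnum_t	idx;
 *	xfs_bmbt_rec_t	*ep;
 *
 *	ep = xfs_iext_bno_to_ext(ifp, 15, &idx);   returns [20,29], idx 1
 *	ep = xfs_iext_bno_to_ext(ifp, 35, &idx);   returns NULL, idx 2
 */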
/*
 * Return a pointer to the indirection array entry containing the
 * extent record for filesystem block bno. Store the index of the
 * target irec in *erp_idxp.
 */
xfs_ext_irec_t *			/* pointer to found extent record */
xfs_iext_bno_to_irec(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fileoff_t	bno,		/* block number to search for */
	int		*erp_idxp)	/* irec index of target ext list */
{
	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
	xfs_ext_irec_t	*erp_next;	/* next indirection array entry */
	int		erp_idx;	/* indirection array index */
	int		nlists;		/* number of extent irec's (lists) */
	int		high;		/* binary search upper limit */
	int		low;		/* binary search lower limit */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp_idx = 0;
	low = 0;
	high = nlists - 1;
	while (low <= high) {
		erp_idx = (low + high) >> 1;
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
		if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
			high = erp_idx - 1;
		} else if (erp_next && bno >=
			   xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
			low = erp_idx + 1;
		} else {
			break;
		}
	}
	*erp_idxp = erp_idx;
	return erp;
}
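/*
 * The search above only compares the startoff of each page's first
 * extent.  Given two pages whose first extents start at blocks 0 and
 * 1000, a lookup of bno 1500 stops at irec [1]; whether any extent in
 * that page actually covers block 1500 is left to the caller's
 * per-record search (see xfs_iext_bno_to_ext() above).
 */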
/*
 * Return a pointer to the indirection array entry containing the
 * extent record at file extent index *idxp. Store the index of the
 * target irec in *erp_idxp and store the page index of the target
 * extent record in *idxp.
 */
xfs_ext_irec_t *			/* pointer to found irec entry */
xfs_iext_idx_to_irec(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	*idxp,		/* extent index (file -> page) */
	int		*erp_idxp,	/* pointer to target irec */
	int		realloc)	/* new bytes were just added */
{
	xfs_ext_irec_t	*prev;		/* pointer to previous irec */
	xfs_ext_irec_t	*erp = NULL;	/* pointer to current irec */
	int		erp_idx;	/* indirection array index */
	int		nlists;		/* number of irec's (ex lists) */
	int		high;		/* binary search upper limit */
	int		low;		/* binary search lower limit */
	xfs_extnum_t	page_idx = *idxp; /* extent index in target list */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	ASSERT(page_idx >= 0 && page_idx <=
		ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp_idx = 0;
	low = 0;
	high = nlists - 1;

	/* Binary search extent irec's */
	while (low <= high) {
		erp_idx = (low + high) >> 1;
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		prev = erp_idx > 0 ? erp - 1 : NULL;
		if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
		    realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) {
			high = erp_idx - 1;
		} else if (page_idx > erp->er_extoff + erp->er_extcount ||
			   (page_idx == erp->er_extoff + erp->er_extcount &&
			    !realloc)) {
			low = erp_idx + 1;
		} else if (page_idx == erp->er_extoff + erp->er_extcount &&
			   erp->er_extcount == XFS_LINEAR_EXTS) {
			ASSERT(realloc);
			page_idx = 0;
			erp_idx++;
			erp = erp_idx < nlists ? erp + 1 : NULL;
			break;
		} else {
			page_idx -= erp->er_extoff;
			break;
		}
	}
	*idxp = page_idx;
	*erp_idxp = erp_idx;
	return erp;
}
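/*
 * A worked mapping (a sketch, assuming 256-record pages): with three
 * irecs of er_extoff/er_extcount {0/256, 256/256, 512/88}, a call with
 * *idxp == 300 and realloc == 0 lands in irec [1] and rewrites *idxp
 * to the page index 300 - 256 == 44.
 */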
/*
 * Allocate and initialize an indirection array once the space needed
 * for incore extents increases above XFS_IEXT_BUFSZ.
 */
void
xfs_iext_irec_init(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	xfs_extnum_t	nextents;	/* number of extents in file */

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT(nextents <= XFS_LINEAR_EXTS);

	erp = (xfs_ext_irec_t *)
		kmem_alloc(sizeof(xfs_ext_irec_t), KM_SLEEP);

	if (nextents == 0) {
		ifp->if_u1.if_extents = (xfs_bmbt_rec_t *)
			kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP);
	} else if (!ifp->if_real_bytes) {
		xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
	} else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
		xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
	}
	erp->er_extbuf = ifp->if_u1.if_extents;
	erp->er_extcount = nextents;
	erp->er_extoff = 0;

	ifp->if_flags |= XFS_IFEXTIREC;
	ifp->if_real_bytes = XFS_IEXT_BUFSZ;
	ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
	ifp->if_u1.if_ext_irec = erp;
}
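/*
 * On return the fork is in its simplest indirect form: a one-entry
 * indirection array whose er_extbuf is the old direct list (grown to a
 * full XFS_IEXT_BUFSZ page), with er_extoff 0 and er_extcount equal to
 * the extents already present.
 */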
/*
 * Allocate and initialize a new entry in the indirection array.
 */
xfs_ext_irec_t *			/* pointer to new irec entry */
xfs_iext_irec_new(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx)	/* index for new irec */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;

	/* Resize indirection array */
	xfs_iext_realloc_indirect(ifp, ++nlists *
				  sizeof(xfs_ext_irec_t));
	/*
	 * Move records down in the array so the
	 * new page can use erp_idx.
	 */
	erp = ifp->if_u1.if_ext_irec;
	for (i = nlists - 1; i > erp_idx; i--) {
		memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
	}
	ASSERT(i == erp_idx);

	/* Initialize new extent record */
	erp = ifp->if_u1.if_ext_irec;
	erp[erp_idx].er_extbuf = (xfs_bmbt_rec_t *)
		kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP);
	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
	memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
	erp[erp_idx].er_extcount = 0;
	erp[erp_idx].er_extoff = erp_idx > 0 ?
		erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
	return &erp[erp_idx];
}
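/*
 * For example, inserting a new page at erp_idx == 1 in a three-entry
 * array shifts the old entries [1] and [2] down to [2] and [3], then
 * gives the new entry er_extoff = erp[0].er_extoff + erp[0].er_extcount,
 * so file-based extent indexing stays contiguous across pages.
 */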
/*
 * Remove a record from the indirection array.
 */
void
xfs_iext_irec_remove(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx)	/* irec index to remove */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp = &ifp->if_u1.if_ext_irec[erp_idx];
	if (erp->er_extbuf) {
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
			-erp->er_extcount);
		kmem_free(erp->er_extbuf, XFS_IEXT_BUFSZ);
	}
	/* Compact extent records */
	erp = ifp->if_u1.if_ext_irec;
	for (i = erp_idx; i < nlists - 1; i++) {
		memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
	}
	/*
	 * Manually free the last extent record from the indirection
	 * array. A call to xfs_iext_realloc_indirect() with a size
	 * of zero would result in a call to xfs_iext_destroy() which
	 * would in turn call this function again, creating a nasty
	 * infinite loop.
	 */
	if (--nlists) {
		xfs_iext_realloc_indirect(ifp,
			nlists * sizeof(xfs_ext_irec_t));
	} else {
		kmem_free(ifp->if_u1.if_ext_irec,
			sizeof(xfs_ext_irec_t));
	}
	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
}
/*
 * This is called to clean up large amounts of unused memory allocated
 * by the indirection array. Before compacting anything though, verify
 * that the indirection array is still needed and switch back to the
 * linear extent list (or even the inline buffer) if possible. The
 * compaction policy is as follows:
 *
 *    Full Compaction: Extents fit into a single page (or inline buffer)
 *    Full Compaction: Extents occupy less than 12.5% of allocated space
 * Partial Compaction: Extents occupy 12.5% to 50% of allocated space
 *      No Compaction: Extents occupy at least 50% of allocated space
 */
void
xfs_iext_irec_compact(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);

	if (nextents == 0) {
		xfs_iext_destroy(ifp);
	} else if (nextents <= XFS_INLINE_EXTS) {
		xfs_iext_indirect_to_direct(ifp);
		xfs_iext_direct_to_inline(ifp, nextents);
	} else if (nextents <= XFS_LINEAR_EXTS) {
		xfs_iext_indirect_to_direct(ifp);
	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 3) {
		xfs_iext_irec_compact_full(ifp);
	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
		xfs_iext_irec_compact_pages(ifp);
	}
}
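/*
 * Worked thresholds (a sketch, assuming 256-record pages): with
 * nlists == 16 the array can hold 4096 extents, so 300 remaining
 * extents (< 4096 >> 3) trigger a full compaction down to two pages,
 * 1500 (< 4096 >> 1) trigger a page-merge pass, and 3000 leave the
 * array untouched.
 */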
/*
 * Combine extents from neighboring extent pages.
 */
void
xfs_iext_irec_compact_pages(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_ext_irec_t	*erp, *erp_next;/* pointers to irec entries */
	int		erp_idx = 0;	/* indirection array index */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	while (erp_idx < nlists - 1) {
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		erp_next = erp + 1;
		if (erp_next->er_extcount <=
		    (XFS_LINEAR_EXTS - erp->er_extcount)) {
			memmove(&erp->er_extbuf[erp->er_extcount],
				erp_next->er_extbuf, erp_next->er_extcount *
				sizeof(xfs_bmbt_rec_t));
			erp->er_extcount += erp_next->er_extcount;
			/*
			 * Free page before removing extent record
			 * so er_extoffs don't get modified in
			 * xfs_iext_irec_remove.
			 */
			kmem_free(erp_next->er_extbuf, XFS_IEXT_BUFSZ);
			erp_next->er_extbuf = NULL;
			xfs_iext_irec_remove(ifp, erp_idx + 1);
			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
		} else {
			erp_idx++;
		}
	}
}
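/*
 * For example (again assuming 256-record pages), neighboring pages
 * holding 100 and 120 extents merge into one page of 220 extents
 * (120 <= 256 - 100), while pages holding 200 and 100 are left alone;
 * this pass never splits one page's records across two buffers.
 */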
/*
 * Fully compact the extent records managed by the indirection array.
 */
void
xfs_iext_irec_compact_full(
	xfs_ifork_t	*ifp)			/* inode fork pointer */
{
	xfs_bmbt_rec_t	*ep, *ep_next;		/* extent record pointers */
	xfs_ext_irec_t	*erp, *erp_next;	/* extent irec pointers */
	int		erp_idx = 0;		/* extent irec index */
	int		ext_avail;		/* empty entries in ex list */
	int		ext_diff;		/* number of exts to add */
	int		nlists;			/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp = ifp->if_u1.if_ext_irec;
	ep = &erp->er_extbuf[erp->er_extcount];
	erp_next = erp + 1;
	ep_next = erp_next->er_extbuf;
	while (erp_idx < nlists - 1) {
		ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
		ext_diff = MIN(ext_avail, erp_next->er_extcount);
		memcpy(ep, ep_next, ext_diff * sizeof(xfs_bmbt_rec_t));
		erp->er_extcount += ext_diff;
		erp_next->er_extcount -= ext_diff;
		/* Remove next page */
		if (erp_next->er_extcount == 0) {
			/*
			 * Free page before removing extent record
			 * so er_extoffs don't get modified in
			 * xfs_iext_irec_remove. The page is empty
			 * at this point, so free the full buffer.
			 */
			kmem_free(erp_next->er_extbuf, XFS_IEXT_BUFSZ);
			erp_next->er_extbuf = NULL;
			xfs_iext_irec_remove(ifp, erp_idx + 1);
			erp = &ifp->if_u1.if_ext_irec[erp_idx];
			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
		/* Update next page */
		} else {
			/* Move rest of page up to become next new page */
			memmove(erp_next->er_extbuf, ep_next,
				erp_next->er_extcount * sizeof(xfs_bmbt_rec_t));
			ep_next = erp_next->er_extbuf;
			memset(&ep_next[erp_next->er_extcount], 0,
				(XFS_LINEAR_EXTS - erp_next->er_extcount) *
				sizeof(xfs_bmbt_rec_t));
		}
		if (erp->er_extcount == XFS_LINEAR_EXTS) {
			erp_idx++;
			if (erp_idx < nlists)
				erp = &ifp->if_u1.if_ext_irec[erp_idx];
			else
				break;
		}
		ep = &erp->er_extbuf[erp->er_extcount];
		erp_next = erp + 1;
		ep_next = erp_next->er_extbuf;
	}
}
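/*
 * The difference from xfs_iext_irec_compact_pages() above: this pass
 * moves partial pages, copying as many records as fit (ext_diff) into
 * the current page and shuffling the remainder up, so it achieves
 * maximum packing at the cost of more memcpy/memmove traffic.
 */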
/*
 * This is called to update the er_extoff field in the indirection
 * array when extents have been added or removed from one of the
 * extent lists. erp_idx contains the irec index to begin updating
 * at and ext_diff contains the number of extents that were added
 * or removed.
 */
void
xfs_iext_irec_update_extoffs(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx,	/* irec index to update */
	int		ext_diff)	/* number of new extents */
{
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	for (i = erp_idx; i < nlists; i++) {
		ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
	}
}
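/*
 * For example, after removing 10 extents from irec [1] of the layout
 * {0/256, 256/256, 512/88}, the caller passes erp_idx == 2 and
 * ext_diff == -10 so that irec [2].er_extoff drops from 512 to 502.
 */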