/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_fsops.h"
#include "xfs_itable.h"
#include "xfs_trans_space.h"
#include "xfs_rtalloc.h"
#include "xfs_rw.h"
/*
 * File system operations
 */

int
xfs_fs_geometry(
	xfs_mount_t		*mp,
	xfs_fsop_geom_t		*geo,
	int			new_version)
{
	geo->blocksize = mp->m_sb.sb_blocksize;
	geo->rtextsize = mp->m_sb.sb_rextsize;
	geo->agblocks = mp->m_sb.sb_agblocks;
	geo->agcount = mp->m_sb.sb_agcount;
	geo->logblocks = mp->m_sb.sb_logblocks;
	geo->sectsize = mp->m_sb.sb_sectsize;
	geo->inodesize = mp->m_sb.sb_inodesize;
	geo->imaxpct = mp->m_sb.sb_imax_pct;
	geo->datablocks = mp->m_sb.sb_dblocks;
	geo->rtblocks = mp->m_sb.sb_rblocks;
	geo->rtextents = mp->m_sb.sb_rextents;
	geo->logstart = mp->m_sb.sb_logstart;
	ASSERT(sizeof(geo->uuid) == sizeof(mp->m_sb.sb_uuid));
	memcpy(geo->uuid, &mp->m_sb.sb_uuid, sizeof(mp->m_sb.sb_uuid));
	if (new_version >= 2) {
		geo->sunit = mp->m_sb.sb_unit;
		geo->swidth = mp->m_sb.sb_width;
	}
	if (new_version >= 3) {
		geo->version = XFS_FSOP_GEOM_VERSION;
		geo->flags =
			(XFS_SB_VERSION_HASATTR(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_ATTR : 0) |
			(XFS_SB_VERSION_HASNLINK(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_NLINK : 0) |
			(XFS_SB_VERSION_HASQUOTA(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_QUOTA : 0) |
			(XFS_SB_VERSION_HASALIGN(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_IALIGN : 0) |
			(XFS_SB_VERSION_HASDALIGN(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_DALIGN : 0) |
			(XFS_SB_VERSION_HASSHARED(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_SHARED : 0) |
			(XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_EXTFLG : 0) |
			(XFS_SB_VERSION_HASDIRV2(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_DIRV2 : 0) |
			(XFS_SB_VERSION_HASSECTOR(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_SECTOR : 0) |
			(XFS_SB_VERSION_HASATTR2(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_ATTR2 : 0);
		geo->logsectsize = XFS_SB_VERSION_HASSECTOR(&mp->m_sb) ?
				mp->m_sb.sb_logsectsize : BBSIZE;
		geo->rtsectsize = mp->m_sb.sb_blocksize;
		geo->dirblocksize = mp->m_dirblksize;
	}
	if (new_version >= 4) {
		geo->flags |=
			(XFS_SB_VERSION_HASLOGV2(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_LOGV2 : 0);
		geo->logsunit = mp->m_sb.sb_logsunit;
	}
	return 0;
}
static int
xfs_growfs_data_private(
	xfs_mount_t		*mp,		/* mount point for filesystem */
	xfs_growfs_data_t	*in)		/* growfs data input struct */
{
	xfs_agf_t		*agf;
	xfs_agi_t		*agi;
	xfs_agnumber_t		agno;
	xfs_extlen_t		agsize;
	xfs_extlen_t		tmpsize;
	xfs_alloc_rec_t		*arec;
	xfs_btree_sblock_t	*block;
	xfs_buf_t		*bp;
	int			bucket;
	int			dpct;
	int			error;
	xfs_agnumber_t		nagcount;
	xfs_agnumber_t		nagimax = 0;
	xfs_rfsblock_t		nb, nb_mod;
	xfs_rfsblock_t		new;
	xfs_rfsblock_t		nfree;
	xfs_agnumber_t		oagcount;
	int			pct;
	xfs_sb_t		*sbp;
	xfs_trans_t		*tp;

	nb = in->newblocks;
	pct = in->imaxpct;
	if (nb < mp->m_sb.sb_dblocks || pct < 0 || pct > 100)
		return XFS_ERROR(EINVAL);
	dpct = pct - mp->m_sb.sb_imax_pct;
	/* Make sure the device can actually hold the new data size. */
	error = xfs_read_buf(mp, mp->m_ddev_targp,
			XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
			XFS_FSS_TO_BB(mp, 1), 0, &bp);
	if (error)
		return error;
	xfs_buf_relse(bp);

	new = nb;	/* use new as a temporary here */
	nb_mod = do_div(new, mp->m_sb.sb_agblocks);
	nagcount = new + (nb_mod != 0);
	if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
		nagcount--;
		nb = nagcount * mp->m_sb.sb_agblocks;
		if (nb < mp->m_sb.sb_dblocks)
			return XFS_ERROR(EINVAL);
	}
	new = nb - mp->m_sb.sb_dblocks;
	oagcount = mp->m_sb.sb_agcount;
	if (nagcount > oagcount) {
		down_write(&mp->m_peraglock);
		mp->m_perag = kmem_realloc(mp->m_perag,
			sizeof(xfs_perag_t) * nagcount,
			sizeof(xfs_perag_t) * oagcount,
			KM_SLEEP);
		memset(&mp->m_perag[oagcount], 0,
			(nagcount - oagcount) * sizeof(xfs_perag_t));
		mp->m_flags |= XFS_MOUNT_32BITINODES;
		nagimax = xfs_initialize_perag(XFS_MTOVFS(mp), mp, nagcount);
		up_write(&mp->m_peraglock);
	}
	tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS);
	if ((error = xfs_trans_reserve(tp, XFS_GROWFS_SPACE_RES(mp),
			XFS_GROWDATA_LOG_RES(mp), 0, 0, 0))) {
		xfs_trans_cancel(tp, 0);
		return error;
	}
	nfree = 0;
	for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) {
		/*
		 * AG freelist header block
		 */
		bp = xfs_buf_get(mp->m_ddev_targp,
			  XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
			  XFS_FSS_TO_BB(mp, 1), 0);
		agf = XFS_BUF_TO_AGF(bp);
		memset(agf, 0, mp->m_sb.sb_sectsize);
		agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
		agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
		agf->agf_seqno = cpu_to_be32(agno);
		if (agno == nagcount - 1)
			agsize = nb -
				(agno * (xfs_rfsblock_t)mp->m_sb.sb_agblocks);
		else
			agsize = mp->m_sb.sb_agblocks;
		agf->agf_length = cpu_to_be32(agsize);
		agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
		agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
		agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
		agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
		agf->agf_flfirst = 0;
		agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
		agf->agf_flcount = 0;
		tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp);
		agf->agf_freeblks = cpu_to_be32(tmpsize);
		agf->agf_longest = cpu_to_be32(tmpsize);
		error = xfs_bwrite(mp, bp);
		if (error)
			goto error0;
		/*
		 * AG inode header block
		 */
		bp = xfs_buf_get(mp->m_ddev_targp,
			  XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
			  XFS_FSS_TO_BB(mp, 1), 0);
		agi = XFS_BUF_TO_AGI(bp);
		memset(agi, 0, mp->m_sb.sb_sectsize);
		agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
		agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
		agi->agi_seqno = cpu_to_be32(agno);
		agi->agi_length = cpu_to_be32(agsize);
		agi->agi_count = 0;
		agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
		agi->agi_level = cpu_to_be32(1);
		agi->agi_freecount = 0;
		agi->agi_newino = cpu_to_be32(NULLAGINO);
		agi->agi_dirino = cpu_to_be32(NULLAGINO);
		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
			agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
		error = xfs_bwrite(mp, bp);
		if (error)
			goto error0;
		/*
		 * BNO btree root block
		 */
		bp = xfs_buf_get(mp->m_ddev_targp,
			XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)),
			BTOBB(mp->m_sb.sb_blocksize), 0);
		block = XFS_BUF_TO_SBLOCK(bp);
		memset(block, 0, mp->m_sb.sb_blocksize);
		block->bb_magic = cpu_to_be32(XFS_ABTB_MAGIC);
		block->bb_level = 0;
		block->bb_numrecs = cpu_to_be16(1);
		block->bb_leftsib = cpu_to_be32(NULLAGBLOCK);
		block->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
		arec = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_alloc,
			block, 1, mp->m_alloc_mxr[0]);
		arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
		arec->ar_blockcount = cpu_to_be32(
			agsize - be32_to_cpu(arec->ar_startblock));
		error = xfs_bwrite(mp, bp);
		if (error)
			goto error0;
		/*
		 * CNT btree root block
		 */
		bp = xfs_buf_get(mp->m_ddev_targp,
			XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)),
			BTOBB(mp->m_sb.sb_blocksize), 0);
		block = XFS_BUF_TO_SBLOCK(bp);
		memset(block, 0, mp->m_sb.sb_blocksize);
		block->bb_magic = cpu_to_be32(XFS_ABTC_MAGIC);
		block->bb_level = 0;
		block->bb_numrecs = cpu_to_be16(1);
		block->bb_leftsib = cpu_to_be32(NULLAGBLOCK);
		block->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
		arec = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_alloc,
			block, 1, mp->m_alloc_mxr[0]);
		arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
		arec->ar_blockcount = cpu_to_be32(
			agsize - be32_to_cpu(arec->ar_startblock));
		nfree += be32_to_cpu(arec->ar_blockcount);
		error = xfs_bwrite(mp, bp);
		if (error)
			goto error0;
		/*
		 * INO btree root block
		 */
		bp = xfs_buf_get(mp->m_ddev_targp,
			XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)),
			BTOBB(mp->m_sb.sb_blocksize), 0);
		block = XFS_BUF_TO_SBLOCK(bp);
		memset(block, 0, mp->m_sb.sb_blocksize);
		block->bb_magic = cpu_to_be32(XFS_IBT_MAGIC);
		block->bb_level = 0;
		block->bb_numrecs = 0;
		block->bb_leftsib = cpu_to_be32(NULLAGBLOCK);
		block->bb_rightsib = cpu_to_be32(NULLAGBLOCK);
		error = xfs_bwrite(mp, bp);
		if (error)
			goto error0;
	}
	xfs_trans_agblocks_delta(tp, nfree);
	/*
	 * There are new blocks in the old last a.g.
	 */
	if (new) {
		/*
		 * Change the agi length.
		 */
		error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
		if (error)
			goto error0;
		agi = XFS_BUF_TO_AGI(bp);
		be32_add(&agi->agi_length, new);
		ASSERT(nagcount == oagcount ||
		       be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
		xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);
		error = xfs_alloc_read_agf(mp, tp, agno, 0, &bp);
		if (error)
			goto error0;
		agf = XFS_BUF_TO_AGF(bp);
		be32_add(&agf->agf_length, new);
		ASSERT(be32_to_cpu(agf->agf_length) ==
		       be32_to_cpu(agi->agi_length));
		xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);
		/*
		 * Free the new space.
		 */
		error = xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, agno,
			be32_to_cpu(agf->agf_length) - new), new);
		if (error)
			goto error0;
	}
	if (nagcount > oagcount)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
	if (nb > mp->m_sb.sb_dblocks)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS,
				 nb - mp->m_sb.sb_dblocks);
	if (nfree)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, nfree);
	if (dpct)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
	error = xfs_trans_commit(tp, 0, NULL);
	if (error)
		return error;
	/* New allocation groups fully initialized, so update mount struct */
	if (nagimax)
		mp->m_maxagi = nagimax;
	if (mp->m_sb.sb_imax_pct) {
		__uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
		do_div(icount, 100);
		mp->m_maxicount = icount << mp->m_sb.sb_inopblog;
	} else
		mp->m_maxicount = 0;
	for (agno = 1; agno < nagcount; agno++) {
		error = xfs_read_buf(mp, mp->m_ddev_targp,
				  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
				  XFS_FSS_TO_BB(mp, 1), 0, &bp);
		if (error) {
			xfs_fs_cmn_err(CE_WARN, mp,
		"error %d reading secondary superblock for ag %d",
				error, agno);
			break;
		}
		sbp = XFS_BUF_TO_SBP(bp);
		xfs_xlatesb(sbp, &mp->m_sb, -1, XFS_SB_ALL_BITS);
		/*
		 * If we get an error writing out the alternate superblocks,
		 * just issue a warning and continue.  The real work is
		 * already done and committed.
		 */
		if (!(error = xfs_bwrite(mp, bp))) {
			continue;
		} else {
			xfs_fs_cmn_err(CE_WARN, mp,
		"write error %d updating secondary superblock for ag %d",
				error, agno);
			break; /* no point in continuing */
		}
	}
	return 0;

 error0:
	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
	return error;
}
static int
xfs_growfs_log_private(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_growfs_log_t	*in)	/* growfs log input struct */
{
	xfs_extlen_t		nb;

	nb = in->newblocks;
	if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
		return XFS_ERROR(EINVAL);
	if (nb == mp->m_sb.sb_logblocks &&
	    in->isint == (mp->m_sb.sb_logstart != 0))
		return XFS_ERROR(EINVAL);
	/*
	 * Moving the log is hard, need new interfaces to sync
	 * the log first, hold off all activity while moving it.
	 * Can have shorter or longer log in the same space,
	 * or transform internal to external log or vice versa.
	 */
	return XFS_ERROR(ENOSYS);
}
/*
 * protected versions of growfs function acquire and release locks on the mount
 * point - exported through ioctls: XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG,
 * XFS_IOC_FSGROWFSRT (an illustrative userspace sketch follows these wrappers)
 */
int xfs_growfs_data(xfs_mount_t *mp, xfs_growfs_data_t *in)
{
	int error;
	if (!cpsema(&mp->m_growlock))
		return XFS_ERROR(EWOULDBLOCK);
	error = xfs_growfs_data_private(mp, in);
	vsema(&mp->m_growlock);
	return error;
}

int xfs_growfs_log(xfs_mount_t *mp, xfs_growfs_log_t *in)
{
	int error;
	if (!cpsema(&mp->m_growlock))
		return XFS_ERROR(EWOULDBLOCK);
	error = xfs_growfs_log_private(mp, in);
	vsema(&mp->m_growlock);
	return error;
}
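/*
 * Illustrative sketch only (not part of this file): roughly how the growfs
 * entry points above are driven from userspace through XFS_IOC_FSGROWFSDATA,
 * as xfs_growfs(8) does. The userspace header name and error handling are
 * assumptions; kept under "#if 0" so it is never compiled here.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs_fs.h>		/* assumed userspace location of xfs_growfs_data_t */

static int grow_data_to(const char *mntpt, __u64 newblocks, __u32 imaxpct)
{
	xfs_growfs_data_t in;
	int fd, ret;

	fd = open(mntpt, O_RDONLY);	/* any fd on the mounted filesystem */
	if (fd < 0)
		return -1;
	in.newblocks = newblocks;	/* new data section size, in fs blocks */
	in.imaxpct = imaxpct;		/* pass the current value to leave it unchanged */
	ret = ioctl(fd, XFS_IOC_FSGROWFSDATA, &in);
	close(fd);
	return ret;
}
#endif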
/*
 * exported through ioctl XFS_IOC_FSCOUNTS (a userspace sketch follows below)
 */
int xfs_fs_counts(xfs_mount_t *mp, xfs_fsop_counts_t *cnt)
{
	unsigned long	s;
	s = XFS_SB_LOCK(mp);
	cnt->freedata = mp->m_sb.sb_fdblocks;
	cnt->freertx = mp->m_sb.sb_frextents;
	cnt->freeino = mp->m_sb.sb_ifree;
	cnt->allocino = mp->m_sb.sb_icount;
	XFS_SB_UNLOCK(mp, s);
	return 0;
}
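/*
 * Illustrative sketch only (not part of this file): reading the free-space
 * counters exported by xfs_fs_counts() through XFS_IOC_FSCOUNTS. The
 * userspace header name is an assumption; kept under "#if 0".
 */
#if 0
#include <stdio.h>
#include <sys/ioctl.h>
#include <xfs/xfs_fs.h>		/* assumed userspace location of xfs_fsop_counts_t */

static int print_counts(int fd)
{
	xfs_fsop_counts_t cnt;

	if (ioctl(fd, XFS_IOC_FSCOUNTS, &cnt) < 0)
		return -1;
	printf("free blocks %llu, free rt extents %llu, free inodes %llu, allocated inodes %llu\n",
		(unsigned long long)cnt.freedata,
		(unsigned long long)cnt.freertx,
		(unsigned long long)cnt.freeino,
		(unsigned long long)cnt.allocino);
	return 0;
}
#endif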
/*
 * exported through ioctls XFS_IOC_SET_RESBLKS & XFS_IOC_GET_RESBLKS
 *
 * xfs_reserve_blocks is called to set m_resblks
 * in the in-core mount table. The number of unused reserved blocks
 * is kept in m_resblks_avail.
 *
 * Reserve the requested number of blocks if available. Otherwise return
 * as many as possible to satisfy the request. The actual number
 * reserved is returned in outval.
 *
 * A null inval pointer indicates that only the current reserved blocks
 * available should be returned; no settings are changed.
 * (An illustrative userspace sketch follows this function.)
 */

int
xfs_reserve_blocks(
	xfs_mount_t		*mp,
	__uint64_t		*inval,
	xfs_fsop_resblks_t	*outval)
{
	__int64_t		lcounter, delta;
	__uint64_t		request;
	unsigned long		s;

	/* If inval is null, report current values and return */
	if (inval == (__uint64_t *)NULL) {
		outval->resblks = mp->m_resblks;
		outval->resblks_avail = mp->m_resblks_avail;
		return 0;
	}

	request = *inval;
	s = XFS_SB_LOCK(mp);

	/*
	 * If our previous reservation was larger than the current value,
	 * then move any unused blocks back to the free pool.
	 */
	if (mp->m_resblks > request) {
		lcounter = mp->m_resblks_avail - request;
		if (lcounter > 0) {		/* release unused blocks */
			mp->m_sb.sb_fdblocks += lcounter;
			mp->m_resblks_avail -= lcounter;
		}
		mp->m_resblks = request;
	} else {
		delta = request - mp->m_resblks;
		lcounter = mp->m_sb.sb_fdblocks - delta;
		if (lcounter < 0) {
			/* We can't satisfy the request, just get what we can */
			mp->m_resblks += mp->m_sb.sb_fdblocks;
			mp->m_resblks_avail += mp->m_sb.sb_fdblocks;
			mp->m_sb.sb_fdblocks = 0;
		} else {
			mp->m_sb.sb_fdblocks = lcounter;
			mp->m_resblks = request;
			mp->m_resblks_avail += delta;
		}
	}

	outval->resblks = mp->m_resblks;
	outval->resblks_avail = mp->m_resblks_avail;
	XFS_SB_UNLOCK(mp, s);
	return 0;
}
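/*
 * Illustrative sketch only (not part of this file): setting and querying the
 * reserved block pool managed by xfs_reserve_blocks() above, via
 * XFS_IOC_SET_RESBLKS / XFS_IOC_GET_RESBLKS. Field and header names are
 * assumptions; kept under "#if 0".
 */
#if 0
#include <sys/ioctl.h>
#include <xfs/xfs_fs.h>		/* assumed userspace location of xfs_fsop_resblks_t */

static int reserve_blocks(int fd, __u64 want, __u64 *got)
{
	xfs_fsop_resblks_t res;

	res.resblks = want;		/* requested reservation, in fs blocks */
	if (ioctl(fd, XFS_IOC_SET_RESBLKS, &res) < 0)
		return -1;
	if (ioctl(fd, XFS_IOC_GET_RESBLKS, &res) < 0)
		return -1;
	*got = res.resblks;		/* what the kernel actually granted */
	return 0;
}
#endif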
void
xfs_fs_log_dummy(xfs_mount_t *mp)
{
	xfs_trans_t	*tp;
	xfs_inode_t	*ip;

	tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
	atomic_inc(&mp->m_active_trans);
	if (xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0)) {
		xfs_trans_cancel(tp, 0);
		return;
	}

	ip = mp->m_rootip;
	xfs_ilock(ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_ihold(tp, ip);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	xfs_trans_set_sync(tp);
	xfs_trans_commit(tp, 0, NULL);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
int xfs_fs_goingdown(xfs_mount_t *mp, __uint32_t inflags)
{
	switch (inflags) {
	case XFS_FSOP_GOING_FLAGS_DEFAULT: {
		struct vfs *vfsp = XFS_MTOVFS(mp);
		struct super_block *sb = freeze_bdev(vfsp->vfs_super->s_bdev);

		if (sb && !IS_ERR(sb)) {
			xfs_force_shutdown(mp, XFS_FORCE_UMOUNT);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	}
	case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
		xfs_force_shutdown(mp, XFS_FORCE_UMOUNT);
		break;
	case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
		xfs_force_shutdown(mp, XFS_FORCE_UMOUNT|XFS_LOG_IO_ERROR);
		break;
	default:
		return XFS_ERROR(EINVAL);