2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/posix_acl.h>
16 #include <linux/sort.h>
17 #include <linux/gfs2_ondisk.h>
18 #include <linux/crc32.h>
19 #include <linux/lm_interface.h>
20 #include <linux/security.h>
33 #include "ops_address.h"
34 #include "ops_inode.h"
/*
 * Host-endian form of an on-disk inum allocation range; converted to and
 * from big-endian by gfs2_inum_range_in()/gfs2_inum_range_out() below,
 * which access ir_start and ir_length.
 * NOTE(review): the member declarations are missing from this extract.
 */
40 struct gfs2_inum_range_host {
/*
 * iget_test - match callback used by gfs2_iget()/gfs2_ilookup()
 * @inode: candidate inode on the hash chain
 * @opaque: pointer to the u64 disk block address being looked up
 *
 * An inode matches when its i_no_addr equals the requested address and
 * i_private is non-NULL (i.e. the inode has been initialised).
 * NOTE(review): the return statements are not visible in this extract.
 */
45 static int iget_test(struct inode *inode, void *opaque)
47 struct gfs2_inode *ip = GFS2_I(inode);
48 u64 *no_addr = opaque;
50 if (ip->i_no_addr == *no_addr &&
51 inode->i_private != NULL)
/*
 * iget_set - initialisation callback for iget5_locked()
 *
 * Records the full 64-bit disk address in ip->i_no_addr and uses its
 * (possibly truncated) low bits as the VFS inode number.
 */
57 static int iget_set(struct inode *inode, void *opaque)
59 struct gfs2_inode *ip = GFS2_I(inode);
60 u64 *no_addr = opaque;
62 inode->i_ino = (unsigned long)*no_addr;
63 ip->i_no_addr = *no_addr;
/*
 * gfs2_ilookup - find an existing in-core inode by disk block address.
 *
 * The hash is just the low bits of the 64-bit address; iget_test()
 * resolves hash collisions by comparing the full i_no_addr.
 */
67 struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr)
69 unsigned long hash = (unsigned long)no_addr;
70 return ilookup5(sb, hash, iget_test, &no_addr);
/*
 * gfs2_iget - get (or allocate) the in-core inode for a disk address.
 *
 * iget_set() initialises a freshly allocated inode; an existing one is
 * matched via iget_test().
 */
73 static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
75 unsigned long hash = (unsigned long)no_addr;
76 return iget5_locked(sb, hash, iget_test, iget_set, &no_addr);
/*
 * Lookup argument for the "_skip" variants below: carries the disk
 * address (no_addr) used by iget_skip_test()/iget_skip_set().
 * NOTE(review): the member declarations are missing from this extract.
 */
79 struct gfs2_skip_data {
/*
 * iget_skip_test - like iget_test(), but recognise inodes that are in
 * the process of being torn down (I_FREEING/I_CLEAR/I_WILL_FREE) so
 * the caller need not wait on an inode that is going away.
 * NOTE(review): the return paths are not visible in this extract.
 */
84 static int iget_skip_test(struct inode *inode, void *opaque)
86 struct gfs2_inode *ip = GFS2_I(inode);
87 struct gfs2_skip_data *data = opaque;
89 if (ip->i_no_addr == data->no_addr && inode->i_private != NULL){
90 if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)){
/*
 * iget_skip_set - initialisation callback paired with iget_skip_test();
 * mirrors iget_set() but takes the disk address from gfs2_skip_data.
 */
99 static int iget_skip_set(struct inode *inode, void *opaque)
101 struct gfs2_inode *ip = GFS2_I(inode);
102 struct gfs2_skip_data *data = opaque;
106 inode->i_ino = (unsigned long)(data->no_addr);
107 ip->i_no_addr = data->no_addr;
/*
 * gfs2_iget_skip - as gfs2_iget(), but using the skip-aware callbacks
 * so an inode currently being freed is not returned (used by the
 * skip_freeing path of gfs2_inode_lookup()).
 */
111 static struct inode *gfs2_iget_skip(struct super_block *sb,
114 struct gfs2_skip_data data;
115 unsigned long hash = (unsigned long)no_addr;
117 data.no_addr = no_addr;
119 return iget5_locked(sb, hash, iget_skip_test, iget_skip_set, &data);
123 * GFS2 lookup code fills in vfs inode contents based on info obtained
124 * from directory entry inside gfs2_inode_lookup(). This has caused issues
125 * with NFS code path since its get_dentry routine doesn't have the relevant
126 * directory entry when gfs2_inode_lookup() is invoked. Part of the code
127 * segment inside gfs2_inode_lookup code needs to get moved around.
129 * Clean up I_LOCK and I_NEW as well.
/*
 * gfs2_set_iop - select inode/file operation vectors from i_mode.
 *
 * Regular files and directories get the "nolock" fops variants when the
 * localflocks mount argument is set; symlinks and device/special nodes
 * get their own i_op.  Finishes by unlocking the new inode (clears
 * I_LOCK/I_NEW per the note above).
 * NOTE(review): the S_ISREG() test and some else branches are missing
 * from this extract.
 */
132 void gfs2_set_iop(struct inode *inode)
134 struct gfs2_sbd *sdp = GFS2_SB(inode);
135 umode_t mode = inode->i_mode;
138 inode->i_op = &gfs2_file_iops;
/* localflocks: flock/posix locks are kept node-local, not clustered */
139 if (sdp->sd_args.ar_localflocks)
140 inode->i_fop = &gfs2_file_fops_nolock;
142 inode->i_fop = &gfs2_file_fops;
143 } else if (S_ISDIR(mode)) {
144 inode->i_op = &gfs2_dir_iops;
145 if (sdp->sd_args.ar_localflocks)
146 inode->i_fop = &gfs2_dir_fops_nolock;
148 inode->i_fop = &gfs2_dir_fops;
149 } else if (S_ISLNK(mode)) {
150 inode->i_op = &gfs2_symlink_iops;
/* device/special files fall through to the dev i_op vector */
152 inode->i_op = &gfs2_dev_iops;
155 unlock_new_inode(inode);
159 * gfs2_inode_lookup - Lookup an inode
160 * @sb: The super block
161 * @no_addr: The inode number
162 * @type: The type of the inode
163 * @skip_freeing: set this not return an inode if it is currently being freed.
165 * Returns: A VFS inode, or an error
/*
 * NOTE(review): many error-check lines and the fail labels are missing
 * from this extract; the unwind order below is partial.
 */
168 struct inode *gfs2_inode_lookup(struct super_block *sb,
171 u64 no_formal_ino, int skip_freeing)
174 struct gfs2_inode *ip;
175 struct gfs2_glock *io_gl;
/* choose the skip-aware lookup when the caller must not see a dying inode */
179 inode = gfs2_iget_skip(sb, no_addr);
181 inode = gfs2_iget(sb, no_addr);
185 return ERR_PTR(-ENOBUFS);
/* First-time setup for a freshly allocated in-core inode */
187 if (inode->i_state & I_NEW) {
188 struct gfs2_sbd *sdp = GFS2_SB(inode);
189 inode->i_private = ip;
190 ip->i_no_formal_ino = no_formal_ino;
/* inode glock: guards the dinode contents */
192 error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
195 ip->i_gl->gl_object = ip;
/* iopen glock: held shared for the lifetime of the in-core inode */
197 error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
201 set_bit(GIF_INVALID, &ip->i_flags);
202 error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
205 ip->i_iopen_gh.gh_gl->gl_object = ip;
207 gfs2_glock_put(io_gl);
209 if ((type == DT_UNKNOWN) && (no_formal_ino == 0))
/* type known from the directory entry: derive i_mode directly */
212 inode->i_mode = DT2IF(type);
215 * We must read the inode in order to work out its type in
216 * this case. Note that this doesn't happen often as we normally
217 * know the type beforehand. This code path only occurs during
218 * unlinked inode recovery (where it is safe to do this glock,
219 * which is not true in the general case).
221 if (type == DT_UNKNOWN) {
222 struct gfs2_holder gh;
223 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
226 /* Inode is now uptodate */
227 gfs2_glock_dq_uninit(&gh);
/* error unwind: drop iopen holder, glock refs, then the inode glock */
236 gfs2_glock_dq(&ip->i_iopen_gh);
238 gfs2_glock_put(io_gl);
240 ip->i_gl->gl_object = NULL;
241 gfs2_glock_put(ip->i_gl);
244 return ERR_PTR(error);
/*
 * gfs2_dinode_in - copy an on-disk dinode into the in-core inode.
 * @ip: the in-core inode to fill
 * @buf: buffer containing the big-endian struct gfs2_dinode
 *
 * Converts each field from big-endian and fails with a consistency
 * error if the dinode's recorded block address disagrees with
 * ip->i_no_addr.
 * NOTE(review): switch-case labels and return statements are missing
 * from this extract.
 */
247 static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
249 struct gfs2_dinode_host *di = &ip->i_di;
250 const struct gfs2_dinode *str = buf;
/* sanity: on-disk address must match where we read this dinode from */
252 if (ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)) {
253 if (gfs2_consist_inode(ip))
254 gfs2_dinode_print(ip);
257 ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
258 ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
259 ip->i_inode.i_rdev = 0;
/* device nodes carry major/minor in the dinode */
260 switch (ip->i_inode.i_mode & S_IFMT) {
263 ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
264 be32_to_cpu(str->di_minor));
268 ip->i_inode.i_uid = be32_to_cpu(str->di_uid);
269 ip->i_inode.i_gid = be32_to_cpu(str->di_gid);
271 * We will need to review setting the nlink count here in the
272 * light of the forthcoming ro bind mount work. This is a reminder
275 ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
276 di->di_size = be64_to_cpu(str->di_size);
277 i_size_write(&ip->i_inode, di->di_size);
278 di->di_blocks = be64_to_cpu(str->di_blocks);
279 gfs2_set_inode_blocks(&ip->i_inode);
280 ip->i_inode.i_atime.tv_sec = be64_to_cpu(str->di_atime);
281 ip->i_inode.i_atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
282 ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
283 ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
284 ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
285 ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);
287 di->di_goal_meta = be64_to_cpu(str->di_goal_meta);
288 di->di_goal_data = be64_to_cpu(str->di_goal_data);
289 di->di_generation = be64_to_cpu(str->di_generation);
291 di->di_flags = be32_to_cpu(str->di_flags);
292 gfs2_set_inode_flags(&ip->i_inode);
293 di->di_height = be16_to_cpu(str->di_height);
295 di->di_depth = be16_to_cpu(str->di_depth);
296 di->di_entries = be32_to_cpu(str->di_entries);
298 di->di_eattr = be64_to_cpu(str->di_eattr);
/* regular files pick address-space ops based on data journaling mode */
299 if (S_ISREG(ip->i_inode.i_mode))
300 gfs2_set_aops(&ip->i_inode);
306 * gfs2_inode_refresh - Refresh the incore copy of the dinode
307 * @ip: The GFS2 inode
/*
 * Reads the dinode buffer, verifies its metadata type, re-parses it via
 * gfs2_dinode_in() and clears GIF_INVALID on success.
 * NOTE(review): return statements and the brelse path are missing from
 * this extract.
 */
312 int gfs2_inode_refresh(struct gfs2_inode *ip)
314 struct buffer_head *dibh;
317 error = gfs2_meta_inode_buffer(ip, &dibh);
/* reject buffers that are not actually dinode metadata */
321 if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), dibh, GFS2_METATYPE_DI)) {
326 error = gfs2_dinode_in(ip, dibh->b_data);
328 clear_bit(GIF_INVALID, &ip->i_flags);
/*
 * gfs2_dinode_dealloc - free the dinode block itself.
 * @ip: the inode whose on-disk dinode is to be deallocated
 *
 * Only legal once the inode is down to its final block
 * (di_blocks == 1).  Takes quota, the rindex and the owning resource
 * group's glock, then frees the dinode inside a transaction; the error
 * labels unwind those acquisitions in reverse order.
 * NOTE(review): several error-check lines and labels are missing from
 * this extract.
 */
333 int gfs2_dinode_dealloc(struct gfs2_inode *ip)
335 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
336 struct gfs2_alloc *al;
337 struct gfs2_rgrpd *rgd;
/* anything but the dinode block itself still allocated => inconsistency */
340 if (ip->i_di.di_blocks != 1) {
341 if (gfs2_consist_inode(ip))
342 gfs2_dinode_print(ip);
346 al = gfs2_alloc_get(ip);
348 error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
352 error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
/* locate the resource group holding the dinode block */
356 rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr);
358 gfs2_consist_inode(ip);
360 goto out_rindex_relse;
363 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
366 goto out_rindex_relse;
368 error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, 1);
372 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
373 set_bit(GLF_LFLUSH, &ip->i_gl->gl_flags);
375 gfs2_free_di(rgd, ip);
378 clear_bit(GLF_STICKY, &ip->i_gl->gl_flags);
381 gfs2_glock_dq_uninit(&al->al_rgd_gh);
383 gfs2_glock_dq_uninit(&al->al_ri_gh);
385 gfs2_quota_unhold(ip);
392 * gfs2_change_nlink - Change nlink count on inode
393 * @ip: The GFS2 inode
394 * @diff: The change in the nlink count required
/*
 * Only +1/-1 deltas are supported (BUG_ON enforces this).  The new
 * count is written into the dinode buffer inside the caller's
 * transaction; dropping the last link marks the inode unlinked.
 * NOTE(review): error checks and return statements are missing from
 * this extract.
 */
398 int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
400 struct buffer_head *dibh;
404 BUG_ON(diff != 1 && diff != -1);
405 nlink = ip->i_inode.i_nlink + diff;
407 /* If we are reducing the nlink count, but the new value ends up being
408 bigger than the old one, we must have underflowed. */
409 if (diff < 0 && nlink > ip->i_inode.i_nlink) {
410 if (gfs2_consist_inode(ip))
411 gfs2_dinode_print(ip);
415 error = gfs2_meta_inode_buffer(ip, &dibh);
420 inc_nlink(&ip->i_inode);
422 drop_nlink(&ip->i_inode);
424 ip->i_inode.i_ctime = CURRENT_TIME;
426 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
427 gfs2_dinode_out(ip, dibh->b_data);
429 mark_inode_dirty(&ip->i_inode);
431 if (ip->i_inode.i_nlink == 0)
432 gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */
/*
 * gfs2_lookup_simple - look up @name in directory @dip by C string.
 *
 * Thin wrapper around gfs2_lookupi() that maps its NULL "not found"
 * result to ERR_PTR(-ENOENT), since callers of this helper expect an
 * error pointer rather than NULL (see the comment below).
 */
437 struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
441 gfs2_str2qstr(&qstr, name);
442 inode = gfs2_lookupi(dip, &qstr, 1, NULL);
443 /* gfs2_lookupi has inconsistent callers: vfs
444 * related routines expect NULL for no entry found,
445 * gfs2_lookup_simple callers expect ENOENT
446 * and do not check for NULL.
449 return ERR_PTR(-ENOENT);
456 * gfs2_lookupi - Look up a filename in a directory and return its inode
457 * @d_gh: An initialized holder for the directory glock
458 * @name: The name of the inode to look for
459 * @is_root: If 1, ignore the caller's permissions
460 * @i_gh: An uninitialized holder for the new inode glock
462 * This can be called via the VFS filldir function when NFS is doing
463 * a readdirplus and the inode which its intending to stat isn't
464 * already in cache. In this case we must not take the directory glock
465 * again, since the readdir call will have already taken that lock.
/*
 * NOTE(review): the "." / ".."-of-root early-return body and some error
 * checks are missing from this extract.
 */
470 struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
471 int is_root, struct nameidata *nd)
473 struct super_block *sb = dir->i_sb;
474 struct gfs2_inode *dip = GFS2_I(dir);
475 struct gfs2_holder d_gh;
477 struct inode *inode = NULL;
480 if (!name->len || name->len > GFS2_FNAMESIZE)
481 return ERR_PTR(-ENAMETOOLONG);
/* "." anywhere, and ".." of the root directory, are handled specially */
483 if ((name->len == 1 && memcmp(name->name, ".", 1) == 0) ||
484 (name->len == 2 && memcmp(name->name, "..", 2) == 0 &&
485 dir == sb->s_root->d_inode)) {
/* take the directory glock shared unless this thread already holds it
 * (the NFS readdirplus case described above) */
490 if (gfs2_glock_is_locked_by_me(dip->i_gl) == 0) {
491 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
493 return ERR_PTR(error);
/* permission check is skipped when is_root is set */
498 error = permission(dir, MAY_EXEC, NULL);
503 inode = gfs2_dir_search(dir, name);
505 error = PTR_ERR(inode);
508 gfs2_glock_dq_uninit(&d_gh);
509 if (error == -ENOENT)
511 return inode ? inode : ERR_PTR(error);
/* Convert an on-disk (big-endian) inum range to host endianness. */
514 static void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf)
516 const struct gfs2_inum_range *str = buf;
518 ir->ir_start = be64_to_cpu(str->ir_start);
519 ir->ir_length = be64_to_cpu(str->ir_length);
/* Write a host-endian inum range back out in on-disk (big-endian) form;
 * inverse of gfs2_inum_range_in(). */
522 static void gfs2_inum_range_out(const struct gfs2_inum_range_host *ir, void *buf)
524 struct gfs2_inum_range *str = buf;
526 str->ir_start = cpu_to_be64(ir->ir_start);
527 str->ir_length = cpu_to_be64(ir->ir_length);
/*
 * pick_formal_ino_1 - fast path: take the next formal inode number from
 * this node's local range (sd_ir_inode), under sd_inum_mutex and a
 * small RES_DINODE transaction.  *formal_ino receives ir_start, which
 * is then advanced and written back to the dinode buffer.
 * NOTE(review): the range-exhausted path and return statements are
 * missing from this extract.
 */
530 static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino)
532 struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
533 struct buffer_head *bh;
534 struct gfs2_inum_range_host ir;
537 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
540 mutex_lock(&sdp->sd_inum_mutex);
542 error = gfs2_meta_inode_buffer(ip, &bh);
544 mutex_unlock(&sdp->sd_inum_mutex);
/* the range lives just past the dinode header in the buffer */
549 gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));
552 *formal_ino = ir.ir_start++;
554 gfs2_trans_add_bh(ip->i_gl, bh, 1);
555 gfs2_inum_range_out(&ir,
556 bh->b_data + sizeof(struct gfs2_dinode));
558 mutex_unlock(&sdp->sd_inum_mutex);
565 mutex_unlock(&sdp->sd_inum_mutex);
/*
 * pick_formal_ino_2 - slow path: refill this node's local inum range
 * from the cluster-global counter (sd_inum_inode) under its glock held
 * exclusively, carving off a GFS2_INUM_QUANTUM-sized chunk, then hand
 * out the first number as in pick_formal_ino_1().  Overflow of the
 * 64-bit global counter raises a consistency error.
 * NOTE(review): several error-check lines are missing from this
 * extract.
 */
571 static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino)
573 struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
574 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode);
575 struct gfs2_holder gh;
576 struct buffer_head *bh;
577 struct gfs2_inum_range_host ir;
/* exclusive glock on the global counter inode serialises refills */
580 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
584 error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
587 mutex_lock(&sdp->sd_inum_mutex);
589 error = gfs2_meta_inode_buffer(ip, &bh);
593 gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));
596 struct buffer_head *m_bh;
600 error = gfs2_meta_inode_buffer(m_ip, &m_bh);
/* the global counter is a raw __be64 just past the dinode header */
604 z = *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode));
605 x = y = be64_to_cpu(z);
607 ir.ir_length = GFS2_INUM_QUANTUM;
608 x += GFS2_INUM_QUANTUM;
/* x wrapped past y => 64-bit counter overflow */
610 gfs2_consist_inode(m_ip);
612 gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
613 *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = z;
618 *formal_ino = ir.ir_start++;
621 gfs2_trans_add_bh(ip->i_gl, bh, 1);
622 gfs2_inum_range_out(&ir, bh->b_data + sizeof(struct gfs2_dinode));
627 mutex_unlock(&sdp->sd_inum_mutex);
630 gfs2_glock_dq_uninit(&gh);
/*
 * pick_formal_ino - allocate a formal inode number: try the local-range
 * fast path first, falling back to refilling from the global counter.
 * NOTE(review): the condition between the two calls is missing from
 * this extract.
 */
634 static int pick_formal_ino(struct gfs2_sbd *sdp, u64 *inum)
638 error = pick_formal_ino_1(sdp, inum);
642 error = pick_formal_ino_2(sdp, inum);
648 * create_ok - OK to create a new on-disk inode here?
649 * @dip: Directory in which dinode is to be created
650 * @name: Name of new dinode
/*
 * Requires write|exec permission on the directory, a directory that is
 * still linked, no existing entry with the same name, and headroom in
 * the entry count (and, for new directories, the parent's link count).
 * NOTE(review): the return statements are missing from this extract.
 */
656 static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
661 error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
665 /* Don't create entries in an unlinked directory */
666 if (!dip->i_inode.i_nlink)
669 error = gfs2_dir_check(&dip->i_inode, name, NULL);
/* counter saturation checks: entry count, and nlink for new subdirs */
680 if (dip->i_di.di_entries == (u32)-1)
682 if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)
/*
 * munge_mode_uid_gid - choose owner/group (and possibly adjust mode)
 * for a new inode created in @dip.
 *
 * With the "suiddir" mount argument and a setuid directory, ownership
 * can be inherited from the directory; a setgid directory propagates
 * its group.  Otherwise current->fsuid/fsgid are used.
 * NOTE(review): the mode-adjustment lines are missing from this
 * extract.
 */
688 static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode,
689 unsigned int *uid, unsigned int *gid)
691 if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
692 (dip->i_inode.i_mode & S_ISUID) && dip->i_inode.i_uid) {
695 else if (dip->i_inode.i_uid != current->fsuid)
697 *uid = dip->i_inode.i_uid;
699 *uid = current->fsuid;
/* setgid directory: new inode inherits the directory's group */
701 if (dip->i_inode.i_mode & S_ISGID) {
704 *gid = dip->i_inode.i_gid;
706 *gid = current->fsgid;
/*
 * alloc_dinode - reserve and allocate one block for a new dinode.
 * @dip: the directory the new inode will be created in (supplies the
 *       allocation context)
 * @no_addr: receives the block address of the new dinode
 * @generation: receives the dinode generation number
 *
 * NOTE(review): error checks and the release/unwind labels are partly
 * missing from this extract.
 */
709 static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation)
711 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
714 if (gfs2_alloc_get(dip) == NULL)
717 dip->i_alloc->al_requested = RES_DINODE;
718 error = gfs2_inplace_reserve(dip);
722 error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS, 0);
/* the actual block allocation happens inside the transaction */
726 *no_addr = gfs2_alloc_di(dip, generation);
731 gfs2_inplace_release(dip);
738 * init_dinode - Fill in a new dinode structure
739 * @dip: the directory this inode is being created in
740 * @gl: The glock covering the new inode
741 * @inum: the inode number
742 * @mode: the file permissions
/*
 * Writes a fresh big-endian struct gfs2_dinode into a new metadata
 * buffer for block inum->no_addr, inside the caller's transaction, and
 * returns the buffer via *bhp.
 * NOTE(review): some field assignments are missing from this extract.
 */
748 static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
749 const struct gfs2_inum_host *inum, unsigned int mode,
750 unsigned int uid, unsigned int gid,
751 const u64 *generation, dev_t dev, struct buffer_head **bhp)
753 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
754 struct gfs2_dinode *di;
755 struct buffer_head *dibh;
756 struct timespec tv = CURRENT_TIME;
758 dibh = gfs2_meta_new(gl, inum->no_addr);
759 gfs2_trans_add_bh(gl, dibh, 1);
760 gfs2_metatype_set(dibh, GFS2_METATYPE_DI, GFS2_FORMAT_DI);
761 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
762 di = (struct gfs2_dinode *)dibh->b_data;
764 di->di_num.no_formal_ino = cpu_to_be64(inum->no_formal_ino);
765 di->di_num.no_addr = cpu_to_be64(inum->no_addr);
766 di->di_mode = cpu_to_be32(mode);
767 di->di_uid = cpu_to_be32(uid);
768 di->di_gid = cpu_to_be32(gid);
/* a new dinode occupies exactly its own block */
771 di->di_blocks = cpu_to_be64(1);
772 di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(tv.tv_sec);
773 di->di_major = cpu_to_be32(MAJOR(dev));
774 di->di_minor = cpu_to_be32(MINOR(dev));
/* allocation goals start at the dinode's own block */
775 di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
776 di->di_generation = cpu_to_be64(*generation);
/* inherit JDATA/DIRECTIO behaviour from the parent dir or mount tuning */
780 if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
781 gfs2_tune_get(sdp, gt_new_files_jdata))
782 di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
783 if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_DIRECTIO) ||
784 gfs2_tune_get(sdp, gt_new_files_directio))
785 di->di_flags |= cpu_to_be32(GFS2_DIF_DIRECTIO);
786 } else if (S_ISDIR(mode)) {
/* subdirectories carry the inherit bits forward */
787 di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
788 GFS2_DIF_INHERIT_DIRECTIO);
789 di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
790 GFS2_DIF_INHERIT_JDATA);
794 di->di_payload_format = cpu_to_be32(S_ISDIR(mode) ? GFS2_FORMAT_DE : 0);
800 memset(&di->__pad4, 0, sizeof(di->__pad4));
802 di->di_atime_nsec = cpu_to_be32(tv.tv_nsec);
803 di->di_mtime_nsec = cpu_to_be32(tv.tv_nsec);
804 di->di_ctime_nsec = cpu_to_be32(tv.tv_nsec);
805 memset(&di->di_reserved, 0, sizeof(di->di_reserved));
807 set_buffer_uptodate(dibh);
/*
 * make_dinode - create the on-disk dinode for a new inode.
 *
 * Chooses ownership via munge_mode_uid_gid(), locks and checks quota,
 * then writes the dinode with init_dinode() inside a RES_DINODE +
 * RES_QUOTA transaction and charges the quota one inode.
 * NOTE(review): error-check lines and the unwind labels are missing
 * from this extract.
 */
812 static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
813 unsigned int mode, const struct gfs2_inum_host *inum,
814 const u64 *generation, dev_t dev, struct buffer_head **bhp)
816 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
817 unsigned int uid, gid;
820 munge_mode_uid_gid(dip, &mode, &uid, &gid);
823 error = gfs2_quota_lock(dip, uid, gid);
827 error = gfs2_quota_check(dip, uid, gid);
831 error = gfs2_trans_begin(sdp, RES_DINODE + RES_QUOTA, 0);
835 init_dinode(dip, gl, inum, mode, uid, gid, generation, dev, bhp);
836 gfs2_quota_change(dip, +1, uid, gid);
840 gfs2_quota_unlock(dip);
/*
 * link_dinode - add the directory entry for a freshly created inode
 * and set its initial link count to 1.
 *
 * If adding the entry requires allocating a new directory block, a
 * quota check, an in-place reservation and a correspondingly larger
 * transaction are used; otherwise a small RES_LEAF transaction
 * suffices.  Failure paths unwind the reservation and quota locks.
 * NOTE(review): several error labels and checks are missing from this
 * extract.
 */
846 static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
847 struct gfs2_inode *ip)
849 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
850 struct gfs2_alloc *al;
852 struct buffer_head *dibh;
855 al = gfs2_alloc_get(dip);
857 error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
/* does the directory need a new block to hold this entry? */
861 error = alloc_required = gfs2_diradd_alloc_required(&dip->i_inode, name);
862 if (alloc_required < 0)
863 goto fail_quota_locks;
864 if (alloc_required) {
865 error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
867 goto fail_quota_locks;
869 al->al_requested = sdp->sd_max_dirres;
871 error = gfs2_inplace_reserve(dip);
873 goto fail_quota_locks;
875 error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
876 al->al_rgd->rd_length +
878 RES_STATFS + RES_QUOTA, 0);
/* no allocation needed: a small transaction is enough */
882 error = gfs2_trans_begin(sdp, RES_LEAF + 2 * RES_DINODE, 0);
884 goto fail_quota_locks;
887 error = gfs2_dir_add(&dip->i_inode, name, ip, IF2DT(ip->i_inode.i_mode));
891 error = gfs2_meta_inode_buffer(ip, &dibh);
/* new inode starts with a single link (the entry just added) */
894 ip->i_inode.i_nlink = 1;
895 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
896 gfs2_dinode_out(ip, dibh->b_data);
904 if (dip->i_alloc->al_rgd)
905 gfs2_inplace_release(dip);
908 gfs2_quota_unlock(dip);
/*
 * gfs2_security_init - obtain the initial LSM security label for a new
 * inode and store it as a GFS2_EATYPE_SECURITY extended attribute.
 *
 * -EOPNOTSUPP from the LSM means "no label required" and is not treated
 * as a hard failure.
 * NOTE(review): the success/cleanup paths (presumably freeing the
 * name/value buffers) are missing from this extract — confirm in the
 * full source.
 */
915 static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip)
921 struct gfs2_ea_request er;
923 err = security_inode_init_security(&ip->i_inode, &dip->i_inode,
924 &name, &value, &len);
927 if (err == -EOPNOTSUPP)
932 memset(&er, 0, sizeof(struct gfs2_ea_request));
934 er.er_type = GFS2_EATYPE_SECURITY;
937 er.er_name_len = strlen(name);
938 er.er_data_len = len;
940 err = gfs2_ea_set_i(ip, &er);
949 * gfs2_createi - Create a new inode
950 * @ghs: An array of two holders
951 * @name: The name of the new file
952 * @mode: the permissions on the new inode
954 * @ghs[0] is an initialized holder for the directory
955 * @ghs[1] is the holder for the inode lock
957 * If the return value is not NULL, the glocks on both the directory and the new
958 * file are held. A transaction has been started and an inplace reservation
/*
 * NOTE(review): many error-check lines and fail labels are missing from
 * this extract; the unwind sequence below is partial.
 */
964 struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
965 unsigned int mode, dev_t dev)
967 struct inode *inode = NULL;
968 struct gfs2_inode *dip = ghs->gh_gl->gl_object;
969 struct inode *dir = &dip->i_inode;
970 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
971 struct gfs2_inum_host inum = { .no_addr = 0, .no_formal_ino = 0 };
974 struct buffer_head *bh = NULL;
976 if (!name->len || name->len > GFS2_FNAMESIZE)
977 return ERR_PTR(-ENAMETOOLONG);
/* take the directory glock exclusively for the create */
979 gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
980 error = gfs2_glock_nq(ghs);
984 error = create_ok(dip, name, mode);
/* allocate a formal inode number, then a disk block for the dinode */
988 error = pick_formal_ino(sdp, &inum.no_formal_ino);
992 error = alloc_dinode(dip, &inum.no_addr, &generation);
/* lock the new inode's glock by block number before writing it */
996 error = gfs2_glock_nq_num(sdp, inum.no_addr, &gfs2_inode_glops,
997 LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
1001 error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev, &bh);
/* instantiate the in-core inode for the dinode just written */
1005 inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode),
1007 inum.no_formal_ino, 0);
1011 error = gfs2_inode_refresh(GFS2_I(inode));
/* ACL and security xattrs, then link the entry into the directory */
1015 error = gfs2_acl_create(dip, GFS2_I(inode));
1019 error = gfs2_security_init(dip, GFS2_I(inode));
1023 error = link_dinode(dip, name, GFS2_I(inode));
1030 return ERR_PTR(-ENOMEM);
1034 gfs2_glock_dq_uninit(ghs + 1);
1042 return ERR_PTR(error);
1046 * gfs2_rmdiri - Remove a directory
1047 * @dip: The parent directory of the directory to be removed
1048 * @name: The name of the directory to be removed
1049 * @ip: The GFS2 inode of the directory to be removed
1051 * Assumes Glocks on dip and ip are held
/*
 * The directory must be empty, i.e. contain exactly its "." and ".."
 * entries (di_entries == 2).  Both those entries are removed and the
 * link counts of parent and child adjusted.
 * NOTE(review): error checks and return statements are missing from
 * this extract.
 */
1056 int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
1057 struct gfs2_inode *ip)
1059 struct qstr dotname;
1062 if (ip->i_di.di_entries != 2) {
1063 if (gfs2_consist_inode(ip))
1064 gfs2_dinode_print(ip);
/* remove the entry from the parent, then drop its ".."-induced link */
1068 error = gfs2_dir_del(dip, name);
1072 error = gfs2_change_nlink(dip, -1);
1076 gfs2_str2qstr(&dotname, ".");
1077 error = gfs2_dir_del(ip, &dotname);
1081 gfs2_str2qstr(&dotname, "..");
1082 error = gfs2_dir_del(ip, &dotname);
1086 /* It looks odd, but it really should be done twice */
1087 error = gfs2_change_nlink(ip, -1);
1091 error = gfs2_change_nlink(ip, -1);
1099 * gfs2_unlink_ok - check to see that a inode is still in a directory
1100 * @dip: the directory
1101 * @name: the name of the file
1104 * Assumes that the lock on (at least) @dip is held.
1106 * Returns: 0 if the parent/child relationship is correct, errno if it isn't
/*
 * NOTE(review): the return statements are missing from this extract.
 */
1109 int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
1110 const struct gfs2_inode *ip)
1114 if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
/* sticky directory: only the dir owner, file owner or CAP_FOWNER
 * may remove the entry */
1117 if ((dip->i_inode.i_mode & S_ISVTX) &&
1118 dip->i_inode.i_uid != current->fsuid &&
1119 ip->i_inode.i_uid != current->fsuid && !capable(CAP_FOWNER))
1122 if (IS_APPEND(&dip->i_inode))
1125 error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
/* finally verify @name in @dip really refers to @ip */
1129 error = gfs2_dir_check(&dip->i_inode, name, ip);
1137 * gfs2_ok_to_move - check if it's ok to move a directory to another directory
1141 * Follow @to back to the root and make sure we don't encounter @this
1142 * Assumes we already hold the rename lock.
/*
 * Walks up via ".." lookups from @to toward the root; finding @this on
 * the way would make the rename create a cycle.
 * NOTE(review): the loop structure and return statements are missing
 * from this extract.
 */
1147 int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
1149 struct inode *dir = &to->i_inode;
1150 struct super_block *sb = dir->i_sb;
1155 gfs2_str2qstr(&dotdot, "..");
/* hit @this => cycle; hit the root => safe */
1160 if (dir == &this->i_inode) {
1164 if (dir == sb->s_root->d_inode) {
1169 tmp = gfs2_lookupi(dir, &dotdot, 1, NULL);
1171 error = PTR_ERR(tmp);
1185 * gfs2_readlinki - return the contents of a symlink
1186 * @ip: the symlink's inode
1187 * @buf: a pointer to the buffer to be filled
1188 * @len: a pointer to the length of @buf
1190 * If @buf is too small, a piece of memory is kmalloc()ed and needs
1191 * to be freed by the caller.
/*
 * Takes the inode glock shared (with atime handling) around reading the
 * dinode, which holds the target string inline after its header.
 * NOTE(review): the buffer-size test, brelse and returns are missing
 * from this extract.
 */
1196 int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
1198 struct gfs2_holder i_gh;
1199 struct buffer_head *dibh;
1203 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh);
1204 error = gfs2_glock_nq_atime(&i_gh);
1206 gfs2_holder_uninit(&i_gh);
/* a zero-length symlink is an on-disk inconsistency */
1210 if (!ip->i_di.di_size) {
1211 gfs2_consist_inode(ip);
1216 error = gfs2_meta_inode_buffer(ip, &dibh);
/* +1 for the terminating NUL */
1220 x = ip->i_di.di_size + 1;
1222 *buf = kmalloc(x, GFP_KERNEL);
/* target string is stored inline, just past the dinode header */
1229 memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
1235 gfs2_glock_dq_uninit(&i_gh);
1240 * gfs2_glock_nq_atime - Acquire a hold on an inode's glock, and
1241 * conditionally update the inode's atime
1242 * @gh: the holder to acquire
1244 * Tests atime (access time) for gfs2_read, gfs2_readdir and gfs2_mmap
1245 * Update if the difference between the current time and the inode's current
1246 * atime is greater than an interval specified at mount.
/*
 * NOTE(review): several error checks, the glock drop before re-acquire,
 * and the fail labels are missing from this extract.
 */
1251 int gfs2_glock_nq_atime(struct gfs2_holder *gh)
1253 struct gfs2_glock *gl = gh->gh_gl;
1254 struct gfs2_sbd *sdp = gl->gl_sbd;
1255 struct gfs2_inode *ip = gl->gl_object;
1256 s64 quantum = gfs2_tune_get(sdp, gt_atime_quantum);
1260 struct timespec tv = CURRENT_TIME;
/* sanity: only GL_ATIME, non-async holders on inode glocks allowed */
1262 if (gfs2_assert_warn(sdp, gh->gh_flags & GL_ATIME) ||
1263 gfs2_assert_warn(sdp, !(gh->gh_flags & GL_ASYNC)) ||
1264 gfs2_assert_warn(sdp, gl->gl_ops == &gfs2_inode_glops))
/* remember the caller's requested state so it can be restored later */
1267 state = gh->gh_state;
1268 flags = gh->gh_flags;
1270 error = gfs2_glock_nq(gh);
/* no atime updates on noatime or read-only mounts */
1274 if (test_bit(SDF_NOATIME, &sdp->sd_flags) ||
1275 (sdp->sd_vfs->s_flags & MS_RDONLY))
1278 if (tv.tv_sec - ip->i_inode.i_atime.tv_sec >= quantum) {
/* need exclusive to write atime: re-acquire in LM_ST_EXCLUSIVE */
1280 gfs2_holder_reinit(LM_ST_EXCLUSIVE, gh->gh_flags & ~LM_FLAG_ANY,
1282 error = gfs2_glock_nq(gh);
1286 /* Verify that atime hasn't been updated while we were
1287 trying to get exclusive lock. */
1290 if (tv.tv_sec - ip->i_inode.i_atime.tv_sec >= quantum) {
1291 struct buffer_head *dibh;
1292 struct gfs2_dinode *di;
1294 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1295 if (error == -EROFS)
1300 error = gfs2_meta_inode_buffer(ip, &dibh);
1302 goto fail_end_trans;
1304 ip->i_inode.i_atime = tv;
1306 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1307 di = (struct gfs2_dinode *)dibh->b_data;
1308 di->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
1309 di->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
1312 gfs2_trans_end(sdp);
1315 /* If someone else has asked for the glock,
1316 unlock and let them have it. Then reacquire
1317 in the original state. */
1318 if (gfs2_glock_is_blocking(gl)) {
1320 gfs2_holder_reinit(state, flags, gh);
1321 return gfs2_glock_nq(gh);
1328 gfs2_trans_end(sdp);
/*
 * __gfs2_setattr_simple - apply @attr to the in-core inode and write
 * the dinode back, assuming the caller already has a transaction open
 * (see gfs2_setattr_simple() below).
 * NOTE(review): the error check and brelse/return lines are missing
 * from this extract.
 */
1335 __gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
1337 struct buffer_head *dibh;
1340 error = gfs2_meta_inode_buffer(ip, &dibh);
1342 error = inode_setattr(&ip->i_inode, attr);
1343 gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
1344 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1345 gfs2_dinode_out(ip, dibh->b_data);
1352 * gfs2_setattr_simple -
1356 * Called with a reference on the vnode.
/*
 * Wraps __gfs2_setattr_simple() in a RES_DINODE transaction unless one
 * is already active on this task (current->journal_info non-NULL).
 */
1361 int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
1365 if (current->journal_info)
1366 return __gfs2_setattr_simple(ip, attr);
1368 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE, 0);
1372 error = __gfs2_setattr_simple(ip, attr);
1373 gfs2_trans_end(GFS2_SB(&ip->i_inode));
/*
 * gfs2_dinode_out - write the in-core inode state out to an on-disk
 * dinode buffer in big-endian form; inverse of gfs2_dinode_in().
 * @ip: the source in-core inode
 * @buf: destination buffer holding a struct gfs2_dinode
 */
1377 void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
1379 const struct gfs2_dinode_host *di = &ip->i_di;
1380 struct gfs2_dinode *str = buf;
/* metadata header: magic, type and format identify a dinode block */
1382 str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
1383 str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
1384 str->di_header.__pad0 = 0;
1385 str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
1386 str->di_header.__pad1 = 0;
1387 str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
1388 str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
1389 str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
1390 str->di_uid = cpu_to_be32(ip->i_inode.i_uid);
1391 str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
1392 str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
1393 str->di_size = cpu_to_be64(di->di_size);
1394 str->di_blocks = cpu_to_be64(di->di_blocks);
1395 str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
1396 str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
1397 str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);
1399 str->di_goal_meta = cpu_to_be64(di->di_goal_meta);
1400 str->di_goal_data = cpu_to_be64(di->di_goal_data);
1401 str->di_generation = cpu_to_be64(di->di_generation);
1403 str->di_flags = cpu_to_be32(di->di_flags);
1404 str->di_height = cpu_to_be16(di->di_height);
/* linear (non-exhash) directories use the DE payload format */
1405 str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
1406 !(ip->i_di.di_flags & GFS2_DIF_EXHASH) ?
1407 GFS2_FORMAT_DE : 0);
1408 str->di_depth = cpu_to_be16(di->di_depth);
1409 str->di_entries = cpu_to_be32(di->di_entries);
1411 str->di_eattr = cpu_to_be64(di->di_eattr);
1412 str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
1413 str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
1414 str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
/*
 * gfs2_dinode_print - dump a dinode's fields to the kernel log; used by
 * the consistency-error paths above for debugging.
 * NOTE(review): the end of this function lies beyond this extract.
 */
1417 void gfs2_dinode_print(const struct gfs2_inode *ip)
1419 const struct gfs2_dinode_host *di = &ip->i_di;
1421 printk(KERN_INFO " no_formal_ino = %llu\n",
1422 (unsigned long long)ip->i_no_formal_ino);
1423 printk(KERN_INFO " no_addr = %llu\n",
1424 (unsigned long long)ip->i_no_addr);
1425 printk(KERN_INFO " di_size = %llu\n", (unsigned long long)di->di_size);
1426 printk(KERN_INFO " di_blocks = %llu\n",
1427 (unsigned long long)di->di_blocks);
1428 printk(KERN_INFO " di_goal_meta = %llu\n",
1429 (unsigned long long)di->di_goal_meta);
1430 printk(KERN_INFO " di_goal_data = %llu\n",
1431 (unsigned long long)di->di_goal_data);
1432 printk(KERN_INFO " di_flags = 0x%.8X\n", di->di_flags);
1433 printk(KERN_INFO " di_height = %u\n", di->di_height);
1434 printk(KERN_INFO " di_depth = %u\n", di->di_depth);
1435 printk(KERN_INFO " di_entries = %u\n", di->di_entries);
1436 printk(KERN_INFO " di_eattr = %llu\n",
1437 (unsigned long long)di->di_eattr);