/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs_types.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
/*
 * Initialize the inode hash table for the newly mounted file system.
 * Choose an initial table size based on a user-specified value, else
 * use a simple algorithm using the maximum number of inodes as an
 * indicator for table size, and clamp it between one and some large
 * number of pages.
 */
void
xfs_ihash_init(xfs_mount_t *mp)
{
	__uint64_t	icount;
	uint		i, flags = KM_SLEEP | KM_MAYFAIL;

	if (!mp->m_ihsize) {
		icount = mp->m_maxicount ? mp->m_maxicount :
			 (mp->m_sb.sb_dblocks << mp->m_sb.sb_inopblog);
		mp->m_ihsize = 1 << max_t(uint, 8,
					(xfs_highbit64(icount) + 1) / 2);
		mp->m_ihsize = min_t(uint, mp->m_ihsize,
					(64 * NBPP) / sizeof(xfs_ihash_t));
	}

	while (!(mp->m_ihash = (xfs_ihash_t *)kmem_zalloc(mp->m_ihsize *
						sizeof(xfs_ihash_t), flags))) {
		if ((mp->m_ihsize >>= 1) <= NBPP)
			flags = KM_SLEEP;
	}

	for (i = 0; i < mp->m_ihsize; i++) {
		rwlock_init(&(mp->m_ihash[i].ih_lock));
	}
}
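
/*
 * Illustrative sketch, not part of the original file: the table-sizing
 * arithmetic above, restated in plain C so the rounding and clamping are
 * easy to check.  The 4096-byte page and 32-byte bucket are assumed values
 * for this worked example only, standing in for NBPP and
 * sizeof(xfs_ihash_t).
 */
static unsigned int example_ihash_size(unsigned long long icount)
{
	unsigned int highbit = 0;
	unsigned int log2sz;

	/* index of the highest set bit, as xfs_highbit64() would report */
	while (icount >>= 1)
		highbit++;

	/* start with 2^((highbit + 1) / 2) buckets, but never fewer than 2^8 */
	log2sz = (highbit + 1) / 2;
	if (log2sz < 8)
		log2sz = 8;		/* floor of 256 buckets */
	if (log2sz > 13)
		log2sz = 13;		/* cap: 64 pages / 32 bytes = 8192 buckets here */

	return 1U << log2sz;
}
/*
 * With a million-inode file system the high bit is 19, giving
 * 1 << 10 = 1024 buckets; the 64-page cap (8192 buckets with the assumed
 * sizes) only bites on very large file systems.
 */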
/*
 * Free up structures allocated by xfs_ihash_init, at unmount time.
 */
void
xfs_ihash_free(xfs_mount_t *mp)
{
	kmem_free(mp->m_ihash, mp->m_ihsize*sizeof(xfs_ihash_t));
	mp->m_ihash = NULL;
}

/*
 * Initialize the inode cluster hash table for the newly mounted file system.
 * Its size is derived from the ihash table size.
 */
void
xfs_chash_init(xfs_mount_t *mp)
{
	uint	i;

	mp->m_chsize = max_t(uint, 1, mp->m_ihsize /
			 (XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog));
	mp->m_chsize = min_t(uint, mp->m_chsize, mp->m_ihsize);
	mp->m_chash = (xfs_chash_t *)kmem_zalloc(mp->m_chsize
						 * sizeof(xfs_chash_t),
						 KM_SLEEP);
	for (i = 0; i < mp->m_chsize; i++) {
		spinlock_init(&mp->m_chash[i].ch_lock, "xfshash");
	}
}
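
/*
 * Illustrative sketch, not part of the original file: how the cluster hash
 * size is derived from the inode hash size above.  The 8192-byte cluster
 * and 256-byte inode are assumed example values standing in for
 * XFS_INODE_CLUSTER_SIZE(mp) and (1 << mp->m_sb.sb_inodelog).
 */
static unsigned int example_chash_size(unsigned int ihsize)
{
	unsigned int inodes_per_cluster = 8192 / 256;	/* 32 */
	unsigned int chsize = ihsize / inodes_per_cluster;

	if (chsize < 1)
		chsize = 1;		/* at least one bucket */
	if (chsize > ihsize)
		chsize = ihsize;	/* never larger than the inode hash */
	return chsize;			/* e.g. 1024 ihash buckets -> 32 chash buckets */
}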
/*
 * Free up structures allocated by xfs_chash_init, at unmount time.
 */
void
xfs_chash_free(xfs_mount_t *mp)
{
	uint	i;

	for (i = 0; i < mp->m_chsize; i++) {
		spinlock_destroy(&mp->m_chash[i].ch_lock);
	}

	kmem_free(mp->m_chash, mp->m_chsize*sizeof(xfs_chash_t));
	mp->m_chash = NULL;
}
/*
 * Try to move an inode to the front of its hash list if possible
 * (and if it is not there already).  Called right after obtaining
 * the list version number and then dropping the read_lock on the
 * hash list in question (which is done right after looking up the
 * inode in question...).
 */
	if ((ip->i_prevp != &ih->ih_next) && write_trylock(&ih->ih_lock)) {
		if (likely(version == ih->ih_version)) {
			/* remove from list */
			if ((iq = ip->i_next)) {
				iq->i_prevp = ip->i_prevp;
			/* insert at list head */
			iq->i_prevp = &ip->i_next;
			ip->i_prevp = &ih->ih_next;
		write_unlock(&ih->ih_lock);
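
/*
 * Illustrative sketch, not part of the original file: the hash chains above
 * are intrusive lists in which i_prevp holds the address of whatever pointer
 * currently points at this inode (either the bucket head or the previous
 * inode's i_next).  A minimal move-to-front on that shape looks like the
 * following; the structure and function names are hypothetical stand-ins.
 */
struct ex_node {
	struct ex_node	*next;
	struct ex_node	**prevp;	/* address of the pointer to us */
};

static void ex_move_to_front(struct ex_node **head, struct ex_node *n)
{
	struct ex_node	*q;

	if (n->prevp == head)		/* already at the front */
		return;

	/* unlink: make whoever pointed at us point past us */
	if ((q = n->next))
		q->prevp = n->prevp;
	*n->prevp = n->next;

	/* relink at the head; the old front is non-NULL since n was not first */
	q = *head;
	q->prevp = &n->next;
	n->next = q;
	n->prevp = head;
	*head = n;
}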
/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the hash table for the file system
 * represented by the mount point parameter mp.  Each bucket of
 * the hash table is guarded by an individual semaphore.
 *
 * If the inode is found in the hash table, its corresponding vnode
 * is obtained with a call to vn_get().  This call takes care of
 * coordination with the reclamation of the inode and vnode.  Note
 * that the vmap structure is filled in while holding the hash lock.
 * This gives us the state of the inode/vnode when we found it and
 * is used for coordination in vn_get().
 *
 * If it is not in core, read it in from the file system's device and
 * add the inode into the hash table.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode
 * lock should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *       for xfs_ilock() for a list of valid values.
 * bno -- the block number starting the buffer containing the inode,
 *       if known (as by bulkstat), else 0.
 */
	xfs_chashlist_t	*chl, *chlnew;

	ih = XFS_IHASH(mp, ino);

	read_lock(&ih->ih_lock);

	for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) {
		if (ip->i_ino == ino) {
			/*
			 * If INEW is set this inode is being set up;
			 * we need to pause and try again.
			 */
			if (ip->i_flags & XFS_INEW) {
				read_unlock(&ih->ih_lock);
				XFS_STATS_INC(xs_ig_frecycle);

			inode_vp = XFS_ITOV_NULL(ip);
			if (inode_vp == NULL) {
				/*
				 * If IRECLAIM is set this inode is
				 * on its way out of the system;
				 * we need to pause and try again.
				 */
				if (ip->i_flags & XFS_IRECLAIM) {
					read_unlock(&ih->ih_lock);
					XFS_STATS_INC(xs_ig_frecycle);

				vn_trace_exit(vp, "xfs_iget.alloc",
					(inst_t *)__return_address);

				XFS_STATS_INC(xs_ig_found);

				ip->i_flags &= ~XFS_IRECLAIMABLE;
				version = ih->ih_version;
				read_unlock(&ih->ih_lock);
				xfs_ihash_promote(ih, ip, version);

				list_del_init(&ip->i_reclaim);
				XFS_MOUNT_IUNLOCK(mp);

			} else if (vp != inode_vp) {
				struct inode *inode = LINVFS_GET_IP(inode_vp);

				/* The inode is being torn down, pause and
				 * try again.
				 */
				if (inode->i_state & (I_FREEING | I_CLEAR)) {
					read_unlock(&ih->ih_lock);
					XFS_STATS_INC(xs_ig_frecycle);

			/* Chances are the other vnode (the one in the inode)
			 * is being torn down right now, and we landed on top
			 * of it.  Question is, what do we do?  Unhook the old
			 * inode and hook up the new one?
			 */
				"xfs_iget_core: ambiguous vns: vp/0x%p, invp/0x%p",

			/*
			 * Inode cache hit: if ip is not at the front of
			 * its hash chain, move it there now.
			 * Do this with the lock held for update, but
			 * do statistics after releasing the lock.
			 */
			version = ih->ih_version;
			read_unlock(&ih->ih_lock);
			xfs_ihash_promote(ih, ip, version);
			XFS_STATS_INC(xs_ig_found);

			if (ip->i_d.di_mode == 0) {
				if (!(flags & IGET_CREATE))
				xfs_iocore_inode_reinit(ip);

			xfs_ilock(ip, lock_flags);

			ip->i_flags &= ~XFS_ISTALE;

			vn_trace_exit(vp, "xfs_iget.found",
				(inst_t *)__return_address);
	/*
	 * Inode cache miss: save the hash chain version stamp and unlock
	 * the chain, so we don't deadlock in vn_alloc.
	 */
	XFS_STATS_INC(xs_ig_missed);

	version = ih->ih_version;

	read_unlock(&ih->ih_lock);

	/*
	 * Read the disk inode attributes into a new inode structure and get
	 * a new vnode for it.  This should also initialize i_ino and i_mount.
	 */
	error = xfs_iread(mp, tp, ino, &ip, bno);

	vn_trace_exit(vp, "xfs_iget.alloc", (inst_t *)__return_address);

	xfs_inode_lock_init(ip, vp);
	xfs_iocore_inode_init(ip);

	if (lock_flags != 0) {
		xfs_ilock(ip, lock_flags);

	if ((ip->i_d.di_mode == 0) && !(flags & IGET_CREATE)) {

	/*
	 * Put ip on its hash chain, unless someone else hashed a duplicate
	 * after we released the hash lock.
	 */
	write_lock(&ih->ih_lock);

	if (ih->ih_version != version) {
		for (iq = ih->ih_next; iq != NULL; iq = iq->i_next) {
			if (iq->i_ino == ino) {
				write_unlock(&ih->ih_lock);

				XFS_STATS_INC(xs_ig_dup);

	/*
	 * These values _must_ be set before releasing ihlock!
	 */
	if ((iq = ih->ih_next)) {
		iq->i_prevp = &ip->i_next;
	ip->i_prevp = &ih->ih_next;

	ip->i_udquot = ip->i_gdquot = NULL;

	ip->i_flags |= XFS_INEW;

	write_unlock(&ih->ih_lock);

	/*
	 * put ip on its cluster's hash chain
	 */
	ASSERT(ip->i_chash == NULL && ip->i_cprev == NULL &&
	       ip->i_cnext == NULL);

	ch = XFS_CHASH(mp, ip->i_blkno);

	s = mutex_spinlock(&ch->ch_lock);
	for (chl = ch->ch_list; chl != NULL; chl = chl->chl_next) {
		if (chl->chl_blkno == ip->i_blkno) {

			/* insert this inode into the doubly-linked list
			 * where chl points */
			if ((iq = chl->chl_ip)) {
				ip->i_cprev = iq->i_cprev;
				iq->i_cprev->i_cnext = ip;

	/* no hash list found for this block; add a new hash list */

	if (chlnew == NULL) {
		mutex_spinunlock(&ch->ch_lock, s);
		ASSERT(xfs_chashlist_zone != NULL);
		chlnew = (xfs_chashlist_t *)
				kmem_zone_alloc(xfs_chashlist_zone,
		ASSERT(chlnew != NULL);

		ip->i_chash = chlnew;

		chlnew->chl_blkno = ip->i_blkno;
		chlnew->chl_next = ch->ch_list;
		ch->ch_list = chlnew;

	if (chlnew != NULL) {
		kmem_zone_free(xfs_chashlist_zone, chlnew);

	mutex_spinunlock(&ch->ch_lock, s);

	/*
	 * Link ip to its mount and thread it on the mount's inode list.
	 */
	if ((iq = mp->m_inodes)) {
		ASSERT(iq->i_mprev->i_mnext == iq);
		ip->i_mprev = iq->i_mprev;
		iq->i_mprev->i_mnext = ip;

	XFS_MOUNT_IUNLOCK(mp);

	ASSERT(ip->i_df.if_ext_max ==
	       XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));

	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
	       ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));

	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	VFS_INIT_VNODE(XFS_MTOVFS(mp), vp, XFS_ITOBHV(ip), 1);
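
/*
 * Illustrative sketch, not part of the original file: the cache-miss path
 * above relies on an optimistic "version stamp" per hash bucket.  The
 * lookup records ih_version under the read lock, drops the lock for the
 * slow xfs_iread(), then retakes the bucket write lock; only if the
 * version has moved does it re-walk the chain for a duplicate before
 * chaining the new inode in.  A stripped-down rendition of that
 * re-validation step, with hypothetical structure and helper names:
 */
struct ex_hnode {
	struct ex_hnode	*next;
	unsigned long	key;
};

struct ex_hbucket {
	rwlock_t	lock;
	unsigned long	version;	/* bumped on every list change */
	struct ex_hnode	*head;
};

static int ex_insert_unless_dup(struct ex_hbucket *b, struct ex_hnode *n,
				unsigned long saved_version)
{
	struct ex_hnode	*q;

	write_lock(&b->lock);
	if (b->version != saved_version) {
		/* somebody changed the chain while we slept: look for a dup */
		for (q = b->head; q != NULL; q = q->next) {
			if (q->key == n->key) {
				write_unlock(&b->lock);
				return 0;	/* caller frees n and retries */
			}
		}
	}
	n->next = b->head;
	b->head = n;
	b->version++;
	write_unlock(&b->lock);
	return 1;
}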
/*
 * The 'normal' internal xfs_iget; if needed it will
 * 'allocate', or 'get', the vnode.
 */
	XFS_STATS_INC(xs_ig_attempts);

	if ((inode = iget_locked(XFS_MTOVFS(mp)->vfs_super, ino))) {

		vp = LINVFS_GET_VP(inode);
		if (inode->i_state & I_NEW) {
			vn_initialize(inode);
			error = xfs_iget_core(vp, mp, tp, ino, flags,
					lock_flags, ipp, bno);

				if (inode->i_state & I_NEW)
					unlock_new_inode(inode);

			/*
			 * If the inode is not fully constructed due to
			 * filehandle mismatches wait for the inode to go
			 * away and try again.
			 *
			 * iget_locked will call __wait_on_freeing_inode
			 * to wait for the inode to go away.
			 */
			if (is_bad_inode(inode) ||
			    ((ip = xfs_vtoi(vp)) == NULL)) {

			xfs_ilock(ip, lock_flags);
			XFS_STATS_INC(xs_ig_found);

	error = ENOMEM;	/* If we got no inode we are out of memory */
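
/*
 * Illustrative sketch, not part of the original file: the shape of the
 * iget_locked()/I_NEW handshake used above.  iget_locked() either returns
 * an inode that is already set up, or a freshly allocated one marked I_NEW;
 * only the caller that sees I_NEW may initialize it, and it must call
 * unlock_new_inode() whether or not that initialization succeeded.
 * my_fill_inode() is a hypothetical stand-in for the xfs_iget_core() call.
 */
static int my_fill_inode(struct inode *inode);	/* hypothetical helper */

static int ex_iget(struct super_block *sb, unsigned long ino,
		   struct inode **ipp)
{
	struct inode	*inode;
	int		error = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return -ENOMEM;		/* allocation failed */

	if (inode->i_state & I_NEW) {
		error = my_fill_inode(inode);	/* read it in, hook it up */
		unlock_new_inode(inode);
		if (error) {
			iput(inode);
			return error;
		}
	}
	*ipp = inode;
	return 0;
}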
/*
 * Do the setup for the various locks within the incore inode.
 */
	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", (long)vp->v_number);
	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", vp->v_number);
	init_waitqueue_head(&ip->i_ipin_wait);
	atomic_set(&ip->i_pincount, 0);
	init_sema(&ip->i_flock, 1, "xfsfino", vp->v_number);
/*
 * Look for the inode corresponding to the given ino in the hash table.
 * If it is there and its i_transp pointer matches tp, return it.
 * Otherwise, return NULL.
 */
xfs_inode_incore(xfs_mount_t	*mp,

	ih = XFS_IHASH(mp, ino);
	read_lock(&ih->ih_lock);
	for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) {
		if (ip->i_ino == ino) {
			/*
			 * If we find it and tp matches, return it.
			 * Also move it to the front of the hash list
			 * if we find it and it is not already there.
			 * Otherwise break from the loop and return
			 * NULL.
			 */
			if (ip->i_transp == tp) {
				version = ih->ih_version;
				read_unlock(&ih->ih_lock);
				xfs_ihash_promote(ih, ip, version);

	read_unlock(&ih->ih_lock);
/*
 * Decrement reference count of an inode structure and unlock it.
 *
 * ip -- the inode being released
 * lock_flags -- this parameter indicates the inode's locks to be
 *       released.  See the comment on xfs_iunlock() for a list
 *       of valid values.
 */
xfs_iput(xfs_inode_t	*ip,

	vnode_t	*vp = XFS_ITOV(ip);

	vn_trace_entry(vp, "xfs_iput", (inst_t *)__return_address);

	xfs_iunlock(ip, lock_flags);
/*
 * Special iput for brand-new inodes that are still locked
 */
xfs_iput_new(xfs_inode_t	*ip,

	vnode_t		*vp = XFS_ITOV(ip);
	struct inode	*inode = LINVFS_GET_IP(vp);

	vn_trace_entry(vp, "xfs_iput_new", (inst_t *)__return_address);

	if (ip->i_d.di_mode == 0) {
		ASSERT(!(ip->i_flags & XFS_IRECLAIMABLE));

	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);

	xfs_iunlock(ip, lock_flags);
/*
 * This routine embodies the part of the reclaim code that pulls
 * the inode from the inode hash table and the mount structure's
 * inode list.
 * This should only be called from xfs_reclaim().
 */
xfs_ireclaim(xfs_inode_t *ip)

	/*
	 * Remove from old hash list and mount list.
	 */
	XFS_STATS_INC(xs_ig_reclaims);

	/*
	 * Here we do a spurious inode lock in order to coordinate with
	 * xfs_sync().  This is because xfs_sync() references the inodes
	 * in the mount list without taking references on the corresponding
	 * vnodes.  We make that OK here by ensuring that we wait until
	 * the inode is unlocked in xfs_sync() before we go ahead and
	 * free it.  We get both the regular lock and the io lock because
	 * the xfs_sync() code may need to drop the regular one but will
	 * still hold the io lock.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	/*
	 * Release dquots (and their references) if any.  An inode may escape
	 * xfs_inactive and get here via vn_alloc->vn_reclaim path.
	 */
	XFS_QM_DQDETACH(ip->i_mount, ip);

	/*
	 * Pull our behavior descriptor from the vnode chain.
	 */
	vp = XFS_ITOV_NULL(ip);
		vn_bhv_remove(VN_BHV_HEAD(vp), XFS_ITOBHV(ip));

	/*
	 * Free all memory associated with the inode.
	 */
/*
 * This routine removes an about-to-be-destroyed inode from
 * all of the lists in which it is located, with the exception
 * of the behavior chain.
 */
	xfs_chashlist_t	*chl, *chm;

	write_lock(&ih->ih_lock);
	if ((iq = ip->i_next)) {
		iq->i_prevp = ip->i_prevp;
	write_unlock(&ih->ih_lock);

	/*
	 * Remove from cluster hash list
	 *   1) delete the chashlist if this is the last inode on the chashlist
	 *   2) unchain from list of inodes
	 *   3) point chashlist->chl_ip to 'chl_next' if it points to this inode
	 */
	ch = XFS_CHASH(mp, ip->i_blkno);
	s = mutex_spinlock(&ch->ch_lock);

	if (ip->i_cnext == ip) {
		/* Last inode on chashlist */
		ASSERT(ip->i_cnext == ip && ip->i_cprev == ip);
		ASSERT(ip->i_chash != NULL);

		for (chl = ch->ch_list; chl != NULL; chl = chl->chl_next) {
			if (chl->chl_blkno == ip->i_blkno) {

					/* first item on the list */
					ch->ch_list = chl->chl_next;
					chm->chl_next = chl->chl_next;
				kmem_zone_free(xfs_chashlist_zone, chl);
				ASSERT(chl->chl_ip != ip);

		ASSERT_ALWAYS(chl != NULL);

		/* delete one inode from a non-empty list */
		iq->i_cprev = ip->i_cprev;
		ip->i_cprev->i_cnext = iq;
		if (ip->i_chash->chl_ip == ip) {
			ip->i_chash->chl_ip = iq;

		ip->i_chash = __return_address;
		ip->i_cprev = __return_address;
		ip->i_cnext = __return_address;

	mutex_spinunlock(&ch->ch_lock, s);

	/*
	 * Remove from mount's inode list.
	 */
	ASSERT((ip->i_mnext != NULL) && (ip->i_mprev != NULL));
	iq->i_mprev = ip->i_mprev;
	ip->i_mprev->i_mnext = iq;

	/*
	 * Fix up the head pointer if it points to the inode being deleted.
	 */
	if (mp->m_inodes == ip) {

	/* Deal with the deleted inodes list */
	list_del_init(&ip->i_reclaim);

	XFS_MOUNT_IUNLOCK(mp);
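
/*
 * Illustrative sketch, not part of the original file: the cluster chain
 * above is a circular doubly linked ring (i_cnext/i_cprev) hanging off a
 * per-block chashlist head.  Deleting one element from such a ring looks
 * like this; the structure and function names are hypothetical stand-ins.
 */
struct ex_ring {
	struct ex_ring	*cnext;
	struct ex_ring	*cprev;
};

/* returns 1 if the ring is now empty and the list head can be freed */
static int ex_ring_del(struct ex_ring **headp, struct ex_ring *n)
{
	if (n->cnext == n) {		/* last element on the ring */
		*headp = NULL;
		return 1;
	}
	n->cnext->cprev = n->cprev;
	n->cprev->cnext = n->cnext;
	if (*headp == n)		/* head pointed at us: advance it */
		*headp = n->cnext;
	n->cnext = n->cprev = NULL;
	return 0;
}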
/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}
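
/*
 * Illustrative sketch, not part of the original file: the intended caller
 * pattern for the pair above.  The caller never hard-codes a lock mode; it
 * uses whatever mode xfs_ilock_map_shared() chose and hands the same value
 * back when unlocking.
 */
static void ex_with_extents(xfs_inode_t *ip)
{
	uint	lock_mode;

	lock_mode = xfs_ilock_map_shared(ip);	/* SHARED, or EXCL on first read-in */
	/* ... read ip->i_df extents here ... */
	xfs_iunlock_map_shared(ip, lock_mode);
}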
/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}
/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
xfs_ilock(xfs_inode_t	*ip,

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~XFS_LOCK_MASK) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		mrupdate(&ip->i_iolock);
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		mraccess(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL) {
		mrupdate(&ip->i_lock);
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		mraccess(&ip->i_lock);

	xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
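
/*
 * Illustrative sketch, not part of the original file: because the IO lock
 * must always be taken before the inode lock, callers that need both pass
 * the combined flags in a single call rather than nesting two xfs_ilock()
 * calls that could end up in the wrong order.
 */
static void ex_lock_both(xfs_inode_t *ip)
{
	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);	/* correct order, one call */
	/* ... modify the inode and its data ... */
	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
}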
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       locked.  See the comment for xfs_ilock() for a list
 *       of valid values.
 */
xfs_ilock_nowait(xfs_inode_t	*ip,

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~XFS_LOCK_MASK) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		iolocked = mrtryupdate(&ip->i_iolock);
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		iolocked = mrtryaccess(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL) {
		ilocked = mrtryupdate(&ip->i_lock);
			mrunlock(&ip->i_iolock);
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		ilocked = mrtryaccess(&ip->i_lock);
			mrunlock(&ip->i_iolock);

	xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address);
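
/*
 * Illustrative sketch, not part of the original file: a typical caller of
 * xfs_ilock_nowait().  Because the routine already drops the IO lock when
 * the inode lock cannot be taken, the caller only has to decide whether to
 * fall back to the blocking xfs_ilock() or to skip the inode and move on.
 */
static int ex_try_lock_or_skip(xfs_inode_t *ip, int can_block)
{
	if (xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED))
		return 1;			/* got both locks without sleeping */
	if (!can_block)
		return 0;			/* caller skips this inode */
	xfs_ilock(ip, XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED);
	return 1;
}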
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       unlocked.  See the comment for xfs_ilock() for a list
 *       of valid values for this parameter.
 */
xfs_iunlock(xfs_inode_t	*ip,

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) {
		ASSERT(!(lock_flags & XFS_IOLOCK_SHARED) ||
		       (ismrlocked(&ip->i_iolock, MR_ACCESS)));
		ASSERT(!(lock_flags & XFS_IOLOCK_EXCL) ||
		       (ismrlocked(&ip->i_iolock, MR_UPDATE)));
		mrunlock(&ip->i_iolock);

	if (lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) {
		ASSERT(!(lock_flags & XFS_ILOCK_SHARED) ||
		       (ismrlocked(&ip->i_lock, MR_ACCESS)));
		ASSERT(!(lock_flags & XFS_ILOCK_EXCL) ||
		       (ismrlocked(&ip->i_lock, MR_UPDATE)));
		mrunlock(&ip->i_lock);

		/*
		 * Let the AIL know that this item has been unlocked in case
		 * it is in the AIL and anyone is waiting on it.  Don't do
		 * this if the caller has asked us not to.
		 */
		if (!(lock_flags & XFS_IUNLOCK_NONOTIFY) &&
		    ip->i_itemp != NULL) {
			xfs_trans_unlocked_item(ip->i_mount,
						(xfs_log_item_t*)(ip->i_itemp));

	xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(xfs_inode_t	*ip,
		 uint		lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL) {
		ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
		mrdemote(&ip->i_lock);
	}
	if (lock_flags & XFS_IOLOCK_EXCL) {
		ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
		mrdemote(&ip->i_iolock);
	}
}
/*
 * The following three routines simply manage the i_flock
 * semaphore embedded in the inode.  This semaphore synchronizes
 * processes attempting to flush the in-core inode back to disk.
 */
void
xfs_iflock(xfs_inode_t *ip)
{
	psema(&(ip->i_flock), PINOD|PLTWAIT);
}

int
xfs_iflock_nowait(xfs_inode_t *ip)
{
	return (cpsema(&(ip->i_flock)));
}

void
xfs_ifunlock(xfs_inode_t *ip)
{
	ASSERT(valusema(&(ip->i_flock)) <= 0);
	vsema(&(ip->i_flock));
}
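
/*
 * Illustrative sketch, not part of the original file: the flush-lock
 * protocol the three helpers above implement.  A flusher takes i_flock
 * before writing the in-core inode to its buffer, and the lock is only
 * released once that flush is done (in XFS proper, from I/O completion),
 * so a second flusher either waits or, with the nowait form, skips the
 * inode.  The write-back step below is left as a comment placeholder.
 */
static int ex_flush_inode(xfs_inode_t *ip, int nowait)
{
	if (nowait) {
		if (!xfs_iflock_nowait(ip))
			return 0;		/* already being flushed; skip it */
	} else {
		xfs_iflock(ip);
	}

	/* ... write the in-core inode into its backing buffer here ... */

	xfs_ifunlock(ip);			/* in XFS proper, done at I/O completion */
	return 1;
}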