/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"
/*
 * btrfs_header_level() isn't free, so don't call it when lockdep isn't
 * on
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void spin_nested(struct extent_buffer *eb)
{
	spin_lock_nested(&eb->lock, BTRFS_MAX_LEVEL - btrfs_header_level(eb));
}
#else
static inline void spin_nested(struct extent_buffer *eb)
{
	spin_lock(&eb->lock);
}
#endif
/*
 * Setting a lock to blocking will drop the spinlock and set the
 * flag that forces other procs who want the lock to wait.  After
 * this you can safely schedule with the lock held.
 */
void btrfs_set_lock_blocking(struct extent_buffer *eb)
{
	if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
		set_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
		spin_unlock(&eb->lock);
	}
	/* exit with the spin lock released and the bit set */
}
/*
 * clearing the blocking flag will take the spinlock again.
 * After this you can't safely schedule
 */
void btrfs_clear_lock_blocking(struct extent_buffer *eb)
{
	if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
		spin_nested(eb);
		clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
		smp_mb__after_clear_bit();
	}
	/* exit with the spin lock held */
}
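
/*
 * Example (illustrative sketch, not part of the original file): the
 * intended calling pattern for the blocking transition.  Take the
 * spinlock, switch to blocking before anything that might sleep, and
 * switch back before resuming spinlock-only work.  The sleeping step
 * is a hypothetical stand-in for whatever the caller does there.
 */
static void __maybe_unused btrfs_blocking_example(struct extent_buffer *eb)
{
	btrfs_tree_lock(eb);		/* returns with eb->lock held */

	/* about to sleep: drop the spinlock but keep logical ownership */
	btrfs_set_lock_blocking(eb);

	/* safe to schedule here, e.g. wait for IO on the buffer */

	/* retake the spinlock; no scheduling allowed past this point */
	btrfs_clear_lock_blocking(eb);

	btrfs_tree_unlock(eb);
}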
/*
 * unfortunately, many of the places that currently set a lock to blocking
 * don't end up blocking for very long, and often they don't block
 * at all.  For a dbench 50 run, if we don't spin on the blocking bit
 * at all, the context switch rate can jump up to 400,000/sec or more.
 *
 * So, we're still stuck with this crummy spin on the blocking bit,
 * at least until the most common causes of the short blocks
 * can be solved.
 *
 * This function just spins, returning 1 as soon as the blocking bit
 * clears and 0 if it stays set.
 */
static int btrfs_spin_on_block(struct extent_buffer *eb)
{
	int i;

	for (i = 0; i < 512; i++) {
		cpu_relax();
		if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
			return 1;
		if (need_resched())
			break;
	}
	return 0;
}
/*
 * This is somewhat different from trylock.  It will take the
 * spinlock but if it finds the lock is set to blocking, it will
 * return without the lock held.
 *
 * returns 1 if it was able to take the lock and zero otherwise
 *
 * After this call, scheduling is not safe without first calling
 * btrfs_set_lock_blocking()
 */
int btrfs_try_spin_lock(struct extent_buffer *eb)
{
	int i;

	spin_nested(eb);
	if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
		return 1;
	spin_unlock(&eb->lock);

	/* spin for a bit on the BLOCKING flag */
	for (i = 0; i < 2; i++) {
		if (!btrfs_spin_on_block(eb))
			break;

		spin_nested(eb);
		if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
			return 1;
		spin_unlock(&eb->lock);
	}
	return 0;
}
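
/*
 * Example (sketch, not part of the original file): typical use of
 * btrfs_try_spin_lock() as an opportunistic fast path, falling back
 * to the full btrfs_tree_lock() when the current holder stays in the
 * blocking state too long.
 */
static void __maybe_unused btrfs_try_spin_example(struct extent_buffer *eb)
{
	if (!btrfs_try_spin_lock(eb)) {
		/* holder is blocking; pay the full spin/sleep cost */
		btrfs_tree_lock(eb);
	}
	/* spinlock held either way; do non-sleeping work, then drop it */
	btrfs_tree_unlock(eb);
}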
/*
 * the autoremove wake function will return 0 if it tried to wake up
 * a process that was already awake, which means that process won't
 * count as an exclusive wakeup.  The waitq code will continue waking
 * procs until it finds one that was actually sleeping.
 *
 * For btrfs, this isn't quite what we want.  We want a single proc
 * to be notified that the lock is ready for taking.  If that proc
 * already happens to be awake, great, it will loop around and try for
 * the lock.
 *
 * So, btrfs_wake_function always returns 1, even when the proc that we
 * tried to wake up was already awake.
 */
static int btrfs_wake_function(wait_queue_t *wait, unsigned mode,
			       int sync, void *key)
{
	autoremove_wake_function(wait, mode, sync, key);
	return 1;
}
/*
 * returns with the extent buffer spinlocked.
 *
 * This will spin and/or wait as required to take the lock, and then
 * return with the spinlock held.
 *
 * After this call, scheduling is not safe without first calling
 * btrfs_set_lock_blocking()
 */
int btrfs_tree_lock(struct extent_buffer *eb)
{
	DEFINE_WAIT(wait);

	wait.func = btrfs_wake_function;

	while (1) {
		spin_nested(eb);

		/* nobody is blocking, exit with the spinlock held */
		if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
			return 0;

		/*
		 * we have the spinlock, but the real owner is blocking.
		 * wait for them
		 */
		spin_unlock(&eb->lock);

		/*
		 * spin for a bit, and if the blocking flag goes away,
		 * loop around
		 */
		if (btrfs_spin_on_block(eb))
			continue;

		prepare_to_wait_exclusive(&eb->lock_wq, &wait,
					  TASK_UNINTERRUPTIBLE);

		if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
			schedule();

		finish_wait(&eb->lock_wq, &wait);
	}
	return 0;
}
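
/*
 * Example (sketch, not part of the original file): tree locks are
 * taken top-down, parent before child, which is why spin_nested()
 * derives the lockdep subclass from the header level (the parent's
 * higher level yields the smaller subclass).  "parent" and "child"
 * are hypothetical extent buffers with parent one level above child.
 */
static void __maybe_unused btrfs_lock_pair_example(struct extent_buffer *parent,
						   struct extent_buffer *child)
{
	btrfs_tree_lock(parent);

	/*
	 * btrfs_tree_lock() may schedule, so the parent must be switched
	 * to blocking before we go after the child's lock.
	 */
	btrfs_set_lock_blocking(parent);

	btrfs_tree_lock(child);

	/* ... modify child while holding both tree locks ... */

	btrfs_tree_unlock(child);
	btrfs_tree_unlock(parent);
}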
/*
 * Very quick trylock, this does not spin or schedule.  It returns
 * 1 with the spinlock held if it was able to take the lock, or it
 * returns zero if it was unable to take the lock.
 *
 * After this call, scheduling is not safe without first calling
 * btrfs_set_lock_blocking()
 */
int btrfs_try_tree_lock(struct extent_buffer *eb)
{
	if (spin_trylock(&eb->lock)) {
		if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
			/*
			 * we've got the spinlock, but the real owner is
			 * blocking.  Drop the spinlock and return failure
			 */
			spin_unlock(&eb->lock);
			return 0;
		}
		return 1;
	}
	/* someone else has the spinlock, give up */
	return 0;
}
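
/*
 * Example (sketch, not part of the original file): btrfs_try_tree_lock()
 * suits callers that can simply skip a busy buffer rather than wait,
 * e.g. an opportunistic pass over cached nodes.
 */
static int __maybe_unused btrfs_trylock_example(struct extent_buffer *eb)
{
	if (!btrfs_try_tree_lock(eb))
		return -EAGAIN;	/* busy or blocking; caller may retry later */

	/* spinlock held: do quick, non-sleeping work */
	btrfs_tree_unlock(eb);
	return 0;
}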
int btrfs_tree_unlock(struct extent_buffer *eb)
{
	/*
	 * if we were a blocking owner, we don't have the spinlock held,
	 * so just clear the bit and look for waiters
	 */
	if (test_and_clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
		smp_mb__after_clear_bit();
	else
		spin_unlock(&eb->lock);

	if (waitqueue_active(&eb->lock_wq))
		wake_up(&eb->lock_wq);
	return 0;
}
int btrfs_tree_locked(struct extent_buffer *eb)
{
	return test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags) ||
			spin_is_locked(&eb->lock);
}
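
/*
 * Example (sketch, not part of the original file): btrfs_tree_locked()
 * can't tell *who* holds the lock, so it fits assertions rather than
 * synchronization decisions.
 */
static void __maybe_unused btrfs_assert_locked_example(struct extent_buffer *eb)
{
	WARN_ON(!btrfs_tree_locked(eb));
}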