/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/lm_interface.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"

struct gfs2_gl_hash_bucket {
        struct hlist_head hb_list;
};

struct gfs2_glock_iter {
        int hash;                       /* hash bucket index         */
        struct gfs2_sbd *sdp;           /* incore superblock         */
        struct gfs2_glock *gl;          /* current glock struct      */
        char string[512];               /* scratch space             */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
        defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ       512
# else
#  define GL_HASH_LOCK_SZ       256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return NULL;
}
#endif
/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The GFS2 superblock
 * @name: The glock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
                            const struct lm_lockname *name)
{
        unsigned int h;

        h = jhash(&name->ln_number, sizeof(u64), 0);
        h = jhash(&name->ln_type, sizeof(unsigned int), h);
        h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
        h &= GFS2_GL_HASH_MASK;

        return h;
}

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;

        if (sdp->sd_lockstruct.ls_ops->lm_put_lock)
                sdp->sd_lockstruct.ls_ops->lm_put_lock(gl->gl_lock);

        if (aspace)
                gfs2_aspace_put(aspace);

        kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

static void gfs2_glock_hold(struct gfs2_glock *gl)
{
        atomic_inc(&gl->gl_ref);
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
        spin_lock(&lru_lock);
        if (list_empty(&gl->gl_lru) && gl->gl_state != LM_ST_UNLOCKED) {
                list_add_tail(&gl->gl_lru, &lru_list);
                atomic_inc(&lru_count);
        }
        spin_unlock(&lru_lock);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
        int rv = 0;

        write_lock(gl_lock_addr(gl->gl_hash));
        if (atomic_dec_and_test(&gl->gl_ref)) {
                hlist_del(&gl->gl_list);
                write_unlock(gl_lock_addr(gl->gl_hash));
                spin_lock(&lru_lock);
                if (!list_empty(&gl->gl_lru)) {
                        list_del_init(&gl->gl_lru);
                        atomic_dec(&lru_count);
                }
                spin_unlock(&lru_lock);
                GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_UNLOCKED);
                GLOCK_BUG_ON(gl, !list_empty(&gl->gl_lru));
                GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
                glock_free(gl);
                rv = 1;
                goto out;
        }
        write_unlock(gl_lock_addr(gl->gl_hash));
        /* 1 for being hashed, 1 for having state != LM_ST_UNLOCKED */
        if (atomic_read(&gl->gl_ref) == 2)
                gfs2_glock_schedule_for_reclaim(gl);
out:
        return rv;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the index of the bucket to search
 * @sdp: the filesystem
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number,
 *          with its reference count incremented
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
                                        const struct gfs2_sbd *sdp,
                                        const struct lm_lockname *name)
{
        struct gfs2_glock *gl;
        struct hlist_node *h;

        hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
                if (!lm_name_equal(&gl->gl_name, name))
                        continue;
                if (gl->gl_sbd != sdp)
                        continue;

                atomic_inc(&gl->gl_ref);

                return gl;
        }

        return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
                                          const struct lm_lockname *name)
{
        unsigned int hash = gl_hash(sdp, name);
        struct gfs2_glock *gl;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, name);
        read_unlock(gl_lock_addr(hash));

        return gl;
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
        const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
        if ((gh->gh_state == LM_ST_EXCLUSIVE ||
             gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
                return 0;
        if (gl->gl_state == gh->gh_state)
                return 1;
        if (gh->gh_flags & GL_EXACT)
                return 0;
        if (gl->gl_state == LM_ST_EXCLUSIVE) {
                if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
                        return 1;
                if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
                        return 1;
        }
        if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
                return 1;
        return 0;
}

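/**
 * gfs2_holder_wake - wake up a process waiting on a holder
 * @gh: the holder
 *
 * Clears HIF_WAIT and wakes anyone sleeping in gfs2_glock_wait().
 */
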
static void gfs2_holder_wake(struct gfs2_holder *gh)
{
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb__after_clear_bit();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh, *tmp;
        int ret;

restart:
        list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        continue;
                if (may_grant(gl, gh)) {
                        if (gh->gh_list.prev == &gl->gl_holders &&
                            glops->go_lock) {
                                spin_unlock(&gl->gl_spin);
                                /* FIXME: eliminate this eventually */
                                ret = glops->go_lock(gh);
                                spin_lock(&gl->gl_spin);
                                if (ret) {
                                        if (ret == 1)
                                                return 2;
                                        gh->gh_error = ret;
                                        list_del_init(&gh->gh_list);
                                        gfs2_holder_wake(gh);
                                        goto restart;
                                }
                                set_bit(HIF_HOLDER, &gh->gh_iflags);
                                gfs2_holder_wake(gh);
                                goto restart;
                        }
                        set_bit(HIF_HOLDER, &gh->gh_iflags);
                        gfs2_holder_wake(gh);
                        continue;
                }
                if (gh->gh_list.prev == &gl->gl_holders)
                        return 1;
                break;
        }
        return 0;
}

/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: The glock
 * @ret: The status from the DLM (0 fails any queued "try" locks)
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
        struct gfs2_holder *gh, *tmp;

        list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        continue;
                if (ret & LM_OUT_ERROR)
                        gh->gh_error = -EIO;
                else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
                        gh->gh_error = GLR_TRYFAILED;
                else
                        continue;
                list_del_init(&gh->gh_list);
                gfs2_holder_wake(gh);
        }
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
                        return gh;
        }
        return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

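        /* A glock holds an extra reference on itself while its state is
           not LM_ST_UNLOCKED (see the comment in gfs2_glock_put()) */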
        if (held1 != held2) {
                if (held2)
                        gfs2_glock_hold(gl);
                else
                        gfs2_glock_put(gl);
        }

        gl->gl_state = new_state;
        gl->gl_tchange = jiffies;
}
static void gfs2_demote_wake(struct gfs2_glock *gl)
{
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        clear_bit(GLF_DEMOTE, &gl->gl_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh;
        unsigned state = ret & LM_OUT_ST_MASK;
        int rv;

        spin_lock(&gl->gl_spin);
        state_change(gl, state);
        gh = find_first_waiter(gl);

        /* Demote to UN request arrived during demote to SH or DF */
        if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
            state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
                gl->gl_target = LM_ST_UNLOCKED;

        /* Check for state != intended state */
        if (unlikely(state != gl->gl_target)) {
                if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
                        /* move to back of queue and try next entry */
                        if (ret & LM_OUT_CANCELED) {
                                if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
                                        list_move_tail(&gh->gh_list, &gl->gl_holders);
                                gh = find_first_waiter(gl);
                                gl->gl_target = gh->gh_state;
                                goto retry;
                        }
                        /* Some error or failed "try lock" - report it */
                        if ((ret & LM_OUT_ERROR) ||
                            (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
                                gl->gl_target = gl->gl_state;
                                do_error(gl, ret);
                                goto out;
                        }
                }
                switch(state) {
                /* Unlocked due to conversion deadlock, try again */
                case LM_ST_UNLOCKED:
retry:
                        do_xmote(gl, gh, gl->gl_target);
                        break;
                /* Conversion fails, unlock and try again */
                case LM_ST_SHARED:
                case LM_ST_DEFERRED:
                        do_xmote(gl, gh, LM_ST_UNLOCKED);
                        break;
                default: /* Everything else */
                        printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
                        GLOCK_BUG_ON(gl, 1);
                }
                spin_unlock(&gl->gl_spin);
                gfs2_glock_put(gl);
                return;
        }

        /* Fast path - we got what we asked for */
        if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
                gfs2_demote_wake(gl);
        if (state != LM_ST_UNLOCKED) {
                if (glops->go_xmote_bh) {
                        spin_unlock(&gl->gl_spin);
                        rv = glops->go_xmote_bh(gl, gh);
                        if (rv == -EAGAIN)
                                return;
                        spin_lock(&gl->gl_spin);
                        if (rv) {
                                do_error(gl, rv);
                                goto out;
                        }
                }
                rv = do_promote(gl);
                if (rv == 2)
                        goto out_locked;
        }
out:
        clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
        spin_unlock(&gl->gl_spin);
        gfs2_glock_put(gl);
}

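/**
 * gfs2_lm_lock - pass a lock request on to the lock module
 * @sdp: the filesystem
 * @lock: the lock module's private lock data
 * @cur_state: the current state of the lock
 * @req_state: the requested state
 * @flags: the LM_FLAG_* modifier flags
 *
 * With no lock module (lock_nolock) the request trivially succeeds.
 * After a withdraw, LM_OUT_ERROR is returned.
 */
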
static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
                                 unsigned int cur_state, unsigned int req_state,
                                 unsigned int flags)
{
        int ret = LM_OUT_ERROR;

        if (!sdp->sd_lockstruct.ls_ops->lm_lock)
                return req_state == LM_ST_UNLOCKED ? 0 : req_state;

        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state,
                                                         req_state, flags);
        return ret;
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        unsigned int lck_flags = gh ? gh->gh_flags : 0;
        int ret;

        lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
                      LM_FLAG_PRIORITY);
        BUG_ON(gl->gl_state == target);
        BUG_ON(gl->gl_state == gl->gl_target);
        if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
            glops->go_inval) {
                set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
                do_error(gl, 0); /* Fail queued try locks */
        }
        spin_unlock(&gl->gl_spin);
        if (glops->go_xmote_th)
                glops->go_xmote_th(gl);
        if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
                glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
        clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

        gfs2_glock_hold(gl);
        if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
            gl->gl_state == LM_ST_DEFERRED) &&
            !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
                lck_flags |= LM_FLAG_TRY_1CB;
        ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, target, lck_flags);

        if (!(ret & LM_OUT_ASYNC)) {
                finish_xmote(gl, ret);
                gfs2_glock_hold(gl);
                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                        gfs2_glock_put(gl);
        } else {
                GLOCK_BUG_ON(gl, ret != LM_OUT_ASYNC);
        }
        spin_lock(&gl->gl_spin);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;

        if (!list_empty(&gl->gl_holders)) {
                gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        return gh;
        }
        return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
        struct gfs2_holder *gh = NULL;
        int ret;

        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                return;

        GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

        if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
            gl->gl_demote_state != gl->gl_state) {
                if (find_first_holder(gl))
                        goto out;
                if (nonblock)
                        goto out_sched;
                set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
                GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
                gl->gl_target = gl->gl_demote_state;
        } else {
                if (test_bit(GLF_DEMOTE, &gl->gl_flags))
                        gfs2_demote_wake(gl);
                ret = do_promote(gl);
                if (ret == 0)
                        goto out;
                if (ret == 2)
                        return;
                gh = find_first_waiter(gl);
                gl->gl_target = gh->gh_state;
                if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
                        do_error(gl, 0); /* Fail queued try locks */
        }
        do_xmote(gl, gh, gl->gl_target);
        return;

out_sched:
        gfs2_glock_hold(gl);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                gfs2_glock_put(gl);
out:
        clear_bit(GLF_LOCK, &gl->gl_flags);
}

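/**
 * glock_work_func - the per-glock workqueue function
 * @work: the gl_work member of the glock
 *
 * Handles any pending reply from the lock module, turns a pending
 * demote into a real one once the minimum hold time has expired,
 * runs the holder queue, and drops the reference taken when the
 * work was queued (unless the work has been requeued with a delay).
 */
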
static void glock_work_func(struct work_struct *work)
{
        unsigned long delay = 0;
        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);

        if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags))
                finish_xmote(gl, gl->gl_reply);
        spin_lock(&gl->gl_spin);
        if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
            gl->gl_state != LM_ST_UNLOCKED &&
            gl->gl_demote_state != LM_ST_EXCLUSIVE) {
                unsigned long holdtime, now = jiffies;
                holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
                if (time_before(now, holdtime))
                        delay = holdtime - now;
                set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
        }
        run_queue(gl, 0);
        spin_unlock(&gl->gl_spin);
        if (!delay ||
            queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

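/**
 * gfs2_lm_get_lock - ask the lock module to allocate its lock data
 * @sdp: the filesystem
 * @name: the name of the new lock
 * @lockp: the lock module's private data is returned here
 *
 * Returns: errno (-EIO after a withdraw; 0 with no lock module)
 */
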
static int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name,
                     void **lockp)
{
        int error = -EIO;
        if (!sdp->sd_lockstruct.ls_ops->lm_get_lock)
                return 0;
        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                error = sdp->sd_lockstruct.ls_ops->lm_get_lock(
                                sdp->sd_lockstruct.ls_lockspace, name, lockp);
        return error;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                   const struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
        struct gfs2_glock *gl, *tmp;
        unsigned int hash = gl_hash(sdp, &name);
        int error;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, &name);
        read_unlock(gl_lock_addr(hash));

        if (gl || !create) {
                *glp = gl;
                return 0;
        }

        gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
        if (!gl)
                return -ENOMEM;

        gl->gl_flags = 0;
        gl->gl_name = name;
        atomic_set(&gl->gl_ref, 1);
        gl->gl_state = LM_ST_UNLOCKED;
        gl->gl_target = LM_ST_UNLOCKED;
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        gl->gl_hash = hash;
        gl->gl_ops = glops;
        gl->gl_stamp = jiffies;
        gl->gl_tchange = jiffies;
        gl->gl_object = NULL;
        gl->gl_sbd = sdp;
        gl->gl_aspace = NULL;
        INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);

        /* If this glock protects actual on-disk data or metadata blocks,
           create a VFS inode to manage the pages/buffers holding them. */
        if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
                gl->gl_aspace = gfs2_aspace_get(sdp);
                if (!gl->gl_aspace) {
                        error = -ENOMEM;
                        goto fail;
                }
        }

        error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
        if (error)
                goto fail_aspace;

        write_lock(gl_lock_addr(hash));
        tmp = search_bucket(hash, sdp, &name);
        if (tmp) {
                write_unlock(gl_lock_addr(hash));
                glock_free(gl);
                gl = tmp;
        } else {
                hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
                write_unlock(gl_lock_addr(hash));
        }

        *glp = gl;

        return 0;

fail_aspace:
        if (gl->gl_aspace)
                gfs2_aspace_put(gl->gl_aspace);
fail:
        kmem_cache_free(gfs2_glock_cachep, gl);
        return error;
}

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        gh->gh_owner_pid = get_pid(task_pid(current));
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_iflags = 0;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        put_pid(gh->gh_owner_pid);
        gfs2_glock_put(gh->gh_gl);
        gh->gh_gl = NULL;
        gh->gh_ip = 0;
}

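/* wait_on_bit() action: nothing to do but sleep until the bit clears */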
static int just_schedule(void *word)
{
        schedule();
        return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
        might_sleep();
        wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
        might_sleep();
        wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: the delay (in jiffies) before the demote should take effect
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
                            unsigned long delay)
{
        int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

        set_bit(bit, &gl->gl_flags);
        if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
                gl->gl_demote_state = state;
                gl->gl_demote_time = jiffies;
        } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
                        gl->gl_demote_state != state) {
                gl->gl_demote_state = LM_ST_UNLOCKED;
        }
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
        wait_on_holder(gh);
        return gh->gh_error;
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        if (seq) {
                struct gfs2_glock_iter *gi = seq->private;
                /* Bound the output to the scratch buffer and print it
                   as data rather than as a format string */
                vsnprintf(gi->string, sizeof(gi->string), fmt, args);
                seq_printf(seq, "%s", gi->string);
        } else {
                printk(KERN_ERR " ");
                vprintk(fmt, args);
        }
        va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct list_head *insert_pt = NULL;
        struct gfs2_holder *gh2;
        int try_lock = 0;

        BUG_ON(gh->gh_owner_pid == NULL);
        if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
                BUG();

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        try_lock = 1;
                if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
                        goto fail;
        }

        list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
                if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
                    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
                        goto trap_recursive;
                if (try_lock &&
                    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
                    !may_grant(gl, gh)) {
fail:
                        gh->gh_error = GLR_TRYFAILED;
                        gfs2_holder_wake(gh);
                        return;
                }
                if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
                        continue;
                if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
                        insert_pt = &gh2->gh_list;
        }
        if (likely(insert_pt == NULL)) {
                list_add_tail(&gh->gh_list, &gl->gl_holders);
                if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
                        goto do_cancel;
                return;
        }
        list_add_tail(&gh->gh_list, insert_pt);
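        /* A priority request was queued: if the holder at the head of
           the queue is not itself a priority request, ask the lock
           module to cancel it so the new request can jump ahead */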
do_cancel:
        gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
        if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
                spin_unlock(&gl->gl_spin);
                if (sdp->sd_lockstruct.ls_ops->lm_cancel)
                        sdp->sd_lockstruct.ls_ops->lm_cancel(gl->gl_lock);
                spin_lock(&gl->gl_spin);
        }
        return;

trap_recursive:
        print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
        printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
        printk(KERN_ERR "lock type: %d req lock state : %d\n",
               gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
        print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
        printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
        printk(KERN_ERR "lock type: %d req lock state : %d\n",
               gh->gh_gl->gl_name.ln_type, gh->gh_state);
        __dump_glock(NULL, gl);
        BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int error = 0;

        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return -EIO;

        spin_lock(&gl->gl_spin);
        add_to_queue(gh);
        run_queue(gl, 1);
        spin_unlock(&gl->gl_spin);

        if (!(gh->gh_flags & GL_ASYNC))
                error = gfs2_glock_wait(gh);

        return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
        return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned delay = 0;
        int fast_path = 0;

        spin_lock(&gl->gl_spin);
        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED, 0);

        list_del_init(&gh->gh_list);
        if (find_first_holder(gl) == NULL) {
                if (glops->go_unlock) {
                        GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
                        spin_unlock(&gl->gl_spin);
                        glops->go_unlock(gh);
                        spin_lock(&gl->gl_spin);
                        clear_bit(GLF_LOCK, &gl->gl_flags);
                }
                gl->gl_stamp = jiffies;
                if (list_empty(&gl->gl_holders) &&
                    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
                    !test_bit(GLF_DEMOTE, &gl->gl_flags))
                        fast_path = 1;
        }
        spin_unlock(&gl->gl_spin);
        if (likely(fast_path))
                return;

        gfs2_glock_hold(gl);
        if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
            !test_bit(GLF_DEMOTE, &gl->gl_flags))
                delay = gl->gl_ops->go_min_hold_time;
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

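/**
 * gfs2_glock_dq_wait - dequeue a holder and wait for any demote to complete
 * @gh: the glock holder
 *
 */
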
void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        gfs2_glock_dq(gh);
        wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
                      const struct gfs2_glock_operations *glops,
                      unsigned int state, int flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
        const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
        const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
        const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

        if (a->ln_number > b->ln_number)
                return 1;
        if (a->ln_number < b->ln_number)
                return -1;
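        /* Equal lock numbers must mean different glock types; the same
           glock must never appear twice in one request */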
        BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
        return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: placeholder for the sorted array of holder pointers
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
                     struct gfs2_holder **p)
{
        unsigned int x;
        int error = 0;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        struct gfs2_holder *tmp[4];
        struct gfs2_holder **pph = tmp;
        int error = 0;

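        /* Up to four holders fit in the on-stack array; larger requests
           allocate a temporary array of pointers */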
        switch(num_gh) {
        case 0:
                return 0;
        case 1:
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        default:
                if (num_gh <= 4)
                        break;
                pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
                if (!pph)
                        return -ENOMEM;
        }

        error = nq_m_sync(num_gh, ghs, pph);

        if (pph != tmp)
                kfree(pph);

        return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release and uninitialize multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq_uninit(&ghs[x]);
}

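/**
 * gfs2_lm_hold_lvb - ask the lock module for a reference to the LVB
 * @sdp: the filesystem
 * @lock: the lock module's private lock data
 * @lvbp: the lock value block is returned here
 *
 * Returns: errno (-EIO after a withdraw; 0 with no lock module)
 */
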
static int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char **lvbp)
{
        int error = -EIO;
        if (!sdp->sd_lockstruct.ls_ops->lm_hold_lvb)
                return 0;
        if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                error = sdp->sd_lockstruct.ls_ops->lm_hold_lvb(lock, lvbp);
        return error;
}

/**
 * gfs2_lvb_hold - attach a LVB to a glock
 * @gl: The glock in question
 *
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
        int error;

        if (!atomic_read(&gl->gl_lvb_count)) {
                error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
                if (error)
                        return error;
                gfs2_glock_hold(gl);
        }
        atomic_inc(&gl->gl_lvb_count);

        return 0;
}

/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        gfs2_glock_hold(gl);
        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
        if (atomic_dec_and_test(&gl->gl_lvb_count)) {
                if (sdp->sd_lockstruct.ls_ops->lm_unhold_lvb)
                        sdp->sd_lockstruct.ls_ops->lm_unhold_lvb(gl->gl_lock, gl->gl_lvb);
                gl->gl_lvb = NULL;
                gfs2_glock_put(gl);
        }
        gfs2_glock_put(gl);
}

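/**
 * blocking_cb - handle a demote callback from the lock module
 * @sdp: the filesystem
 * @name: the name of the glock to demote
 * @state: the state the remote node wants the glock dropped to
 *
 * The demote is delayed until the glock's minimum hold time has
 * expired.
 */
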
static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
                        unsigned int state)
{
        struct gfs2_glock *gl;
        unsigned long delay = 0;
        unsigned long holdtime;
        unsigned long now = jiffies;

        gl = gfs2_glock_find(sdp, name);
        if (!gl)
                return;

        holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
        if (time_before(now, holdtime))
                delay = holdtime - now;
        if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
                delay = gl->gl_ops->go_min_hold_time;

        spin_lock(&gl->gl_spin);
        handle_callback(gl, state, delay);
        spin_unlock(&gl->gl_spin);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

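/**
 * gfs2_jdesc_make_dirty - mark another node's journal as needing recovery
 * @sdp: the filesystem
 * @jid: the id of the journal to recover
 *
 */
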
static void gfs2_jdesc_make_dirty(struct gfs2_sbd *sdp, unsigned int jid)
{
        struct gfs2_jdesc *jd;

        spin_lock(&sdp->sd_jindex_spin);
        list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
                if (jd->jd_jid != jid)
                        continue;
                jd->jd_dirty = 1;
                break;
        }
        spin_unlock(&sdp->sd_jindex_spin);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @cb_data: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
{
        struct gfs2_sbd *sdp = cb_data;

        switch (type) {
        case LM_CB_NEED_E:
                blocking_cb(sdp, data, LM_ST_UNLOCKED);
                return;

        case LM_CB_NEED_D:
                blocking_cb(sdp, data, LM_ST_DEFERRED);
                return;

        case LM_CB_NEED_S:
                blocking_cb(sdp, data, LM_ST_SHARED);
                return;

        case LM_CB_ASYNC: {
                struct lm_async_cb *async = data;
                struct gfs2_glock *gl;

                down_read(&gfs2_umount_flush_sem);
                gl = gfs2_glock_find(sdp, &async->lc_name);
                if (gfs2_assert_warn(sdp, gl)) {
                        up_read(&gfs2_umount_flush_sem);
                        return;
                }
                gl->gl_reply = async->lc_ret;
                set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                        gfs2_glock_put(gl);
                up_read(&gfs2_umount_flush_sem);
                return;
        }

        case LM_CB_NEED_RECOVERY:
                gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
                if (sdp->sd_recoverd_process)
                        wake_up_process(sdp->sd_recoverd_process);
                return;

        default:
                gfs2_assert_warn(sdp, 0);
                return;
        }
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;

        if (gl->gl_state == LM_ST_UNLOCKED)
                return 0;
        if (!list_empty(&gl->gl_holders))
                return 0;
        if (glops->go_demote_ok)
                return glops->go_demote_ok(gl);
        return 1;
}

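/**
 * gfs2_shrink_glock_memory - VM pressure callback to trim the glock LRU
 * @nr: the number of glocks we are asked to shed (0 means just report)
 * @gfp_mask: the allocation context of the caller
 *
 * Glocks for which demote_ok() agrees are demoted to LM_ST_UNLOCKED;
 * the rest are put back on the LRU list.
 *
 * Returns: an estimate of the remaining reclaimable glocks, or -1
 *          when called without __GFP_FS
 */
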
static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
{
        struct gfs2_glock *gl;
        int may_demote;
        int nr_skipped = 0;
        int got_ref = 0;
        LIST_HEAD(skipped);

        if (nr == 0)
                goto out;

        if (!(gfp_mask & __GFP_FS))
                return -1;

        spin_lock(&lru_lock);
        while(nr && !list_empty(&lru_list)) {
                gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
                list_del_init(&gl->gl_lru);
                atomic_dec(&lru_count);

                /* Test for being demotable */
                if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                        gfs2_glock_hold(gl);
                        got_ref = 1;
                        spin_unlock(&lru_lock);
                        spin_lock(&gl->gl_spin);
                        may_demote = demote_ok(gl);
                        spin_unlock(&gl->gl_spin);
                        clear_bit(GLF_LOCK, &gl->gl_flags);
                        if (may_demote) {
                                handle_callback(gl, LM_ST_UNLOCKED, 0);
                                nr--;
                                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                                        gfs2_glock_put(gl);
                        }
                        spin_lock(&lru_lock);
                        if (may_demote)
                                continue;
                }
                if (list_empty(&gl->gl_lru) &&
                    (atomic_read(&gl->gl_ref) <= (2 + got_ref))) {
                        nr_skipped++;
                        list_add(&gl->gl_lru, &skipped);
                }
                if (got_ref) {
                        spin_unlock(&lru_lock);
                        gfs2_glock_put(gl);
                        spin_lock(&lru_lock);
                        got_ref = 0;
                }
        }
        list_splice(&skipped, &lru_list);
        atomic_add(nr_skipped, &lru_count);
        spin_unlock(&lru_lock);
out:
        return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker glock_shrinker = {
        .shrink = gfs2_shrink_glock_memory,
        .seeks = DEFAULT_SEEKS,
};

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the index of the bucket to examine
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
                          unsigned int hash)
{
        struct gfs2_glock *gl, *prev = NULL;
        int has_entries = 0;
        struct hlist_head *head = &gl_hash_table[hash].hb_list;

        read_lock(gl_lock_addr(hash));
        /* Can't use hlist_for_each_entry - don't want prefetch here */
        if (hlist_empty(head))
                goto out;
        gl = list_entry(head->first, struct gfs2_glock, gl_list);
        while(1) {
                if (!sdp || gl->gl_sbd == sdp) {
                        gfs2_glock_hold(gl);
                        read_unlock(gl_lock_addr(hash));
                        if (prev)
                                gfs2_glock_put(prev);
                        prev = gl;
                        examiner(gl);
                        has_entries = 1;
                        read_lock(gl_lock_addr(hash));
                }
                if (gl->gl_list.next == NULL)
                        break;
                gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
        }
out:
        read_unlock(gl_lock_addr(hash));
        if (prev)
                gfs2_glock_put(prev);
        cond_resched();
        return has_entries;
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
        spin_lock(&lru_lock);
        if (!list_empty(&gl->gl_lru)) {
                list_del_init(&gl->gl_lru);
                atomic_dec(&lru_count);
        }
        spin_unlock(&lru_lock);

        spin_lock(&gl->gl_spin);
        if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED)
                handle_callback(gl, LM_ST_UNLOCKED, 0);
        spin_unlock(&gl->gl_spin);
        gfs2_glock_hold(gl);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                gfs2_glock_put(gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
        unsigned long t;
        unsigned int x;
        int cont;

        t = jiffies;

        for (;;) {
                cont = 0;
                for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
                        if (examine_bucket(clear_glock, sdp, x))
                                cont = 1;
                }

                if (!cont)
                        break;

                if (time_after_eq(jiffies,
                                  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
                        fs_warn(sdp, "Unmount seems to be stalled. "
                                     "Dumping lock state...\n");
                        gfs2_dump_lockstate(sdp);
                        t = jiffies;
                }

                down_write(&gfs2_umount_flush_sem);
                invalidate_inodes(sdp->sd_vfs);
                up_write(&gfs2_umount_flush_sem);
                msleep(10);
        }
}

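/**
 * gfs2_glock_finish_truncate - resume an interrupted truncate
 * @ip: the inode being truncated
 *
 * Completes the truncate and then re-runs the glock's holder queue.
 */
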
void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
        struct gfs2_glock *gl = ip->i_gl;
        int ret;

        ret = gfs2_truncatei_resume(ip);
        gfs2_assert_withdraw(gl->gl_sbd, ret == 0);

        spin_lock(&gl->gl_spin);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl, 1);
        spin_unlock(&gl->gl_spin);
}

static const char *state2str(unsigned state)
{
        switch(state) {
        case LM_ST_UNLOCKED:
                return "UN";
        case LM_ST_SHARED:
                return "SH";
        case LM_ST_DEFERRED:
                return "DF";
        case LM_ST_EXCLUSIVE:
                return "EX";
        }
        return "??";
}

static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
{
        char *p = buf;
        if (flags & LM_FLAG_TRY)
                *p++ = 't';
        if (flags & LM_FLAG_TRY_1CB)
                *p++ = 'T';
        if (flags & LM_FLAG_NOEXP)
                *p++ = 'e';
        if (flags & LM_FLAG_ANY)
                *p++ = 'A';
        if (flags & LM_FLAG_PRIORITY)
                *p++ = 'p';
        if (flags & GL_ASYNC)
                *p++ = 'a';
        if (flags & GL_EXACT)
                *p++ = 'E';
        if (flags & GL_NOCACHE)
                *p++ = 'c';
        if (test_bit(HIF_HOLDER, &iflags))
                *p++ = 'H';
        if (test_bit(HIF_WAIT, &iflags))
                *p++ = 'W';
        if (test_bit(HIF_FIRST, &iflags))
                *p++ = 'F';
        *p = 0;
        return buf;
}
1640
1641 /**
1642  * dump_holder - print information about a glock holder
1643  * @seq: the seq_file struct
1644  * @gh: the glock holder
1645  *
1646  * Returns: 0 (a holder is always dumped in full)
1647  */
1648
1649 static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
1650 {
1651         struct task_struct *gh_owner = NULL;
1652         char buffer[KSYM_SYMBOL_LEN];
1653         char flags_buf[32];
1654
1655         sprint_symbol(buffer, gh->gh_ip);
1656         if (gh->gh_owner_pid)
1657                 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
1658         gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %s\n",
1659                   state2str(gh->gh_state),
1660                   hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
1661                   gh->gh_error,
1662                   gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
1663                   gh_owner ? gh_owner->comm : "(ended)", buffer);
1664         return 0;
1665 }
1666
1667 static const char *gflags2str(char *buf, const unsigned long *gflags)
1668 {
1669         char *p = buf;
1670         if (test_bit(GLF_LOCK, gflags))
1671                 *p++ = 'l';
1672         if (test_bit(GLF_DEMOTE, gflags))
1673                 *p++ = 'D';
1674         if (test_bit(GLF_PENDING_DEMOTE, gflags))
1675                 *p++ = 'd';
1676         if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
1677                 *p++ = 'p';
1678         if (test_bit(GLF_DIRTY, gflags))
1679                 *p++ = 'y';
1680         if (test_bit(GLF_LFLUSH, gflags))
1681                 *p++ = 'f';
1682         if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
1683                 *p++ = 'i';
1684         if (test_bit(GLF_REPLY_PENDING, gflags))
1685                 *p++ = 'r';
1686         *p = 0;
1687         return buf;
1688 }
1689
1690 /**
1691  * __dump_glock - print information about a glock
1692  * @seq: The seq_file struct
1693  * @gl: the glock
1694  *
1695  * The file format is as follows:
1696  * One line per object; capital letters indicate the object type:
1697  * G = glock, I = inode, R = rgrp, H = holder. Glocks are not indented;
1698  * other objects are indented by a single space and follow the glock to
1699  * which they relate. Fields are indicated by lower case letters
1700  * followed by a colon and the field value, except for strings, which
1701  * are enclosed in [] so that it is possible to see whether they are
1702  * composed of spaces. The fields are: n = number (id of the object),
1703  * f = flags, t = type, s = state, r = refcount, e = error, p = pid.
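 *
 * A purely illustrative example (all values made up) of one glock and
 * one of its holders as printed by this function:
 *
 *   G:  s:EX n:2/1532 f:y t:EX d:UN/0 l:0 a:1 r:4
 *    H: s:EX f:H e:0 p:4466 [gfs2_quotad] gfs2_glock_nq+0x1c2/0x3d0 [gfs2]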
1704  *
1705  * Returns: 0 on success, or an error returned by the ->go_dump() callback
1706  */
1707
1708 static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
1709 {
1710         const struct gfs2_glock_operations *glops = gl->gl_ops;
1711         unsigned long long dtime;
1712         const struct gfs2_holder *gh;
1713         char gflags_buf[32];
1714         int error = 0;
1715
1716         dtime = jiffies - gl->gl_demote_time;
1717         dtime *= 1000000/HZ; /* demote time in uSec */
1718         if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
1719                 dtime = 0;
1720         gfs2_print_dbg(seq, "G:  s:%s n:%u/%llu f:%s t:%s d:%s/%llu l:%d a:%d r:%d\n",
1721                   state2str(gl->gl_state),
1722                   gl->gl_name.ln_type,
1723                   (unsigned long long)gl->gl_name.ln_number,
1724                   gflags2str(gflags_buf, &gl->gl_flags),
1725                   state2str(gl->gl_target),
1726                   state2str(gl->gl_demote_state), dtime,
1727                   atomic_read(&gl->gl_lvb_count),
1728                   atomic_read(&gl->gl_ail_count),
1729                   atomic_read(&gl->gl_ref));
1730
1731         list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1732                 error = dump_holder(seq, gh);
1733                 if (error)
1734                         goto out;
1735         }
1736         if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
1737                 error = glops->go_dump(seq, gl);
1738 out:
1739         return error;
1740 }
1741
1742 static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
1743 {
1744         int ret;
1745         spin_lock(&gl->gl_spin);
1746         ret = __dump_glock(seq, gl);
1747         spin_unlock(&gl->gl_spin);
1748         return ret;
1749 }
1750
1751 /**
1752  * gfs2_dump_lockstate - print out the current lockstate
1753  * @sdp: the filesystem
1754  *
1755  * Dumps the state of every glock belonging to @sdp to the console
1756  * (dump_glock() is called with a NULL seq_file).
1757  *
1758  */
1759
1760 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
1761 {
1762         struct gfs2_glock *gl;
1763         struct hlist_node *h;
1764         unsigned int x;
1765         int error = 0;
1766
1767         for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1768
1769                 read_lock(gl_lock_addr(x));
1770
1771                 hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
1772                         if (gl->gl_sbd != sdp)
1773                                 continue;
1774
1775                         error = dump_glock(NULL, gl);
1776                         if (error)
1777                                 break;
1778                 }
1779
1780                 read_unlock(gl_lock_addr(x));
1781
1782                 if (error)
1783                         break;
1784         }
1785
1786
1787         return error;
1788 }
1789
1790
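/*
 * Module-init time setup: initialise the hash table heads and, on SMP
 * and lock-debugging builds, the bucket locks; then create the glock
 * workqueue and register the LRU shrinker.
 */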
1791 int __init gfs2_glock_init(void)
1792 {
1793         unsigned i;
1794         for (i = 0; i < GFS2_GL_HASH_SIZE; i++) {
1795                 INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
1796         }
1797 #ifdef GL_HASH_LOCK_SZ
1798         for (i = 0; i < GL_HASH_LOCK_SZ; i++) {
1799                 rwlock_init(&gl_hash_locks[i]);
1800         }
1801 #endif
1802
1803         glock_workqueue = create_workqueue("glock_workqueue");
1804         if (!glock_workqueue) /* create_workqueue() returns NULL on failure */
1805                 return -ENOMEM;
1806
1807         register_shrinker(&glock_shrinker);
1808
1809         return 0;
1810 }
1811
1812 void gfs2_glock_exit(void)
1813 {
1814         unregister_shrinker(&glock_shrinker);
1815         destroy_workqueue(glock_workqueue);
1816 }
1817
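/*
 * Note: the bucket walk below appears to rely on gl_list being the
 * first member of struct gfs2_glock, so that hlist_entry() applied to
 * a NULL ->next pointer also evaluates to NULL and ends the scan.
 */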
1818 static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
1819 {
1820         struct gfs2_glock *gl;
1821
1822 restart:
1823         read_lock(gl_lock_addr(gi->hash));
1824         gl = gi->gl;
1825         if (gl) {
1826                 gi->gl = hlist_entry(gl->gl_list.next,
1827                                      struct gfs2_glock, gl_list);
1828         } else {
1829                 gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
1830                                      struct gfs2_glock, gl_list);
1831         }
1832         if (gi->gl)
1833                 gfs2_glock_hold(gi->gl);
1834         read_unlock(gl_lock_addr(gi->hash));
1835         if (gl)
1836                 gfs2_glock_put(gl);
1837         while (gi->gl == NULL) {
1838                 gi->hash++;
1839                 if (gi->hash >= GFS2_GL_HASH_SIZE)
1840                         return 1;
1841                 read_lock(gl_lock_addr(gi->hash));
1842                 gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
1843                                      struct gfs2_glock, gl_list);
1844                 if (gi->gl)
1845                         gfs2_glock_hold(gi->gl);
1846                 read_unlock(gl_lock_addr(gi->hash));
1847         }
1848
1849         if (gi->sdp != gi->gl->gl_sbd)
1850                 goto restart;
1851
1852         return 0;
1853 }
1854
1855 static void gfs2_glock_iter_free(struct gfs2_glock_iter *gi)
1856 {
1857         if (gi->gl)
1858                 gfs2_glock_put(gi->gl);
1859         gi->gl = NULL;
1860 }
1861
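/*
 * seq_file iterator: ->start() re-walks from the first hash bucket and
 * advances to position *pos, ->next() steps to the following glock,
 * ->stop() drops any glock reference still held, and ->show() dumps a
 * single glock together with its holders.
 */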
1862 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
1863 {
1864         struct gfs2_glock_iter *gi = seq->private;
1865         loff_t n = *pos;
1866
1867         gi->hash = 0;
1868
1869         do {
1870                 if (gfs2_glock_iter_next(gi)) {
1871                         gfs2_glock_iter_free(gi);
1872                         return NULL;
1873                 }
1874         } while (n--);
1875
1876         return gi->gl;
1877 }
1878
1879 static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
1880                                  loff_t *pos)
1881 {
1882         struct gfs2_glock_iter *gi = seq->private;
1883
1884         (*pos)++;
1885
1886         if (gfs2_glock_iter_next(gi)) {
1887                 gfs2_glock_iter_free(gi);
1888                 return NULL;
1889         }
1890
1891         return gi->gl;
1892 }
1893
1894 static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
1895 {
1896         struct gfs2_glock_iter *gi = seq->private;
1897         gfs2_glock_iter_free(gi);
1898 }
1899
1900 static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
1901 {
1902         return dump_glock(seq, iter_ptr);
1903 }
1904
1905 static const struct seq_operations gfs2_glock_seq_ops = {
1906         .start = gfs2_glock_seq_start,
1907         .next  = gfs2_glock_seq_next,
1908         .stop  = gfs2_glock_seq_stop,
1909         .show  = gfs2_glock_seq_show,
1910 };
1911
1912 static int gfs2_debugfs_open(struct inode *inode, struct file *file)
1913 {
1914         int ret = seq_open_private(file, &gfs2_glock_seq_ops,
1915                                    sizeof(struct gfs2_glock_iter));
1916         if (ret == 0) {
1917                 struct seq_file *seq = file->private_data;
1918                 struct gfs2_glock_iter *gi = seq->private;
1919                 gi->sdp = inode->i_private;
1920         }
1921         return ret;
1922 }
1923
1924 static const struct file_operations gfs2_debug_fops = {
1925         .owner   = THIS_MODULE,
1926         .open    = gfs2_debugfs_open,
1927         .read    = seq_read,
1928         .llseek  = seq_lseek,
1929         .release = seq_release_private,
1930 };
1931
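/*
 * Create the per-filesystem debugfs directory and its "glocks" file.
 * With debugfs mounted in the usual place, the dump can then typically
 * be read from /sys/kernel/debug/gfs2/<table name>/glocks.
 */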
1932 int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
1933 {
1934         sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
1935         if (!sdp->debugfs_dir)
1936                 return -ENOMEM;
1937         sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
1938                                                          S_IFREG | S_IRUGO,
1939                                                          sdp->debugfs_dir, sdp,
1940                                                          &gfs2_debug_fops);
1941         if (!sdp->debugfs_dentry_glocks)
1942                 return -ENOMEM;
1943
1944         return 0;
1945 }
1946
1947 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
1948 {
1949         if (sdp && sdp->debugfs_dir) {
1950                 if (sdp->debugfs_dentry_glocks) {
1951                         debugfs_remove(sdp->debugfs_dentry_glocks);
1952                         sdp->debugfs_dentry_glocks = NULL;
1953                 }
1954                 debugfs_remove(sdp->debugfs_dir);
1955                 sdp->debugfs_dir = NULL;
1956         }
1957 }
1958
1959 int gfs2_register_debugfs(void)
1960 {
1961         gfs2_root = debugfs_create_dir("gfs2", NULL);
1962         return gfs2_root ? 0 : -ENOMEM;
1963 }
1964
1965 void gfs2_unregister_debugfs(void)
1966 {
1967         debugfs_remove(gfs2_root);
1968         gfs2_root = NULL;
1969 }