fs/xfs/linux-2.6/xfs_buf.c
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include <linux/stddef.h>
20 #include <linux/errno.h>
21 #include <linux/slab.h>
22 #include <linux/pagemap.h>
23 #include <linux/init.h>
24 #include <linux/vmalloc.h>
25 #include <linux/bio.h>
26 #include <linux/sysctl.h>
27 #include <linux/proc_fs.h>
28 #include <linux/workqueue.h>
29 #include <linux/percpu.h>
30 #include <linux/blkdev.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/migrate.h>
34 #include <linux/backing-dev.h>
35 #include <linux/freezer.h>
36
37 static kmem_zone_t *xfs_buf_zone;
38 static kmem_shaker_t xfs_buf_shake;
39 STATIC int xfsbufd(void *);
40 STATIC int xfsbufd_wakeup(int, gfp_t);
41 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
42
43 static struct workqueue_struct *xfslogd_workqueue;
44 struct workqueue_struct *xfsdatad_workqueue;
45
46 #ifdef XFS_BUF_TRACE
47 void
48 xfs_buf_trace(
49         xfs_buf_t       *bp,
50         char            *id,
51         void            *data,
52         void            *ra)
53 {
54         ktrace_enter(xfs_buf_trace_buf,
55                 bp, id,
56                 (void *)(unsigned long)bp->b_flags,
57                 (void *)(unsigned long)bp->b_hold.counter,
58                 (void *)(unsigned long)bp->b_sema.count.counter,
59                 (void *)current,
60                 data, ra,
61                 (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
62                 (void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
63                 (void *)(unsigned long)bp->b_buffer_length,
64                 NULL, NULL, NULL, NULL, NULL);
65 }
66 ktrace_t *xfs_buf_trace_buf;
67 #define XFS_BUF_TRACE_SIZE      4096
68 #define XB_TRACE(bp, id, data)  \
69         xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
70 #else
71 #define XB_TRACE(bp, id, data)  do { } while (0)
72 #endif
73
74 #ifdef XFS_BUF_LOCK_TRACKING
75 # define XB_SET_OWNER(bp)       ((bp)->b_last_holder = current->pid)
76 # define XB_CLEAR_OWNER(bp)     ((bp)->b_last_holder = -1)
77 # define XB_GET_OWNER(bp)       ((bp)->b_last_holder)
78 #else
79 # define XB_SET_OWNER(bp)       do { } while (0)
80 # define XB_CLEAR_OWNER(bp)     do { } while (0)
81 # define XB_GET_OWNER(bp)       do { } while (0)
82 #endif
83
84 #define xb_to_gfp(flags) \
85         ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
86           ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
87
88 #define xb_to_km(flags) \
89          (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
90
91 #define xfs_buf_allocate(flags) \
92         kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
93 #define xfs_buf_deallocate(bp) \
94         kmem_zone_free(xfs_buf_zone, (bp));
95
96 /*
97  *      Page Region interfaces.
98  *
99  *      For pages in filesystems where the blocksize is smaller than the
100  *      pagesize, we use the page->private field (long) to hold a bitmap
101  *      of uptodate regions within the page.
102  *
103  *      Each such region is "bytes per page / bits per long" bytes long.
104  *
105  *      NBPPR == number-of-bytes-per-page-region
106  *      BTOPR == bytes-to-page-region (rounded up)
107  *      BTOPRT == bytes-to-page-region-truncated (rounded down)
108  */
109 #if (BITS_PER_LONG == 32)
110 #define PRSHIFT         (PAGE_CACHE_SHIFT - 5)  /* (32 == 1<<5) */
111 #elif (BITS_PER_LONG == 64)
112 #define PRSHIFT         (PAGE_CACHE_SHIFT - 6)  /* (64 == 1<<6) */
113 #else
114 #error BITS_PER_LONG must be 32 or 64
115 #endif
116 #define NBPPR           (PAGE_CACHE_SIZE/BITS_PER_LONG)
117 #define BTOPR(b)        (((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
118 #define BTOPRT(b)       (((unsigned int)(b) >> PRSHIFT))
119
120 STATIC unsigned long
121 page_region_mask(
122         size_t          offset,
123         size_t          length)
124 {
125         unsigned long   mask;
126         int             first, final;
127
128         first = BTOPR(offset);
129         final = BTOPRT(offset + length - 1);
130         first = min(first, final);
131
132         mask = ~0UL;
133         mask <<= BITS_PER_LONG - (final - first + 1);
134         mask >>= BITS_PER_LONG - (final + 1);
135
136         ASSERT(offset + length <= PAGE_CACHE_SIZE);
137         ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);
138
139         return mask;
140 }
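/*
 * Worked example (illustrative, not part of the original source): with
 * 4096-byte pages and 64-bit longs, NBPPR = 4096/64 = 64 bytes per region
 * and PRSHIFT = 12 - 6 = 6.  For offset = 512 and length = 1024,
 * first = BTOPR(512) = 8 and final = BTOPRT(1535) = 23, so the returned
 * mask has bits 8..23 set - sixteen 64-byte regions covering bytes
 * [512, 1536) of the page.
 */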
141
142 STATIC_INLINE void
143 set_page_region(
144         struct page     *page,
145         size_t          offset,
146         size_t          length)
147 {
148         set_page_private(page,
149                 page_private(page) | page_region_mask(offset, length));
150         if (page_private(page) == ~0UL)
151                 SetPageUptodate(page);
152 }
153
154 STATIC_INLINE int
155 test_page_region(
156         struct page     *page,
157         size_t          offset,
158         size_t          length)
159 {
160         unsigned long   mask = page_region_mask(offset, length);
161
162         return (mask && (page_private(page) & mask) == mask);
163 }
164
165 /*
166  *      Mapping of multi-page buffers into contiguous virtual space
167  */
168
169 typedef struct a_list {
170         void            *vm_addr;
171         struct a_list   *next;
172 } a_list_t;
173
174 static a_list_t         *as_free_head;
175 static int              as_list_len;
176 static DEFINE_SPINLOCK(as_lock);
177
178 /*
179  *      Try to batch vunmaps because they are costly.
180  */
181 STATIC void
182 free_address(
183         void            *addr)
184 {
185         a_list_t        *aentry;
186
187         aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
188         if (likely(aentry)) {
189                 spin_lock(&as_lock);
190                 aentry->next = as_free_head;
191                 aentry->vm_addr = addr;
192                 as_free_head = aentry;
193                 as_list_len++;
194                 spin_unlock(&as_lock);
195         } else {
196                 vunmap(addr);
197         }
198 }
199
200 STATIC void
201 purge_addresses(void)
202 {
203         a_list_t        *aentry, *old;
204
205         if (as_free_head == NULL)
206                 return;
207
208         spin_lock(&as_lock);
209         aentry = as_free_head;
210         as_free_head = NULL;
211         as_list_len = 0;
212         spin_unlock(&as_lock);
213
214         while ((old = aentry) != NULL) {
215                 vunmap(aentry->vm_addr);
216                 aentry = aentry->next;
217                 kfree(old);
218         }
219 }
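/*
 * Note on usage within this file: free_address() is called from
 * xfs_buf_free() when a mapped multi-page buffer is torn down, and
 * purge_addresses() is called from _xfs_buf_map_pages() below once the
 * free list grows past 64 entries, so the cost of vunmap() is amortised
 * across many buffer releases.
 */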
220
221 /*
222  *      Internal xfs_buf_t object manipulation
223  */
224
225 STATIC void
226 _xfs_buf_initialize(
227         xfs_buf_t               *bp,
228         xfs_buftarg_t           *target,
229         xfs_off_t               range_base,
230         size_t                  range_length,
231         xfs_buf_flags_t         flags)
232 {
233         /*
234          * We don't want certain flags to appear in b_flags.
235          */
236         flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
237
238         memset(bp, 0, sizeof(xfs_buf_t));
239         atomic_set(&bp->b_hold, 1);
240         init_MUTEX_LOCKED(&bp->b_iodonesema);
241         INIT_LIST_HEAD(&bp->b_list);
242         INIT_LIST_HEAD(&bp->b_hash_list);
243         init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
244         XB_SET_OWNER(bp);
245         bp->b_target = target;
246         bp->b_file_offset = range_base;
247         /*
248          * Set buffer_length and count_desired to the same value initially.
249          * I/O routines should use count_desired, which will be the same in
250          * most cases but may be reset (e.g. XFS recovery).
251          */
252         bp->b_buffer_length = bp->b_count_desired = range_length;
253         bp->b_flags = flags;
254         bp->b_bn = XFS_BUF_DADDR_NULL;
255         atomic_set(&bp->b_pin_count, 0);
256         init_waitqueue_head(&bp->b_waiters);
257
258         XFS_STATS_INC(xb_create);
259         XB_TRACE(bp, "initialize", target);
260 }
261
262 /*
263  *      Allocate a page array capable of holding a specified number
264  *      of pages, and point the page buf at it.
265  */
266 STATIC int
267 _xfs_buf_get_pages(
268         xfs_buf_t               *bp,
269         int                     page_count,
270         xfs_buf_flags_t         flags)
271 {
272         /* Make sure that we have a page list */
273         if (bp->b_pages == NULL) {
274                 bp->b_offset = xfs_buf_poff(bp->b_file_offset);
275                 bp->b_page_count = page_count;
276                 if (page_count <= XB_PAGES) {
277                         bp->b_pages = bp->b_page_array;
278                 } else {
279                         bp->b_pages = kmem_alloc(sizeof(struct page *) *
280                                         page_count, xb_to_km(flags));
281                         if (bp->b_pages == NULL)
282                                 return -ENOMEM;
283                 }
284                 memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
285         }
286         return 0;
287 }
288
289 /*
290  *      Frees b_pages if it was allocated.
291  */
292 STATIC void
293 _xfs_buf_free_pages(
294         xfs_buf_t       *bp)
295 {
296         if (bp->b_pages != bp->b_page_array) {
297                 kmem_free(bp->b_pages,
298                           bp->b_page_count * sizeof(struct page *));
299         }
300 }
301
302 /*
303  *      Releases the specified buffer.
304  *
305  *      The modification state of any associated pages is left unchanged.
306  *      The buffer must not be on any hash - use xfs_buf_rele instead for
307  *      hashed and refcounted buffers.
308  */
309 void
310 xfs_buf_free(
311         xfs_buf_t               *bp)
312 {
313         XB_TRACE(bp, "free", 0);
314
315         ASSERT(list_empty(&bp->b_hash_list));
316
317         if (bp->b_flags & _XBF_PAGE_CACHE) {
318                 uint            i;
319
320                 if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
321                         free_address(bp->b_addr - bp->b_offset);
322
323                 for (i = 0; i < bp->b_page_count; i++) {
324                         struct page     *page = bp->b_pages[i];
325
326                         ASSERT(!PagePrivate(page));
327                         page_cache_release(page);
328                 }
329                 _xfs_buf_free_pages(bp);
330         } else if (bp->b_flags & _XBF_KMEM_ALLOC) {
331                  /*
332                   * XXX(hch): bp->b_count_desired might be incorrect (see
333                   * xfs_buf_associate_memory for details), but fortunately
334                   * the Linux version of kmem_free ignores the len argument..
335                   */
336                 kmem_free(bp->b_addr, bp->b_count_desired);
337                 _xfs_buf_free_pages(bp);
338         }
339
340         xfs_buf_deallocate(bp);
341 }
342
343 /*
344  *      Finds all pages for the buffer in question and builds its page list.
345  */
346 STATIC int
347 _xfs_buf_lookup_pages(
348         xfs_buf_t               *bp,
349         uint                    flags)
350 {
351         struct address_space    *mapping = bp->b_target->bt_mapping;
352         size_t                  blocksize = bp->b_target->bt_bsize;
353         size_t                  size = bp->b_count_desired;
354         size_t                  nbytes, offset;
355         gfp_t                   gfp_mask = xb_to_gfp(flags);
356         unsigned short          page_count, i;
357         pgoff_t                 first;
358         xfs_off_t               end;
359         int                     error;
360
361         end = bp->b_file_offset + bp->b_buffer_length;
362         page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
363
364         error = _xfs_buf_get_pages(bp, page_count, flags);
365         if (unlikely(error))
366                 return error;
367         bp->b_flags |= _XBF_PAGE_CACHE;
368
369         offset = bp->b_offset;
370         first = bp->b_file_offset >> PAGE_CACHE_SHIFT;
371
372         for (i = 0; i < bp->b_page_count; i++) {
373                 struct page     *page;
374                 uint            retries = 0;
375
376               retry:
377                 page = find_or_create_page(mapping, first + i, gfp_mask);
378                 if (unlikely(page == NULL)) {
379                         if (flags & XBF_READ_AHEAD) {
380                                 bp->b_page_count = i;
381                                 for (i = 0; i < bp->b_page_count; i++)
382                                         unlock_page(bp->b_pages[i]);
383                                 return -ENOMEM;
384                         }
385
386                         /*
387                          * This could deadlock.
388                          *
389                          * But until all the XFS lowlevel code is revamped to
390                          * handle buffer allocation failures we can't do much.
391                          */
392                         if (!(++retries % 100))
393                                 printk(KERN_ERR
394                                         "XFS: possible memory allocation "
395                                         "deadlock in %s (mode:0x%x)\n",
396                                         __FUNCTION__, gfp_mask);
397
398                         XFS_STATS_INC(xb_page_retries);
399                         xfsbufd_wakeup(0, gfp_mask);
400                         congestion_wait(WRITE, HZ/50);
401                         goto retry;
402                 }
403
404                 XFS_STATS_INC(xb_page_found);
405
406                 nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
407                 size -= nbytes;
408
409                 ASSERT(!PagePrivate(page));
410                 if (!PageUptodate(page)) {
411                         page_count--;
412                         if (blocksize >= PAGE_CACHE_SIZE) {
413                                 if (flags & XBF_READ)
414                                         bp->b_locked = 1;
415                         } else if (!PagePrivate(page)) {
416                                 if (test_page_region(page, offset, nbytes))
417                                         page_count++;
418                         }
419                 }
420
421                 bp->b_pages[i] = page;
422                 offset = 0;
423         }
424
425         if (!bp->b_locked) {
426                 for (i = 0; i < bp->b_page_count; i++)
427                         unlock_page(bp->b_pages[i]);
428         }
429
430         if (page_count == bp->b_page_count)
431                 bp->b_flags |= XBF_DONE;
432
433         XB_TRACE(bp, "lookup_pages", (long)page_count);
434         return error;
435 }
436
437 /*
438  *      Map buffer into kernel address-space if necessary.
439  */
440 STATIC int
441 _xfs_buf_map_pages(
442         xfs_buf_t               *bp,
443         uint                    flags)
444 {
445         /* A single page buffer is always mappable */
446         if (bp->b_page_count == 1) {
447                 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
448                 bp->b_flags |= XBF_MAPPED;
449         } else if (flags & XBF_MAPPED) {
450                 if (as_list_len > 64)
451                         purge_addresses();
452                 bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
453                                         VM_MAP, PAGE_KERNEL);
454                 if (unlikely(bp->b_addr == NULL))
455                         return -ENOMEM;
456                 bp->b_addr += bp->b_offset;
457                 bp->b_flags |= XBF_MAPPED;
458         }
459
460         return 0;
461 }
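/*
 * Note: only callers that pass XBF_MAPPED get a contiguous kernel view
 * through b_addr (single-page buffers are mapped for free via
 * page_address()).  Unmapped buffers can still be accessed page by page
 * through xfs_buf_offset() and xfs_buf_iomove() later in this file.
 */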
462
463 /*
464  *      Finding and Reading Buffers
465  */
466
467 /*
468  *      Looks up, and creates if absent, a lockable buffer for
469  *      a given range of an inode.  The buffer is returned
470  *      locked.  If other overlapping buffers exist, they are
471  *      released before the new buffer is created and locked,
472  *      which may imply that this call will block until those buffers
473  *      are unlocked.  No I/O is implied by this call.
474  */
475 xfs_buf_t *
476 _xfs_buf_find(
477         xfs_buftarg_t           *btp,   /* block device target          */
478         xfs_off_t               ioff,   /* starting offset of range     */
479         size_t                  isize,  /* length of range              */
480         xfs_buf_flags_t         flags,
481         xfs_buf_t               *new_bp)
482 {
483         xfs_off_t               range_base;
484         size_t                  range_length;
485         xfs_bufhash_t           *hash;
486         xfs_buf_t               *bp, *n;
487
488         range_base = (ioff << BBSHIFT);
489         range_length = (isize << BBSHIFT);
490
491         /* Check for IOs smaller than the sector size / not sector aligned */
492         ASSERT(!(range_length < (1 << btp->bt_sshift)));
493         ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
494
495         hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];
496
497         spin_lock(&hash->bh_lock);
498
499         list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
500                 ASSERT(btp == bp->b_target);
501                 if (bp->b_file_offset == range_base &&
502                     bp->b_buffer_length == range_length) {
503                         /*
504                          * If we look at something, bring it to the
505                          * front of the list for next time.
506                          */
507                         atomic_inc(&bp->b_hold);
508                         list_move(&bp->b_hash_list, &hash->bh_list);
509                         goto found;
510                 }
511         }
512
513         /* No match found */
514         if (new_bp) {
515                 _xfs_buf_initialize(new_bp, btp, range_base,
516                                 range_length, flags);
517                 new_bp->b_hash = hash;
518                 list_add(&new_bp->b_hash_list, &hash->bh_list);
519         } else {
520                 XFS_STATS_INC(xb_miss_locked);
521         }
522
523         spin_unlock(&hash->bh_lock);
524         return new_bp;
525
526 found:
527         spin_unlock(&hash->bh_lock);
528
529         /* Attempt to get the semaphore without sleeping,
530          * if this does not work then we need to drop the
531          * spinlock and do a hard attempt on the semaphore.
532          */
533         if (down_trylock(&bp->b_sema)) {
534                 if (!(flags & XBF_TRYLOCK)) {
535                         /* wait for buffer ownership */
536                         XB_TRACE(bp, "get_lock", 0);
537                         xfs_buf_lock(bp);
538                         XFS_STATS_INC(xb_get_locked_waited);
539                 } else {
540                         /* We asked for a trylock and failed; no need
541                          * to look at file offset and length here, we
542                          * know that this buffer at least overlaps our
543                          * buffer and is locked, therefore our buffer
544                          * either does not exist or is this buffer.
545                          */
546                         xfs_buf_rele(bp);
547                         XFS_STATS_INC(xb_busy_locked);
548                         return NULL;
549                 }
550         } else {
551                 /* trylock worked */
552                 XB_SET_OWNER(bp);
553         }
554
555         if (bp->b_flags & XBF_STALE) {
556                 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
557                 bp->b_flags &= XBF_MAPPED;
558         }
559         XB_TRACE(bp, "got_lock", 0);
560         XFS_STATS_INC(xb_get_locked);
561         return bp;
562 }
563
564 /*
565  *      Assembles a buffer covering the specified range.
566  *      Storage in memory for all portions of the buffer will be allocated,
567  *      although backing storage may not be.
568  */
569 xfs_buf_t *
570 xfs_buf_get_flags(
571         xfs_buftarg_t           *target,/* target for buffer            */
572         xfs_off_t               ioff,   /* starting offset of range     */
573         size_t                  isize,  /* length of range              */
574         xfs_buf_flags_t         flags)
575 {
576         xfs_buf_t               *bp, *new_bp;
577         int                     error = 0, i;
578
579         new_bp = xfs_buf_allocate(flags);
580         if (unlikely(!new_bp))
581                 return NULL;
582
583         bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
584         if (bp == new_bp) {
585                 error = _xfs_buf_lookup_pages(bp, flags);
586                 if (error)
587                         goto no_buffer;
588         } else {
589                 xfs_buf_deallocate(new_bp);
590                 if (unlikely(bp == NULL))
591                         return NULL;
592         }
593
594         for (i = 0; i < bp->b_page_count; i++)
595                 mark_page_accessed(bp->b_pages[i]);
596
597         if (!(bp->b_flags & XBF_MAPPED)) {
598                 error = _xfs_buf_map_pages(bp, flags);
599                 if (unlikely(error)) {
600                         printk(KERN_WARNING "%s: failed to map pages\n",
601                                         __FUNCTION__);
602                         goto no_buffer;
603                 }
604         }
605
606         XFS_STATS_INC(xb_get);
607
608         /*
609          * Always fill in the block number now, the mapped cases can do
610          * their own overlay of this later.
611          */
612         bp->b_bn = ioff;
613         bp->b_count_desired = bp->b_buffer_length;
614
615         XB_TRACE(bp, "get", (unsigned long)flags);
616         return bp;
617
618  no_buffer:
619         if (flags & (XBF_LOCK | XBF_TRYLOCK))
620                 xfs_buf_unlock(bp);
621         xfs_buf_rele(bp);
622         return NULL;
623 }
624
625 xfs_buf_t *
626 xfs_buf_read_flags(
627         xfs_buftarg_t           *target,
628         xfs_off_t               ioff,
629         size_t                  isize,
630         xfs_buf_flags_t         flags)
631 {
632         xfs_buf_t               *bp;
633
634         flags |= XBF_READ;
635
636         bp = xfs_buf_get_flags(target, ioff, isize, flags);
637         if (bp) {
638                 if (!XFS_BUF_ISDONE(bp)) {
639                         XB_TRACE(bp, "read", (unsigned long)flags);
640                         XFS_STATS_INC(xb_get_read);
641                         xfs_buf_iostart(bp, flags);
642                 } else if (flags & XBF_ASYNC) {
643                         XB_TRACE(bp, "read_async", (unsigned long)flags);
644                         /*
645                          * Read ahead call which is already satisfied,
646                          * drop the buffer
647                          */
648                         goto no_buffer;
649                 } else {
650                         XB_TRACE(bp, "read_done", (unsigned long)flags);
651                         /* We do not want read in the flags */
652                         bp->b_flags &= ~XBF_READ;
653                 }
654         }
655
656         return bp;
657
658  no_buffer:
659         if (flags & (XBF_LOCK | XBF_TRYLOCK))
660                 xfs_buf_unlock(bp);
661         xfs_buf_rele(bp);
662         return NULL;
663 }
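/*
 * Illustrative sketch only, not part of the original source: a typical
 * synchronous, mapped metadata read using the interfaces defined above.
 * The block number and length are made-up example values.
 */
STATIC void
example_read_buffer(
	xfs_buftarg_t		*target)
{
	xfs_buf_t		*bp;

	/* read 8 basic blocks at daddr 0, waiting for the I/O to complete */
	bp = xfs_buf_read_flags(target, 0, 8, XBF_LOCK | XBF_MAPPED);
	if (!bp)
		return;
	if (bp->b_error) {
		xfs_buf_relse(bp);
		return;
	}

	/* bp->b_addr is valid here because XBF_MAPPED was requested */

	xfs_buf_relse(bp);		/* unlock and drop our reference */
}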
664
665 /*
666  *      If we are not low on memory then do the readahead in a deadlock
667  *      safe manner.
668  */
669 void
670 xfs_buf_readahead(
671         xfs_buftarg_t           *target,
672         xfs_off_t               ioff,
673         size_t                  isize,
674         xfs_buf_flags_t         flags)
675 {
676         struct backing_dev_info *bdi;
677
678         bdi = target->bt_mapping->backing_dev_info;
679         if (bdi_read_congested(bdi))
680                 return;
681
682         flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
683         xfs_buf_read_flags(target, ioff, isize, flags);
684 }
685
686 xfs_buf_t *
687 xfs_buf_get_empty(
688         size_t                  len,
689         xfs_buftarg_t           *target)
690 {
691         xfs_buf_t               *bp;
692
693         bp = xfs_buf_allocate(0);
694         if (bp)
695                 _xfs_buf_initialize(bp, target, 0, len, 0);
696         return bp;
697 }
698
699 static inline struct page *
700 mem_to_page(
701         void                    *addr)
702 {
703         if (((unsigned long)addr < VMALLOC_START) ||
704             ((unsigned long)addr >= VMALLOC_END)) {
705                 return virt_to_page(addr);
706         } else {
707                 return vmalloc_to_page(addr);
708         }
709 }
710
711 int
712 xfs_buf_associate_memory(
713         xfs_buf_t               *bp,
714         void                    *mem,
715         size_t                  len)
716 {
717         int                     rval;
718         int                     i = 0;
719         size_t                  ptr;
720         size_t                  end, end_cur;
721         off_t                   offset;
722         int                     page_count;
723
724         page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
725         offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK);
726         if (offset && (len > PAGE_CACHE_SIZE))
727                 page_count++;
728
729         /* Free any previous set of page pointers */
730         if (bp->b_pages)
731                 _xfs_buf_free_pages(bp);
732
733         bp->b_pages = NULL;
734         bp->b_addr = mem;
735
736         rval = _xfs_buf_get_pages(bp, page_count, 0);
737         if (rval)
738                 return rval;
739
740         bp->b_offset = offset;
741         ptr = (size_t) mem & PAGE_CACHE_MASK;
742         end = PAGE_CACHE_ALIGN((size_t) mem + len);
743         end_cur = end;
744         /* set up first page */
745         bp->b_pages[0] = mem_to_page(mem);
746
747         ptr += PAGE_CACHE_SIZE;
748         bp->b_page_count = ++i;
749         while (ptr < end) {
750                 bp->b_pages[i] = mem_to_page((void *)ptr);
751                 bp->b_page_count = ++i;
752                 ptr += PAGE_CACHE_SIZE;
753         }
754         bp->b_locked = 0;
755
756         bp->b_count_desired = bp->b_buffer_length = len;
757         bp->b_flags |= XBF_MAPPED;
758
759         return 0;
760 }
761
762 xfs_buf_t *
763 xfs_buf_get_noaddr(
764         size_t                  len,
765         xfs_buftarg_t           *target)
766 {
767         size_t                  malloc_len = len;
768         xfs_buf_t               *bp;
769         void                    *data;
770         int                     error;
771
772         bp = xfs_buf_allocate(0);
773         if (unlikely(bp == NULL))
774                 goto fail;
775         _xfs_buf_initialize(bp, target, 0, len, 0);
776
777  try_again:
778         data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL | KM_LARGE);
779         if (unlikely(data == NULL))
780                 goto fail_free_buf;
781
782         /* check whether alignment matches.. */
783         if ((__psunsigned_t)data !=
784             ((__psunsigned_t)data & ~target->bt_smask)) {
785                 /* .. else double the size and try again */
786                 kmem_free(data, malloc_len);
787                 malloc_len <<= 1;
788                 goto try_again;
789         }
790
791         error = xfs_buf_associate_memory(bp, data, len);
792         if (error)
793                 goto fail_free_mem;
794         bp->b_flags |= _XBF_KMEM_ALLOC;
795
796         xfs_buf_unlock(bp);
797
798         XB_TRACE(bp, "no_daddr", data);
799         return bp;
800  fail_free_mem:
801         kmem_free(data, malloc_len);
802  fail_free_buf:
803         xfs_buf_free(bp);
804  fail:
805         return NULL;
806 }
807
808 /*
809  *      Increment reference count on buffer, to hold the buffer concurrently
810  *      with another thread which may release (free) the buffer asynchronously.
811  *      Must hold the buffer already to call this function.
812  */
813 void
814 xfs_buf_hold(
815         xfs_buf_t               *bp)
816 {
817         atomic_inc(&bp->b_hold);
818         XB_TRACE(bp, "hold", 0);
819 }
820
821 /*
822  *      Releases a hold on the specified buffer.  If the
823  *      hold count is 1, calls xfs_buf_free.
824  */
825 void
826 xfs_buf_rele(
827         xfs_buf_t               *bp)
828 {
829         xfs_bufhash_t           *hash = bp->b_hash;
830
831         XB_TRACE(bp, "rele", bp->b_relse);
832
833         if (unlikely(!hash)) {
834                 ASSERT(!bp->b_relse);
835                 if (atomic_dec_and_test(&bp->b_hold))
836                         xfs_buf_free(bp);
837                 return;
838         }
839
840         if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
841                 if (bp->b_relse) {
842                         atomic_inc(&bp->b_hold);
843                         spin_unlock(&hash->bh_lock);
844                         (*(bp->b_relse)) (bp);
845                 } else if (bp->b_flags & XBF_FS_MANAGED) {
846                         spin_unlock(&hash->bh_lock);
847                 } else {
848                         ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
849                         list_del_init(&bp->b_hash_list);
850                         spin_unlock(&hash->bh_lock);
851                         xfs_buf_free(bp);
852                 }
853         } else {
854                 /*
855                  * Catch reference count leaks
856                  */
857                 ASSERT(atomic_read(&bp->b_hold) >= 0);
858         }
859 }
860
861
862 /*
863  *      Mutual exclusion on buffers.  Locking model:
864  *
865  *      Buffers associated with inodes for which buffer locking
866  *      is not enabled are not protected by semaphores, and are
867  *      assumed to be exclusively owned by the caller.  There is a
868  *      spinlock in the buffer, used by the caller when concurrent
869  *      access is possible.
870  */
871
872 /*
873  *      Locks a buffer object, if it is not already locked.
874  *      Note that this in no way locks the underlying pages, so it is only
875  *      useful for synchronizing concurrent use of buffer objects, not for
876  *      synchronizing independent access to the underlying pages.
877  */
878 int
879 xfs_buf_cond_lock(
880         xfs_buf_t               *bp)
881 {
882         int                     locked;
883
884         locked = down_trylock(&bp->b_sema) == 0;
885         if (locked) {
886                 XB_SET_OWNER(bp);
887         }
888         XB_TRACE(bp, "cond_lock", (long)locked);
889         return locked ? 0 : -EBUSY;
890 }
891
892 #if defined(DEBUG) || defined(XFS_BLI_TRACE)
893 int
894 xfs_buf_lock_value(
895         xfs_buf_t               *bp)
896 {
897         return atomic_read(&bp->b_sema.count);
898 }
899 #endif
900
901 /*
902  *      Locks a buffer object.
903  *      Note that this in no way locks the underlying pages, so it is only
904  *      useful for synchronizing concurrent use of buffer objects, not for
905  *      synchronizing independent access to the underlying pages.
906  */
907 void
908 xfs_buf_lock(
909         xfs_buf_t               *bp)
910 {
911         XB_TRACE(bp, "lock", 0);
912         if (atomic_read(&bp->b_io_remaining))
913                 blk_run_address_space(bp->b_target->bt_mapping);
914         down(&bp->b_sema);
915         XB_SET_OWNER(bp);
916         XB_TRACE(bp, "locked", 0);
917 }
918
919 /*
920  *      Releases the lock on the buffer object.
921  *      If the buffer is marked delwri but is not queued, do so before we
922  *      unlock the buffer as we need to set flags correctly.  We also need to
923  *      take a reference for the delwri queue because the unlocker is going to
924  *      drop theirs and they don't know we just queued it.
925  */
926 void
927 xfs_buf_unlock(
928         xfs_buf_t               *bp)
929 {
930         if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
931                 atomic_inc(&bp->b_hold);
932                 bp->b_flags |= XBF_ASYNC;
933                 xfs_buf_delwri_queue(bp, 0);
934         }
935
936         XB_CLEAR_OWNER(bp);
937         up(&bp->b_sema);
938         XB_TRACE(bp, "unlock", 0);
939 }
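/*
 * Illustrative sketch only, not part of the original source: the
 * "try first, then block" pattern the primitives above support.
 * xfs_buf_cond_lock() returns 0 on success and -EBUSY otherwise,
 * as defined above.
 */
STATIC void
example_lock_buffer(
	xfs_buf_t		*bp,
	int			can_block)
{
	if (xfs_buf_cond_lock(bp)) {
		if (!can_block)
			return;		/* caller retries later */
		xfs_buf_lock(bp);	/* sleep until the holder drops it */
	}

	/* ... operate on the locked buffer ... */

	xfs_buf_unlock(bp);
}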
940
941
942 /*
943  *      Pinning Buffer Storage in Memory
944  *      Ensure that no attempt to force a buffer to disk will succeed.
945  */
946 void
947 xfs_buf_pin(
948         xfs_buf_t               *bp)
949 {
950         atomic_inc(&bp->b_pin_count);
951         XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
952 }
953
954 void
955 xfs_buf_unpin(
956         xfs_buf_t               *bp)
957 {
958         if (atomic_dec_and_test(&bp->b_pin_count))
959                 wake_up_all(&bp->b_waiters);
960         XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
961 }
962
963 int
964 xfs_buf_ispin(
965         xfs_buf_t               *bp)
966 {
967         return atomic_read(&bp->b_pin_count);
968 }
969
970 STATIC void
971 xfs_buf_wait_unpin(
972         xfs_buf_t               *bp)
973 {
974         DECLARE_WAITQUEUE       (wait, current);
975
976         if (atomic_read(&bp->b_pin_count) == 0)
977                 return;
978
979         add_wait_queue(&bp->b_waiters, &wait);
980         for (;;) {
981                 set_current_state(TASK_UNINTERRUPTIBLE);
982                 if (atomic_read(&bp->b_pin_count) == 0)
983                         break;
984                 if (atomic_read(&bp->b_io_remaining))
985                         blk_run_address_space(bp->b_target->bt_mapping);
986                 schedule();
987         }
988         remove_wait_queue(&bp->b_waiters, &wait);
989         set_current_state(TASK_RUNNING);
990 }
991
992 /*
993  *      Buffer Utility Routines
994  */
995
996 STATIC void
997 xfs_buf_iodone_work(
998         struct work_struct      *work)
999 {
1000         xfs_buf_t               *bp =
1001                 container_of(work, xfs_buf_t, b_iodone_work);
1002
1003         if (bp->b_iodone)
1004                 (*(bp->b_iodone))(bp);
1005         else if (bp->b_flags & XBF_ASYNC)
1006                 xfs_buf_relse(bp);
1007 }
1008
1009 void
1010 xfs_buf_ioend(
1011         xfs_buf_t               *bp,
1012         int                     schedule)
1013 {
1014         bp->b_flags &= ~(XBF_READ | XBF_WRITE);
1015         if (bp->b_error == 0)
1016                 bp->b_flags |= XBF_DONE;
1017
1018         XB_TRACE(bp, "iodone", bp->b_iodone);
1019
1020         if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
1021                 if (schedule) {
1022                         INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
1023                         queue_work(xfslogd_workqueue, &bp->b_iodone_work);
1024                 } else {
1025                         xfs_buf_iodone_work(&bp->b_iodone_work);
1026                 }
1027         } else {
1028                 up(&bp->b_iodonesema);
1029         }
1030 }
1031
1032 void
1033 xfs_buf_ioerror(
1034         xfs_buf_t               *bp,
1035         int                     error)
1036 {
1037         ASSERT(error >= 0 && error <= 0xffff);
1038         bp->b_error = (unsigned short)error;
1039         XB_TRACE(bp, "ioerror", (unsigned long)error);
1040 }
1041
1042 /*
1043  *      Initiate I/O on a buffer, based on the flags supplied.
1044  *      The b_iodone routine in the buffer supplied will only be called
1045  *      when all of the subsidiary I/O requests, if any, have been completed.
1046  */
1047 int
1048 xfs_buf_iostart(
1049         xfs_buf_t               *bp,
1050         xfs_buf_flags_t         flags)
1051 {
1052         int                     status = 0;
1053
1054         XB_TRACE(bp, "iostart", (unsigned long)flags);
1055
1056         if (flags & XBF_DELWRI) {
1057                 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
1058                 bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
1059                 xfs_buf_delwri_queue(bp, 1);
1060                 return status;
1061         }
1062
1063         bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
1064                         XBF_READ_AHEAD | _XBF_RUN_QUEUES);
1065         bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \
1066                         XBF_READ_AHEAD | _XBF_RUN_QUEUES);
1067
1068         BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL);
1069
1070         /* For writes allow an alternate strategy routine to precede
1071          * the actual I/O request (which may not be issued at all in
1072          * a shutdown situation, for example).
1073          */
1074         status = (flags & XBF_WRITE) ?
1075                 xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp);
1076
1077         /* Wait for I/O if we are not an async request.
1078          * Note: async I/O request completion will release the buffer,
1079          * and that can already be done by this point.  So using the
1080          * buffer pointer from here on, after async I/O, is invalid.
1081          */
1082         if (!status && !(flags & XBF_ASYNC))
1083                 status = xfs_buf_iowait(bp);
1084
1085         return status;
1086 }
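/*
 * Summary of the paths above: XBF_DELWRI requests are only queued for
 * later writeout by xfsbufd; XBF_ASYNC requests are submitted and the
 * caller returns without waiting (the buffer may already be released by
 * the time this returns); everything else is submitted and then waited
 * for via xfs_buf_iowait().
 */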
1087
1088 STATIC_INLINE int
1089 _xfs_buf_iolocked(
1090         xfs_buf_t               *bp)
1091 {
1092         ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
1093         if (bp->b_flags & XBF_READ)
1094                 return bp->b_locked;
1095         return 0;
1096 }
1097
1098 STATIC_INLINE void
1099 _xfs_buf_ioend(
1100         xfs_buf_t               *bp,
1101         int                     schedule)
1102 {
1103         if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
1104                 bp->b_locked = 0;
1105                 xfs_buf_ioend(bp, schedule);
1106         }
1107 }
1108
1109 STATIC int
1110 xfs_buf_bio_end_io(
1111         struct bio              *bio,
1112         unsigned int            bytes_done,
1113         int                     error)
1114 {
1115         xfs_buf_t               *bp = (xfs_buf_t *)bio->bi_private;
1116         unsigned int            blocksize = bp->b_target->bt_bsize;
1117         struct bio_vec          *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1118
1119         if (bio->bi_size)
1120                 return 1;
1121
1122         if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1123                 bp->b_error = EIO;
1124
1125         do {
1126                 struct page     *page = bvec->bv_page;
1127
1128                 ASSERT(!PagePrivate(page));
1129                 if (unlikely(bp->b_error)) {
1130                         if (bp->b_flags & XBF_READ)
1131                                 ClearPageUptodate(page);
1132                 } else if (blocksize >= PAGE_CACHE_SIZE) {
1133                         SetPageUptodate(page);
1134                 } else if (!PagePrivate(page) &&
1135                                 (bp->b_flags & _XBF_PAGE_CACHE)) {
1136                         set_page_region(page, bvec->bv_offset, bvec->bv_len);
1137                 }
1138
1139                 if (--bvec >= bio->bi_io_vec)
1140                         prefetchw(&bvec->bv_page->flags);
1141
1142                 if (_xfs_buf_iolocked(bp)) {
1143                         unlock_page(page);
1144                 }
1145         } while (bvec >= bio->bi_io_vec);
1146
1147         _xfs_buf_ioend(bp, 1);
1148         bio_put(bio);
1149         return 0;
1150 }
1151
1152 STATIC void
1153 _xfs_buf_ioapply(
1154         xfs_buf_t               *bp)
1155 {
1156         int                     i, rw, map_i, total_nr_pages, nr_pages;
1157         struct bio              *bio;
1158         int                     offset = bp->b_offset;
1159         int                     size = bp->b_count_desired;
1160         sector_t                sector = bp->b_bn;
1161         unsigned int            blocksize = bp->b_target->bt_bsize;
1162         int                     locking = _xfs_buf_iolocked(bp);
1163
1164         total_nr_pages = bp->b_page_count;
1165         map_i = 0;
1166
1167         if (bp->b_flags & XBF_ORDERED) {
1168                 ASSERT(!(bp->b_flags & XBF_READ));
1169                 rw = WRITE_BARRIER;
1170         } else if (bp->b_flags & _XBF_RUN_QUEUES) {
1171                 ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
1172                 bp->b_flags &= ~_XBF_RUN_QUEUES;
1173                 rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
1174         } else {
1175                 rw = (bp->b_flags & XBF_WRITE) ? WRITE :
1176                      (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
1177         }
1178
1179         /* Special code path for reading a sub-page-size buffer in --
1180          * we populate the whole page, and hence the other metadata
1181          * in the same page.  This optimization is only valid when the
1182          * filesystem block size is not smaller than the page size.
1183          */
1184         if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
1185             (bp->b_flags & XBF_READ) && locking &&
1186             (blocksize >= PAGE_CACHE_SIZE)) {
1187                 bio = bio_alloc(GFP_NOIO, 1);
1188
1189                 bio->bi_bdev = bp->b_target->bt_bdev;
1190                 bio->bi_sector = sector - (offset >> BBSHIFT);
1191                 bio->bi_end_io = xfs_buf_bio_end_io;
1192                 bio->bi_private = bp;
1193
1194                 bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
1195                 size = 0;
1196
1197                 atomic_inc(&bp->b_io_remaining);
1198
1199                 goto submit_io;
1200         }
1201
1202         /* Lock down the pages which we need to for the request */
1203         if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
1204                 for (i = 0; size; i++) {
1205                         int             nbytes = PAGE_CACHE_SIZE - offset;
1206                         struct page     *page = bp->b_pages[i];
1207
1208                         if (nbytes > size)
1209                                 nbytes = size;
1210
1211                         lock_page(page);
1212
1213                         size -= nbytes;
1214                         offset = 0;
1215                 }
1216                 offset = bp->b_offset;
1217                 size = bp->b_count_desired;
1218         }
1219
1220 next_chunk:
1221         atomic_inc(&bp->b_io_remaining);
1222         nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1223         if (nr_pages > total_nr_pages)
1224                 nr_pages = total_nr_pages;
1225
1226         bio = bio_alloc(GFP_NOIO, nr_pages);
1227         bio->bi_bdev = bp->b_target->bt_bdev;
1228         bio->bi_sector = sector;
1229         bio->bi_end_io = xfs_buf_bio_end_io;
1230         bio->bi_private = bp;
1231
1232         for (; size && nr_pages; nr_pages--, map_i++) {
1233                 int     rbytes, nbytes = PAGE_CACHE_SIZE - offset;
1234
1235                 if (nbytes > size)
1236                         nbytes = size;
1237
1238                 rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
1239                 if (rbytes < nbytes)
1240                         break;
1241
1242                 offset = 0;
1243                 sector += nbytes >> BBSHIFT;
1244                 size -= nbytes;
1245                 total_nr_pages--;
1246         }
1247
1248 submit_io:
1249         if (likely(bio->bi_size)) {
1250                 submit_bio(rw, bio);
1251                 if (size)
1252                         goto next_chunk;
1253         } else {
1254                 bio_put(bio);
1255                 xfs_buf_ioerror(bp, EIO);
1256         }
1257 }
1258
1259 int
1260 xfs_buf_iorequest(
1261         xfs_buf_t               *bp)
1262 {
1263         XB_TRACE(bp, "iorequest", 0);
1264
1265         if (bp->b_flags & XBF_DELWRI) {
1266                 xfs_buf_delwri_queue(bp, 1);
1267                 return 0;
1268         }
1269
1270         if (bp->b_flags & XBF_WRITE) {
1271                 xfs_buf_wait_unpin(bp);
1272         }
1273
1274         xfs_buf_hold(bp);
1275
1276         /* Set the count to 1 initially; this will stop an I/O
1277          * completion callout which happens before we have started
1278          * all the I/O from calling xfs_buf_ioend too early.
1279          */
1280         atomic_set(&bp->b_io_remaining, 1);
1281         _xfs_buf_ioapply(bp);
1282         _xfs_buf_ioend(bp, 0);
1283
1284         xfs_buf_rele(bp);
1285         return 0;
1286 }
1287
1288 /*
1289  *      Waits for I/O to complete on the buffer supplied.
1290  *      It returns immediately if no I/O is pending.
1291  *      It returns the I/O error code, if any, or 0 if there was no error.
1292  */
1293 int
1294 xfs_buf_iowait(
1295         xfs_buf_t               *bp)
1296 {
1297         XB_TRACE(bp, "iowait", 0);
1298         if (atomic_read(&bp->b_io_remaining))
1299                 blk_run_address_space(bp->b_target->bt_mapping);
1300         down(&bp->b_iodonesema);
1301         XB_TRACE(bp, "iowaited", (long)bp->b_error);
1302         return bp->b_error;
1303 }
1304
1305 xfs_caddr_t
1306 xfs_buf_offset(
1307         xfs_buf_t               *bp,
1308         size_t                  offset)
1309 {
1310         struct page             *page;
1311
1312         if (bp->b_flags & XBF_MAPPED)
1313                 return XFS_BUF_PTR(bp) + offset;
1314
1315         offset += bp->b_offset;
1316         page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
1317         return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
1318 }
1319
1320 /*
1321  *      Move data into or out of a buffer.
1322  */
1323 void
1324 xfs_buf_iomove(
1325         xfs_buf_t               *bp,    /* buffer to process            */
1326         size_t                  boff,   /* starting buffer offset       */
1327         size_t                  bsize,  /* length to copy               */
1328         caddr_t                 data,   /* data address                 */
1329         xfs_buf_rw_t            mode)   /* read/write/zero flag         */
1330 {
1331         size_t                  bend, cpoff, csize;
1332         struct page             *page;
1333
1334         bend = boff + bsize;
1335         while (boff < bend) {
1336                 page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
1337                 cpoff = xfs_buf_poff(boff + bp->b_offset);
1338                 csize = min_t(size_t,
1339                               PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
1340
1341                 ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
1342
1343                 switch (mode) {
1344                 case XBRW_ZERO:
1345                         memset(page_address(page) + cpoff, 0, csize);
1346                         break;
1347                 case XBRW_READ:
1348                         memcpy(data, page_address(page) + cpoff, csize);
1349                         break;
1350                 case XBRW_WRITE:
1351                         memcpy(page_address(page) + cpoff, data, csize);
1352                 }
1353
1354                 boff += csize;
1355                 data += csize;
1356         }
1357 }
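/*
 * Illustrative sketch only, not part of the original source: zeroing a
 * byte range in the middle of a buffer with xfs_buf_iomove().  The zero
 * case never dereferences the data pointer, so NULL is passed; the
 * offsets are arbitrary example values.
 */
STATIC void
example_zero_range(
	xfs_buf_t		*bp)
{
	/* zero 512 bytes starting 256 bytes into the buffer */
	xfs_buf_iomove(bp, 256, 512, NULL, XBRW_ZERO);
}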
1358
1359 /*
1360  *      Handling of buffer targets (buftargs).
1361  */
1362
1363 /*
1364  *      Wait for any bufs with callbacks that have been submitted but
1365  *      have not yet returned... walk the hash list for the target.
1366  */
1367 void
1368 xfs_wait_buftarg(
1369         xfs_buftarg_t   *btp)
1370 {
1371         xfs_buf_t       *bp, *n;
1372         xfs_bufhash_t   *hash;
1373         uint            i;
1374
1375         for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1376                 hash = &btp->bt_hash[i];
1377 again:
1378                 spin_lock(&hash->bh_lock);
1379                 list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
1380                         ASSERT(btp == bp->b_target);
1381                         if (!(bp->b_flags & XBF_FS_MANAGED)) {
1382                                 spin_unlock(&hash->bh_lock);
1383                                 /*
1384                                  * Catch superblock reference count leaks
1385                                  * immediately
1386                                  */
1387                                 BUG_ON(bp->b_bn == 0);
1388                                 delay(100);
1389                                 goto again;
1390                         }
1391                 }
1392                 spin_unlock(&hash->bh_lock);
1393         }
1394 }
1395
1396 /*
1397  *      Allocate buffer hash table for a given target.
1398  *      For devices containing metadata (i.e. not the log/realtime devices)
1399  *      we need to allocate a much larger hash table.
1400  */
1401 STATIC void
1402 xfs_alloc_bufhash(
1403         xfs_buftarg_t           *btp,
1404         int                     external)
1405 {
1406         unsigned int            i;
1407
1408         btp->bt_hashshift = external ? 3 : 8;   /* 8 or 256 buckets */
1409         btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
1410         btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
1411                                         sizeof(xfs_bufhash_t), KM_SLEEP | KM_LARGE);
1412         for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1413                 spin_lock_init(&btp->bt_hash[i].bh_lock);
1414                 INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
1415         }
1416 }
1417
1418 STATIC void
1419 xfs_free_bufhash(
1420         xfs_buftarg_t           *btp)
1421 {
1422         kmem_free(btp->bt_hash, (1<<btp->bt_hashshift) * sizeof(xfs_bufhash_t));
1423         btp->bt_hash = NULL;
1424 }
1425
1426 /*
1427  *      buftarg list for delwrite queue processing
1428  */
1429 LIST_HEAD(xfs_buftarg_list);
1430 static DEFINE_SPINLOCK(xfs_buftarg_lock);
1431
1432 STATIC void
1433 xfs_register_buftarg(
1434         xfs_buftarg_t           *btp)
1435 {
1436         spin_lock(&xfs_buftarg_lock);
1437         list_add(&btp->bt_list, &xfs_buftarg_list);
1438         spin_unlock(&xfs_buftarg_lock);
1439 }
1440
1441 STATIC void
1442 xfs_unregister_buftarg(
1443         xfs_buftarg_t           *btp)
1444 {
1445         spin_lock(&xfs_buftarg_lock);
1446         list_del(&btp->bt_list);
1447         spin_unlock(&xfs_buftarg_lock);
1448 }
1449
1450 void
1451 xfs_free_buftarg(
1452         xfs_buftarg_t           *btp,
1453         int                     external)
1454 {
1455         xfs_flush_buftarg(btp, 1);
1456         if (external)
1457                 xfs_blkdev_put(btp->bt_bdev);
1458         xfs_free_bufhash(btp);
1459         iput(btp->bt_mapping->host);
1460
1461         /* Unregister the buftarg first so that we don't get a
1462          * wakeup finding a non-existent task
1463          */
1464         xfs_unregister_buftarg(btp);
1465         kthread_stop(btp->bt_task);
1466
1467         kmem_free(btp, sizeof(*btp));
1468 }
1469
1470 STATIC int
1471 xfs_setsize_buftarg_flags(
1472         xfs_buftarg_t           *btp,
1473         unsigned int            blocksize,
1474         unsigned int            sectorsize,
1475         int                     verbose)
1476 {
1477         btp->bt_bsize = blocksize;
1478         btp->bt_sshift = ffs(sectorsize) - 1;
1479         btp->bt_smask = sectorsize - 1;
1480
1481         if (set_blocksize(btp->bt_bdev, sectorsize)) {
1482                 printk(KERN_WARNING
1483                         "XFS: Cannot set_blocksize to %u on device %s\n",
1484                         sectorsize, XFS_BUFTARG_NAME(btp));
1485                 return EINVAL;
1486         }
1487
1488         if (verbose &&
1489             (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
1490                 printk(KERN_WARNING
1491                         "XFS: %u byte sectors in use on device %s.  "
1492                         "This is suboptimal; %u or greater is ideal.\n",
1493                         sectorsize, XFS_BUFTARG_NAME(btp),
1494                         (unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
1495         }
1496
1497         return 0;
1498 }
1499
1500 /*
1501  *      When allocating the initial buffer target we have not yet
1502  *      read in the superblock, so don't know what size sectors
1503  *      are being used at this early stage.  Play safe.
1504  */
1505 STATIC int
1506 xfs_setsize_buftarg_early(
1507         xfs_buftarg_t           *btp,
1508         struct block_device     *bdev)
1509 {
1510         return xfs_setsize_buftarg_flags(btp,
1511                         PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);
1512 }
1513
1514 int
1515 xfs_setsize_buftarg(
1516         xfs_buftarg_t           *btp,
1517         unsigned int            blocksize,
1518         unsigned int            sectorsize)
1519 {
1520         return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
1521 }
1522
1523 STATIC int
1524 xfs_mapping_buftarg(
1525         xfs_buftarg_t           *btp,
1526         struct block_device     *bdev)
1527 {
1528         struct backing_dev_info *bdi;
1529         struct inode            *inode;
1530         struct address_space    *mapping;
1531         static const struct address_space_operations mapping_aops = {
1532                 .sync_page = block_sync_page,
1533                 .migratepage = fail_migrate_page,
1534         };
1535
1536         inode = new_inode(bdev->bd_inode->i_sb);
1537         if (!inode) {
1538                 printk(KERN_WARNING
1539                         "XFS: Cannot allocate mapping inode for device %s\n",
1540                         XFS_BUFTARG_NAME(btp));
1541                 return ENOMEM;
1542         }
1543         inode->i_mode = S_IFBLK;
1544         inode->i_bdev = bdev;
1545         inode->i_rdev = bdev->bd_dev;
1546         bdi = blk_get_backing_dev_info(bdev);
1547         if (!bdi)
1548                 bdi = &default_backing_dev_info;
1549         mapping = &inode->i_data;
1550         mapping->a_ops = &mapping_aops;
1551         mapping->backing_dev_info = bdi;
1552         mapping_set_gfp_mask(mapping, GFP_NOFS);
1553         btp->bt_mapping = mapping;
1554         return 0;
1555 }
1556
1557 STATIC int
1558 xfs_alloc_delwrite_queue(
1559         xfs_buftarg_t           *btp)
1560 {
1561         int     error = 0;
1562
1563         INIT_LIST_HEAD(&btp->bt_list);
1564         INIT_LIST_HEAD(&btp->bt_delwrite_queue);
1565         spinlock_init(&btp->bt_delwrite_lock, "delwri_lock");
1566         btp->bt_flags = 0;
1567         btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
1568         if (IS_ERR(btp->bt_task)) {
1569                 error = PTR_ERR(btp->bt_task);
1570                 goto out_error;
1571         }
1572         xfs_register_buftarg(btp);
1573 out_error:
1574         return error;
1575 }
1576
1577 xfs_buftarg_t *
1578 xfs_alloc_buftarg(
1579         struct block_device     *bdev,
1580         int                     external)
1581 {
1582         xfs_buftarg_t           *btp;
1583
1584         btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1585
1586         btp->bt_dev =  bdev->bd_dev;
1587         btp->bt_bdev = bdev;
1588         if (xfs_setsize_buftarg_early(btp, bdev))
1589                 goto error;
1590         if (xfs_mapping_buftarg(btp, bdev))
1591                 goto error;
1592         if (xfs_alloc_delwrite_queue(btp))
1593                 goto error;
1594         xfs_alloc_bufhash(btp, external);
1595         return btp;
1596
1597 error:
1598         kmem_free(btp, sizeof(*btp));
1599         return NULL;
1600 }
1601
1602
1603 /*
1604  *      Delayed write buffer handling
1605  */
1606 STATIC void
1607 xfs_buf_delwri_queue(
1608         xfs_buf_t               *bp,
1609         int                     unlock)
1610 {
1611         struct list_head        *dwq = &bp->b_target->bt_delwrite_queue;
1612         spinlock_t              *dwlk = &bp->b_target->bt_delwrite_lock;
1613
1614         XB_TRACE(bp, "delwri_q", (long)unlock);
1615         ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
1616
1617         spin_lock(dwlk);
1618         /* If already in the queue, dequeue and place at tail */
1619         if (!list_empty(&bp->b_list)) {
1620                 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1621                 if (unlock)
1622                         atomic_dec(&bp->b_hold);
1623                 list_del(&bp->b_list);
1624         }
1625
1626         bp->b_flags |= _XBF_DELWRI_Q;
1627         list_add_tail(&bp->b_list, dwq);
1628         bp->b_queuetime = jiffies;
1629         spin_unlock(dwlk);
1630
1631         if (unlock)
1632                 xfs_buf_unlock(bp);
1633 }
1634
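     /*
      *      Remove a buffer from its target's delayed write queue, clearing
      *      the delwri flags and dropping the queue's buffer reference if it
      *      was actually queued.
      */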
1635 void
1636 xfs_buf_delwri_dequeue(
1637         xfs_buf_t               *bp)
1638 {
1639         spinlock_t              *dwlk = &bp->b_target->bt_delwrite_lock;
1640         int                     dequeued = 0;
1641
1642         spin_lock(dwlk);
1643         if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
1644                 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1645                 list_del_init(&bp->b_list);
1646                 dequeued = 1;
1647         }
1648         bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
1649         spin_unlock(dwlk);
1650
1651         if (dequeued)
1652                 xfs_buf_rele(bp);
1653
1654         XB_TRACE(bp, "delwri_dq", (long)dequeued);
1655 }
1656
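     /*
      *      Wait for all work currently queued on the given workqueue to
      *      complete.
      */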
1657 STATIC void
1658 xfs_buf_runall_queues(
1659         struct workqueue_struct *queue)
1660 {
1661         flush_workqueue(queue);
1662 }
1663
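     /*
      *      Memory shaker callback: ask every registered buffer target's
      *      xfsbufd to flush its delayed write queue immediately, regardless
      *      of buffer age.
      */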
1664 STATIC int
1665 xfsbufd_wakeup(
1666         int                     priority,
1667         gfp_t                   mask)
1668 {
1669         xfs_buftarg_t           *btp;
1670
1671         spin_lock(&xfs_buftarg_lock);
1672         list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
1673                 if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
1674                         continue;
1675                 set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
1676                 wake_up_process(btp->bt_task);
1677         }
1678         spin_unlock(&xfs_buftarg_lock);
1679         return 0;
1680 }
1681
1682 /*
1683  * Move as many buffers as specified to the supplied list,
1684  * indicating if we skipped any buffers to prevent deadlocks.
1685  */
1686 STATIC int
1687 xfs_buf_delwri_split(
1688         xfs_buftarg_t   *target,
1689         struct list_head *list,
1690         unsigned long   age)
1691 {
1692         xfs_buf_t       *bp, *n;
1693         struct list_head *dwq = &target->bt_delwrite_queue;
1694         spinlock_t      *dwlk = &target->bt_delwrite_lock;
1695         int             skipped = 0;
1696         int             force;
1697
1698         force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1699         INIT_LIST_HEAD(list);
1700         spin_lock(dwlk);
1701         list_for_each_entry_safe(bp, n, dwq, b_list) {
1702                 XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
1703                 ASSERT(bp->b_flags & XBF_DELWRI);
1704
1705                 if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
1706                         if (!force &&
1707                             time_before(jiffies, bp->b_queuetime + age)) {
1708                                 xfs_buf_unlock(bp);
1709                                 break;
1710                         }
1711
1712                         bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
1713                                          _XBF_RUN_QUEUES);
1714                         bp->b_flags |= XBF_WRITE;
1715                         list_move_tail(&bp->b_list, list);
1716                 } else
1717                         skipped++;
1718         }
1719         spin_unlock(dwlk);
1720
1721         return skipped;
1722
1723 }
1724
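     /*
      *      Delayed write flushing daemon, one per buffer target.  Wakes up
      *      periodically and pushes aged buffers from the delayed write
      *      queue out to disk until told to stop.
      */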
1725 STATIC int
1726 xfsbufd(
1727         void            *data)
1728 {
1729         struct list_head tmp;
1730         xfs_buftarg_t   *target = (xfs_buftarg_t *)data;
1731         int             count;
1732         xfs_buf_t       *bp;
1733
1734         current->flags |= PF_MEMALLOC;
1735
1736         do {
1737                 if (unlikely(freezing(current))) {
1738                         set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1739                         refrigerator();
1740                 } else {
1741                         clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1742                 }
1743
1744                 schedule_timeout_interruptible(
1745                         xfs_buf_timer_centisecs * msecs_to_jiffies(10));
1746
1747                 xfs_buf_delwri_split(target, &tmp,
1748                                 xfs_buf_age_centisecs * msecs_to_jiffies(10));
1749
1750                 count = 0;
1751                 while (!list_empty(&tmp)) {
1752                         bp = list_entry(tmp.next, xfs_buf_t, b_list);
1753                         ASSERT(target == bp->b_target);
1754
1755                         list_del_init(&bp->b_list);
1756                         xfs_buf_iostrategy(bp);
1757                         count++;
1758                 }
1759
1760                 if (as_list_len > 0)
1761                         purge_addresses();
1762                 if (count)
1763                         blk_run_address_space(target->bt_mapping);
1764
1765         } while (!kthread_should_stop());
1766
1767         return 0;
1768 }
1769
1770 /*
1771  *      Write out all delayed write buffers belonging to the given target,
1772  *      waiting for I/O completion if requested. This is used in filesystem
1773  *      error handling to preserve the consistency of its metadata.
1774  */
1775 int
1776 xfs_flush_buftarg(
1777         xfs_buftarg_t   *target,
1778         int             wait)
1779 {
1780         struct list_head tmp;
1781         xfs_buf_t       *bp, *n;
1782         int             pincount = 0;
1783
1784         xfs_buf_runall_queues(xfsdatad_workqueue);
1785         xfs_buf_runall_queues(xfslogd_workqueue);
1786
1787         set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1788         pincount = xfs_buf_delwri_split(target, &tmp, 0);
1789
1790         /*
1791          * Dropped the delayed write list lock, now walk the temporary list
1792          */
1793         list_for_each_entry_safe(bp, n, &tmp, b_list) {
1794                 ASSERT(target == bp->b_target);
1795                 if (wait)
1796                         bp->b_flags &= ~XBF_ASYNC;
1797                 else
1798                         list_del_init(&bp->b_list);
1799
1800                 xfs_buf_iostrategy(bp);
1801         }
1802
1803         if (wait)
1804                 blk_run_address_space(target->bt_mapping);
1805
1806         /*
1807          * Remaining list items must be flushed before returning
1808          */
1809         while (!list_empty(&tmp)) {
1810                 bp = list_entry(tmp.next, xfs_buf_t, b_list);
1811
1812                 list_del_init(&bp->b_list);
1813                 xfs_iowait(bp);
1814                 xfs_buf_relse(bp);
1815         }
1816
1817         return pincount;
1818 }
1819
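     /*
      *      Buffer cache initialisation: set up the buffer zone, the
      *      xfslogd/xfsdatad workqueues and the memory shaker.
      */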
1820 int __init
1821 xfs_buf_init(void)
1822 {
1823 #ifdef XFS_BUF_TRACE
1824         xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
1825 #endif
1826
1827         xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1828                                                 KM_ZONE_HWALIGN, NULL);
1829         if (!xfs_buf_zone)
1830                 goto out_free_trace_buf;
1831
1832         xfslogd_workqueue = create_freezeable_workqueue("xfslogd");
1833         if (!xfslogd_workqueue)
1834                 goto out_free_buf_zone;
1835
1836         xfsdatad_workqueue = create_freezeable_workqueue("xfsdatad");
1837         if (!xfsdatad_workqueue)
1838                 goto out_destroy_xfslogd_workqueue;
1839
1840         xfs_buf_shake = kmem_shake_register(xfsbufd_wakeup);
1841         if (!xfs_buf_shake)
1842                 goto out_destroy_xfsdatad_workqueue;
1843
1844         return 0;
1845
1846  out_destroy_xfsdatad_workqueue:
1847         destroy_workqueue(xfsdatad_workqueue);
1848  out_destroy_xfslogd_workqueue:
1849         destroy_workqueue(xfslogd_workqueue);
1850  out_free_buf_zone:
1851         kmem_zone_destroy(xfs_buf_zone);
1852  out_free_trace_buf:
1853 #ifdef XFS_BUF_TRACE
1854         ktrace_free(xfs_buf_trace_buf);
1855 #endif
1856         return -ENOMEM;
1857 }
1858
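     /*
      *      Tear down everything that was set up by xfs_buf_init.
      */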
1859 void
1860 xfs_buf_terminate(void)
1861 {
1862         kmem_shake_deregister(xfs_buf_shake);
1863         destroy_workqueue(xfsdatad_workqueue);
1864         destroy_workqueue(xfslogd_workqueue);
1865         kmem_zone_destroy(xfs_buf_zone);
1866 #ifdef XFS_BUF_TRACE
1867         ktrace_free(xfs_buf_trace_buf);
1868 #endif
1869 }