[PATCH] md printk fix
drivers/md/bitmap.c [linux-2.6]
1 /*
2  * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
3  *
4  * bitmap_create  - sets up the bitmap structure
5  * bitmap_destroy - destroys the bitmap structure
6  *
7  * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
8  * - added disk storage for bitmap
9  * - changes to allow various bitmap chunk sizes
10  * - added bitmap daemon (to asynchronously clear bitmap bits from disk)
11  */
12
13 /*
14  * Still to do:
15  *
16  * flush after percent set rather than just time based. (maybe both).
17  * wait if count gets too high, wake when it drops to half.
18  * allow bitmap to be mirrored with superblock (before or after...)
19  * allow hot-add to re-instate a current device.
20  * allow hot-add of bitmap after quiescing device
21  */
22
23 #include <linux/module.h>
24 #include <linux/version.h>
25 #include <linux/errno.h>
26 #include <linux/slab.h>
27 #include <linux/init.h>
28 #include <linux/config.h>
29 #include <linux/timer.h>
30 #include <linux/sched.h>
31 #include <linux/list.h>
32 #include <linux/file.h>
33 #include <linux/mount.h>
34 #include <linux/buffer_head.h>
35 #include <linux/raid/md.h>
36 #include <linux/raid/bitmap.h>
37
38 /* debug macros */
39
40 #define DEBUG 0
41
42 #if DEBUG
43 /* these are for debugging purposes only! */
44
45 /* define one and only one of these */
46 #define INJECT_FAULTS_1 0 /* cause bitmap_alloc_page to fail always */
47 #define INJECT_FAULTS_2 0 /* cause bitmap file to be kicked when first bit set*/
48 #define INJECT_FAULTS_3 0 /* treat bitmap file as kicked at init time */
49 #define INJECT_FAULTS_4 0 /* undef */
50 #define INJECT_FAULTS_5 0 /* undef */
51 #define INJECT_FAULTS_6 0
52
53 /* if these are defined, the driver will fail! debug only */
54 #define INJECT_FATAL_FAULT_1 0 /* fail kmalloc, causing bitmap_create to fail */
55 #define INJECT_FATAL_FAULT_2 0 /* undef */
56 #define INJECT_FATAL_FAULT_3 0 /* undef */
57 #endif
58
59 //#define DPRINTK PRINTK /* define DPRINTK as PRINTK (in place of the stub below) for verbose debug output */
60 #define DPRINTK(x...) do { } while(0)
61
62 #ifndef PRINTK
63 #  if DEBUG > 0
64 #    define PRINTK(x...) printk(KERN_DEBUG x)
65 #  else
66 #    define PRINTK(x...)
67 #  endif
68 #endif
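/*
 * To get the PRINTK() trace output below, change DEBUG above to 1;
 * DPRINTK() stays compiled out unless it is redefined to PRINTK as
 * noted above.
 */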
69
70 static inline char * bmname(struct bitmap *bitmap)
71 {
72         return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
73 }
74
75
76 /*
77  * test if the bitmap is active
78  */
79 int bitmap_active(struct bitmap *bitmap)
80 {
81         unsigned long flags;
82         int res = 0;
83
84         if (!bitmap)
85                 return res;
86         spin_lock_irqsave(&bitmap->lock, flags);
87         res = bitmap->flags & BITMAP_ACTIVE;
88         spin_unlock_irqrestore(&bitmap->lock, flags);
89         return res;
90 }
91
92 #define WRITE_POOL_SIZE 256
93 /* mempool for queueing pending writes on the bitmap file */
94 static void *write_pool_alloc(unsigned int gfp_flags, void *data)
95 {
96         return kmalloc(sizeof(struct page_list), gfp_flags);
97 }
98
99 static void write_pool_free(void *ptr, void *data)
100 {
101         kfree(ptr);
102 }
103
104 /*
105  * just a placeholder - calls kmalloc for bitmap pages
106  */
107 static unsigned char *bitmap_alloc_page(struct bitmap *bitmap)
108 {
109         unsigned char *page;
110
111 #if INJECT_FAULTS_1
112         page = NULL;
113 #else
114         page = kmalloc(PAGE_SIZE, GFP_NOIO);
115 #endif
116         if (!page)
117                 printk("%s: bitmap_alloc_page FAILED\n", bmname(bitmap));
118         else
119                 PRINTK("%s: bitmap_alloc_page: allocated page at %p\n",
120                         bmname(bitmap), page);
121         return page;
122 }
123
124 /*
125  * for now just a placeholder -- just calls kfree for bitmap pages
126  */
127 static void bitmap_free_page(struct bitmap *bitmap, unsigned char *page)
128 {
129         PRINTK("%s: bitmap_free_page: free page %p\n", bmname(bitmap), page);
130         kfree(page);
131 }
132
133 /*
134  * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
135  *
136  * 1) check to see if this page is allocated, if it's not then try to alloc
137  * 2) if the alloc fails, set the page's hijacked flag so we'll use the
138  *    page pointer directly as a counter
139  *
140  * if we find our page, we increment the page's refcount so that it stays
141  * allocated while we're using it
142  */
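/*
 * Locking: bitmap_checkpage() is called with bitmap->lock held (taken
 * with interrupts enabled); when 'create' is set, the lock may be
 * dropped around the page allocation and re-taken, as also noted in
 * bitmap_get_counter() below.
 */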
143 static int bitmap_checkpage(struct bitmap *bitmap, unsigned long page, int create)
144 {
145         unsigned char *mappage;
146
147         if (page >= bitmap->pages) {
148                 printk(KERN_ALERT
149                         "%s: invalid bitmap page request: %lu (> %lu)\n",
150                         bmname(bitmap), page, bitmap->pages-1);
151                 return -EINVAL;
152         }
153
154
155         if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
156                 return 0;
157
158         if (bitmap->bp[page].map) /* page is already allocated, just return */
159                 return 0;
160
161         if (!create)
162                 return -ENOENT;
163
164         spin_unlock_irq(&bitmap->lock);
165
166         /* this page has not been allocated yet */
167
168         if ((mappage = bitmap_alloc_page(bitmap)) == NULL) {
169                 PRINTK("%s: bitmap map page allocation failed, hijacking\n",
170                         bmname(bitmap));
171                 /* failed - set the hijacked flag so that we can use the
172                  * pointer as a counter */
173                 spin_lock_irq(&bitmap->lock);
174                 if (!bitmap->bp[page].map)
175                         bitmap->bp[page].hijacked = 1;
176                 goto out;
177         }
178
179         /* got a page */
180
181         spin_lock_irq(&bitmap->lock);
182
183         /* recheck the page */
184
185         if (bitmap->bp[page].map || bitmap->bp[page].hijacked) {
186                 /* somebody beat us to getting the page */
187                 bitmap_free_page(bitmap, mappage);
188                 return 0;
189         }
190
191         /* no page was in place and we have one, so install it */
192
193         memset(mappage, 0, PAGE_SIZE);
194         bitmap->bp[page].map = mappage;
195         bitmap->missing_pages--;
196 out:
197         return 0;
198 }
199
200
201 /* if page is completely empty, put it back on the free list, or dealloc it */
202 /* if page was hijacked, unmark the flag so it might get alloced next time */
203 /* Note: lock should be held when calling this */
204 static inline void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
205 {
206         char *ptr;
207
208         if (bitmap->bp[page].count) /* page is still busy */
209                 return;
210
211         /* page is no longer in use, it can be released */
212
213         if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
214                 bitmap->bp[page].hijacked = 0;
215                 bitmap->bp[page].map = NULL;
216                 return;
217         }
218
219         /* normal case, free the page */
220
221 #if 0
222 /* actually ... let's not.  We will probably need the page again exactly when
223  * memory is tight and we are flushing to disk
224  */
225         return;
226 #else
227         ptr = bitmap->bp[page].map;
228         bitmap->bp[page].map = NULL;
229         bitmap->missing_pages++;
230         bitmap_free_page(bitmap, ptr);
231         return;
232 #endif
233 }
234
235
236 /*
237  * bitmap file handling - read and write the bitmap file and its superblock
238  */
239
240 /* copy the pathname of a file to a buffer */
241 char *file_path(struct file *file, char *buf, int count)
242 {
243         struct dentry *d;
244         struct vfsmount *v;
245
246         if (!buf)
247                 return NULL;
248
249         d = file->f_dentry;
250         v = file->f_vfsmnt;
251
252         buf = d_path(d, v, buf, count);
253
254         return IS_ERR(buf) ? NULL : buf;
255 }
256
257 /*
258  * basic page I/O operations
259  */
260
261 /*
262  * write out a page
263  */
264 static int write_page(struct page *page, int wait)
265 {
266         int ret = -ENOMEM;
267
268         lock_page(page);
269
270         if (page->mapping == NULL)
271                 goto unlock_out;
272         else if (i_size_read(page->mapping->host) < page->index << PAGE_SHIFT) {
273                 ret = -ENOENT;
274                 goto unlock_out;
275         }
276
277         ret = page->mapping->a_ops->prepare_write(NULL, page, 0, PAGE_SIZE);
278         if (!ret)
279                 ret = page->mapping->a_ops->commit_write(NULL, page, 0,
280                         PAGE_SIZE);
281         if (ret) {
282 unlock_out:
283                 unlock_page(page);
284                 return ret;
285         }
286
287         set_page_dirty(page); /* force it to be written out */
288         return write_one_page(page, wait);
289 }
290
291 /* read a page from a file, pinning it into cache, and return bytes_read */
292 static struct page *read_page(struct file *file, unsigned long index,
293                                         unsigned long *bytes_read)
294 {
295         struct inode *inode = file->f_mapping->host;
296         struct page *page = NULL;
297         loff_t isize = i_size_read(inode);
298         unsigned long end_index = isize >> PAGE_CACHE_SHIFT;
299
300         PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_CACHE_SIZE,
301                         (unsigned long long)index << PAGE_CACHE_SHIFT);
302
303         page = read_cache_page(inode->i_mapping, index,
304                         (filler_t *)inode->i_mapping->a_ops->readpage, file);
305         if (IS_ERR(page))
306                 goto out;
307         wait_on_page_locked(page);
308         if (!PageUptodate(page) || PageError(page)) {
309                 page_cache_release(page);
310                 page = ERR_PTR(-EIO);
311                 goto out;
312         }
313
314         if (index > end_index) /* we have read beyond EOF */
315                 *bytes_read = 0;
316         else if (index == end_index) /* possible short read */
317                 *bytes_read = isize & ~PAGE_CACHE_MASK;
318         else
319                 *bytes_read = PAGE_CACHE_SIZE; /* got a full page */
320 out:
321         if (IS_ERR(page))
322                 printk(KERN_ALERT "md: bitmap read error: (%dB @ %Lu): %ld\n",
323                         (int)PAGE_CACHE_SIZE,
324                         (unsigned long long)index << PAGE_CACHE_SHIFT,
325                         PTR_ERR(page));
326         return page;
327 }
328
329 /*
330  * bitmap file superblock operations
331  */
332
333 /* update the event counter and sync the superblock to disk */
334 int bitmap_update_sb(struct bitmap *bitmap)
335 {
336         bitmap_super_t *sb;
337         unsigned long flags;
338
339         if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
340                 return 0;
341         spin_lock_irqsave(&bitmap->lock, flags);
342         if (!bitmap->sb_page) { /* no superblock */
343                 spin_unlock_irqrestore(&bitmap->lock, flags);
344                 return 0;
345         }
346         page_cache_get(bitmap->sb_page);
347         spin_unlock_irqrestore(&bitmap->lock, flags);
348         sb = (bitmap_super_t *)kmap(bitmap->sb_page);
349         sb->events = cpu_to_le64(bitmap->mddev->events);
350         if (!bitmap->mddev->degraded)
351                 sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
352         kunmap(bitmap->sb_page);
353         write_page(bitmap->sb_page, 0);
354         return 0;
355 }
356
357 /* print out the bitmap file superblock */
358 void bitmap_print_sb(struct bitmap *bitmap)
359 {
360         bitmap_super_t *sb;
361
362         if (!bitmap || !bitmap->sb_page)
363                 return;
364         sb = (bitmap_super_t *)kmap(bitmap->sb_page);
365         printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
366         printk(KERN_DEBUG "       magic: %08x\n", le32_to_cpu(sb->magic));
367         printk(KERN_DEBUG "     version: %d\n", le32_to_cpu(sb->version));
368         printk(KERN_DEBUG "        uuid: %08x.%08x.%08x.%08x\n",
369                                         *(__u32 *)(sb->uuid+0),
370                                         *(__u32 *)(sb->uuid+4),
371                                         *(__u32 *)(sb->uuid+8),
372                                         *(__u32 *)(sb->uuid+12));
373         printk(KERN_DEBUG "      events: %llu\n",
374                         (unsigned long long) le64_to_cpu(sb->events));
375         printk(KERN_DEBUG "events cleared: %llu\n",
376                         (unsigned long long) le64_to_cpu(sb->events_cleared));
377         printk(KERN_DEBUG "       state: %08x\n", le32_to_cpu(sb->state));
378         printk(KERN_DEBUG "   chunksize: %d B\n", le32_to_cpu(sb->chunksize));
379         printk(KERN_DEBUG "daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
380         printk(KERN_DEBUG "   sync size: %llu KB\n",
381                         (unsigned long long)le64_to_cpu(sb->sync_size));
382         kunmap(bitmap->sb_page);
383 }
384
385 /* read the superblock from the bitmap file and initialize some bitmap fields */
386 static int bitmap_read_sb(struct bitmap *bitmap)
387 {
388         char *reason = NULL;
389         bitmap_super_t *sb;
390         unsigned long chunksize, daemon_sleep;
391         unsigned long bytes_read;
392         unsigned long long events;
393         int err = -EINVAL;
394
395         /* page 0 is the superblock, read it... */
396         bitmap->sb_page = read_page(bitmap->file, 0, &bytes_read);
397         if (IS_ERR(bitmap->sb_page)) {
398                 err = PTR_ERR(bitmap->sb_page);
399                 bitmap->sb_page = NULL;
400                 return err;
401         }
402
403         sb = (bitmap_super_t *)kmap(bitmap->sb_page);
404
405         if (bytes_read < sizeof(*sb)) { /* short read */
406                 printk(KERN_INFO "%s: bitmap file superblock truncated\n",
407                         bmname(bitmap));
408                 err = -ENOSPC;
409                 goto out;
410         }
411
412         chunksize = le32_to_cpu(sb->chunksize);
413         daemon_sleep = le32_to_cpu(sb->daemon_sleep);
414
415         /* verify that the bitmap-specific fields are valid */
416         if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
417                 reason = "bad magic";
418         else if (sb->version != cpu_to_le32(BITMAP_MAJOR))
419                 reason = "unrecognized superblock version";
420         else if (chunksize < 512 || chunksize > (1024 * 1024 * 4))
421                 reason = "bitmap chunksize out of range (512B - 4MB)";
422         else if ((1 << ffz(~chunksize)) != chunksize)
423                 reason = "bitmap chunksize not a power of 2";
424         else if (daemon_sleep < 1 || daemon_sleep > 15)
425                 reason = "daemon sleep period out of range";
426         if (reason) {
427                 printk(KERN_INFO "%s: invalid bitmap file superblock: %s\n",
428                         bmname(bitmap), reason);
429                 goto out;
430         }
431
432         /* keep the array size field of the bitmap superblock up to date */
433         sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
434
435         if (!bitmap->mddev->persistent)
436                 goto success;
437
438         /*
439          * if we have a persistent array superblock, compare the
440          * bitmap's UUID and event counter to the mddev's
441          */
442         if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
443                 printk(KERN_INFO "%s: bitmap superblock UUID mismatch\n",
444                         bmname(bitmap));
445                 goto out;
446         }
447         events = le64_to_cpu(sb->events);
448         if (events < bitmap->mddev->events) {
449                 printk(KERN_INFO "%s: bitmap file is out of date (%llu < %llu) "
450                         "-- forcing full recovery\n", bmname(bitmap), events,
451                         (unsigned long long) bitmap->mddev->events);
452                 sb->state |= BITMAP_STALE;
453         }
454 success:
455         /* assign fields using values from superblock */
456         bitmap->chunksize = chunksize;
457         bitmap->daemon_sleep = daemon_sleep;
458         bitmap->flags |= sb->state;
459         bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
460         err = 0;
461 out:
462         kunmap(bitmap->sb_page);
463         if (err)
464                 bitmap_print_sb(bitmap);
465         return err;
466 }
467
468 enum bitmap_mask_op {
469         MASK_SET,
470         MASK_UNSET
471 };
472
473 /* record the state of the bitmap in the superblock */
474 static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
475                                 enum bitmap_mask_op op)
476 {
477         bitmap_super_t *sb;
478         unsigned long flags;
479
480         spin_lock_irqsave(&bitmap->lock, flags);
481         if (!bitmap->sb_page) { /* can't set the state */
482                 spin_unlock_irqrestore(&bitmap->lock, flags);
483                 return;
484         }
485         page_cache_get(bitmap->sb_page);
486         spin_unlock_irqrestore(&bitmap->lock, flags);
487         sb = (bitmap_super_t *)kmap(bitmap->sb_page);
488         switch (op) {
489                 case MASK_SET: sb->state |= bits;
490                                 break;
491                 case MASK_UNSET: sb->state &= ~bits;
492                                 break;
493                 default: BUG();
494         }
495         kunmap(bitmap->sb_page);
496         page_cache_release(bitmap->sb_page);
497 }
498
499 /*
500  * general bitmap file operations
501  */
502
503 /* calculate the index of the page that contains this bit */
504 static inline unsigned long file_page_index(unsigned long chunk)
505 {
506         return CHUNK_BIT_OFFSET(chunk) >> PAGE_BIT_SHIFT;
507 }
508
509 /* calculate the (bit) offset of this bit within a page */
510 static inline unsigned long file_page_offset(unsigned long chunk)
511 {
512         return CHUNK_BIT_OFFSET(chunk) & (PAGE_BITS - 1);
513 }
514
515 /*
516  * return a pointer to the page in the filemap that contains the given bit
517  *
518  * this lookup is complicated by the fact that the bitmap sb might be exactly
519  * 1 page (e.g., x86) or less than 1 page -- so the bitmap might start on page
520  * 0 or page 1
521  */
522 static inline struct page *filemap_get_page(struct bitmap *bitmap,
523                                         unsigned long chunk)
524 {
525         return bitmap->filemap[file_page_index(chunk) - file_page_index(0)];
526 }
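/*
 * Worked example (assuming CHUNK_BIT_OFFSET() in bitmap.h biases the
 * chunk number by the superblock size in bits): with 4K pages the
 * 256-byte superblock occupies bits 0-2047 of file page 0, chunk 0 sits
 * at bit 2048 of that same page, and file_page_index(0) == 0, so the
 * subtraction above is a no-op.  If the superblock filled page 0
 * completely, chunk 0 would start on file page 1 and filemap[] would be
 * indexed from there instead (see also bitmap_init_from_disk below).
 */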
527
528
529 static void bitmap_file_unmap(struct bitmap *bitmap)
530 {
531         struct page **map, *sb_page;
532         unsigned long *attr;
533         int pages;
534         unsigned long flags;
535
536         spin_lock_irqsave(&bitmap->lock, flags);
537         map = bitmap->filemap;
538         bitmap->filemap = NULL;
539         attr = bitmap->filemap_attr;
540         bitmap->filemap_attr = NULL;
541         pages = bitmap->file_pages;
542         bitmap->file_pages = 0;
543         sb_page = bitmap->sb_page;
544         bitmap->sb_page = NULL;
545         spin_unlock_irqrestore(&bitmap->lock, flags);
546
547         while (pages--)
548                 if (map[pages]->index != 0) /* 0 is sb_page, release it below */
549                         page_cache_release(map[pages]);
550         kfree(map);
551         kfree(attr);
552
553         if (sb_page)
554                 page_cache_release(sb_page);
555 }
556
557 static void bitmap_stop_daemons(struct bitmap *bitmap);
558
559 /* dequeue the next item in a page list -- don't call from irq context */
560 static struct page_list *dequeue_page(struct bitmap *bitmap,
561                                         struct list_head *head)
562 {
563         struct page_list *item = NULL;
564
565         spin_lock(&bitmap->write_lock);
566         if (list_empty(head))
567                 goto out;
568         item = list_entry(head->prev, struct page_list, list);
569         list_del(head->prev);
570 out:
571         spin_unlock(&bitmap->write_lock);
572         return item;
573 }
574
575 static void drain_write_queues(struct bitmap *bitmap)
576 {
577         struct list_head *queues[] = {  &bitmap->complete_pages, NULL };
578         struct list_head *head;
579         struct page_list *item;
580         int i;
581
582         for (i = 0; queues[i]; i++) {
583                 head = queues[i];
584                 while ((item = dequeue_page(bitmap, head))) {
585                         page_cache_release(item->page);
586                         mempool_free(item, bitmap->write_pool);
587                 }
588         }
589
590         spin_lock(&bitmap->write_lock);
591         bitmap->writes_pending = 0; /* make sure waiters continue */
592         wake_up(&bitmap->write_wait);
593         spin_unlock(&bitmap->write_lock);
594 }
595
596 static void bitmap_file_put(struct bitmap *bitmap)
597 {
598         struct file *file;
599         struct inode *inode;
600         unsigned long flags;
601
602         spin_lock_irqsave(&bitmap->lock, flags);
603         file = bitmap->file;
604         bitmap->file = NULL;
605         spin_unlock_irqrestore(&bitmap->lock, flags);
606
607         bitmap_stop_daemons(bitmap);
608
609         drain_write_queues(bitmap);
610
611         bitmap_file_unmap(bitmap);
612
613         if (file) {
614                 inode = file->f_mapping->host;
615                 spin_lock(&inode->i_lock);
616                 atomic_set(&inode->i_writecount, 1); /* allow writes again */
617                 spin_unlock(&inode->i_lock);
618                 fput(file);
619         }
620 }
621
622
623 /*
624  * bitmap_file_kick - if an error occurs while manipulating the bitmap file
625  * then it is no longer reliable, so we stop using it and we mark the file
626  * as failed in the superblock
627  */
628 static void bitmap_file_kick(struct bitmap *bitmap)
629 {
630         char *path, *ptr = NULL;
631
632         bitmap_mask_state(bitmap, BITMAP_STALE, MASK_SET);
633         bitmap_update_sb(bitmap);
634
635         path = kmalloc(PAGE_SIZE, GFP_KERNEL);
636         if (path)
637                 ptr = file_path(bitmap->file, path, PAGE_SIZE);
638
639         printk(KERN_ALERT "%s: kicking failed bitmap file %s from array!\n",
640                 bmname(bitmap), ptr ? ptr : "");
641
642         kfree(path);
643
644         bitmap_file_put(bitmap);
645
646         return;
647 }
648
649 enum bitmap_page_attr {
650         BITMAP_PAGE_DIRTY = 1, // there are set bits that need to be synced
651         BITMAP_PAGE_CLEAN = 2, // there are bits that might need to be cleared
652         BITMAP_PAGE_NEEDWRITE=4, // there are cleared bits that need to be synced
653 };
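/*
 * How these attributes are used below:
 *  - BITMAP_PAGE_DIRTY is set by bitmap_file_set_bit(); bitmap_unplug()
 *    writes such pages out (and waits for them) before the md device
 *    lets writes through to the array.
 *  - BITMAP_PAGE_CLEAN is set when a chunk's write counter drops low
 *    (see bitmap_endwrite/bitmap_daemon_work), telling the daemon that
 *    the page may contain bits which can eventually be cleared.
 *  - BITMAP_PAGE_NEEDWRITE marks pages whose cleared bits still need to
 *    be flushed; the daemon or bitmap_unplug() writes them out later.
 */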
654
655 static inline void set_page_attr(struct bitmap *bitmap, struct page *page,
656                                 enum bitmap_page_attr attr)
657 {
658         bitmap->filemap_attr[page->index] |= attr;
659 }
660
661 static inline void clear_page_attr(struct bitmap *bitmap, struct page *page,
662                                 enum bitmap_page_attr attr)
663 {
664         bitmap->filemap_attr[page->index] &= ~attr;
665 }
666
667 static inline unsigned long get_page_attr(struct bitmap *bitmap, struct page *page)
668 {
669         return bitmap->filemap_attr[page->index];
670 }
671
672 /*
673  * bitmap_file_set_bit -- called before performing a write to the md device
674  * to set (and eventually sync) a particular bit in the bitmap file
675  *
676  * we set the bit immediately, then we record the page number so that
677  * when an unplug occurs, we can flush the dirty pages out to disk
678  */
679 static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
680 {
681         unsigned long bit;
682         struct page *page;
683         void *kaddr;
684         unsigned long chunk = block >> CHUNK_BLOCK_SHIFT(bitmap);
685
686         if (!bitmap->file || !bitmap->filemap) {
687                 return;
688         }
689
690         page = filemap_get_page(bitmap, chunk);
691         bit = file_page_offset(chunk);
692
693
694         /* make sure the page stays cached until it gets written out */
695         if (! (get_page_attr(bitmap, page) & BITMAP_PAGE_DIRTY))
696                 page_cache_get(page);
697
698         /* set the bit */
699         kaddr = kmap_atomic(page, KM_USER0);
700         set_bit(bit, kaddr);
701         kunmap_atomic(kaddr, KM_USER0);
702         PRINTK("set file bit %lu page %lu\n", bit, page->index);
703
704         /* record page number so it gets flushed to disk when unplug occurs */
705         set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
706
707 }
708
709 /* this gets called when the md device is ready to unplug its underlying
710  * (slave) device queues -- before we let any writes go down, we need to
711  * sync the dirty pages of the bitmap file to disk */
712 int bitmap_unplug(struct bitmap *bitmap)
713 {
714         unsigned long i, attr, flags;
715         struct page *page;
716         int wait = 0;
717
718         if (!bitmap)
719                 return 0;
720
721         /* look at each page to see if there are any set bits that need to be
722          * flushed out to disk */
723         for (i = 0; i < bitmap->file_pages; i++) {
724                 spin_lock_irqsave(&bitmap->lock, flags);
725                 if (!bitmap->file || !bitmap->filemap) {
726                         spin_unlock_irqrestore(&bitmap->lock, flags);
727                         return 0;
728                 }
729                 page = bitmap->filemap[i];
730                 attr = get_page_attr(bitmap, page);
731                 clear_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
732                 clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
733                 if ((attr & BITMAP_PAGE_DIRTY))
734                         wait = 1;
735                 spin_unlock_irqrestore(&bitmap->lock, flags);
736
737                 if (attr & (BITMAP_PAGE_DIRTY | BITMAP_PAGE_NEEDWRITE))
738                         write_page(page, 0);
739         }
740         if (wait) { /* if any writes were performed, we need to wait on them */
741                 spin_lock_irq(&bitmap->write_lock);
742                 wait_event_lock_irq(bitmap->write_wait,
743                         bitmap->writes_pending == 0, bitmap->write_lock,
744                         wake_up_process(bitmap->writeback_daemon->tsk));
745                 spin_unlock_irq(&bitmap->write_lock);
746         }
747         return 0;
748 }
749
750 static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset,
751         unsigned long sectors, int in_sync);
752 /* bitmap_init_from_disk -- called at bitmap_create time to initialize
753  * the in-memory bitmap from the on-disk bitmap -- also, sets up the
754  * memory mapping of the bitmap file
755  * Special cases:
756  *   if there's no bitmap file, or if the bitmap file had been
757  *   previously kicked from the array, we mark all the bits as
758  *   1's in order to cause a full resync.
759  */
760 static int bitmap_init_from_disk(struct bitmap *bitmap, int in_sync)
761 {
762         unsigned long i, chunks, index, oldindex, bit;
763         struct page *page = NULL, *oldpage = NULL;
764         unsigned long num_pages, bit_cnt = 0;
765         struct file *file;
766         unsigned long bytes, offset, dummy;
767         int outofdate;
768         int ret = -ENOSPC;
769
770         chunks = bitmap->chunks;
771         file = bitmap->file;
772
773         BUG_ON(!file);
774
775 #if INJECT_FAULTS_3
776         outofdate = 1;
777 #else
778         outofdate = bitmap->flags & BITMAP_STALE;
779 #endif
780         if (outofdate)
781                 printk(KERN_INFO "%s: bitmap file is out of date, doing full "
782                         "recovery\n", bmname(bitmap));
783
784         bytes = (chunks + 7) / 8;
785
786         num_pages = (bytes + sizeof(bitmap_super_t) + PAGE_SIZE - 1) / PAGE_SIZE;
787
788         if (i_size_read(file->f_mapping->host) < bytes + sizeof(bitmap_super_t)) {
789                 printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n",
790                         bmname(bitmap),
791                         (unsigned long) i_size_read(file->f_mapping->host),
792                         bytes + sizeof(bitmap_super_t));
793                 goto out;
794         }
795
796         ret = -ENOMEM;
797
798         bitmap->filemap = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
799         if (!bitmap->filemap)
800                 goto out;
801
802         bitmap->filemap_attr = kmalloc(sizeof(long) * num_pages, GFP_KERNEL);
803         if (!bitmap->filemap_attr)
804                 goto out;
805
806         memset(bitmap->filemap_attr, 0, sizeof(long) * num_pages);
807
808         oldindex = ~0L;
809
810         for (i = 0; i < chunks; i++) {
811                 index = file_page_index(i);
812                 bit = file_page_offset(i);
813                 if (index != oldindex) { /* this is a new page, read it in */
814                         /* unmap the old page, we're done with it */
815                         if (oldpage != NULL)
816                                 kunmap(oldpage);
817                         if (index == 0) {
818                                 /*
819                                  * if we're here then the superblock page
820                                  * contains some bits (PAGE_SIZE != sizeof sb)
821                                  * we've already read it in, so just use it
822                                  */
823                                 page = bitmap->sb_page;
824                                 offset = sizeof(bitmap_super_t);
825                         } else {
826                                 page = read_page(file, index, &dummy);
827                                 if (IS_ERR(page)) { /* read error */
828                                         ret = PTR_ERR(page);
829                                         goto out;
830                                 }
831                                 offset = 0;
832                         }
833                         oldindex = index;
834                         oldpage = page;
835                         kmap(page);
836
837                         if (outofdate) {
838                                 /*
839                                  * if bitmap is out of date, dirty the
840                                  * whole page and write it out
841                                  */
842                                 memset(page_address(page) + offset, 0xff,
843                                         PAGE_SIZE - offset);
844                                 ret = write_page(page, 1);
845                                 if (ret) {
846                                         kunmap(page);
847                                         /* release, page not in filemap yet */
848                                         page_cache_release(page);
849                                         goto out;
850                                 }
851                         }
852
853                         bitmap->filemap[bitmap->file_pages++] = page;
854                 }
855                 if (test_bit(bit, page_address(page))) {
856                         /* if the disk bit is set, set the memory bit */
857                         bitmap_set_memory_bits(bitmap,
858                                         i << CHUNK_BLOCK_SHIFT(bitmap), 1, in_sync);
859                         bit_cnt++;
860                 }
861         }
862
863         /* everything went OK */
864         ret = 0;
865         bitmap_mask_state(bitmap, BITMAP_STALE, MASK_UNSET);
866
867         if (page) /* unmap the last page */
868                 kunmap(page);
869
870         if (bit_cnt) { /* Kick recovery if any bits were set */
871                 set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
872                 md_wakeup_thread(bitmap->mddev->thread);
873         }
874
875 out:
876         printk(KERN_INFO "%s: bitmap initialized from disk: "
877                 "read %lu/%lu pages, set %lu bits, status: %d\n",
878                 bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt, ret);
879
880         return ret;
881 }
882
883
884 static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
885 {
886         sector_t chunk = offset >> CHUNK_BLOCK_SHIFT(bitmap);
887         unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
888         bitmap->bp[page].count += inc;
889 /*
890         if (page == 0) printk("count page 0, offset %llu: %d gives %d\n",
891                               (unsigned long long)offset, inc, bitmap->bp[page].count);
892 */
893         bitmap_checkfree(bitmap, page);
894 }
895 static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
896                                             sector_t offset, int *blocks,
897                                             int create);
898
899 /*
900  * bitmap daemon -- periodically wakes up to clean bits and flush pages
901  *                      out to disk
902  */
903
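/*
 * The clearing below is deliberately lazy: once all writes to a chunk
 * have completed its counter sits at 2; one daemon pass decays that to 1
 * (re-marking the page CLEAN so it is examined again), and only if the
 * counter is still 1 on a later pass -- i.e. no new writes hit the chunk
 * in between -- is it zeroed and the corresponding bit cleared in the
 * bitmap file.
 */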
904 int bitmap_daemon_work(struct bitmap *bitmap)
905 {
906         unsigned long bit, j;
907         unsigned long flags;
908         struct page *page = NULL, *lastpage = NULL;
909         int err = 0;
910         int blocks;
911         int attr;
912
913         if (bitmap == NULL)
914                 return 0;
915         if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ))
916                 return 0;
917         bitmap->daemon_lastrun = jiffies;
918
919         for (j = 0; j < bitmap->chunks; j++) {
920                 bitmap_counter_t *bmc;
921                 spin_lock_irqsave(&bitmap->lock, flags);
922                 if (!bitmap->file || !bitmap->filemap) {
923                         /* error or shutdown */
924                         spin_unlock_irqrestore(&bitmap->lock, flags);
925                         break;
926                 }
927
928                 page = filemap_get_page(bitmap, j);
929                 /* skip this page unless it's marked as needing cleaning */
930                 if (!((attr=get_page_attr(bitmap, page)) & BITMAP_PAGE_CLEAN)) {
931                         if (attr & BITMAP_PAGE_NEEDWRITE) {
932                                 page_cache_get(page);
933                                 clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
934                         }
935                         spin_unlock_irqrestore(&bitmap->lock, flags);
936                         if (attr & BITMAP_PAGE_NEEDWRITE) {
937                                 if (write_page(page, 0))
938                                         bitmap_file_kick(bitmap);
939                                 page_cache_release(page);
940                         }
941                         continue;
942                 }
943
944                 bit = file_page_offset(j);
945
946                 if (page != lastpage) {
947                         /* grab the new page, sync and release the old */
948                         page_cache_get(page);
949                         if (lastpage != NULL) {
950                                 if (get_page_attr(bitmap, lastpage) & BITMAP_PAGE_NEEDWRITE) {
951                                         clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
952                                         spin_unlock_irqrestore(&bitmap->lock, flags);
953                                         write_page(lastpage, 0);
954                                 } else {
955                                         set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
956                                         spin_unlock_irqrestore(&bitmap->lock, flags);
957                                 }
958                                 kunmap(lastpage);
959                                 page_cache_release(lastpage);
960                                 if (err)
961                                         bitmap_file_kick(bitmap);
962                         } else
963                                 spin_unlock_irqrestore(&bitmap->lock, flags);
964                         lastpage = page;
965                         kmap(page);
966 /*
967                         printk("bitmap clean at page %lu\n", j);
968 */
969                         spin_lock_irqsave(&bitmap->lock, flags);
970                         clear_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
971                 }
972                 bmc = bitmap_get_counter(bitmap, j << CHUNK_BLOCK_SHIFT(bitmap),
973                                         &blocks, 0);
974                 if (bmc) {
975 /*
976   if (j < 100) printk("bitmap: j=%lu, *bmc = 0x%x\n", j, *bmc);
977 */
978                         if (*bmc == 2) {
979                                 *bmc=1; /* maybe clear the bit next time */
980                                 set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
981                         } else if (*bmc == 1) {
982                                 /* we can clear the bit */
983                                 *bmc = 0;
984                                 bitmap_count_page(bitmap, j << CHUNK_BLOCK_SHIFT(bitmap),
985                                                   -1);
986
987                                 /* clear the bit */
988                                 clear_bit(bit, page_address(page));
989                         }
990                 }
991                 spin_unlock_irqrestore(&bitmap->lock, flags);
992         }
993
994         /* now sync the final page */
995         if (lastpage != NULL) {
996                 kunmap(lastpage);
997                 spin_lock_irqsave(&bitmap->lock, flags);
998                 if (get_page_attr(bitmap, lastpage) &BITMAP_PAGE_NEEDWRITE) {
999                         clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1000                         spin_unlock_irqrestore(&bitmap->lock, flags);
1001                         write_page(lastpage, 0);
1002                 } else {
1003                         set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1004                         spin_unlock_irqrestore(&bitmap->lock, flags);
1005                 }
1006
1007                 page_cache_release(lastpage);
1008         }
1009
1010         return err;
1011 }
1012
1013 static void daemon_exit(struct bitmap *bitmap, mdk_thread_t **daemon)
1014 {
1015         mdk_thread_t *dmn;
1016         unsigned long flags;
1017
1018         /* if no one is waiting on us, we'll free the md thread struct
1019          * and exit, otherwise we let the waiter clean things up */
1020         spin_lock_irqsave(&bitmap->lock, flags);
1021         if ((dmn = *daemon)) { /* no one is waiting, cleanup and exit */
1022                 *daemon = NULL;
1023                 spin_unlock_irqrestore(&bitmap->lock, flags);
1024                 kfree(dmn);
1025                 complete_and_exit(NULL, 0); /* do_exit not exported */
1026         }
1027         spin_unlock_irqrestore(&bitmap->lock, flags);
1028 }
1029
1030 static void bitmap_writeback_daemon(mddev_t *mddev)
1031 {
1032         struct bitmap *bitmap = mddev->bitmap;
1033         struct page *page;
1034         struct page_list *item;
1035         int err = 0;
1036
1037         while (1) {
1038                 PRINTK("%s: bitmap writeback daemon waiting...\n", bmname(bitmap));
1039                 down_interruptible(&bitmap->write_done);
1040                 if (signal_pending(current)) {
1041                         printk(KERN_INFO
1042                             "%s: bitmap writeback daemon got signal, exiting...\n",
1043                             bmname(bitmap));
1044                         break;
1045                 }
1046
1047                 PRINTK("%s: bitmap writeback daemon woke up...\n", bmname(bitmap));
1048                 /* wait on bitmap page writebacks */
1049                 while ((item = dequeue_page(bitmap, &bitmap->complete_pages))) {
1050                         page = item->page;
1051                         mempool_free(item, bitmap->write_pool);
1052                         PRINTK("wait on page writeback: %p %lu\n", page, bitmap->writes_pending);
1053                         wait_on_page_writeback(page);
1054                         PRINTK("finished page writeback: %p %lu\n", page, bitmap->writes_pending);
1055                         spin_lock(&bitmap->write_lock);
1056                         if (!--bitmap->writes_pending)
1057                                 wake_up(&bitmap->write_wait);
1058                         spin_unlock(&bitmap->write_lock);
1059                         err = PageError(page);
1060                         page_cache_release(page);
1061                         if (err) {
1062                                 printk(KERN_WARNING "%s: bitmap file writeback "
1063                                         "failed (page %lu): %d\n",
1064                                         bmname(bitmap), page->index, err);
1065                                 bitmap_file_kick(bitmap);
1066                                 goto out;
1067                         }
1068                 }
1069         }
1070 out:
1071         if (err) {
1072                 printk(KERN_INFO "%s: bitmap writeback daemon exiting (%d)\n",
1073                         bmname(bitmap), err);
1074                 daemon_exit(bitmap, &bitmap->writeback_daemon);
1075         }
1076         return;
1077 }
1078
1079 static int bitmap_start_daemon(struct bitmap *bitmap, mdk_thread_t **ptr,
1080                                 void (*func)(mddev_t *), char *name)
1081 {
1082         mdk_thread_t *daemon;
1083         unsigned long flags;
1084         char namebuf[32];
1085
1086         spin_lock_irqsave(&bitmap->lock, flags);
1087         *ptr = NULL;
1088         if (!bitmap->file) /* no need for daemon if there's no backing file */
1089                 goto out_unlock;
1090
1091         spin_unlock_irqrestore(&bitmap->lock, flags);
1092
1093 #if INJECT_FATAL_FAULT_2
1094         daemon = NULL;
1095 #else
1096         sprintf(namebuf, "%%s_%s", name);
1097         daemon = md_register_thread(func, bitmap->mddev, namebuf);
1098 #endif
1099         if (!daemon) {
1100                 printk(KERN_ERR "%s: failed to start bitmap daemon\n",
1101                         bmname(bitmap));
1102                 return -ECHILD;
1103         }
1104
1105         spin_lock_irqsave(&bitmap->lock, flags);
1106         *ptr = daemon;
1107
1108         md_wakeup_thread(daemon); /* start it running */
1109
1110         PRINTK("%s: %s daemon (pid %d) started...\n",
1111                 bmname(bitmap), name, daemon->tsk->pid);
1112 out_unlock:
1113         spin_unlock_irqrestore(&bitmap->lock, flags);
1114         return 0;
1115 }
1116
1117 static int bitmap_start_daemons(struct bitmap *bitmap)
1118 {
1119         int err = bitmap_start_daemon(bitmap, &bitmap->writeback_daemon,
1120                                         bitmap_writeback_daemon, "bitmap_wb");
1121         return err;
1122 }
1123
1124 static void bitmap_stop_daemon(struct bitmap *bitmap, mdk_thread_t **ptr)
1125 {
1126         mdk_thread_t *daemon;
1127         unsigned long flags;
1128
1129         spin_lock_irqsave(&bitmap->lock, flags);
1130         daemon = *ptr;
1131         *ptr = NULL;
1132         spin_unlock_irqrestore(&bitmap->lock, flags);
1133         if (daemon)
1134                 md_unregister_thread(daemon); /* destroy the thread */
1135 }
1136
1137 static void bitmap_stop_daemons(struct bitmap *bitmap)
1138 {
1139         /* the daemons can't stop themselves... they'll just exit instead... */
1140         if (bitmap->writeback_daemon &&
1141             current->pid != bitmap->writeback_daemon->tsk->pid)
1142                 bitmap_stop_daemon(bitmap, &bitmap->writeback_daemon);
1143 }
1144
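/*
 * Counter layout (see the masks in include/linux/raid/bitmap.h): each
 * counter is a 16-bit word whose top bit is NEEDED (chunk must be
 * resynced), next bit is RESYNC (resync in progress), and low 14 bits
 * count writes in flight to that chunk.  If a counter page could not be
 * allocated and was "hijacked", the struct bitmap_page pointer field
 * itself serves as two counters, each covering half of the chunk range
 * that the page would normally cover.
 */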
1145 static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
1146                                             sector_t offset, int *blocks,
1147                                             int create)
1148 {
1149         /* If 'create', we might release the lock and reclaim it.
1150          * The lock must have been taken with interrupts enabled.
1151          * If !create, we don't release the lock.
1152          */
1153         sector_t chunk = offset >> CHUNK_BLOCK_SHIFT(bitmap);
1154         unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
1155         unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
1156         sector_t csize;
1157
1158         if (bitmap_checkpage(bitmap, page, create) < 0) {
1159                 csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap));
1160                 *blocks = csize - (offset & (csize- 1));
1161                 return NULL;
1162         }
1163         /* now locked ... */
1164
1165         if (bitmap->bp[page].hijacked) { /* hijacked pointer */
1166                 /* should we use the first or second counter field
1167                  * of the hijacked pointer? */
1168                 int hi = (pageoff > PAGE_COUNTER_MASK);
1169                 csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap) +
1170                                           PAGE_COUNTER_SHIFT - 1);
1171                 *blocks = csize - (offset & (csize- 1));
1172                 return  &((bitmap_counter_t *)
1173                           &bitmap->bp[page].map)[hi];
1174         } else { /* page is allocated */
1175                 csize = ((sector_t)1) << (CHUNK_BLOCK_SHIFT(bitmap));
1176                 *blocks = csize - (offset & (csize- 1));
1177                 return (bitmap_counter_t *)
1178                         &(bitmap->bp[page].map[pageoff]);
1179         }
1180 }
1181
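/*
 * bitmap_startwrite()/bitmap_endwrite() bracket every write issued by
 * the raid personality: startwrite bumps the chunk counter (setting the
 * on-disk bit and plugging the queue on the 0 -> 1 transition), and
 * endwrite drops it again, marking the page BITMAP_PAGE_CLEAN once the
 * counter is low so the daemon can eventually clear the bit.
 */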
1182 int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors)
1183 {
1184         if (!bitmap) return 0;
1185         while (sectors) {
1186                 int blocks;
1187                 bitmap_counter_t *bmc;
1188
1189                 spin_lock_irq(&bitmap->lock);
1190                 bmc = bitmap_get_counter(bitmap, offset, &blocks, 1);
1191                 if (!bmc) {
1192                         spin_unlock_irq(&bitmap->lock);
1193                         return 0;
1194                 }
1195
1196                 switch(*bmc) {
1197                 case 0:
1198                         bitmap_file_set_bit(bitmap, offset);
1199                         bitmap_count_page(bitmap,offset, 1);
1200                         blk_plug_device(bitmap->mddev->queue);
1201                         /* fall through */
1202                 case 1:
1203                         *bmc = 2;
1204                 }
1205                 if ((*bmc & COUNTER_MAX) == COUNTER_MAX) BUG();
1206                 (*bmc)++;
1207
1208                 spin_unlock_irq(&bitmap->lock);
1209
1210                 offset += blocks;
1211                 if (sectors > blocks)
1212                         sectors -= blocks;
1213                 else sectors = 0;
1214         }
1215         return 0;
1216 }
1217
1218 void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors,
1219                      int success)
1220 {
1221         if (!bitmap) return;
1222         while (sectors) {
1223                 int blocks;
1224                 unsigned long flags;
1225                 bitmap_counter_t *bmc;
1226
1227                 spin_lock_irqsave(&bitmap->lock, flags);
1228                 bmc = bitmap_get_counter(bitmap, offset, &blocks, 0);
1229                 if (!bmc) {
1230                         spin_unlock_irqrestore(&bitmap->lock, flags);
1231                         return;
1232                 }
1233
1234                 if (!success && ! (*bmc & NEEDED_MASK))
1235                         *bmc |= NEEDED_MASK;
1236
1237                 (*bmc)--;
1238                 if (*bmc <= 2) {
1239                         set_page_attr(bitmap,
1240                                       filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)),
1241                                       BITMAP_PAGE_CLEAN);
1242                 }
1243                 spin_unlock_irqrestore(&bitmap->lock, flags);
1244                 offset += blocks;
1245                 if (sectors > blocks)
1246                         sectors -= blocks;
1247                 else sectors = 0;
1248         }
1249 }
1250
1251 int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks)
1252 {
1253         bitmap_counter_t *bmc;
1254         int rv;
1255         if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
1256                 *blocks = 1024;
1257                 return 1; /* always resync if no bitmap */
1258         }
1259         spin_lock_irq(&bitmap->lock);
1260         bmc = bitmap_get_counter(bitmap, offset, blocks, 0);
1261         rv = 0;
1262         if (bmc) {
1263                 /* locked */
1264                 if (RESYNC(*bmc))
1265                         rv = 1;
1266                 else if (NEEDED(*bmc)) {
1267                         rv = 1;
1268                         *bmc |= RESYNC_MASK;
1269                         *bmc &= ~NEEDED_MASK;
1270                 }
1271         }
1272         spin_unlock_irq(&bitmap->lock);
1273         return rv;
1274 }
1275
1276 void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted)
1277 {
1278         bitmap_counter_t *bmc;
1279         unsigned long flags;
1280 /*
1281         if (offset == 0) printk("bitmap_end_sync 0 (%d)\n", aborted);
1282 */      if (bitmap == NULL) {
1283                 *blocks = 1024;
1284                 return;
1285         }
1286         spin_lock_irqsave(&bitmap->lock, flags);
1287         bmc = bitmap_get_counter(bitmap, offset, blocks, 0);
1288         if (bmc == NULL)
1289                 goto unlock;
1290         /* locked */
1291 /*
1292         if (offset == 0) printk("bitmap_end sync found 0x%x, blocks %d\n", *bmc, *blocks);
1293 */
1294         if (RESYNC(*bmc)) {
1295                 *bmc &= ~RESYNC_MASK;
1296
1297                 if (!NEEDED(*bmc) && aborted)
1298                         *bmc |= NEEDED_MASK;
1299                 else {
1300                         if (*bmc <= 2) {
1301                                 set_page_attr(bitmap,
1302                                               filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap)),
1303                                               BITMAP_PAGE_CLEAN);
1304                         }
1305                 }
1306         }
1307  unlock:
1308         spin_unlock_irqrestore(&bitmap->lock, flags);
1309 }
1310
1311 void bitmap_close_sync(struct bitmap *bitmap)
1312 {
1313         /* Sync has finished, and any bitmap chunks that weren't synced
1314          * properly have been aborted.  It remains to us to clear the
1315          * RESYNC bit wherever it is still on
1316          */
1317         sector_t sector = 0;
1318         int blocks;
1319         if (!bitmap) return;
1320         while (sector < bitmap->mddev->resync_max_sectors) {
1321                 bitmap_end_sync(bitmap, sector, &blocks, 0);
1322 /*
1323                 if (sector < 500) printk("bitmap_close_sync: sec %llu blks %d\n",
1324                                          (unsigned long long)sector, blocks);
1325 */              sector += blocks;
1326         }
1327 }
1328
1329 static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset,
1330                                    unsigned long sectors, int in_sync)
1331 {
1332         /* For each chunk covered by any of these sectors, set the
1333          * counter to 1 and set resync_needed unless in_sync.  They should all
1334          * be 0 at this point
1335          */
1336         while (sectors) {
1337                 int secs;
1338                 bitmap_counter_t *bmc;
1339                 spin_lock_irq(&bitmap->lock);
1340                 bmc = bitmap_get_counter(bitmap, offset, &secs, 1);
1341                 if (!bmc) {
1342                         spin_unlock_irq(&bitmap->lock);
1343                         return;
1344                 }
1345                 if (! *bmc) {
1346                         struct page *page;
1347                         *bmc = 1 | (in_sync? 0 : NEEDED_MASK);
1348                         bitmap_count_page(bitmap, offset, 1);
1349                         page = filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap));
1350                         set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
1351                 }
1352                 spin_unlock_irq(&bitmap->lock);
1353                 if (sectors > secs)
1354                         sectors -= secs;
1355                 else
1356                         sectors = 0;
1357         }
1358 }
1359
1360 /* dirty the entire bitmap */
1361 int bitmap_setallbits(struct bitmap *bitmap)
1362 {
1363         unsigned long flags;
1364         unsigned long j;
1365
1366         /* dirty the in-memory bitmap */
1367         bitmap_set_memory_bits(bitmap, 0, bitmap->chunks << CHUNK_BLOCK_SHIFT(bitmap), 1);
1368
1369         /* dirty the bitmap file */
1370         for (j = 0; j < bitmap->file_pages; j++) {
1371                 struct page *page = bitmap->filemap[j];
1372
1373                 spin_lock_irqsave(&bitmap->lock, flags);
1374                 page_cache_get(page);
1375                 spin_unlock_irqrestore(&bitmap->lock, flags);
1376                 memset(kmap(page), 0xff, PAGE_SIZE);
1377                 kunmap(page);
1378                 write_page(page, 0);
1379         }
1380
1381         return 0;
1382 }
1383
1384 /*
1385  * free memory that was allocated
1386  */
1387 void bitmap_destroy(mddev_t *mddev)
1388 {
1389         unsigned long k, pages;
1390         struct bitmap_page *bp;
1391         struct bitmap *bitmap = mddev->bitmap;
1392
1393         if (!bitmap) /* there was no bitmap */
1394                 return;
1395
1396         mddev->bitmap = NULL; /* disconnect from the md device */
1397
1398         /* release the bitmap file and kill the daemon */
1399         bitmap_file_put(bitmap);
1400
1401         bp = bitmap->bp;
1402         pages = bitmap->pages;
1403
1404         /* free all allocated memory */
1405
1406         mempool_destroy(bitmap->write_pool);
1407
1408         if (bp) /* deallocate the page memory */
1409                 for (k = 0; k < pages; k++)
1410                         if (bp[k].map && !bp[k].hijacked)
1411                                 kfree(bp[k].map);
1412         kfree(bp);
1413         kfree(bitmap);
1414 }
1415
1416 /*
1417  * initialize the bitmap structure
1418  * if this returns an error, bitmap_destroy must be called to do clean up
1419  */
1420 int bitmap_create(mddev_t *mddev)
1421 {
1422         struct bitmap *bitmap;
1423         unsigned long blocks = mddev->resync_max_sectors;
1424         unsigned long chunks;
1425         unsigned long pages;
1426         struct file *file = mddev->bitmap_file;
1427         int err;
1428
1429         BUG_ON(sizeof(bitmap_super_t) != 256);
1430
1431         if (!file) /* bitmap disabled, nothing to do */
1432                 return 0;
1433
1434         bitmap = kmalloc(sizeof(*bitmap), GFP_KERNEL);
1435         if (!bitmap)
1436                 return -ENOMEM;
1437
1438         memset(bitmap, 0, sizeof(*bitmap));
1439
1440         spin_lock_init(&bitmap->lock);
1441         bitmap->mddev = mddev;
1442         mddev->bitmap = bitmap;
1443
1444         spin_lock_init(&bitmap->write_lock);
1445         init_MUTEX_LOCKED(&bitmap->write_done);
1446         INIT_LIST_HEAD(&bitmap->complete_pages);
1447         init_waitqueue_head(&bitmap->write_wait);
1448         bitmap->write_pool = mempool_create(WRITE_POOL_SIZE, write_pool_alloc,
1449                                 write_pool_free, NULL);
1450         if (!bitmap->write_pool)
1451                 return -ENOMEM;
1452
1453         bitmap->file = file;
1454         get_file(file);
1455         /* read superblock from bitmap file (this sets bitmap->chunksize) */
1456         err = bitmap_read_sb(bitmap);
1457         if (err)
1458                 return err;
1459
1460         bitmap->chunkshift = find_first_bit(&bitmap->chunksize,
1461                                         sizeof(bitmap->chunksize) * 8);
1462
1463         /* now that chunksize and chunkshift are set, we can use these macros */
1464         chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) /
1465                         CHUNK_BLOCK_RATIO(bitmap);
1466         pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO;
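        /*
         * e.g. (assuming the usual bitmap.h definitions, where
         * CHUNK_BLOCK_RATIO is sectors per chunk and PAGE_COUNTER_RATIO
         * is counters per page): a 1TB array (2^31 512-byte sectors)
         * with 4MB chunks gives 2^31 / 2^13 = 262144 chunks, and with
         * 16-bit counters on 4K pages that is 262144 / 2048 = 128
         * counter pages.
         */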
1467
1468         BUG_ON(!pages);
1469
1470         bitmap->chunks = chunks;
1471         bitmap->pages = pages;
1472         bitmap->missing_pages = pages;
1473         bitmap->counter_bits = COUNTER_BITS;
1474
1475         bitmap->syncchunk = ~0UL;
1476
1477 #if INJECT_FATAL_FAULT_1
1478         bitmap->bp = NULL;
1479 #else
1480         bitmap->bp = kmalloc(pages * sizeof(*bitmap->bp), GFP_KERNEL);
1481 #endif
1482         if (!bitmap->bp)
1483                 return -ENOMEM;
1484         memset(bitmap->bp, 0, pages * sizeof(*bitmap->bp));
1485
1486         bitmap->flags |= BITMAP_ACTIVE;
1487
1488         /* now that we have some pages available, initialize the in-memory
1489          * bitmap from the on-disk bitmap */
1490         err = bitmap_init_from_disk(bitmap, mddev->recovery_cp == MaxSector);
1491         if (err)
1492                 return err;
1493
1494         printk(KERN_INFO "created bitmap (%lu pages) for device %s\n",
1495                 pages, bmname(bitmap));
1496
1497         /* kick off the bitmap daemons */
1498         err = bitmap_start_daemons(bitmap);
1499         if (err)
1500                 return err;
1501         return bitmap_update_sb(bitmap);
1502 }
1503
1504 /* the bitmap API -- for raid personalities */
1505 EXPORT_SYMBOL(bitmap_startwrite);
1506 EXPORT_SYMBOL(bitmap_endwrite);
1507 EXPORT_SYMBOL(bitmap_start_sync);
1508 EXPORT_SYMBOL(bitmap_end_sync);
1509 EXPORT_SYMBOL(bitmap_unplug);
1510 EXPORT_SYMBOL(bitmap_close_sync);
1511 EXPORT_SYMBOL(bitmap_daemon_work);