2 md.c : Multiple Devices driver for Linux
3 Copyright (C) 1998, 1999, 2000 Ingo Molnar
5 completely rewritten, based on the MD driver code from Marc Zyngier
9 - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10 - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11 - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12 - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13 - kmod support by: Cyrus Durgin
14 - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15 - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
17 - lots of fixes and improvements to the RAID1/RAID5 and generic
18 RAID code (such as request based resynchronization):
20 Neil Brown <neilb@cse.unsw.edu.au>.
22 - persistent bitmap code
23 Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
25 This program is free software; you can redistribute it and/or modify
26 it under the terms of the GNU General Public License as published by
27 the Free Software Foundation; either version 2, or (at your option)
30 You should have received a copy of the GNU General Public License
31 (for example /usr/src/linux/COPYING); if not, write to the Free
32 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
35 #include <linux/module.h>
36 #include <linux/kthread.h>
37 #include <linux/linkage.h>
38 #include <linux/raid/md.h>
39 #include <linux/raid/bitmap.h>
40 #include <linux/sysctl.h>
41 #include <linux/buffer_head.h> /* for invalidate_bdev */
42 #include <linux/poll.h>
43 #include <linux/mutex.h>
44 #include <linux/ctype.h>
45 #include <linux/freezer.h>
47 #include <linux/init.h>
49 #include <linux/file.h>
52 #include <linux/kmod.h>
55 #include <asm/unaligned.h>
57 #define MAJOR_NR MD_MAJOR
60 /* 63 partitions with the alternate major number (mdp) */
61 #define MdpMinorShift 6
64 #define dprintk(x...) ((void)(DEBUG && printk(x)))
68 static void autostart_arrays (int part);
71 static LIST_HEAD(pers_list);
72 static DEFINE_SPINLOCK(pers_lock);
74 static void md_print_devices(void);
76 #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
79 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
80 * is 1000 KB/sec, so the extra system load does not show up that much.
81 * Increase it if you want to have more _guaranteed_ speed. Note that
82 * the RAID driver will use the maximum available bandwidth if the IO
83 * subsystem is idle. There is also an 'absolute maximum' reconstruction
84 * speed limit - in case reconstruction slows down your system despite
87 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
88 * or /sys/block/mdX/md/sync_speed_{min,max}
91 static int sysctl_speed_limit_min = 1000;
92 static int sysctl_speed_limit_max = 200000;
93 static inline int speed_min(mddev_t *mddev)
95 return mddev->sync_speed_min ?
96 mddev->sync_speed_min : sysctl_speed_limit_min;
99 static inline int speed_max(mddev_t *mddev)
101 return mddev->sync_speed_max ?
102 mddev->sync_speed_max : sysctl_speed_limit_max;
105 static struct ctl_table_header *raid_table_header;
107 static ctl_table raid_table[] = {
109 .ctl_name = DEV_RAID_SPEED_LIMIT_MIN,
110 .procname = "speed_limit_min",
111 .data = &sysctl_speed_limit_min,
112 .maxlen = sizeof(int),
113 .mode = S_IRUGO|S_IWUSR,
114 .proc_handler = &proc_dointvec,
117 .ctl_name = DEV_RAID_SPEED_LIMIT_MAX,
118 .procname = "speed_limit_max",
119 .data = &sysctl_speed_limit_max,
120 .maxlen = sizeof(int),
121 .mode = S_IRUGO|S_IWUSR,
122 .proc_handler = &proc_dointvec,
127 static ctl_table raid_dir_table[] = {
129 .ctl_name = DEV_RAID,
132 .mode = S_IRUGO|S_IXUGO,
138 static ctl_table raid_root_table[] = {
144 .child = raid_dir_table,
149 static struct block_device_operations md_fops;
151 static int start_readonly;
154 * We have a system wide 'event count' that is incremented
155 * on any 'interesting' event, and readers of /proc/mdstat
156 * can use 'poll' or 'select' to find out when the event
160 * start array, stop array, error, add device, remove device,
161 * start build, activate spare
163 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
164 static atomic_t md_event_count;
165 void md_new_event(mddev_t *mddev)
167 atomic_inc(&md_event_count);
168 wake_up(&md_event_waiters);
169 sysfs_notify(&mddev->kobj, NULL, "sync_action");
171 EXPORT_SYMBOL_GPL(md_new_event);
173 /* Alternate version that can be called from interrupts
174 * when calling sysfs_notify isn't needed.
176 static void md_new_event_inintr(mddev_t *mddev)
178 atomic_inc(&md_event_count);
179 wake_up(&md_event_waiters);
183 * Enables to iterate over all existing md arrays
184 * all_mddevs_lock protects this list.
186 static LIST_HEAD(all_mddevs);
187 static DEFINE_SPINLOCK(all_mddevs_lock);
191 * iterates through all used mddevs in the system.
192 * We take care to grab the all_mddevs_lock whenever navigating
193 * the list, and to always hold a refcount when unlocked.
194 * Any code which breaks out of this loop while own
195 * a reference to the current mddev and must mddev_put it.
197 #define ITERATE_MDDEV(mddev,tmp) \
199 for (({ spin_lock(&all_mddevs_lock); \
200 tmp = all_mddevs.next; \
202 ({ if (tmp != &all_mddevs) \
203 mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
204 spin_unlock(&all_mddevs_lock); \
205 if (mddev) mddev_put(mddev); \
206 mddev = list_entry(tmp, mddev_t, all_mddevs); \
207 tmp != &all_mddevs;}); \
208 ({ spin_lock(&all_mddevs_lock); \
213 static int md_fail_request (request_queue_t *q, struct bio *bio)
215 bio_io_error(bio, bio->bi_size);
219 static inline mddev_t *mddev_get(mddev_t *mddev)
221 atomic_inc(&mddev->active);
225 static void mddev_put(mddev_t *mddev)
227 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
229 if (!mddev->raid_disks && list_empty(&mddev->disks)) {
230 list_del(&mddev->all_mddevs);
231 spin_unlock(&all_mddevs_lock);
232 blk_cleanup_queue(mddev->queue);
233 kobject_unregister(&mddev->kobj);
235 spin_unlock(&all_mddevs_lock);
238 static mddev_t * mddev_find(dev_t unit)
240 mddev_t *mddev, *new = NULL;
243 spin_lock(&all_mddevs_lock);
244 list_for_each_entry(mddev, &all_mddevs, all_mddevs)
245 if (mddev->unit == unit) {
247 spin_unlock(&all_mddevs_lock);
253 list_add(&new->all_mddevs, &all_mddevs);
254 spin_unlock(&all_mddevs_lock);
257 spin_unlock(&all_mddevs_lock);
259 new = kzalloc(sizeof(*new), GFP_KERNEL);
264 if (MAJOR(unit) == MD_MAJOR)
265 new->md_minor = MINOR(unit);
267 new->md_minor = MINOR(unit) >> MdpMinorShift;
269 mutex_init(&new->reconfig_mutex);
270 INIT_LIST_HEAD(&new->disks);
271 INIT_LIST_HEAD(&new->all_mddevs);
272 init_timer(&new->safemode_timer);
273 atomic_set(&new->active, 1);
274 spin_lock_init(&new->write_lock);
275 init_waitqueue_head(&new->sb_wait);
277 new->queue = blk_alloc_queue(GFP_KERNEL);
282 set_bit(QUEUE_FLAG_CLUSTER, &new->queue->queue_flags);
284 blk_queue_make_request(new->queue, md_fail_request);
289 static inline int mddev_lock(mddev_t * mddev)
291 return mutex_lock_interruptible(&mddev->reconfig_mutex);
294 static inline int mddev_trylock(mddev_t * mddev)
296 return mutex_trylock(&mddev->reconfig_mutex);
299 static inline void mddev_unlock(mddev_t * mddev)
301 mutex_unlock(&mddev->reconfig_mutex);
303 md_wakeup_thread(mddev->thread);
306 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
309 struct list_head *tmp;
311 ITERATE_RDEV(mddev,rdev,tmp) {
312 if (rdev->desc_nr == nr)
318 static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
320 struct list_head *tmp;
323 ITERATE_RDEV(mddev,rdev,tmp) {
324 if (rdev->bdev->bd_dev == dev)
330 static struct mdk_personality *find_pers(int level, char *clevel)
332 struct mdk_personality *pers;
333 list_for_each_entry(pers, &pers_list, list) {
334 if (level != LEVEL_NONE && pers->level == level)
336 if (strcmp(pers->name, clevel)==0)
342 static inline sector_t calc_dev_sboffset(struct block_device *bdev)
344 sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
345 return MD_NEW_SIZE_BLOCKS(size);
348 static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
352 size = rdev->sb_offset;
355 size &= ~((sector_t)chunk_size/1024 - 1);
359 static int alloc_disk_sb(mdk_rdev_t * rdev)
364 rdev->sb_page = alloc_page(GFP_KERNEL);
365 if (!rdev->sb_page) {
366 printk(KERN_ALERT "md: out of memory.\n");
373 static void free_disk_sb(mdk_rdev_t * rdev)
376 put_page(rdev->sb_page);
378 rdev->sb_page = NULL;
385 static int super_written(struct bio *bio, unsigned int bytes_done, int error)
387 mdk_rdev_t *rdev = bio->bi_private;
388 mddev_t *mddev = rdev->mddev;
392 if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
393 printk("md: super_written gets error=%d, uptodate=%d\n",
394 error, test_bit(BIO_UPTODATE, &bio->bi_flags));
395 WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
396 md_error(mddev, rdev);
399 if (atomic_dec_and_test(&mddev->pending_writes))
400 wake_up(&mddev->sb_wait);
405 static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error)
407 struct bio *bio2 = bio->bi_private;
408 mdk_rdev_t *rdev = bio2->bi_private;
409 mddev_t *mddev = rdev->mddev;
413 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
414 error == -EOPNOTSUPP) {
416 /* barriers don't appear to be supported :-( */
417 set_bit(BarriersNotsupp, &rdev->flags);
418 mddev->barriers_work = 0;
419 spin_lock_irqsave(&mddev->write_lock, flags);
420 bio2->bi_next = mddev->biolist;
421 mddev->biolist = bio2;
422 spin_unlock_irqrestore(&mddev->write_lock, flags);
423 wake_up(&mddev->sb_wait);
428 bio->bi_private = rdev;
429 return super_written(bio, bytes_done, error);
432 void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
433 sector_t sector, int size, struct page *page)
435 /* write first size bytes of page to sector of rdev
436 * Increment mddev->pending_writes before returning
437 * and decrement it on completion, waking up sb_wait
438 * if zero is reached.
439 * If an error occurred, call md_error
441 * As we might need to resubmit the request if BIO_RW_BARRIER
442 * causes ENOTSUPP, we allocate a spare bio...
444 struct bio *bio = bio_alloc(GFP_NOIO, 1);
445 int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);
447 bio->bi_bdev = rdev->bdev;
448 bio->bi_sector = sector;
449 bio_add_page(bio, page, size, 0);
450 bio->bi_private = rdev;
451 bio->bi_end_io = super_written;
454 atomic_inc(&mddev->pending_writes);
455 if (!test_bit(BarriersNotsupp, &rdev->flags)) {
457 rw |= (1<<BIO_RW_BARRIER);
458 rbio = bio_clone(bio, GFP_NOIO);
459 rbio->bi_private = bio;
460 rbio->bi_end_io = super_written_barrier;
461 submit_bio(rw, rbio);
466 void md_super_wait(mddev_t *mddev)
468 /* wait for all superblock writes that were scheduled to complete.
469 * if any had to be retried (due to BARRIER problems), retry them
473 prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
474 if (atomic_read(&mddev->pending_writes)==0)
476 while (mddev->biolist) {
478 spin_lock_irq(&mddev->write_lock);
479 bio = mddev->biolist;
480 mddev->biolist = bio->bi_next ;
482 spin_unlock_irq(&mddev->write_lock);
483 submit_bio(bio->bi_rw, bio);
487 finish_wait(&mddev->sb_wait, &wq);
490 static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
495 complete((struct completion*)bio->bi_private);
499 int sync_page_io(struct block_device *bdev, sector_t sector, int size,
500 struct page *page, int rw)
502 struct bio *bio = bio_alloc(GFP_NOIO, 1);
503 struct completion event;
506 rw |= (1 << BIO_RW_SYNC);
509 bio->bi_sector = sector;
510 bio_add_page(bio, page, size, 0);
511 init_completion(&event);
512 bio->bi_private = &event;
513 bio->bi_end_io = bi_complete;
515 wait_for_completion(&event);
517 ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
521 EXPORT_SYMBOL_GPL(sync_page_io);
523 static int read_disk_sb(mdk_rdev_t * rdev, int size)
525 char b[BDEVNAME_SIZE];
526 if (!rdev->sb_page) {
534 if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
540 printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
541 bdevname(rdev->bdev,b));
545 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
547 if ( (sb1->set_uuid0 == sb2->set_uuid0) &&
548 (sb1->set_uuid1 == sb2->set_uuid1) &&
549 (sb1->set_uuid2 == sb2->set_uuid2) &&
550 (sb1->set_uuid3 == sb2->set_uuid3))
558 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
561 mdp_super_t *tmp1, *tmp2;
563 tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
564 tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
566 if (!tmp1 || !tmp2) {
568 printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n");
576 * nr_disks is not constant
581 if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
592 static unsigned int calc_sb_csum(mdp_super_t * sb)
594 unsigned int disk_csum, csum;
596 disk_csum = sb->sb_csum;
598 csum = csum_partial((void *)sb, MD_SB_BYTES, 0);
599 sb->sb_csum = disk_csum;
605 * Handle superblock details.
606 * We want to be able to handle multiple superblock formats
607 * so we have a common interface to them all, and an array of
608 * different handlers.
609 * We rely on user-space to write the initial superblock, and support
610 * reading and updating of superblocks.
611 * Interface methods are:
612 * int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
613 * loads and validates a superblock on dev.
614 * if refdev != NULL, compare superblocks on both devices
616 * 0 - dev has a superblock that is compatible with refdev
617 * 1 - dev has a superblock that is compatible and newer than refdev
618 * so dev should be used as the refdev in future
619 * -EINVAL superblock incompatible or invalid
620 * -othererror e.g. -EIO
622 * int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
623 * Verify that dev is acceptable into mddev.
624 * The first time, mddev->raid_disks will be 0, and data from
625 * dev should be merged in. Subsequent calls check that dev
626 * is new enough. Return 0 or -EINVAL
628 * void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
629 * Update the superblock for rdev with data in mddev
630 * This does not write to disc.
636 struct module *owner;
637 int (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
638 int (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
639 void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
643 * load_super for 0.90.0
645 static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
647 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
653 * Calculate the position of the superblock,
654 * it's at the end of the disk.
656 * It also happens to be a multiple of 4Kb.
658 sb_offset = calc_dev_sboffset(rdev->bdev);
659 rdev->sb_offset = sb_offset;
661 ret = read_disk_sb(rdev, MD_SB_BYTES);
666 bdevname(rdev->bdev, b);
667 sb = (mdp_super_t*)page_address(rdev->sb_page);
669 if (sb->md_magic != MD_SB_MAGIC) {
670 printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
675 if (sb->major_version != 0 ||
676 sb->minor_version < 90 ||
677 sb->minor_version > 91) {
678 printk(KERN_WARNING "Bad version number %d.%d on %s\n",
679 sb->major_version, sb->minor_version,
684 if (sb->raid_disks <= 0)
687 if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) {
688 printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
693 rdev->preferred_minor = sb->md_minor;
694 rdev->data_offset = 0;
695 rdev->sb_size = MD_SB_BYTES;
697 if (sb->level == LEVEL_MULTIPATH)
700 rdev->desc_nr = sb->this_disk.number;
706 mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
707 if (!uuid_equal(refsb, sb)) {
708 printk(KERN_WARNING "md: %s has different UUID to %s\n",
709 b, bdevname(refdev->bdev,b2));
712 if (!sb_equal(refsb, sb)) {
713 printk(KERN_WARNING "md: %s has same UUID"
714 " but different superblock to %s\n",
715 b, bdevname(refdev->bdev, b2));
719 ev2 = md_event(refsb);
725 rdev->size = calc_dev_size(rdev, sb->chunk_size);
727 if (rdev->size < sb->size && sb->level > 1)
728 /* "this cannot possibly happen" ... */
736 * validate_super for 0.90.0
738 static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
741 mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
742 __u64 ev1 = md_event(sb);
744 rdev->raid_disk = -1;
746 if (mddev->raid_disks == 0) {
747 mddev->major_version = 0;
748 mddev->minor_version = sb->minor_version;
749 mddev->patch_version = sb->patch_version;
750 mddev->persistent = ! sb->not_persistent;
751 mddev->chunk_size = sb->chunk_size;
752 mddev->ctime = sb->ctime;
753 mddev->utime = sb->utime;
754 mddev->level = sb->level;
755 mddev->clevel[0] = 0;
756 mddev->layout = sb->layout;
757 mddev->raid_disks = sb->raid_disks;
758 mddev->size = sb->size;
760 mddev->bitmap_offset = 0;
761 mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
763 if (mddev->minor_version >= 91) {
764 mddev->reshape_position = sb->reshape_position;
765 mddev->delta_disks = sb->delta_disks;
766 mddev->new_level = sb->new_level;
767 mddev->new_layout = sb->new_layout;
768 mddev->new_chunk = sb->new_chunk;
770 mddev->reshape_position = MaxSector;
771 mddev->delta_disks = 0;
772 mddev->new_level = mddev->level;
773 mddev->new_layout = mddev->layout;
774 mddev->new_chunk = mddev->chunk_size;
777 if (sb->state & (1<<MD_SB_CLEAN))
778 mddev->recovery_cp = MaxSector;
780 if (sb->events_hi == sb->cp_events_hi &&
781 sb->events_lo == sb->cp_events_lo) {
782 mddev->recovery_cp = sb->recovery_cp;
784 mddev->recovery_cp = 0;
787 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
788 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
789 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
790 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
792 mddev->max_disks = MD_SB_DISKS;
794 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
795 mddev->bitmap_file == NULL) {
796 if (mddev->level != 1 && mddev->level != 4
797 && mddev->level != 5 && mddev->level != 6
798 && mddev->level != 10) {
799 /* FIXME use a better test */
800 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
803 mddev->bitmap_offset = mddev->default_bitmap_offset;
806 } else if (mddev->pers == NULL) {
807 /* Insist on good event counter while assembling */
809 if (ev1 < mddev->events)
811 } else if (mddev->bitmap) {
812 /* if adding to array with a bitmap, then we can accept an
813 * older device ... but not too old.
815 if (ev1 < mddev->bitmap->events_cleared)
818 if (ev1 < mddev->events)
819 /* just a hot-add of a new device, leave raid_disk at -1 */
823 if (mddev->level != LEVEL_MULTIPATH) {
824 desc = sb->disks + rdev->desc_nr;
826 if (desc->state & (1<<MD_DISK_FAULTY))
827 set_bit(Faulty, &rdev->flags);
828 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
829 desc->raid_disk < mddev->raid_disks */) {
830 set_bit(In_sync, &rdev->flags);
831 rdev->raid_disk = desc->raid_disk;
833 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
834 set_bit(WriteMostly, &rdev->flags);
835 } else /* MULTIPATH are always insync */
836 set_bit(In_sync, &rdev->flags);
841 * sync_super for 0.90.0
843 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
846 struct list_head *tmp;
848 int next_spare = mddev->raid_disks;
851 /* make rdev->sb match mddev data..
854 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
855 * 3/ any empty disks < next_spare become removed
857 * disks[0] gets initialised to REMOVED because
858 * we cannot be sure from other fields if it has
859 * been initialised or not.
862 int active=0, working=0,failed=0,spare=0,nr_disks=0;
864 rdev->sb_size = MD_SB_BYTES;
866 sb = (mdp_super_t*)page_address(rdev->sb_page);
868 memset(sb, 0, sizeof(*sb));
870 sb->md_magic = MD_SB_MAGIC;
871 sb->major_version = mddev->major_version;
872 sb->patch_version = mddev->patch_version;
873 sb->gvalid_words = 0; /* ignored */
874 memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
875 memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
876 memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
877 memcpy(&sb->set_uuid3, mddev->uuid+12,4);
879 sb->ctime = mddev->ctime;
880 sb->level = mddev->level;
881 sb->size = mddev->size;
882 sb->raid_disks = mddev->raid_disks;
883 sb->md_minor = mddev->md_minor;
884 sb->not_persistent = !mddev->persistent;
885 sb->utime = mddev->utime;
887 sb->events_hi = (mddev->events>>32);
888 sb->events_lo = (u32)mddev->events;
890 if (mddev->reshape_position == MaxSector)
891 sb->minor_version = 90;
893 sb->minor_version = 91;
894 sb->reshape_position = mddev->reshape_position;
895 sb->new_level = mddev->new_level;
896 sb->delta_disks = mddev->delta_disks;
897 sb->new_layout = mddev->new_layout;
898 sb->new_chunk = mddev->new_chunk;
900 mddev->minor_version = sb->minor_version;
903 sb->recovery_cp = mddev->recovery_cp;
904 sb->cp_events_hi = (mddev->events>>32);
905 sb->cp_events_lo = (u32)mddev->events;
906 if (mddev->recovery_cp == MaxSector)
907 sb->state = (1<< MD_SB_CLEAN);
911 sb->layout = mddev->layout;
912 sb->chunk_size = mddev->chunk_size;
914 if (mddev->bitmap && mddev->bitmap_file == NULL)
915 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
917 sb->disks[0].state = (1<<MD_DISK_REMOVED);
918 ITERATE_RDEV(mddev,rdev2,tmp) {
921 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
922 && !test_bit(Faulty, &rdev2->flags))
923 desc_nr = rdev2->raid_disk;
925 desc_nr = next_spare++;
926 rdev2->desc_nr = desc_nr;
927 d = &sb->disks[rdev2->desc_nr];
929 d->number = rdev2->desc_nr;
930 d->major = MAJOR(rdev2->bdev->bd_dev);
931 d->minor = MINOR(rdev2->bdev->bd_dev);
932 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
933 && !test_bit(Faulty, &rdev2->flags))
934 d->raid_disk = rdev2->raid_disk;
936 d->raid_disk = rdev2->desc_nr; /* compatibility */
937 if (test_bit(Faulty, &rdev2->flags))
938 d->state = (1<<MD_DISK_FAULTY);
939 else if (test_bit(In_sync, &rdev2->flags)) {
940 d->state = (1<<MD_DISK_ACTIVE);
941 d->state |= (1<<MD_DISK_SYNC);
949 if (test_bit(WriteMostly, &rdev2->flags))
950 d->state |= (1<<MD_DISK_WRITEMOSTLY);
952 /* now set the "removed" and "faulty" bits on any missing devices */
953 for (i=0 ; i < mddev->raid_disks ; i++) {
954 mdp_disk_t *d = &sb->disks[i];
955 if (d->state == 0 && d->number == 0) {
958 d->state = (1<<MD_DISK_REMOVED);
959 d->state |= (1<<MD_DISK_FAULTY);
963 sb->nr_disks = nr_disks;
964 sb->active_disks = active;
965 sb->working_disks = working;
966 sb->failed_disks = failed;
967 sb->spare_disks = spare;
969 sb->this_disk = sb->disks[rdev->desc_nr];
970 sb->sb_csum = calc_sb_csum(sb);
974 * version 1 superblock
977 static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
981 unsigned long long newcsum;
982 int size = 256 + le32_to_cpu(sb->max_dev)*2;
983 __le32 *isuper = (__le32*)sb;
986 disk_csum = sb->sb_csum;
989 for (i=0; size>=4; size -= 4 )
990 newcsum += le32_to_cpu(*isuper++);
993 newcsum += le16_to_cpu(*(__le16*) isuper);
995 csum = (newcsum & 0xffffffff) + (newcsum >> 32);
996 sb->sb_csum = disk_csum;
997 return cpu_to_le32(csum);
1000 static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1002 struct mdp_superblock_1 *sb;
1005 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1009 * Calculate the position of the superblock.
1010 * It is always aligned to a 4K boundary and
1011 * depeding on minor_version, it can be:
1012 * 0: At least 8K, but less than 12K, from end of device
1013 * 1: At start of device
1014 * 2: 4K from start of device.
1016 switch(minor_version) {
1018 sb_offset = rdev->bdev->bd_inode->i_size >> 9;
1020 sb_offset &= ~(sector_t)(4*2-1);
1021 /* convert from sectors to K */
1033 rdev->sb_offset = sb_offset;
1035 /* superblock is rarely larger than 1K, but it can be larger,
1036 * and it is safe to read 4k, so we do that
1038 ret = read_disk_sb(rdev, 4096);
1039 if (ret) return ret;
1042 sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1044 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1045 sb->major_version != cpu_to_le32(1) ||
1046 le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1047 le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
1048 (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1051 if (calc_sb_1_csum(sb) != sb->sb_csum) {
1052 printk("md: invalid superblock checksum on %s\n",
1053 bdevname(rdev->bdev,b));
1056 if (le64_to_cpu(sb->data_size) < 10) {
1057 printk("md: data_size too small on %s\n",
1058 bdevname(rdev->bdev,b));
1061 rdev->preferred_minor = 0xffff;
1062 rdev->data_offset = le64_to_cpu(sb->data_offset);
1063 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1065 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1066 bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
1067 if (rdev->sb_size & bmask)
1068 rdev-> sb_size = (rdev->sb_size | bmask)+1;
1070 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1073 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1079 struct mdp_superblock_1 *refsb =
1080 (struct mdp_superblock_1*)page_address(refdev->sb_page);
1082 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1083 sb->level != refsb->level ||
1084 sb->layout != refsb->layout ||
1085 sb->chunksize != refsb->chunksize) {
1086 printk(KERN_WARNING "md: %s has strangely different"
1087 " superblock to %s\n",
1088 bdevname(rdev->bdev,b),
1089 bdevname(refdev->bdev,b2));
1092 ev1 = le64_to_cpu(sb->events);
1093 ev2 = le64_to_cpu(refsb->events);
1101 rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
1103 rdev->size = rdev->sb_offset;
1104 if (rdev->size < le64_to_cpu(sb->data_size)/2)
1106 rdev->size = le64_to_cpu(sb->data_size)/2;
1107 if (le32_to_cpu(sb->chunksize))
1108 rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
1110 if (le64_to_cpu(sb->size) > rdev->size*2)
1115 static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1117 struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1118 __u64 ev1 = le64_to_cpu(sb->events);
1120 rdev->raid_disk = -1;
1122 if (mddev->raid_disks == 0) {
1123 mddev->major_version = 1;
1124 mddev->patch_version = 0;
1125 mddev->persistent = 1;
1126 mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
1127 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
1128 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
1129 mddev->level = le32_to_cpu(sb->level);
1130 mddev->clevel[0] = 0;
1131 mddev->layout = le32_to_cpu(sb->layout);
1132 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1133 mddev->size = le64_to_cpu(sb->size)/2;
1134 mddev->events = ev1;
1135 mddev->bitmap_offset = 0;
1136 mddev->default_bitmap_offset = 1024 >> 9;
1138 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1139 memcpy(mddev->uuid, sb->set_uuid, 16);
1141 mddev->max_disks = (4096-256)/2;
1143 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1144 mddev->bitmap_file == NULL ) {
1145 if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
1146 && mddev->level != 10) {
1147 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
1150 mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
1152 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1153 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1154 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1155 mddev->new_level = le32_to_cpu(sb->new_level);
1156 mddev->new_layout = le32_to_cpu(sb->new_layout);
1157 mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
1159 mddev->reshape_position = MaxSector;
1160 mddev->delta_disks = 0;
1161 mddev->new_level = mddev->level;
1162 mddev->new_layout = mddev->layout;
1163 mddev->new_chunk = mddev->chunk_size;
1166 } else if (mddev->pers == NULL) {
1167 /* Insist of good event counter while assembling */
1169 if (ev1 < mddev->events)
1171 } else if (mddev->bitmap) {
1172 /* If adding to array with a bitmap, then we can accept an
1173 * older device, but not too old.
1175 if (ev1 < mddev->bitmap->events_cleared)
1178 if (ev1 < mddev->events)
1179 /* just a hot-add of a new device, leave raid_disk at -1 */
1182 if (mddev->level != LEVEL_MULTIPATH) {
1184 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1186 case 0xffff: /* spare */
1188 case 0xfffe: /* faulty */
1189 set_bit(Faulty, &rdev->flags);
1192 if ((le32_to_cpu(sb->feature_map) &
1193 MD_FEATURE_RECOVERY_OFFSET))
1194 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1196 set_bit(In_sync, &rdev->flags);
1197 rdev->raid_disk = role;
1200 if (sb->devflags & WriteMostly1)
1201 set_bit(WriteMostly, &rdev->flags);
1202 } else /* MULTIPATH are always insync */
1203 set_bit(In_sync, &rdev->flags);
1208 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1210 struct mdp_superblock_1 *sb;
1211 struct list_head *tmp;
1214 /* make rdev->sb match mddev and rdev data. */
1216 sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1218 sb->feature_map = 0;
1220 sb->recovery_offset = cpu_to_le64(0);
1221 memset(sb->pad1, 0, sizeof(sb->pad1));
1222 memset(sb->pad2, 0, sizeof(sb->pad2));
1223 memset(sb->pad3, 0, sizeof(sb->pad3));
1225 sb->utime = cpu_to_le64((__u64)mddev->utime);
1226 sb->events = cpu_to_le64(mddev->events);
1228 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1230 sb->resync_offset = cpu_to_le64(0);
1232 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
1234 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1235 sb->size = cpu_to_le64(mddev->size<<1);
1237 if (mddev->bitmap && mddev->bitmap_file == NULL) {
1238 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
1239 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1242 if (rdev->raid_disk >= 0 &&
1243 !test_bit(In_sync, &rdev->flags) &&
1244 rdev->recovery_offset > 0) {
1245 sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1246 sb->recovery_offset = cpu_to_le64(rdev->recovery_offset);
1249 if (mddev->reshape_position != MaxSector) {
1250 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1251 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1252 sb->new_layout = cpu_to_le32(mddev->new_layout);
1253 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1254 sb->new_level = cpu_to_le32(mddev->new_level);
1255 sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9);
1259 ITERATE_RDEV(mddev,rdev2,tmp)
1260 if (rdev2->desc_nr+1 > max_dev)
1261 max_dev = rdev2->desc_nr+1;
1263 sb->max_dev = cpu_to_le32(max_dev);
1264 for (i=0; i<max_dev;i++)
1265 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1267 ITERATE_RDEV(mddev,rdev2,tmp) {
1269 if (test_bit(Faulty, &rdev2->flags))
1270 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1271 else if (test_bit(In_sync, &rdev2->flags))
1272 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1273 else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
1274 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1276 sb->dev_roles[i] = cpu_to_le16(0xffff);
1279 sb->sb_csum = calc_sb_1_csum(sb);
1283 static struct super_type super_types[] = {
1286 .owner = THIS_MODULE,
1287 .load_super = super_90_load,
1288 .validate_super = super_90_validate,
1289 .sync_super = super_90_sync,
1293 .owner = THIS_MODULE,
1294 .load_super = super_1_load,
1295 .validate_super = super_1_validate,
1296 .sync_super = super_1_sync,
1300 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
1302 struct list_head *tmp, *tmp2;
1303 mdk_rdev_t *rdev, *rdev2;
1305 ITERATE_RDEV(mddev1,rdev,tmp)
1306 ITERATE_RDEV(mddev2, rdev2, tmp2)
1307 if (rdev->bdev->bd_contains ==
1308 rdev2->bdev->bd_contains)
1314 static LIST_HEAD(pending_raid_disks);
1316 static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1318 char b[BDEVNAME_SIZE];
1327 /* make sure rdev->size exceeds mddev->size */
1328 if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
1330 /* Cannot change size, so fail */
1333 mddev->size = rdev->size;
1336 /* Verify rdev->desc_nr is unique.
1337 * If it is -1, assign a free number, else
1338 * check number is not in use
1340 if (rdev->desc_nr < 0) {
1342 if (mddev->pers) choice = mddev->raid_disks;
1343 while (find_rdev_nr(mddev, choice))
1345 rdev->desc_nr = choice;
1347 if (find_rdev_nr(mddev, rdev->desc_nr))
1350 bdevname(rdev->bdev,b);
1351 if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0)
1353 while ( (s=strchr(rdev->kobj.k_name, '/')) != NULL)
1356 rdev->mddev = mddev;
1357 printk(KERN_INFO "md: bind<%s>\n", b);
1359 rdev->kobj.parent = &mddev->kobj;
1360 if ((err = kobject_add(&rdev->kobj)))
1363 if (rdev->bdev->bd_part)
1364 ko = &rdev->bdev->bd_part->kobj;
1366 ko = &rdev->bdev->bd_disk->kobj;
1367 if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
1368 kobject_del(&rdev->kobj);
1371 list_add(&rdev->same_set, &mddev->disks);
1372 bd_claim_by_disk(rdev->bdev, rdev, mddev->gendisk);
1376 printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
1381 static void delayed_delete(struct work_struct *ws)
1383 mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
1384 kobject_del(&rdev->kobj);
1387 static void unbind_rdev_from_array(mdk_rdev_t * rdev)
1389 char b[BDEVNAME_SIZE];
1394 bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
1395 list_del_init(&rdev->same_set);
1396 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
1398 sysfs_remove_link(&rdev->kobj, "block");
1400 /* We need to delay this, otherwise we can deadlock when
1401 * writing to 'remove' to "dev/state"
1403 INIT_WORK(&rdev->del_work, delayed_delete);
1404 schedule_work(&rdev->del_work);
1408 * prevent the device from being mounted, repartitioned or
1409 * otherwise reused by a RAID array (or any other kernel
1410 * subsystem), by bd_claiming the device.
1412 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
1415 struct block_device *bdev;
1416 char b[BDEVNAME_SIZE];
1418 bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
1420 printk(KERN_ERR "md: could not open %s.\n",
1421 __bdevname(dev, b));
1422 return PTR_ERR(bdev);
1424 err = bd_claim(bdev, rdev);
1426 printk(KERN_ERR "md: could not bd_claim %s.\n",
1435 static void unlock_rdev(mdk_rdev_t *rdev)
1437 struct block_device *bdev = rdev->bdev;
1445 void md_autodetect_dev(dev_t dev);
1447 static void export_rdev(mdk_rdev_t * rdev)
1449 char b[BDEVNAME_SIZE];
1450 printk(KERN_INFO "md: export_rdev(%s)\n",
1451 bdevname(rdev->bdev,b));
1455 list_del_init(&rdev->same_set);
1457 md_autodetect_dev(rdev->bdev->bd_dev);
1460 kobject_put(&rdev->kobj);
1463 static void kick_rdev_from_array(mdk_rdev_t * rdev)
1465 unbind_rdev_from_array(rdev);
1469 static void export_array(mddev_t *mddev)
1471 struct list_head *tmp;
1474 ITERATE_RDEV(mddev,rdev,tmp) {
1479 kick_rdev_from_array(rdev);
1481 if (!list_empty(&mddev->disks))
1483 mddev->raid_disks = 0;
1484 mddev->major_version = 0;
1487 static void print_desc(mdp_disk_t *desc)
1489 printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
1490 desc->major,desc->minor,desc->raid_disk,desc->state);
1493 static void print_sb(mdp_super_t *sb)
1498 "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
1499 sb->major_version, sb->minor_version, sb->patch_version,
1500 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
1502 printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
1503 sb->level, sb->size, sb->nr_disks, sb->raid_disks,
1504 sb->md_minor, sb->layout, sb->chunk_size);
1505 printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d"
1506 " FD:%d SD:%d CSUM:%08x E:%08lx\n",
1507 sb->utime, sb->state, sb->active_disks, sb->working_disks,
1508 sb->failed_disks, sb->spare_disks,
1509 sb->sb_csum, (unsigned long)sb->events_lo);
1512 for (i = 0; i < MD_SB_DISKS; i++) {
1515 desc = sb->disks + i;
1516 if (desc->number || desc->major || desc->minor ||
1517 desc->raid_disk || (desc->state && (desc->state != 4))) {
1518 printk(" D %2d: ", i);
1522 printk(KERN_INFO "md: THIS: ");
1523 print_desc(&sb->this_disk);
1527 static void print_rdev(mdk_rdev_t *rdev)
1529 char b[BDEVNAME_SIZE];
1530 printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
1531 bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
1532 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
1534 if (rdev->sb_loaded) {
1535 printk(KERN_INFO "md: rdev superblock:\n");
1536 print_sb((mdp_super_t*)page_address(rdev->sb_page));
1538 printk(KERN_INFO "md: no rdev superblock!\n");
1541 static void md_print_devices(void)
1543 struct list_head *tmp, *tmp2;
1546 char b[BDEVNAME_SIZE];
1549 printk("md: **********************************\n");
1550 printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
1551 printk("md: **********************************\n");
1552 ITERATE_MDDEV(mddev,tmp) {
1555 bitmap_print_sb(mddev->bitmap);
1557 printk("%s: ", mdname(mddev));
1558 ITERATE_RDEV(mddev,rdev,tmp2)
1559 printk("<%s>", bdevname(rdev->bdev,b));
1562 ITERATE_RDEV(mddev,rdev,tmp2)
1565 printk("md: **********************************\n");
1570 static void sync_sbs(mddev_t * mddev, int nospares)
1572 /* Update each superblock (in-memory image), but
1573 * if we are allowed to, skip spares which already
1574 * have the right event counter, or have one earlier
1575 * (which would mean they aren't being marked as dirty
1576 * with the rest of the array)
1579 struct list_head *tmp;
1581 ITERATE_RDEV(mddev,rdev,tmp) {
1582 if (rdev->sb_events == mddev->events ||
1584 rdev->raid_disk < 0 &&
1585 (rdev->sb_events&1)==0 &&
1586 rdev->sb_events+1 == mddev->events)) {
1587 /* Don't update this superblock */
1588 rdev->sb_loaded = 2;
1590 super_types[mddev->major_version].
1591 sync_super(mddev, rdev);
1592 rdev->sb_loaded = 1;
1597 static void md_update_sb(mddev_t * mddev, int force_change)
1600 struct list_head *tmp;
1606 spin_lock_irq(&mddev->write_lock);
1608 set_bit(MD_CHANGE_PENDING, &mddev->flags);
1609 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
1611 if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
1612 /* just a clean<-> dirty transition, possibly leave spares alone,
1613 * though if events isn't the right even/odd, we will have to do
1619 if (mddev->degraded)
1620 /* If the array is degraded, then skipping spares is both
1621 * dangerous and fairly pointless.
1622 * Dangerous because a device that was removed from the array
1623 * might have a event_count that still looks up-to-date,
1624 * so it can be re-added without a resync.
1625 * Pointless because if there are any spares to skip,
1626 * then a recovery will happen and soon that array won't
1627 * be degraded any more and the spare can go back to sleep then.
1631 sync_req = mddev->in_sync;
1632 mddev->utime = get_seconds();
1634 /* If this is just a dirty<->clean transition, and the array is clean
1635 * and 'events' is odd, we can roll back to the previous clean state */
1637 && (mddev->in_sync && mddev->recovery_cp == MaxSector)
1638 && (mddev->events & 1)
1639 && mddev->events != 1)
1642 /* otherwise we have to go forward and ... */
1644 if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
1645 /* .. if the array isn't clean, insist on an odd 'events' */
1646 if ((mddev->events&1)==0) {
1651 /* otherwise insist on an even 'events' (for clean states) */
1652 if ((mddev->events&1)) {
1659 if (!mddev->events) {
1661 * oops, this 64-bit counter should never wrap.
1662 * Either we are in around ~1 trillion A.C., assuming
1663 * 1 reboot per second, or we have a bug:
1668 sync_sbs(mddev, nospares);
1671 * do not write anything to disk if using
1672 * nonpersistent superblocks
1674 if (!mddev->persistent) {
1675 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
1676 spin_unlock_irq(&mddev->write_lock);
1677 wake_up(&mddev->sb_wait);
1680 spin_unlock_irq(&mddev->write_lock);
1683 "md: updating %s RAID superblock on device (in sync %d)\n",
1684 mdname(mddev),mddev->in_sync);
1686 err = bitmap_update_sb(mddev->bitmap);
1687 ITERATE_RDEV(mddev,rdev,tmp) {
1688 char b[BDEVNAME_SIZE];
1689 dprintk(KERN_INFO "md: ");
1690 if (rdev->sb_loaded != 1)
1691 continue; /* no noise on spare devices */
1692 if (test_bit(Faulty, &rdev->flags))
1693 dprintk("(skipping faulty ");
1695 dprintk("%s ", bdevname(rdev->bdev,b));
1696 if (!test_bit(Faulty, &rdev->flags)) {
1697 md_super_write(mddev,rdev,
1698 rdev->sb_offset<<1, rdev->sb_size,
1700 dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
1701 bdevname(rdev->bdev,b),
1702 (unsigned long long)rdev->sb_offset);
1703 rdev->sb_events = mddev->events;
1707 if (mddev->level == LEVEL_MULTIPATH)
1708 /* only need to write one superblock... */
1711 md_super_wait(mddev);
1712 /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
1714 spin_lock_irq(&mddev->write_lock);
1715 if (mddev->in_sync != sync_req ||
1716 test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
1717 /* have to write it out again */
1718 spin_unlock_irq(&mddev->write_lock);
1721 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
1722 spin_unlock_irq(&mddev->write_lock);
1723 wake_up(&mddev->sb_wait);
1727 /* words written to sysfs files may, or my not, be \n terminated.
1728 * We want to accept with case. For this we use cmd_match.
1730 static int cmd_match(const char *cmd, const char *str)
1732 /* See if cmd, written into a sysfs file, matches
1733 * str. They must either be the same, or cmd can
1734 * have a trailing newline
1736 while (*cmd && *str && *cmd == *str) {
1747 struct rdev_sysfs_entry {
1748 struct attribute attr;
1749 ssize_t (*show)(mdk_rdev_t *, char *);
1750 ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
1754 state_show(mdk_rdev_t *rdev, char *page)
1759 if (test_bit(Faulty, &rdev->flags)) {
1760 len+= sprintf(page+len, "%sfaulty",sep);
1763 if (test_bit(In_sync, &rdev->flags)) {
1764 len += sprintf(page+len, "%sin_sync",sep);
1767 if (test_bit(WriteMostly, &rdev->flags)) {
1768 len += sprintf(page+len, "%swrite_mostly",sep);
1771 if (!test_bit(Faulty, &rdev->flags) &&
1772 !test_bit(In_sync, &rdev->flags)) {
1773 len += sprintf(page+len, "%sspare", sep);
1776 return len+sprintf(page+len, "\n");
1780 state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1783 * faulty - simulates and error
1784 * remove - disconnects the device
1785 * writemostly - sets write_mostly
1786 * -writemostly - clears write_mostly
1789 if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
1790 md_error(rdev->mddev, rdev);
1792 } else if (cmd_match(buf, "remove")) {
1793 if (rdev->raid_disk >= 0)
1796 mddev_t *mddev = rdev->mddev;
1797 kick_rdev_from_array(rdev);
1799 md_update_sb(mddev, 1);
1800 md_new_event(mddev);
1803 } else if (cmd_match(buf, "writemostly")) {
1804 set_bit(WriteMostly, &rdev->flags);
1806 } else if (cmd_match(buf, "-writemostly")) {
1807 clear_bit(WriteMostly, &rdev->flags);
1810 return err ? err : len;
1812 static struct rdev_sysfs_entry rdev_state =
1813 __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
1816 super_show(mdk_rdev_t *rdev, char *page)
1818 if (rdev->sb_loaded && rdev->sb_size) {
1819 memcpy(page, page_address(rdev->sb_page), rdev->sb_size);
1820 return rdev->sb_size;
1824 static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super);
1827 errors_show(mdk_rdev_t *rdev, char *page)
1829 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
1833 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1836 unsigned long n = simple_strtoul(buf, &e, 10);
1837 if (*buf && (*e == 0 || *e == '\n')) {
1838 atomic_set(&rdev->corrected_errors, n);
1843 static struct rdev_sysfs_entry rdev_errors =
1844 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
1847 slot_show(mdk_rdev_t *rdev, char *page)
1849 if (rdev->raid_disk < 0)
1850 return sprintf(page, "none\n");
1852 return sprintf(page, "%d\n", rdev->raid_disk);
1856 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1859 int slot = simple_strtoul(buf, &e, 10);
1860 if (strncmp(buf, "none", 4)==0)
1862 else if (e==buf || (*e && *e!= '\n'))
1864 if (rdev->mddev->pers)
1865 /* Cannot set slot in active array (yet) */
1867 if (slot >= rdev->mddev->raid_disks)
1869 rdev->raid_disk = slot;
1870 /* assume it is working */
1872 set_bit(In_sync, &rdev->flags);
1877 static struct rdev_sysfs_entry rdev_slot =
1878 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
1881 offset_show(mdk_rdev_t *rdev, char *page)
1883 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
1887 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1890 unsigned long long offset = simple_strtoull(buf, &e, 10);
1891 if (e==buf || (*e && *e != '\n'))
1893 if (rdev->mddev->pers)
1895 rdev->data_offset = offset;
1899 static struct rdev_sysfs_entry rdev_offset =
1900 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
1903 rdev_size_show(mdk_rdev_t *rdev, char *page)
1905 return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
1909 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1912 unsigned long long size = simple_strtoull(buf, &e, 10);
1913 if (e==buf || (*e && *e != '\n'))
1915 if (rdev->mddev->pers)
1918 if (size < rdev->mddev->size || rdev->mddev->size == 0)
1919 rdev->mddev->size = size;
1923 static struct rdev_sysfs_entry rdev_size =
1924 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
1926 static struct attribute *rdev_default_attrs[] = {
1936 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
1938 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
1939 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
1943 return entry->show(rdev, page);
1947 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
1948 const char *page, size_t length)
1950 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
1951 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
1955 if (!capable(CAP_SYS_ADMIN))
1957 return entry->store(rdev, page, length);
1960 static void rdev_free(struct kobject *ko)
1962 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
1965 static struct sysfs_ops rdev_sysfs_ops = {
1966 .show = rdev_attr_show,
1967 .store = rdev_attr_store,
1969 static struct kobj_type rdev_ktype = {
1970 .release = rdev_free,
1971 .sysfs_ops = &rdev_sysfs_ops,
1972 .default_attrs = rdev_default_attrs,
1976 * Import a device. If 'super_format' >= 0, then sanity check the superblock
1978 * mark the device faulty if:
1980 * - the device is nonexistent (zero size)
1981 * - the device has no valid superblock
1983 * a faulty rdev _never_ has rdev->sb set.
1985 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
1987 char b[BDEVNAME_SIZE];
1992 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
1994 printk(KERN_ERR "md: could not alloc mem for new device!\n");
1995 return ERR_PTR(-ENOMEM);
1998 if ((err = alloc_disk_sb(rdev)))
2001 err = lock_rdev(rdev, newdev);
2005 rdev->kobj.parent = NULL;
2006 rdev->kobj.ktype = &rdev_ktype;
2007 kobject_init(&rdev->kobj);
2010 rdev->saved_raid_disk = -1;
2011 rdev->raid_disk = -1;
2013 rdev->data_offset = 0;
2014 rdev->sb_events = 0;
2015 atomic_set(&rdev->nr_pending, 0);
2016 atomic_set(&rdev->read_errors, 0);
2017 atomic_set(&rdev->corrected_errors, 0);
2019 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2022 "md: %s has zero or unknown size, marking faulty!\n",
2023 bdevname(rdev->bdev,b));
2028 if (super_format >= 0) {
2029 err = super_types[super_format].
2030 load_super(rdev, NULL, super_minor);
2031 if (err == -EINVAL) {
2033 "md: %s has invalid sb, not importing!\n",
2034 bdevname(rdev->bdev,b));
2039 "md: could not read %s's sb, not importing!\n",
2040 bdevname(rdev->bdev,b));
2044 INIT_LIST_HEAD(&rdev->same_set);
2049 if (rdev->sb_page) {
2055 return ERR_PTR(err);
2059 * Check a full RAID array for plausibility
2063 static void analyze_sbs(mddev_t * mddev)
2066 struct list_head *tmp;
2067 mdk_rdev_t *rdev, *freshest;
2068 char b[BDEVNAME_SIZE];
2071 ITERATE_RDEV(mddev,rdev,tmp)
2072 switch (super_types[mddev->major_version].
2073 load_super(rdev, freshest, mddev->minor_version)) {
2081 "md: fatal superblock inconsistency in %s"
2082 " -- removing from array\n",
2083 bdevname(rdev->bdev,b));
2084 kick_rdev_from_array(rdev);
2088 super_types[mddev->major_version].
2089 validate_super(mddev, freshest);
2092 ITERATE_RDEV(mddev,rdev,tmp) {
2093 if (rdev != freshest)
2094 if (super_types[mddev->major_version].
2095 validate_super(mddev, rdev)) {
2096 printk(KERN_WARNING "md: kicking non-fresh %s"
2098 bdevname(rdev->bdev,b));
2099 kick_rdev_from_array(rdev);
2102 if (mddev->level == LEVEL_MULTIPATH) {
2103 rdev->desc_nr = i++;
2104 rdev->raid_disk = rdev->desc_nr;
2105 set_bit(In_sync, &rdev->flags);
2111 if (mddev->recovery_cp != MaxSector &&
2113 printk(KERN_ERR "md: %s: raid array is not clean"
2114 " -- starting background reconstruction\n",
2120 safe_delay_show(mddev_t *mddev, char *page)
2122 int msec = (mddev->safemode_delay*1000)/HZ;
2123 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
2126 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2134 /* remove a period, and count digits after it */
2135 if (len >= sizeof(buf))
2137 strlcpy(buf, cbuf, len);
2139 for (i=0; i<len; i++) {
2141 if (isdigit(buf[i])) {
2146 } else if (buf[i] == '.') {
2151 msec = simple_strtoul(buf, &e, 10);
2152 if (e == buf || (*e && *e != '\n'))
2154 msec = (msec * 1000) / scale;
2156 mddev->safemode_delay = 0;
2158 mddev->safemode_delay = (msec*HZ)/1000;
2159 if (mddev->safemode_delay == 0)
2160 mddev->safemode_delay = 1;
2164 static struct md_sysfs_entry md_safe_delay =
2165 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
2168 level_show(mddev_t *mddev, char *page)
2170 struct mdk_personality *p = mddev->pers;
2172 return sprintf(page, "%s\n", p->name);
2173 else if (mddev->clevel[0])
2174 return sprintf(page, "%s\n", mddev->clevel);
2175 else if (mddev->level != LEVEL_NONE)
2176 return sprintf(page, "%d\n", mddev->level);
2182 level_store(mddev_t *mddev, const char *buf, size_t len)
2189 if (len >= sizeof(mddev->clevel))
2191 strncpy(mddev->clevel, buf, len);
2192 if (mddev->clevel[len-1] == '\n')
2194 mddev->clevel[len] = 0;
2195 mddev->level = LEVEL_NONE;
2199 static struct md_sysfs_entry md_level =
2200 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
2204 layout_show(mddev_t *mddev, char *page)
2206 /* just a number, not meaningful for all levels */
2207 return sprintf(page, "%d\n", mddev->layout);
2211 layout_store(mddev_t *mddev, const char *buf, size_t len)
2214 unsigned long n = simple_strtoul(buf, &e, 10);
2218 if (!*buf || (*e && *e != '\n'))
2224 static struct md_sysfs_entry md_layout =
2225 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
2229 raid_disks_show(mddev_t *mddev, char *page)
2231 if (mddev->raid_disks == 0)
2233 return sprintf(page, "%d\n", mddev->raid_disks);
2236 static int update_raid_disks(mddev_t *mddev, int raid_disks);
2239 raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
2243 unsigned long n = simple_strtoul(buf, &e, 10);
2245 if (!*buf || (*e && *e != '\n'))
2249 rv = update_raid_disks(mddev, n);
2251 mddev->raid_disks = n;
2252 return rv ? rv : len;
2254 static struct md_sysfs_entry md_raid_disks =
2255 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
2258 chunk_size_show(mddev_t *mddev, char *page)
2260 return sprintf(page, "%d\n", mddev->chunk_size);
2264 chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
2266 /* can only set chunk_size if array is not yet active */
2268 unsigned long n = simple_strtoul(buf, &e, 10);
2272 if (!*buf || (*e && *e != '\n'))
2275 mddev->chunk_size = n;
2278 static struct md_sysfs_entry md_chunk_size =
2279 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
2282 resync_start_show(mddev_t *mddev, char *page)
2284 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
2288 resync_start_store(mddev_t *mddev, const char *buf, size_t len)
2290 /* can only set chunk_size if array is not yet active */
2292 unsigned long long n = simple_strtoull(buf, &e, 10);
2296 if (!*buf || (*e && *e != '\n'))
2299 mddev->recovery_cp = n;
2302 static struct md_sysfs_entry md_resync_start =
2303 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
2306 * The array state can be:
2309 * No devices, no size, no level
2310 * Equivalent to STOP_ARRAY ioctl
2312 * May have some settings, but array is not active
2313 * all IO results in error
2314 * When written, doesn't tear down array, but just stops it
2315 * suspended (not supported yet)
2316 * All IO requests will block. The array can be reconfigured.
2317 * Writing this, if accepted, will block until array is quiessent
2319 * no resync can happen. no superblocks get written.
2320 * write requests fail
2322 * like readonly, but behaves like 'clean' on a write request.
2324 * clean - no pending writes, but otherwise active.
2325 * When written to inactive array, starts without resync
2326 * If a write request arrives then
2327 * if metadata is known, mark 'dirty' and switch to 'active'.
2328 * if not known, block and switch to write-pending
2329 * If written to an active array that has pending writes, then fails.
2331 * fully active: IO and resync can be happening.
2332 * When written to inactive array, starts with resync
2335 * clean, but writes are blocked waiting for 'active' to be written.
2338 * like active, but no writes have been seen for a while (100msec).
2341 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
2342 write_pending, active_idle, bad_word};
2343 static char *array_states[] = {
2344 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
2345 "write-pending", "active-idle", NULL };
2347 static int match_word(const char *word, char **list)
2350 for (n=0; list[n]; n++)
2351 if (cmd_match(word, list[n]))
2357 array_state_show(mddev_t *mddev, char *page)
2359 enum array_state st = inactive;
2372 else if (mddev->safemode)
2378 if (list_empty(&mddev->disks) &&
2379 mddev->raid_disks == 0 &&
2385 return sprintf(page, "%s\n", array_states[st]);
2388 static int do_md_stop(mddev_t * mddev, int ro);
2389 static int do_md_run(mddev_t * mddev);
2390 static int restart_array(mddev_t *mddev);
2393 array_state_store(mddev_t *mddev, const char *buf, size_t len)
2396 enum array_state st = match_word(buf, array_states);
2401 /* stopping an active array */
2403 if (atomic_read(&mddev->active) > 1)
2405 err = do_md_stop(mddev, 0);
2409 /* stopping an active array */
2411 if (atomic_read(&mddev->active) > 1)
2413 err = do_md_stop(mddev, 2);
2417 break; /* not supported yet */
2420 err = do_md_stop(mddev, 1);
2423 err = do_md_run(mddev);
2427 /* stopping an active array */
2429 err = do_md_stop(mddev, 1);
2431 mddev->ro = 2; /* FIXME mark devices writable */
2434 err = do_md_run(mddev);
2439 restart_array(mddev);
2440 spin_lock_irq(&mddev->write_lock);
2441 if (atomic_read(&mddev->writes_pending) == 0) {
2443 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
2445 spin_unlock_irq(&mddev->write_lock);
2448 mddev->recovery_cp = MaxSector;
2449 err = do_md_run(mddev);
2454 restart_array(mddev);
2455 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2456 wake_up(&mddev->sb_wait);
2460 err = do_md_run(mddev);
2465 /* these cannot be set */
2473 static struct md_sysfs_entry md_array_state =
2474 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
2477 null_show(mddev_t *mddev, char *page)
2483 new_dev_store(mddev_t *mddev, const char *buf, size_t len)
2485 /* buf must be %d:%d\n? giving major and minor numbers */
2486 /* The new device is added to the array.
2487 * If the array has a persistent superblock, we read the
2488 * superblock to initialise info and check validity.
2489 * Otherwise, only checking done is that in bind_rdev_to_array,
2490 * which mainly checks size.
2493 int major = simple_strtoul(buf, &e, 10);
2499 if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
2501 minor = simple_strtoul(e+1, &e, 10);
2502 if (*e && *e != '\n')
2504 dev = MKDEV(major, minor);
2505 if (major != MAJOR(dev) ||
2506 minor != MINOR(dev))
2510 if (mddev->persistent) {
2511 rdev = md_import_device(dev, mddev->major_version,
2512 mddev->minor_version);
2513 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
2514 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2515 mdk_rdev_t, same_set);
2516 err = super_types[mddev->major_version]
2517 .load_super(rdev, rdev0, mddev->minor_version);
2522 rdev = md_import_device(dev, -1, -1);
2525 return PTR_ERR(rdev);
2526 err = bind_rdev_to_array(rdev, mddev);
2530 return err ? err : len;
2533 static struct md_sysfs_entry md_new_device =
2534 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
2537 bitmap_store(mddev_t *mddev, const char *buf, size_t len)
2540 unsigned long chunk, end_chunk;
2544 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
2546 chunk = end_chunk = simple_strtoul(buf, &end, 0);
2547 if (buf == end) break;
2548 if (*end == '-') { /* range */
2550 end_chunk = simple_strtoul(buf, &end, 0);
2551 if (buf == end) break;
2553 if (*end && !isspace(*end)) break;
2554 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
2556 while (isspace(*buf)) buf++;
2558 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
2563 static struct md_sysfs_entry md_bitmap =
2564 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
2567 size_show(mddev_t *mddev, char *page)
2569 return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
2572 static int update_size(mddev_t *mddev, unsigned long size);
2575 size_store(mddev_t *mddev, const char *buf, size_t len)
2577 /* If array is inactive, we can reduce the component size, but
2578 * not increase it (except from 0).
2579 * If array is active, we can try an on-line resize
2583 unsigned long long size = simple_strtoull(buf, &e, 10);
2584 if (!*buf || *buf == '\n' ||
2589 err = update_size(mddev, size);
2590 md_update_sb(mddev, 1);
2592 if (mddev->size == 0 ||
2598 return err ? err : len;
2601 static struct md_sysfs_entry md_size =
2602 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
2606 * This is either 'none' for arrays with externally managed metadata,
2607 * or N.M for internally known formats
2610 metadata_show(mddev_t *mddev, char *page)
2612 if (mddev->persistent)
2613 return sprintf(page, "%d.%d\n",
2614 mddev->major_version, mddev->minor_version);
2616 return sprintf(page, "none\n");
2620 metadata_store(mddev_t *mddev, const char *buf, size_t len)
2624 if (!list_empty(&mddev->disks))
2627 if (cmd_match(buf, "none")) {
2628 mddev->persistent = 0;
2629 mddev->major_version = 0;
2630 mddev->minor_version = 90;
2633 major = simple_strtoul(buf, &e, 10);
2634 if (e==buf || *e != '.')
2637 minor = simple_strtoul(buf, &e, 10);
2638 if (e==buf || (*e && *e != '\n') )
2640 if (major >= sizeof(super_types)/sizeof(super_types[0]) ||
2641 super_types[major].name == NULL)
2643 mddev->major_version = major;
2644 mddev->minor_version = minor;
2645 mddev->persistent = 1;
2649 static struct md_sysfs_entry md_metadata =
2650 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
2653 action_show(mddev_t *mddev, char *page)
2655 char *type = "idle";
2656 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2657 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) {
2658 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2660 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2661 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2663 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2670 return sprintf(page, "%s\n", type);
2674 action_store(mddev_t *mddev, const char *page, size_t len)
2676 if (!mddev->pers || !mddev->pers->sync_request)
2679 if (cmd_match(page, "idle")) {
2680 if (mddev->sync_thread) {
2681 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2682 md_unregister_thread(mddev->sync_thread);
2683 mddev->sync_thread = NULL;
2684 mddev->recovery = 0;
2686 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2687 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
2689 else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
2690 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2691 else if (cmd_match(page, "reshape")) {
2693 if (mddev->pers->start_reshape == NULL)
2695 err = mddev->pers->start_reshape(mddev);
2699 if (cmd_match(page, "check"))
2700 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
2701 else if (!cmd_match(page, "repair"))
2703 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
2704 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
2706 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2707 md_wakeup_thread(mddev->thread);
2712 mismatch_cnt_show(mddev_t *mddev, char *page)
2714 return sprintf(page, "%llu\n",
2715 (unsigned long long) mddev->resync_mismatches);
2718 static struct md_sysfs_entry md_scan_mode =
2719 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
2722 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
2725 sync_min_show(mddev_t *mddev, char *page)
2727 return sprintf(page, "%d (%s)\n", speed_min(mddev),
2728 mddev->sync_speed_min ? "local": "system");
2732 sync_min_store(mddev_t *mddev, const char *buf, size_t len)
2736 if (strncmp(buf, "system", 6)==0) {
2737 mddev->sync_speed_min = 0;
2740 min = simple_strtoul(buf, &e, 10);
2741 if (buf == e || (*e && *e != '\n') || min <= 0)
2743 mddev->sync_speed_min = min;
2747 static struct md_sysfs_entry md_sync_min =
2748 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
2751 sync_max_show(mddev_t *mddev, char *page)
2753 return sprintf(page, "%d (%s)\n", speed_max(mddev),
2754 mddev->sync_speed_max ? "local": "system");
2758 sync_max_store(mddev_t *mddev, const char *buf, size_t len)
2762 if (strncmp(buf, "system", 6)==0) {
2763 mddev->sync_speed_max = 0;
2766 max = simple_strtoul(buf, &e, 10);
2767 if (buf == e || (*e && *e != '\n') || max <= 0)
2769 mddev->sync_speed_max = max;
2773 static struct md_sysfs_entry md_sync_max =
2774 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
2778 sync_speed_show(mddev_t *mddev, char *page)
2780 unsigned long resync, dt, db;
2781 resync = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active));
2782 dt = ((jiffies - mddev->resync_mark) / HZ);
2784 db = resync - (mddev->resync_mark_cnt);
2785 return sprintf(page, "%ld\n", db/dt/2); /* K/sec */
2788 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
2791 sync_completed_show(mddev_t *mddev, char *page)
2793 unsigned long max_blocks, resync;
2795 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2796 max_blocks = mddev->resync_max_sectors;
2798 max_blocks = mddev->size << 1;
2800 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
2801 return sprintf(page, "%lu / %lu\n", resync, max_blocks);
2804 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
2807 suspend_lo_show(mddev_t *mddev, char *page)
2809 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
2813 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
2816 unsigned long long new = simple_strtoull(buf, &e, 10);
2818 if (mddev->pers->quiesce == NULL)
2820 if (buf == e || (*e && *e != '\n'))
2822 if (new >= mddev->suspend_hi ||
2823 (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
2824 mddev->suspend_lo = new;
2825 mddev->pers->quiesce(mddev, 2);
2830 static struct md_sysfs_entry md_suspend_lo =
2831 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
2835 suspend_hi_show(mddev_t *mddev, char *page)
2837 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
2841 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
2844 unsigned long long new = simple_strtoull(buf, &e, 10);
2846 if (mddev->pers->quiesce == NULL)
2848 if (buf == e || (*e && *e != '\n'))
2850 if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
2851 (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
2852 mddev->suspend_hi = new;
2853 mddev->pers->quiesce(mddev, 1);
2854 mddev->pers->quiesce(mddev, 0);
2859 static struct md_sysfs_entry md_suspend_hi =
2860 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
2863 static struct attribute *md_default_attrs[] = {
2866 &md_raid_disks.attr,
2867 &md_chunk_size.attr,
2869 &md_resync_start.attr,
2871 &md_new_device.attr,
2872 &md_safe_delay.attr,
2873 &md_array_state.attr,
2877 static struct attribute *md_redundancy_attrs[] = {
2879 &md_mismatches.attr,
2882 &md_sync_speed.attr,
2883 &md_sync_completed.attr,
2884 &md_suspend_lo.attr,
2885 &md_suspend_hi.attr,
2889 static struct attribute_group md_redundancy_group = {
2891 .attrs = md_redundancy_attrs,
2896 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2898 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
2899 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
2904 rv = mddev_lock(mddev);
2906 rv = entry->show(mddev, page);
2907 mddev_unlock(mddev);
2913 md_attr_store(struct kobject *kobj, struct attribute *attr,
2914 const char *page, size_t length)
2916 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
2917 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
2922 if (!capable(CAP_SYS_ADMIN))
2924 rv = mddev_lock(mddev);
2926 rv = entry->store(mddev, page, length);
2927 mddev_unlock(mddev);
2932 static void md_free(struct kobject *ko)
2934 mddev_t *mddev = container_of(ko, mddev_t, kobj);
2938 static struct sysfs_ops md_sysfs_ops = {
2939 .show = md_attr_show,
2940 .store = md_attr_store,
2942 static struct kobj_type md_ktype = {
2944 .sysfs_ops = &md_sysfs_ops,
2945 .default_attrs = md_default_attrs,
2950 static struct kobject *md_probe(dev_t dev, int *part, void *data)
2952 static DEFINE_MUTEX(disks_mutex);
2953 mddev_t *mddev = mddev_find(dev);
2954 struct gendisk *disk;
2955 int partitioned = (MAJOR(dev) != MD_MAJOR);
2956 int shift = partitioned ? MdpMinorShift : 0;
2957 int unit = MINOR(dev) >> shift;
2962 mutex_lock(&disks_mutex);
2963 if (mddev->gendisk) {
2964 mutex_unlock(&disks_mutex);
2968 disk = alloc_disk(1 << shift);
2970 mutex_unlock(&disks_mutex);
2974 disk->major = MAJOR(dev);
2975 disk->first_minor = unit << shift;
2977 sprintf(disk->disk_name, "md_d%d", unit);
2979 sprintf(disk->disk_name, "md%d", unit);
2980 disk->fops = &md_fops;
2981 disk->private_data = mddev;
2982 disk->queue = mddev->queue;
2984 mddev->gendisk = disk;
2985 mutex_unlock(&disks_mutex);
2986 mddev->kobj.parent = &disk->kobj;
2987 mddev->kobj.k_name = NULL;
2988 snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md");
2989 mddev->kobj.ktype = &md_ktype;
2990 if (kobject_register(&mddev->kobj))
2991 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
2996 static void md_safemode_timeout(unsigned long data)
2998 mddev_t *mddev = (mddev_t *) data;
3000 mddev->safemode = 1;
3001 md_wakeup_thread(mddev->thread);
3004 static int start_dirty_degraded;
3006 static int do_md_run(mddev_t * mddev)
3010 struct list_head *tmp;
3012 struct gendisk *disk;
3013 struct mdk_personality *pers;
3014 char b[BDEVNAME_SIZE];
3016 if (list_empty(&mddev->disks))
3017 /* cannot run an array with no devices.. */
3024 * Analyze all RAID superblock(s)
3026 if (!mddev->raid_disks)
3029 chunk_size = mddev->chunk_size;
3032 if (chunk_size > MAX_CHUNK_SIZE) {
3033 printk(KERN_ERR "too big chunk_size: %d > %d\n",
3034 chunk_size, MAX_CHUNK_SIZE);
3038 * chunk-size has to be a power of 2 and multiples of PAGE_SIZE
3040 if ( (1 << ffz(~chunk_size)) != chunk_size) {
3041 printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
3044 if (chunk_size < PAGE_SIZE) {
3045 printk(KERN_ERR "too small chunk_size: %d < %ld\n",
3046 chunk_size, PAGE_SIZE);
3050 /* devices must have minimum size of one chunk */
3051 ITERATE_RDEV(mddev,rdev,tmp) {
3052 if (test_bit(Faulty, &rdev->flags))
3054 if (rdev->size < chunk_size / 1024) {
3056 "md: Dev %s smaller than chunk_size:"
3058 bdevname(rdev->bdev,b),
3059 (unsigned long long)rdev->size,
3067 if (mddev->level != LEVEL_NONE)
3068 request_module("md-level-%d", mddev->level);
3069 else if (mddev->clevel[0])
3070 request_module("md-%s", mddev->clevel);
3074 * Drop all container device buffers, from now on
3075 * the only valid external interface is through the md
3077 * Also find largest hardsector size
3079 ITERATE_RDEV(mddev,rdev,tmp) {
3080 if (test_bit(Faulty, &rdev->flags))
3082 sync_blockdev(rdev->bdev);
3083 invalidate_bdev(rdev->bdev, 0);
3086 md_probe(mddev->unit, NULL, NULL);
3087 disk = mddev->gendisk;
3091 spin_lock(&pers_lock);
3092 pers = find_pers(mddev->level, mddev->clevel);
3093 if (!pers || !try_module_get(pers->owner)) {
3094 spin_unlock(&pers_lock);
3095 if (mddev->level != LEVEL_NONE)
3096 printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
3099 printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
3104 spin_unlock(&pers_lock);
3105 mddev->level = pers->level;
3106 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3108 if (mddev->reshape_position != MaxSector &&
3109 pers->start_reshape == NULL) {
3110 /* This personality cannot handle reshaping... */
3112 module_put(pers->owner);
3116 if (pers->sync_request) {
3117 /* Warn if this is a potentially silly
3120 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
3122 struct list_head *tmp2;
3124 ITERATE_RDEV(mddev, rdev, tmp) {
3125 ITERATE_RDEV(mddev, rdev2, tmp2) {
3127 rdev->bdev->bd_contains ==
3128 rdev2->bdev->bd_contains) {
3130 "%s: WARNING: %s appears to be"
3131 " on the same physical disk as"
3134 bdevname(rdev->bdev,b),
3135 bdevname(rdev2->bdev,b2));
3142 "True protection against single-disk"
3143 " failure might be compromised.\n");
3146 mddev->recovery = 0;
3147 mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
3148 mddev->barriers_work = 1;
3149 mddev->ok_start_degraded = start_dirty_degraded;
3152 mddev->ro = 2; /* read-only, but switch on first write */
3154 err = mddev->pers->run(mddev);
3155 if (!err && mddev->pers->sync_request) {
3156 err = bitmap_create(mddev);
3158 printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
3159 mdname(mddev), err);
3160 mddev->pers->stop(mddev);
3164 printk(KERN_ERR "md: pers->run() failed ...\n");
3165 module_put(mddev->pers->owner);
3167 bitmap_destroy(mddev);
3170 if (mddev->pers->sync_request) {
3171 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3173 "md: cannot register extra attributes for %s\n",
3175 } else if (mddev->ro == 2) /* auto-readonly not meaningful */
3178 atomic_set(&mddev->writes_pending,0);
3179 mddev->safemode = 0;
3180 mddev->safemode_timer.function = md_safemode_timeout;
3181 mddev->safemode_timer.data = (unsigned long) mddev;
3182 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
3185 ITERATE_RDEV(mddev,rdev,tmp)
3186 if (rdev->raid_disk >= 0) {
3188 sprintf(nm, "rd%d", rdev->raid_disk);
3189 if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
3190 printk("md: cannot register %s for %s\n",
3194 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3197 md_update_sb(mddev, 0);
3199 set_capacity(disk, mddev->array_size<<1);
3201 /* If we call blk_queue_make_request here, it will
3202 * re-initialise max_sectors etc which may have been
3203 * refined inside -> run. So just set the bits we need to set.
3204 * Most initialisation happended when we called
3205 * blk_queue_make_request(..., md_fail_request)
3208 mddev->queue->queuedata = mddev;
3209 mddev->queue->make_request_fn = mddev->pers->make_request;
3211 /* If there is a partially-recovered drive we need to
3212 * start recovery here. If we leave it to md_check_recovery,
3213 * it will remove the drives and not do the right thing
3215 if (mddev->degraded && !mddev->sync_thread) {
3216 struct list_head *rtmp;
3218 ITERATE_RDEV(mddev,rdev,rtmp)
3219 if (rdev->raid_disk >= 0 &&
3220 !test_bit(In_sync, &rdev->flags) &&
3221 !test_bit(Faulty, &rdev->flags))
3222 /* complete an interrupted recovery */
3224 if (spares && mddev->pers->sync_request) {
3225 mddev->recovery = 0;
3226 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3227 mddev->sync_thread = md_register_thread(md_do_sync,
3230 if (!mddev->sync_thread) {
3231 printk(KERN_ERR "%s: could not start resync"
3234 /* leave the spares where they are, it shouldn't hurt */
3235 mddev->recovery = 0;
3239 md_wakeup_thread(mddev->thread);
3240 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
3243 md_new_event(mddev);
3244 kobject_uevent(&mddev->gendisk->kobj, KOBJ_CHANGE);
3248 static int restart_array(mddev_t *mddev)
3250 struct gendisk *disk = mddev->gendisk;
3254 * Complain if it has no devices
3257 if (list_empty(&mddev->disks))
3265 mddev->safemode = 0;
3267 set_disk_ro(disk, 0);
3269 printk(KERN_INFO "md: %s switched to read-write mode.\n",
3272 * Kick recovery or resync if necessary
3274 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3275 md_wakeup_thread(mddev->thread);
3276 md_wakeup_thread(mddev->sync_thread);
3285 /* similar to deny_write_access, but accounts for our holding a reference
3286 * to the file ourselves */
3287 static int deny_bitmap_write_access(struct file * file)
3289 struct inode *inode = file->f_mapping->host;
3291 spin_lock(&inode->i_lock);
3292 if (atomic_read(&inode->i_writecount) > 1) {
3293 spin_unlock(&inode->i_lock);
3296 atomic_set(&inode->i_writecount, -1);
3297 spin_unlock(&inode->i_lock);
3302 static void restore_bitmap_write_access(struct file *file)
3304 struct inode *inode = file->f_mapping->host;
3306 spin_lock(&inode->i_lock);
3307 atomic_set(&inode->i_writecount, 1);
3308 spin_unlock(&inode->i_lock);
3312 * 0 - completely stop and dis-assemble array
3313 * 1 - switch to readonly
3314 * 2 - stop but do not disassemble array
3316 static int do_md_stop(mddev_t * mddev, int mode)
3319 struct gendisk *disk = mddev->gendisk;
3322 if (atomic_read(&mddev->active)>2) {
3323 printk("md: %s still in use.\n",mdname(mddev));
3327 if (mddev->sync_thread) {
3328 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3329 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3330 md_unregister_thread(mddev->sync_thread);
3331 mddev->sync_thread = NULL;
3334 del_timer_sync(&mddev->safemode_timer);
3336 invalidate_partition(disk, 0);
3339 case 1: /* readonly */
3345 case 0: /* disassemble */
3347 bitmap_flush(mddev);
3348 md_super_wait(mddev);
3350 set_disk_ro(disk, 0);
3351 blk_queue_make_request(mddev->queue, md_fail_request);
3352 mddev->pers->stop(mddev);
3353 mddev->queue->merge_bvec_fn = NULL;
3354 mddev->queue->unplug_fn = NULL;
3355 mddev->queue->issue_flush_fn = NULL;
3356 mddev->queue->backing_dev_info.congested_fn = NULL;
3357 if (mddev->pers->sync_request)
3358 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
3360 module_put(mddev->pers->owner);
3363 set_capacity(disk, 0);
3369 if (!mddev->in_sync || mddev->flags) {
3370 /* mark array as shutdown cleanly */
3372 md_update_sb(mddev, 1);
3375 set_disk_ro(disk, 1);
3376 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3380 * Free resources if final stop
3384 struct list_head *tmp;
3386 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
3388 bitmap_destroy(mddev);
3389 if (mddev->bitmap_file) {
3390 restore_bitmap_write_access(mddev->bitmap_file);
3391 fput(mddev->bitmap_file);
3392 mddev->bitmap_file = NULL;
3394 mddev->bitmap_offset = 0;
3396 ITERATE_RDEV(mddev,rdev,tmp)
3397 if (rdev->raid_disk >= 0) {
3399 sprintf(nm, "rd%d", rdev->raid_disk);
3400 sysfs_remove_link(&mddev->kobj, nm);
3403 /* make sure all delayed_delete calls have finished */
3404 flush_scheduled_work();
3406 export_array(mddev);
3408 mddev->array_size = 0;
3410 mddev->raid_disks = 0;
3411 mddev->recovery_cp = 0;
3413 } else if (mddev->pers)
3414 printk(KERN_INFO "md: %s switched to read-only mode.\n",
3417 md_new_event(mddev);
3423 static void autorun_array(mddev_t *mddev)
3426 struct list_head *tmp;
3429 if (list_empty(&mddev->disks))
3432 printk(KERN_INFO "md: running: ");
3434 ITERATE_RDEV(mddev,rdev,tmp) {
3435 char b[BDEVNAME_SIZE];
3436 printk("<%s>", bdevname(rdev->bdev,b));
3440 err = do_md_run (mddev);
3442 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
3443 do_md_stop (mddev, 0);
3448 * lets try to run arrays based on all disks that have arrived
3449 * until now. (those are in pending_raid_disks)
3451 * the method: pick the first pending disk, collect all disks with
3452 * the same UUID, remove all from the pending list and put them into
3453 * the 'same_array' list. Then order this list based on superblock
3454 * update time (freshest comes first), kick out 'old' disks and
3455 * compare superblocks. If everything's fine then run it.
3457 * If "unit" is allocated, then bump its reference count
3459 static void autorun_devices(int part)
3461 struct list_head *tmp;
3462 mdk_rdev_t *rdev0, *rdev;
3464 char b[BDEVNAME_SIZE];
3466 printk(KERN_INFO "md: autorun ...\n");
3467 while (!list_empty(&pending_raid_disks)) {
3470 LIST_HEAD(candidates);
3471 rdev0 = list_entry(pending_raid_disks.next,
3472 mdk_rdev_t, same_set);
3474 printk(KERN_INFO "md: considering %s ...\n",
3475 bdevname(rdev0->bdev,b));
3476 INIT_LIST_HEAD(&candidates);
3477 ITERATE_RDEV_PENDING(rdev,tmp)
3478 if (super_90_load(rdev, rdev0, 0) >= 0) {
3479 printk(KERN_INFO "md: adding %s ...\n",
3480 bdevname(rdev->bdev,b));
3481 list_move(&rdev->same_set, &candidates);
3484 * now we have a set of devices, with all of them having
3485 * mostly sane superblocks. It's time to allocate the
3489 dev = MKDEV(mdp_major,
3490 rdev0->preferred_minor << MdpMinorShift);
3491 unit = MINOR(dev) >> MdpMinorShift;
3493 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
3496 if (rdev0->preferred_minor != unit) {
3497 printk(KERN_INFO "md: unit number in %s is bad: %d\n",
3498 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
3502 md_probe(dev, NULL, NULL);
3503 mddev = mddev_find(dev);
3506 "md: cannot allocate memory for md drive.\n");
3509 if (mddev_lock(mddev))
3510 printk(KERN_WARNING "md: %s locked, cannot run\n",
3512 else if (mddev->raid_disks || mddev->major_version
3513 || !list_empty(&mddev->disks)) {
3515 "md: %s already running, cannot run %s\n",
3516 mdname(mddev), bdevname(rdev0->bdev,b));
3517 mddev_unlock(mddev);
3519 printk(KERN_INFO "md: created %s\n", mdname(mddev));
3520 ITERATE_RDEV_GENERIC(candidates,rdev,tmp) {
3521 list_del_init(&rdev->same_set);
3522 if (bind_rdev_to_array(rdev, mddev))
3525 autorun_array(mddev);
3526 mddev_unlock(mddev);
3528 /* on success, candidates will be empty, on error
3531 ITERATE_RDEV_GENERIC(candidates,rdev,tmp)
3535 printk(KERN_INFO "md: ... autorun DONE.\n");
3537 #endif /* !MODULE */
3539 static int get_version(void __user * arg)
3543 ver.major = MD_MAJOR_VERSION;
3544 ver.minor = MD_MINOR_VERSION;
3545 ver.patchlevel = MD_PATCHLEVEL_VERSION;
3547 if (copy_to_user(arg, &ver, sizeof(ver)))
3553 static int get_array_info(mddev_t * mddev, void __user * arg)
3555 mdu_array_info_t info;
3556 int nr,working,active,failed,spare;
3558 struct list_head *tmp;
3560 nr=working=active=failed=spare=0;
3561 ITERATE_RDEV(mddev,rdev,tmp) {
3563 if (test_bit(Faulty, &rdev->flags))
3567 if (test_bit(In_sync, &rdev->flags))
3574 info.major_version = mddev->major_version;
3575 info.minor_version = mddev->minor_version;
3576 info.patch_version = MD_PATCHLEVEL_VERSION;
3577 info.ctime = mddev->ctime;
3578 info.level = mddev->level;
3579 info.size = mddev->size;
3580 if (info.size != mddev->size) /* overflow */
3583 info.raid_disks = mddev->raid_disks;
3584 info.md_minor = mddev->md_minor;
3585 info.not_persistent= !mddev->persistent;
3587 info.utime = mddev->utime;
3590 info.state = (1<<MD_SB_CLEAN);
3591 if (mddev->bitmap && mddev->bitmap_offset)
3592 info.state = (1<<MD_SB_BITMAP_PRESENT);
3593 info.active_disks = active;
3594 info.working_disks = working;
3595 info.failed_disks = failed;
3596 info.spare_disks = spare;
3598 info.layout = mddev->layout;
3599 info.chunk_size = mddev->chunk_size;
3601 if (copy_to_user(arg, &info, sizeof(info)))
3607 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
3609 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
3610 char *ptr, *buf = NULL;
3613 md_allow_write(mddev);
3615 file = kmalloc(sizeof(*file), GFP_KERNEL);
3619 /* bitmap disabled, zero the first byte and copy out */
3620 if (!mddev->bitmap || !mddev->bitmap->file) {
3621 file->pathname[0] = '\0';
3625 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
3629 ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname));
3633 strcpy(file->pathname, ptr);
3637 if (copy_to_user(arg, file, sizeof(*file)))
3645 static int get_disk_info(mddev_t * mddev, void __user * arg)
3647 mdu_disk_info_t info;
3651 if (copy_from_user(&info, arg, sizeof(info)))
3656 rdev = find_rdev_nr(mddev, nr);
3658 info.major = MAJOR(rdev->bdev->bd_dev);
3659 info.minor = MINOR(rdev->bdev->bd_dev);
3660 info.raid_disk = rdev->raid_disk;
3662 if (test_bit(Faulty, &rdev->flags))
3663 info.state |= (1<<MD_DISK_FAULTY);
3664 else if (test_bit(In_sync, &rdev->flags)) {
3665 info.state |= (1<<MD_DISK_ACTIVE);
3666 info.state |= (1<<MD_DISK_SYNC);
3668 if (test_bit(WriteMostly, &rdev->flags))
3669 info.state |= (1<<MD_DISK_WRITEMOSTLY);
3671 info.major = info.minor = 0;
3672 info.raid_disk = -1;
3673 info.state = (1<<MD_DISK_REMOVED);
3676 if (copy_to_user(arg, &info, sizeof(info)))
3682 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
3684 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
3686 dev_t dev = MKDEV(info->major,info->minor);
3688 if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
3691 if (!mddev->raid_disks) {
3693 /* expecting a device which has a superblock */
3694 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
3697 "md: md_import_device returned %ld\n",
3699 return PTR_ERR(rdev);
3701 if (!list_empty(&mddev->disks)) {
3702 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
3703 mdk_rdev_t, same_set);
3704 int err = super_types[mddev->major_version]
3705 .load_super(rdev, rdev0, mddev->minor_version);
3708 "md: %s has different UUID to %s\n",
3709 bdevname(rdev->bdev,b),
3710 bdevname(rdev0->bdev,b2));
3715 err = bind_rdev_to_array(rdev, mddev);
3722 * add_new_disk can be used once the array is assembled
3723 * to add "hot spares". They must already have a superblock
3728 if (!mddev->pers->hot_add_disk) {
3730 "%s: personality does not support diskops!\n",
3734 if (mddev->persistent)
3735 rdev = md_import_device(dev, mddev->major_version,
3736 mddev->minor_version);
3738 rdev = md_import_device(dev, -1, -1);
3741 "md: md_import_device returned %ld\n",
3743 return PTR_ERR(rdev);
3745 /* set save_raid_disk if appropriate */
3746 if (!mddev->persistent) {
3747 if (info->state & (1<<MD_DISK_SYNC) &&
3748 info->raid_disk < mddev->raid_disks)
3749 rdev->raid_disk = info->raid_disk;
3751 rdev->raid_disk = -1;
3753 super_types[mddev->major_version].
3754 validate_super(mddev, rdev);
3755 rdev->saved_raid_disk = rdev->raid_disk;
3757 clear_bit(In_sync, &rdev->flags); /* just to be sure */
3758 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
3759 set_bit(WriteMostly, &rdev->flags);
3761 rdev->raid_disk = -1;
3762 err = bind_rdev_to_array(rdev, mddev);
3763 if (!err && !mddev->pers->hot_remove_disk) {
3764 /* If there is hot_add_disk but no hot_remove_disk
3765 * then added disks for geometry changes,
3766 * and should be added immediately.
3768 super_types[mddev->major_version].
3769 validate_super(mddev, rdev);
3770 err = mddev->pers->hot_add_disk(mddev, rdev);
3772 unbind_rdev_from_array(rdev);
3777 md_update_sb(mddev, 1);
3778 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3779 md_wakeup_thread(mddev->thread);
3783 /* otherwise, add_new_disk is only allowed
3784 * for major_version==0 superblocks
3786 if (mddev->major_version != 0) {
3787 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
3792 if (!(info->state & (1<<MD_DISK_FAULTY))) {
3794 rdev = md_import_device (dev, -1, 0);
3797 "md: error, md_import_device() returned %ld\n",
3799 return PTR_ERR(rdev);
3801 rdev->desc_nr = info->number;
3802 if (info->raid_disk < mddev->raid_disks)
3803 rdev->raid_disk = info->raid_disk;
3805 rdev->raid_disk = -1;
3809 if (rdev->raid_disk < mddev->raid_disks)
3810 if (info->state & (1<<MD_DISK_SYNC))
3811 set_bit(In_sync, &rdev->flags);
3813 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
3814 set_bit(WriteMostly, &rdev->flags);
3816 if (!mddev->persistent) {
3817 printk(KERN_INFO "md: nonpersistent superblock ...\n");
3818 rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
3820 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
3821 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
3823 err = bind_rdev_to_array(rdev, mddev);
3833 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
3835 char b[BDEVNAME_SIZE];
3841 rdev = find_rdev(mddev, dev);
3845 if (rdev->raid_disk >= 0)
3848 kick_rdev_from_array(rdev);
3849 md_update_sb(mddev, 1);
3850 md_new_event(mddev);
3854 printk(KERN_WARNING "md: cannot remove active disk %s from %s ... \n",
3855 bdevname(rdev->bdev,b), mdname(mddev));
3859 static int hot_add_disk(mddev_t * mddev, dev_t dev)
3861 char b[BDEVNAME_SIZE];
3869 if (mddev->major_version != 0) {
3870 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
3871 " version-0 superblocks.\n",
3875 if (!mddev->pers->hot_add_disk) {
3877 "%s: personality does not support diskops!\n",
3882 rdev = md_import_device (dev, -1, 0);
3885 "md: error, md_import_device() returned %ld\n",
3890 if (mddev->persistent)
3891 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
3894 rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
3896 size = calc_dev_size(rdev, mddev->chunk_size);
3899 if (test_bit(Faulty, &rdev->flags)) {
3901 "md: can not hot-add faulty %s disk to %s!\n",
3902 bdevname(rdev->bdev,b), mdname(mddev));
3906 clear_bit(In_sync, &rdev->flags);
3908 rdev->saved_raid_disk = -1;
3909 err = bind_rdev_to_array(rdev, mddev);
3914 * The rest should better be atomic, we can have disk failures
3915 * noticed in interrupt contexts ...
3918 if (rdev->desc_nr == mddev->max_disks) {
3919 printk(KERN_WARNING "%s: can not hot-add to full array!\n",
3922 goto abort_unbind_export;
3925 rdev->raid_disk = -1;
3927 md_update_sb(mddev, 1);
3930 * Kick recovery, maybe this spare has to be added to the
3931 * array immediately.
3933 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3934 md_wakeup_thread(mddev->thread);
3935 md_new_event(mddev);
3938 abort_unbind_export:
3939 unbind_rdev_from_array(rdev);
3946 static int set_bitmap_file(mddev_t *mddev, int fd)
3951 if (!mddev->pers->quiesce)
3953 if (mddev->recovery || mddev->sync_thread)
3955 /* we should be able to change the bitmap.. */
3961 return -EEXIST; /* cannot add when bitmap is present */
3962 mddev->bitmap_file = fget(fd);
3964 if (mddev->bitmap_file == NULL) {
3965 printk(KERN_ERR "%s: error: failed to get bitmap file\n",
3970 err = deny_bitmap_write_access(mddev->bitmap_file);
3972 printk(KERN_ERR "%s: error: bitmap file is already in use\n",
3974 fput(mddev->bitmap_file);
3975 mddev->bitmap_file = NULL;
3978 mddev->bitmap_offset = 0; /* file overrides offset */
3979 } else if (mddev->bitmap == NULL)
3980 return -ENOENT; /* cannot remove what isn't there */
3983 mddev->pers->quiesce(mddev, 1);
3985 err = bitmap_create(mddev);
3986 if (fd < 0 || err) {
3987 bitmap_destroy(mddev);
3988 fd = -1; /* make sure to put the file */
3990 mddev->pers->quiesce(mddev, 0);
3993 if (mddev->bitmap_file) {
3994 restore_bitmap_write_access(mddev->bitmap_file);
3995 fput(mddev->bitmap_file);
3997 mddev->bitmap_file = NULL;
4004 * set_array_info is used two different ways
4005 * The original usage is when creating a new array.
4006 * In this usage, raid_disks is > 0 and it together with
4007 * level, size, not_persistent,layout,chunksize determine the
4008 * shape of the array.
4009 * This will always create an array with a type-0.90.0 superblock.
4010 * The newer usage is when assembling an array.
4011 * In this case raid_disks will be 0, and the major_version field is
4012 * use to determine which style super-blocks are to be found on the devices.
4013 * The minor and patch _version numbers are also kept incase the
4014 * super_block handler wishes to interpret them.
4016 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
4019 if (info->raid_disks == 0) {
4020 /* just setting version number for superblock loading */
4021 if (info->major_version < 0 ||
4022 info->major_version >= sizeof(super_types)/sizeof(super_types[0]) ||
4023 super_types[info->major_version].name == NULL) {
4024 /* maybe try to auto-load a module? */
4026 "md: superblock version %d not known\n",
4027 info->major_version);
4030 mddev->major_version = info->major_version;
4031 mddev->minor_version = info->minor_version;
4032 mddev->patch_version = info->patch_version;
4033 mddev->persistent = !info->not_persistent;
4036 mddev->major_version = MD_MAJOR_VERSION;
4037 mddev->minor_version = MD_MINOR_VERSION;
4038 mddev->patch_version = MD_PATCHLEVEL_VERSION;
4039 mddev->ctime = get_seconds();
4041 mddev->level = info->level;
4042 mddev->clevel[0] = 0;
4043 mddev->size = info->size;
4044 mddev->raid_disks = info->raid_disks;
4045 /* don't set md_minor, it is determined by which /dev/md* was
4048 if (info->state & (1<<MD_SB_CLEAN))
4049 mddev->recovery_cp = MaxSector;
4051 mddev->recovery_cp = 0;
4052 mddev->persistent = ! info->not_persistent;
4054 mddev->layout = info->layout;
4055 mddev->chunk_size = info->chunk_size;
4057 mddev->max_disks = MD_SB_DISKS;
4060 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4062 mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
4063 mddev->bitmap_offset = 0;
4065 mddev->reshape_position = MaxSector;
4068 * Generate a 128 bit UUID
4070 get_random_bytes(mddev->uuid, 16);
4072 mddev->new_level = mddev->level;
4073 mddev->new_chunk = mddev->chunk_size;
4074 mddev->new_layout = mddev->layout;
4075 mddev->delta_disks = 0;
4080 static int update_size(mddev_t *mddev, unsigned long size)
4084 struct list_head *tmp;
4085 int fit = (size == 0);
4087 if (mddev->pers->resize == NULL)
4089 /* The "size" is the amount of each device that is used.
4090 * This can only make sense for arrays with redundancy.
4091 * linear and raid0 always use whatever space is available
4092 * We can only consider changing the size if no resync
4093 * or reconstruction is happening, and if the new size
4094 * is acceptable. It must fit before the sb_offset or,
4095 * if that is <data_offset, it must fit before the
4096 * size of each device.
4097 * If size is zero, we find the largest size that fits.
4099 if (mddev->sync_thread)
4101 ITERATE_RDEV(mddev,rdev,tmp) {
4103 avail = rdev->size * 2;
4105 if (fit && (size == 0 || size > avail/2))
4107 if (avail < ((sector_t)size << 1))
4110 rv = mddev->pers->resize(mddev, (sector_t)size *2);
4112 struct block_device *bdev;
4114 bdev = bdget_disk(mddev->gendisk, 0);
4116 mutex_lock(&bdev->bd_inode->i_mutex);
4117 i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10);
4118 mutex_unlock(&bdev->bd_inode->i_mutex);
4125 static int update_raid_disks(mddev_t *mddev, int raid_disks)
4128 /* change the number of raid disks */
4129 if (mddev->pers->check_reshape == NULL)
4131 if (raid_disks <= 0 ||
4132 raid_disks >= mddev->max_disks)
4134 if (mddev->sync_thread || mddev->reshape_position != MaxSector)
4136 mddev->delta_disks = raid_disks - mddev->raid_disks;
4138 rv = mddev->pers->check_reshape(mddev);
4144 * update_array_info is used to change the configuration of an
4146 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size
4147 * fields in the info are checked against the array.
4148 * Any differences that cannot be handled will cause an error.
4149 * Normally, only one change can be managed at a time.
4151 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
4157 /* calculate expected state,ignoring low bits */
4158 if (mddev->bitmap && mddev->bitmap_offset)
4159 state |= (1 << MD_SB_BITMAP_PRESENT);
4161 if (mddev->major_version != info->major_version ||
4162 mddev->minor_version != info->minor_version ||
4163 /* mddev->patch_version != info->patch_version || */
4164 mddev->ctime != info->ctime ||
4165 mddev->level != info->level ||
4166 /* mddev->layout != info->layout || */
4167 !mddev->persistent != info->not_persistent||
4168 mddev->chunk_size != info->chunk_size ||
4169 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
4170 ((state^info->state) & 0xfffffe00)
4173 /* Check there is only one change */
4174 if (info->size >= 0 && mddev->size != info->size) cnt++;
4175 if (mddev->raid_disks != info->raid_disks) cnt++;
4176 if (mddev->layout != info->layout) cnt++;
4177 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
4178 if (cnt == 0) return 0;
4179 if (cnt > 1) return -EINVAL;
4181 if (mddev->layout != info->layout) {
4183 * we don't need to do anything at the md level, the
4184 * personality will take care of it all.
4186 if (mddev->pers->reconfig == NULL)
4189 return mddev->pers->reconfig(mddev, info->layout, -1);
4191 if (info->size >= 0 && mddev->size != info->size)
4192 rv = update_size(mddev, info->size);
4194 if (mddev->raid_disks != info->raid_disks)
4195 rv = update_raid_disks(mddev, info->raid_disks);
4197 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
4198 if (mddev->pers->quiesce == NULL)
4200 if (mddev->recovery || mddev->sync_thread)
4202 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
4203 /* add the bitmap */
4206 if (mddev->default_bitmap_offset == 0)
4208 mddev->bitmap_offset = mddev->default_bitmap_offset;
4209 mddev->pers->quiesce(mddev, 1);
4210 rv = bitmap_create(mddev);
4212 bitmap_destroy(mddev);
4213 mddev->pers->quiesce(mddev, 0);
4215 /* remove the bitmap */
4218 if (mddev->bitmap->file)
4220 mddev->pers->quiesce(mddev, 1);
4221 bitmap_destroy(mddev);
4222 mddev->pers->quiesce(mddev, 0);
4223 mddev->bitmap_offset = 0;
4226 md_update_sb(mddev, 1);
4230 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
4234 if (mddev->pers == NULL)
4237 rdev = find_rdev(mddev, dev);
4241 md_error(mddev, rdev);
4245 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4247 mddev_t *mddev = bdev->bd_disk->private_data;
4251 geo->cylinders = get_capacity(mddev->gendisk) / 8;
4255 static int md_ioctl(struct inode *inode, struct file *file,
4256 unsigned int cmd, unsigned long arg)
4259 void __user *argp = (void __user *)arg;
4260 mddev_t *mddev = NULL;
4262 if (!capable(CAP_SYS_ADMIN))
4266 * Commands dealing with the RAID driver but not any
4272 err = get_version(argp);
4275 case PRINT_RAID_DEBUG:
4283 autostart_arrays(arg);
4290 * Commands creating/starting a new array:
4293 mddev = inode->i_bdev->bd_disk->private_data;
4300 err = mddev_lock(mddev);
4303 "md: ioctl lock interrupted, reason %d, cmd %d\n",
4310 case SET_ARRAY_INFO:
4312 mdu_array_info_t info;
4314 memset(&info, 0, sizeof(info));
4315 else if (copy_from_user(&info, argp, sizeof(info))) {
4320 err = update_array_info(mddev, &info);
4322 printk(KERN_WARNING "md: couldn't update"
4323 " array info. %d\n", err);
4328 if (!list_empty(&mddev->disks)) {
4330 "md: array %s already has disks!\n",
4335 if (mddev->raid_disks) {
4337 "md: array %s already initialised!\n",
4342 err = set_array_info(mddev, &info);
4344 printk(KERN_WARNING "md: couldn't set"
4345 " array info. %d\n", err);
4355 * Commands querying/configuring an existing array:
4357 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
4358 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
4359 if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
4360 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
4361 && cmd != GET_BITMAP_FILE) {
4367 * Commands even a read-only array can execute:
4371 case GET_ARRAY_INFO:
4372 err = get_array_info(mddev, argp);
4375 case GET_BITMAP_FILE:
4376 err = get_bitmap_file(mddev, argp);
4380 err = get_disk_info(mddev, argp);
4383 case RESTART_ARRAY_RW:
4384 err = restart_array(mddev);
4388 err = do_md_stop (mddev, 0);
4392 err = do_md_stop (mddev, 1);
4396 * We have a problem here : there is no easy way to give a CHS
4397 * virtual geometry. We currently pretend that we have a 2 heads
4398 * 4 sectors (with a BIG number of cylinders...). This drives
4399 * dosfs just mad... ;-)
4404 * The remaining ioctls are changing the state of the
4405 * superblock, so we do not allow them on read-only arrays.
4406 * However non-MD ioctls (e.g. get-size) will still come through
4407 * here and hit the 'default' below, so only disallow
4408 * 'md' ioctls, and switch to rw mode if started auto-readonly.
4410 if (_IOC_TYPE(cmd) == MD_MAJOR &&
4411 mddev->ro && mddev->pers) {
4412 if (mddev->ro == 2) {
4414 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4415 md_wakeup_thread(mddev->thread);
4427 mdu_disk_info_t info;
4428 if (copy_from_user(&info, argp, sizeof(info)))
4431 err = add_new_disk(mddev, &info);
4435 case HOT_REMOVE_DISK:
4436 err = hot_remove_disk(mddev, new_decode_dev(arg));
4440 err = hot_add_disk(mddev, new_decode_dev(arg));
4443 case SET_DISK_FAULTY:
4444 err = set_disk_faulty(mddev, new_decode_dev(arg));
4448 err = do_md_run (mddev);
4451 case SET_BITMAP_FILE:
4452 err = set_bitmap_file(mddev, (int)arg);
4462 mddev_unlock(mddev);
4472 static int md_open(struct inode *inode, struct file *file)
4475 * Succeed if we can lock the mddev, which confirms that
4476 * it isn't being stopped right now.
4478 mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
4481 if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
4486 mddev_unlock(mddev);
4488 check_disk_change(inode->i_bdev);
4493 static int md_release(struct inode *inode, struct file * file)
4495 mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
4503 static int md_media_changed(struct gendisk *disk)
4505 mddev_t *mddev = disk->private_data;
4507 return mddev->changed;
4510 static int md_revalidate(struct gendisk *disk)
4512 mddev_t *mddev = disk->private_data;
4517 static struct block_device_operations md_fops =
4519 .owner = THIS_MODULE,
4521 .release = md_release,
4523 .getgeo = md_getgeo,
4524 .media_changed = md_media_changed,
4525 .revalidate_disk= md_revalidate,
4528 static int md_thread(void * arg)
4530 mdk_thread_t *thread = arg;
4533 * md_thread is a 'system-thread', it's priority should be very
4534 * high. We avoid resource deadlocks individually in each
4535 * raid personality. (RAID5 does preallocation) We also use RR and
4536 * the very same RT priority as kswapd, thus we will never get
4537 * into a priority inversion deadlock.
4539 * we definitely have to have equal or higher priority than
4540 * bdflush, otherwise bdflush will deadlock if there are too
4541 * many dirty RAID5 blocks.
4544 current->flags |= PF_NOFREEZE;
4545 allow_signal(SIGKILL);
4546 while (!kthread_should_stop()) {
4548 /* We need to wait INTERRUPTIBLE so that
4549 * we don't add to the load-average.
4550 * That means we need to be sure no signals are
4553 if (signal_pending(current))
4554 flush_signals(current);
4556 wait_event_interruptible_timeout
4558 test_bit(THREAD_WAKEUP, &thread->flags)
4559 || kthread_should_stop(),
4562 clear_bit(THREAD_WAKEUP, &thread->flags);
4564 thread->run(thread->mddev);
4570 void md_wakeup_thread(mdk_thread_t *thread)
4573 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
4574 set_bit(THREAD_WAKEUP, &thread->flags);
4575 wake_up(&thread->wqueue);
4579 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
4582 mdk_thread_t *thread;
4584 thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
4588 init_waitqueue_head(&thread->wqueue);
4591 thread->mddev = mddev;
4592 thread->timeout = MAX_SCHEDULE_TIMEOUT;
4593 thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
4594 if (IS_ERR(thread->tsk)) {
4601 void md_unregister_thread(mdk_thread_t *thread)
4603 dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
4605 kthread_stop(thread->tsk);
4609 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
4616 if (!rdev || test_bit(Faulty, &rdev->flags))
4619 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
4621 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
4622 __builtin_return_address(0),__builtin_return_address(1),
4623 __builtin_return_address(2),__builtin_return_address(3));
4627 if (!mddev->pers->error_handler)
4629 mddev->pers->error_handler(mddev,rdev);
4630 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4631 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4632 md_wakeup_thread(mddev->thread);
4633 md_new_event_inintr(mddev);
4636 /* seq_file implementation /proc/mdstat */
4638 static void status_unused(struct seq_file *seq)
4642 struct list_head *tmp;
4644 seq_printf(seq, "unused devices: ");
4646 ITERATE_RDEV_PENDING(rdev,tmp) {
4647 char b[BDEVNAME_SIZE];
4649 seq_printf(seq, "%s ",
4650 bdevname(rdev->bdev,b));
4653 seq_printf(seq, "<none>");
4655 seq_printf(seq, "\n");
4659 static void status_resync(struct seq_file *seq, mddev_t * mddev)
4661 sector_t max_blocks, resync, res;
4662 unsigned long dt, db, rt;
4664 unsigned int per_milli;
4666 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
4668 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
4669 max_blocks = mddev->resync_max_sectors >> 1;
4671 max_blocks = mddev->size;
4674 * Should not happen.
4680 /* Pick 'scale' such that (resync>>scale)*1000 will fit
4681 * in a sector_t, and (max_blocks>>scale) will fit in a
4682 * u32, as those are the requirements for sector_div.
4683 * Thus 'scale' must be at least 10
4686 if (sizeof(sector_t) > sizeof(unsigned long)) {
4687 while ( max_blocks/2 > (1ULL<<(scale+32)))
4690 res = (resync>>scale)*1000;
4691 sector_div(res, (u32)((max_blocks>>scale)+1));
4695 int i, x = per_milli/50, y = 20-x;
4696 seq_printf(seq, "[");
4697 for (i = 0; i < x; i++)
4698 seq_printf(seq, "=");
4699 seq_printf(seq, ">");
4700 for (i = 0; i < y; i++)
4701 seq_printf(seq, ".");
4702 seq_printf(seq, "] ");
4704 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
4705 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
4707 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
4709 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
4710 "resync" : "recovery"))),
4711 per_milli/10, per_milli % 10,
4712 (unsigned long long) resync,
4713 (unsigned long long) max_blocks);
4716 * We do not want to overflow, so the order of operands and
4717 * the * 100 / 100 trick are important. We do a +1 to be
4718 * safe against division by zero. We only estimate anyway.
4720 * dt: time from mark until now
4721 * db: blocks written from mark until now
4722 * rt: remaining time
4724 dt = ((jiffies - mddev->resync_mark) / HZ);
4726 db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
4727 - mddev->resync_mark_cnt;
4728 rt = (dt * ((unsigned long)(max_blocks-resync) / (db/2/100+1)))/100;
4730 seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
4732 seq_printf(seq, " speed=%ldK/sec", db/2/dt);
4735 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
4737 struct list_head *tmp;
4747 spin_lock(&all_mddevs_lock);
4748 list_for_each(tmp,&all_mddevs)
4750 mddev = list_entry(tmp, mddev_t, all_mddevs);
4752 spin_unlock(&all_mddevs_lock);
4755 spin_unlock(&all_mddevs_lock);
4757 return (void*)2;/* tail */
4761 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4763 struct list_head *tmp;
4764 mddev_t *next_mddev, *mddev = v;
4770 spin_lock(&all_mddevs_lock);
4772 tmp = all_mddevs.next;
4774 tmp = mddev->all_mddevs.next;
4775 if (tmp != &all_mddevs)
4776 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
4778 next_mddev = (void*)2;
4781 spin_unlock(&all_mddevs_lock);
4789 static void md_seq_stop(struct seq_file *seq, void *v)
4793 if (mddev && v != (void*)1 && v != (void*)2)
4797 struct mdstat_info {
4801 static int md_seq_show(struct seq_file *seq, void *v)
4805 struct list_head *tmp2;
4807 struct mdstat_info *mi = seq->private;
4808 struct bitmap *bitmap;
4810 if (v == (void*)1) {
4811 struct mdk_personality *pers;
4812 seq_printf(seq, "Personalities : ");
4813 spin_lock(&pers_lock);
4814 list_for_each_entry(pers, &pers_list, list)
4815 seq_printf(seq, "[%s] ", pers->name);
4817 spin_unlock(&pers_lock);
4818 seq_printf(seq, "\n");
4819 mi->event = atomic_read(&md_event_count);
4822 if (v == (void*)2) {
4827 if (mddev_lock(mddev) < 0)
4830 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
4831 seq_printf(seq, "%s : %sactive", mdname(mddev),
4832 mddev->pers ? "" : "in");
4835 seq_printf(seq, " (read-only)");
4837 seq_printf(seq, "(auto-read-only)");
4838 seq_printf(seq, " %s", mddev->pers->name);
4842 ITERATE_RDEV(mddev,rdev,tmp2) {
4843 char b[BDEVNAME_SIZE];
4844 seq_printf(seq, " %s[%d]",
4845 bdevname(rdev->bdev,b), rdev->desc_nr);
4846 if (test_bit(WriteMostly, &rdev->flags))
4847 seq_printf(seq, "(W)");
4848 if (test_bit(Faulty, &rdev->flags)) {
4849 seq_printf(seq, "(F)");
4851 } else if (rdev->raid_disk < 0)
4852 seq_printf(seq, "(S)"); /* spare */
4856 if (!list_empty(&mddev->disks)) {
4858 seq_printf(seq, "\n %llu blocks",
4859 (unsigned long long)mddev->array_size);
4861 seq_printf(seq, "\n %llu blocks",
4862 (unsigned long long)size);
4864 if (mddev->persistent) {
4865 if (mddev->major_version != 0 ||
4866 mddev->minor_version != 90) {
4867 seq_printf(seq," super %d.%d",
4868 mddev->major_version,
4869 mddev->minor_version);
4872 seq_printf(seq, " super non-persistent");
4875 mddev->pers->status (seq, mddev);
4876 seq_printf(seq, "\n ");
4877 if (mddev->pers->sync_request) {
4878 if (mddev->curr_resync > 2) {
4879 status_resync (seq, mddev);
4880 seq_printf(seq, "\n ");
4881 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
4882 seq_printf(seq, "\tresync=DELAYED\n ");
4883 else if (mddev->recovery_cp < MaxSector)
4884 seq_printf(seq, "\tresync=PENDING\n ");
4887 seq_printf(seq, "\n ");
4889 if ((bitmap = mddev->bitmap)) {
4890 unsigned long chunk_kb;
4891 unsigned long flags;
4892 spin_lock_irqsave(&bitmap->lock, flags);
4893 chunk_kb = bitmap->chunksize >> 10;
4894 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
4896 bitmap->pages - bitmap->missing_pages,
4898 (bitmap->pages - bitmap->missing_pages)
4899 << (PAGE_SHIFT - 10),
4900 chunk_kb ? chunk_kb : bitmap->chunksize,
4901 chunk_kb ? "KB" : "B");
4903 seq_printf(seq, ", file: ");
4904 seq_path(seq, bitmap->file->f_path.mnt,
4905 bitmap->file->f_path.dentry," \t\n");
4908 seq_printf(seq, "\n");
4909 spin_unlock_irqrestore(&bitmap->lock, flags);
4912 seq_printf(seq, "\n");
4914 mddev_unlock(mddev);
4919 static struct seq_operations md_seq_ops = {
4920 .start = md_seq_start,
4921 .next = md_seq_next,
4922 .stop = md_seq_stop,
4923 .show = md_seq_show,
4926 static int md_seq_open(struct inode *inode, struct file *file)
4929 struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
4933 error = seq_open(file, &md_seq_ops);
4937 struct seq_file *p = file->private_data;
4939 mi->event = atomic_read(&md_event_count);
4944 static int md_seq_release(struct inode *inode, struct file *file)
4946 struct seq_file *m = file->private_data;
4947 struct mdstat_info *mi = m->private;
4950 return seq_release(inode, file);
4953 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
4955 struct seq_file *m = filp->private_data;
4956 struct mdstat_info *mi = m->private;
4959 poll_wait(filp, &md_event_waiters, wait);
4961 /* always allow read */
4962 mask = POLLIN | POLLRDNORM;
4964 if (mi->event != atomic_read(&md_event_count))
4965 mask |= POLLERR | POLLPRI;
4969 static const struct file_operations md_seq_fops = {
4970 .owner = THIS_MODULE,
4971 .open = md_seq_open,
4973 .llseek = seq_lseek,
4974 .release = md_seq_release,
4975 .poll = mdstat_poll,
4978 int register_md_personality(struct mdk_personality *p)
4980 spin_lock(&pers_lock);
4981 list_add_tail(&p->list, &pers_list);
4982 printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
4983 spin_unlock(&pers_lock);
4987 int unregister_md_personality(struct mdk_personality *p)
4989 printk(KERN_INFO "md: %s personality unregistered\n", p->name);
4990 spin_lock(&pers_lock);
4991 list_del_init(&p->list);
4992 spin_unlock(&pers_lock);
4996 static int is_mddev_idle(mddev_t *mddev)
4999 struct list_head *tmp;
5001 unsigned long curr_events;
5004 ITERATE_RDEV(mddev,rdev,tmp) {
5005 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
5006 curr_events = disk_stat_read(disk, sectors[0]) +
5007 disk_stat_read(disk, sectors[1]) -
5008 atomic_read(&disk->sync_io);
5009 /* The difference between curr_events and last_events
5010 * will be affected by any new non-sync IO (making
5011 * curr_events bigger) and any difference in the amount of
5012 * in-flight syncio (making current_events bigger or smaller)
5013 * The amount in-flight is currently limited to
5014 * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6
5015 * which is at most 4096 sectors.
5016 * These numbers are fairly fragile and should be made
5017 * more robust, probably by enforcing the
5018 * 'window size' that md_do_sync sort-of uses.
5020 * Note: the following is an unsigned comparison.
5022 if ((curr_events - rdev->last_events + 4096) > 8192) {
5023 rdev->last_events = curr_events;
5030 void md_done_sync(mddev_t *mddev, int blocks, int ok)
5032 /* another "blocks" (512byte) blocks have been synced */
5033 atomic_sub(blocks, &mddev->recovery_active);
5034 wake_up(&mddev->recovery_wait);
5036 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
5037 md_wakeup_thread(mddev->thread);
5038 // stop recovery, signal do_sync ....
5043 /* md_write_start(mddev, bi)
5044 * If we need to update some array metadata (e.g. 'active' flag
5045 * in superblock) before writing, schedule a superblock update
5046 * and wait for it to complete.
5048 void md_write_start(mddev_t *mddev, struct bio *bi)
5050 if (bio_data_dir(bi) != WRITE)
5053 BUG_ON(mddev->ro == 1);
5054 if (mddev->ro == 2) {
5055 /* need to switch to read/write */
5057 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5058 md_wakeup_thread(mddev->thread);
5060 atomic_inc(&mddev->writes_pending);
5061 if (mddev->in_sync) {
5062 spin_lock_irq(&mddev->write_lock);
5063 if (mddev->in_sync) {
5065 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5066 md_wakeup_thread(mddev->thread);
5068 spin_unlock_irq(&mddev->write_lock);
5070 wait_event(mddev->sb_wait, mddev->flags==0);
5073 void md_write_end(mddev_t *mddev)
5075 if (atomic_dec_and_test(&mddev->writes_pending)) {
5076 if (mddev->safemode == 2)
5077 md_wakeup_thread(mddev->thread);
5078 else if (mddev->safemode_delay)
5079 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
5083 /* md_allow_write(mddev)
5084 * Calling this ensures that the array is marked 'active' so that writes
5085 * may proceed without blocking. It is important to call this before
5086 * attempting a GFP_KERNEL allocation while holding the mddev lock.
5087 * Must be called with mddev_lock held.
5089 void md_allow_write(mddev_t *mddev)
5096 spin_lock_irq(&mddev->write_lock);
5097 if (mddev->in_sync) {
5099 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5100 if (mddev->safemode_delay &&
5101 mddev->safemode == 0)
5102 mddev->safemode = 1;
5103 spin_unlock_irq(&mddev->write_lock);
5104 md_update_sb(mddev, 0);
5106 spin_unlock_irq(&mddev->write_lock);
5108 EXPORT_SYMBOL_GPL(md_allow_write);
5110 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
5112 #define SYNC_MARKS 10
5113 #define SYNC_MARK_STEP (3*HZ)
5114 void md_do_sync(mddev_t *mddev)
5117 unsigned int currspeed = 0,
5119 sector_t max_sectors,j, io_sectors;
5120 unsigned long mark[SYNC_MARKS];
5121 sector_t mark_cnt[SYNC_MARKS];
5123 struct list_head *tmp;
5124 sector_t last_check;
5126 struct list_head *rtmp;
5130 /* just incase thread restarts... */
5131 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
5133 if (mddev->ro) /* never try to sync a read-only array */
5136 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5137 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
5138 desc = "data-check";
5139 else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
5140 desc = "requested-resync";
5143 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5148 /* we overload curr_resync somewhat here.
5149 * 0 == not engaged in resync at all
5150 * 2 == checking that there is no conflict with another sync
5151 * 1 == like 2, but have yielded to allow conflicting resync to
5153 * other == active in resync - this many blocks
5155 * Before starting a resync we must have set curr_resync to
5156 * 2, and then checked that every "conflicting" array has curr_resync
5157 * less than ours. When we find one that is the same or higher
5158 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync
5159 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
5160 * This will mean we have to start checking from the beginning again.
5165 mddev->curr_resync = 2;
5168 if (kthread_should_stop()) {
5169 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5172 ITERATE_MDDEV(mddev2,tmp) {
5173 if (mddev2 == mddev)
5175 if (mddev2->curr_resync &&
5176 match_mddev_units(mddev,mddev2)) {
5178 if (mddev < mddev2 && mddev->curr_resync == 2) {
5179 /* arbitrarily yield */
5180 mddev->curr_resync = 1;
5181 wake_up(&resync_wait);
5183 if (mddev > mddev2 && mddev->curr_resync == 1)
5184 /* no need to wait here, we can wait the next
5185 * time 'round when curr_resync == 2
5188 prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
5189 if (!kthread_should_stop() &&
5190 mddev2->curr_resync >= mddev->curr_resync) {
5191 printk(KERN_INFO "md: delaying %s of %s"
5192 " until %s has finished (they"
5193 " share one or more physical units)\n",
5194 desc, mdname(mddev), mdname(mddev2));
5197 finish_wait(&resync_wait, &wq);
5200 finish_wait(&resync_wait, &wq);
5203 } while (mddev->curr_resync < 2);
5206 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5207 /* resync follows the size requested by the personality,
5208 * which defaults to physical size, but can be virtual size
5210 max_sectors = mddev->resync_max_sectors;
5211 mddev->resync_mismatches = 0;
5212 /* we don't use the checkpoint if there's a bitmap */
5213 if (!mddev->bitmap &&
5214 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
5215 j = mddev->recovery_cp;
5216 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5217 max_sectors = mddev->size << 1;
5219 /* recovery follows the physical size of devices */
5220 max_sectors = mddev->size << 1;
5222 ITERATE_RDEV(mddev,rdev,rtmp)
5223 if (rdev->raid_disk >= 0 &&
5224 !test_bit(Faulty, &rdev->flags) &&
5225 !test_bit(In_sync, &rdev->flags) &&
5226 rdev->recovery_offset < j)
5227 j = rdev->recovery_offset;
5230 printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
5231 printk(KERN_INFO "md: minimum _guaranteed_ speed:"
5232 " %d KB/sec/disk.\n", speed_min(mddev));
5233 printk(KERN_INFO "md: using maximum available idle IO bandwidth "
5234 "(but not more than %d KB/sec) for %s.\n",
5235 speed_max(mddev), desc);
5237 is_mddev_idle(mddev); /* this also initializes IO event counters */
5240 for (m = 0; m < SYNC_MARKS; m++) {
5242 mark_cnt[m] = io_sectors;
5245 mddev->resync_mark = mark[last_mark];
5246 mddev->resync_mark_cnt = mark_cnt[last_mark];
5249 * Tune reconstruction:
5251 window = 32*(PAGE_SIZE/512);
5252 printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
5253 window/2,(unsigned long long) max_sectors/2);
5255 atomic_set(&mddev->recovery_active, 0);
5256 init_waitqueue_head(&mddev->recovery_wait);
5261 "md: resuming %s of %s from checkpoint.\n",
5262 desc, mdname(mddev));
5263 mddev->curr_resync = j;
5266 while (j < max_sectors) {
5270 sectors = mddev->pers->sync_request(mddev, j, &skipped,
5271 currspeed < speed_min(mddev));
5273 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
5277 if (!skipped) { /* actual IO requested */
5278 io_sectors += sectors;
5279 atomic_add(sectors, &mddev->recovery_active);
5283 if (j>1) mddev->curr_resync = j;
5284 mddev->curr_mark_cnt = io_sectors;
5285 if (last_check == 0)
5286 /* this is the earliers that rebuilt will be
5287 * visible in /proc/mdstat
5289 md_new_event(mddev);
5291 if (last_check + window > io_sectors || j == max_sectors)
5294 last_check = io_sectors;
5296 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
5297 test_bit(MD_RECOVERY_ERR, &mddev->recovery))
5301 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
5303 int next = (last_mark+1) % SYNC_MARKS;
5305 mddev->resync_mark = mark[next];
5306 mddev->resync_mark_cnt = mark_cnt[next];
5307 mark[next] = jiffies;
5308 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
5313 if (kthread_should_stop()) {
5315 * got a signal, exit.
5318 "md: md_do_sync() got signal ... exiting\n");
5319 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5324 * this loop exits only if either when we are slower than
5325 * the 'hard' speed limit, or the system was IO-idle for
5327 * the system might be non-idle CPU-wise, but we only care
5328 * about not overloading the IO subsystem. (things like an
5329 * e2fsck being done on the RAID array should execute fast)
5331 mddev->queue->unplug_fn(mddev->queue);
5334 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
5335 /((jiffies-mddev->resync_mark)/HZ +1) +1;
5337 if (currspeed > speed_min(mddev)) {
5338 if ((currspeed > speed_max(mddev)) ||
5339 !is_mddev_idle(mddev)) {
5345 printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
5347 * this also signals 'finished resyncing' to md_stop
5350 mddev->queue->unplug_fn(mddev->queue);
5352 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
5354 /* tell personality that we are finished */
5355 mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
5357 if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
5358 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
5359 mddev->curr_resync > 2) {
5360 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5361 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5362 if (mddev->curr_resync >= mddev->recovery_cp) {
5364 "md: checkpointing %s of %s.\n",
5365 desc, mdname(mddev));
5366 mddev->recovery_cp = mddev->curr_resync;
5369 mddev->recovery_cp = MaxSector;
5371 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5372 mddev->curr_resync = MaxSector;
5373 ITERATE_RDEV(mddev,rdev,rtmp)
5374 if (rdev->raid_disk >= 0 &&
5375 !test_bit(Faulty, &rdev->flags) &&
5376 !test_bit(In_sync, &rdev->flags) &&
5377 rdev->recovery_offset < mddev->curr_resync)
5378 rdev->recovery_offset = mddev->curr_resync;
5381 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5384 mddev->curr_resync = 0;
5385 wake_up(&resync_wait);
5386 set_bit(MD_RECOVERY_DONE, &mddev->recovery);
5387 md_wakeup_thread(mddev->thread);
5389 EXPORT_SYMBOL_GPL(md_do_sync);
5392 static int remove_and_add_spares(mddev_t *mddev)
5395 struct list_head *rtmp;
5398 ITERATE_RDEV(mddev,rdev,rtmp)
5399 if (rdev->raid_disk >= 0 &&
5400 (test_bit(Faulty, &rdev->flags) ||
5401 ! test_bit(In_sync, &rdev->flags)) &&
5402 atomic_read(&rdev->nr_pending)==0) {
5403 if (mddev->pers->hot_remove_disk(
5404 mddev, rdev->raid_disk)==0) {
5406 sprintf(nm,"rd%d", rdev->raid_disk);
5407 sysfs_remove_link(&mddev->kobj, nm);
5408 rdev->raid_disk = -1;
5412 if (mddev->degraded) {
5413 ITERATE_RDEV(mddev,rdev,rtmp)
5414 if (rdev->raid_disk < 0
5415 && !test_bit(Faulty, &rdev->flags)) {
5416 rdev->recovery_offset = 0;
5417 if (mddev->pers->hot_add_disk(mddev,rdev)) {
5419 sprintf(nm, "rd%d", rdev->raid_disk);
5420 if (sysfs_create_link(&mddev->kobj,
5423 "md: cannot register "
5427 md_new_event(mddev);
5435 * This routine is regularly called by all per-raid-array threads to
5436 * deal with generic issues like resync and super-block update.
5437 * Raid personalities that don't have a thread (linear/raid0) do not
5438 * need this as they never do any recovery or update the superblock.
5440 * It does not do any resync itself, but rather "forks" off other threads
5441 * to do that as needed.
5442 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
5443 * "->recovery" and create a thread at ->sync_thread.
5444 * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
5445 * and wakeups up this thread which will reap the thread and finish up.
5446 * This thread also removes any faulty devices (with nr_pending == 0).
5448 * The overall approach is:
5449 * 1/ if the superblock needs updating, update it.
5450 * 2/ If a recovery thread is running, don't do anything else.
5451 * 3/ If recovery has finished, clean up, possibly marking spares active.
5452 * 4/ If there are any faulty devices, remove them.
5453 * 5/ If array is degraded, try to add spares devices
5454 * 6/ If array has spares or is not in-sync, start a resync thread.
5456 void md_check_recovery(mddev_t *mddev)
5459 struct list_head *rtmp;
5463 bitmap_daemon_work(mddev->bitmap);
5468 if (signal_pending(current)) {
5469 if (mddev->pers->sync_request) {
5470 printk(KERN_INFO "md: %s in immediate safe mode\n",
5472 mddev->safemode = 2;
5474 flush_signals(current);
5479 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
5480 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
5481 (mddev->safemode == 1) ||
5482 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
5483 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
5487 if (mddev_trylock(mddev)) {
5490 spin_lock_irq(&mddev->write_lock);
5491 if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
5492 !mddev->in_sync && mddev->recovery_cp == MaxSector) {
5494 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
5496 if (mddev->safemode == 1)
5497 mddev->safemode = 0;
5498 spin_unlock_irq(&mddev->write_lock);
5501 md_update_sb(mddev, 0);
5504 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
5505 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
5506 /* resync/recovery still happening */
5507 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5510 if (mddev->sync_thread) {
5511 /* resync has finished, collect result */
5512 md_unregister_thread(mddev->sync_thread);
5513 mddev->sync_thread = NULL;
5514 if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
5515 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5517 /* activate any spares */
5518 mddev->pers->spare_active(mddev);
5520 md_update_sb(mddev, 1);
5522 /* if array is no-longer degraded, then any saved_raid_disk
5523 * information must be scrapped
5525 if (!mddev->degraded)
5526 ITERATE_RDEV(mddev,rdev,rtmp)
5527 rdev->saved_raid_disk = -1;
5529 mddev->recovery = 0;
5530 /* flag recovery needed just to double check */
5531 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5532 md_new_event(mddev);
5535 /* Clear some bits that don't mean anything, but
5538 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5539 clear_bit(MD_RECOVERY_ERR, &mddev->recovery);
5540 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
5541 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
5543 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
5545 /* no recovery is running.
5546 * remove any failed drives, then
5547 * add spares if possible.
5548 * Spare are also removed and re-added, to allow
5549 * the personality to fail the re-add.
5552 if (mddev->reshape_position != MaxSector) {
5553 if (mddev->pers->check_reshape(mddev) != 0)
5554 /* Cannot proceed */
5556 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
5557 } else if ((spares = remove_and_add_spares(mddev))) {
5558 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5559 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5560 } else if (mddev->recovery_cp < MaxSector) {
5561 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5562 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
5563 /* nothing to be done ... */
5566 if (mddev->pers->sync_request) {
5567 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5568 if (spares && mddev->bitmap && ! mddev->bitmap->file) {
5569 /* We are adding a device or devices to an array
5570 * which has the bitmap stored on all devices.
5571 * So make sure all bitmap pages get written
5573 bitmap_write_all(mddev->bitmap);
5575 mddev->sync_thread = md_register_thread(md_do_sync,
5578 if (!mddev->sync_thread) {
5579 printk(KERN_ERR "%s: could not start resync"
5582 /* leave the spares where they are, it shouldn't hurt */
5583 mddev->recovery = 0;
5585 md_wakeup_thread(mddev->sync_thread);
5586 md_new_event(mddev);
5589 mddev_unlock(mddev);
5593 static int md_notify_reboot(struct notifier_block *this,
5594 unsigned long code, void *x)
5596 struct list_head *tmp;
5599 if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
5601 printk(KERN_INFO "md: stopping all md devices.\n");
5603 ITERATE_MDDEV(mddev,tmp)
5604 if (mddev_trylock(mddev)) {
5605 do_md_stop (mddev, 1);
5606 mddev_unlock(mddev);
5609 * certain more exotic SCSI devices are known to be
5610 * volatile wrt too early system reboots. While the
5611 * right place to handle this issue is the given
5612 * driver, we do want to have a safe RAID driver ...
5619 static struct notifier_block md_notifier = {
5620 .notifier_call = md_notify_reboot,
5622 .priority = INT_MAX, /* before any real devices */
5625 static void md_geninit(void)
5627 struct proc_dir_entry *p;
5629 dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
5631 p = create_proc_entry("mdstat", S_IRUGO, NULL);
5633 p->proc_fops = &md_seq_fops;
5636 static int __init md_init(void)
5638 if (register_blkdev(MAJOR_NR, "md"))
5640 if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
5641 unregister_blkdev(MAJOR_NR, "md");
5644 blk_register_region(MKDEV(MAJOR_NR, 0), 1UL<<MINORBITS, THIS_MODULE,
5645 md_probe, NULL, NULL);
5646 blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
5647 md_probe, NULL, NULL);
5649 register_reboot_notifier(&md_notifier);
5650 raid_table_header = register_sysctl_table(raid_root_table);
5660 * Searches all registered partitions for autorun RAID arrays
5663 static dev_t detected_devices[128];
5666 void md_autodetect_dev(dev_t dev)
5668 if (dev_cnt >= 0 && dev_cnt < 127)
5669 detected_devices[dev_cnt++] = dev;
5673 static void autostart_arrays(int part)
5678 printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
5680 for (i = 0; i < dev_cnt; i++) {
5681 dev_t dev = detected_devices[i];
5683 rdev = md_import_device(dev,0, 0);
5687 if (test_bit(Faulty, &rdev->flags)) {
5691 list_add(&rdev->same_set, &pending_raid_disks);
5695 autorun_devices(part);
5698 #endif /* !MODULE */
5700 static __exit void md_exit(void)
5703 struct list_head *tmp;
5705 blk_unregister_region(MKDEV(MAJOR_NR,0), 1U << MINORBITS);
5706 blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
5708 unregister_blkdev(MAJOR_NR,"md");
5709 unregister_blkdev(mdp_major, "mdp");
5710 unregister_reboot_notifier(&md_notifier);
5711 unregister_sysctl_table(raid_table_header);
5712 remove_proc_entry("mdstat", NULL);
5713 ITERATE_MDDEV(mddev,tmp) {
5714 struct gendisk *disk = mddev->gendisk;
5717 export_array(mddev);
5720 mddev->gendisk = NULL;
5725 module_init(md_init)
5726 module_exit(md_exit)
5728 static int get_ro(char *buffer, struct kernel_param *kp)
5730 return sprintf(buffer, "%d", start_readonly);
5732 static int set_ro(const char *val, struct kernel_param *kp)
5735 int num = simple_strtoul(val, &e, 10);
5736 if (*val && (*e == '\0' || *e == '\n')) {
5737 start_readonly = num;
5743 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
5744 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
5747 EXPORT_SYMBOL(register_md_personality);
5748 EXPORT_SYMBOL(unregister_md_personality);
5749 EXPORT_SYMBOL(md_error);
5750 EXPORT_SYMBOL(md_done_sync);
5751 EXPORT_SYMBOL(md_write_start);
5752 EXPORT_SYMBOL(md_write_end);
5753 EXPORT_SYMBOL(md_register_thread);
5754 EXPORT_SYMBOL(md_unregister_thread);
5755 EXPORT_SYMBOL(md_wakeup_thread);
5756 EXPORT_SYMBOL(md_check_recovery);
5757 MODULE_LICENSE("GPL");
5759 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);