[PATCH] md: add information about superblock version to /proc/mdstat
drivers/md/md.c
/*
   md.c : Multiple Devices driver for Linux
          Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/module.h>
#include <linux/config.h>
#include <linux/kthread.h>
#include <linux/linkage.h>
#include <linux/raid/md.h>
#include <linux/raid/bitmap.h>
#include <linux/sysctl.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/suspend.h>

#include <linux/init.h>

#include <linux/file.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

#include <asm/unaligned.h>

#define MAJOR_NR MD_MAJOR
#define MD_DRIVER

/* 63 partitions with the alternate major number (mdp) */
#define MdpMinorShift 6

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))


#ifndef MODULE
static void autostart_arrays (int part);
#endif

static mdk_personality_t *pers[MAX_PERSONALITY];
static DEFINE_SPINLOCK(pers_lock);

/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
 */
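
/* Illustrative example (shell, not part of this file): raise the
 * guaranteed floor to 5 MB/sec and cap reconstruction at 100 MB/sec;
 * both values are in KB/sec.
 *
 *	echo 5000   > /proc/sys/dev/raid/speed_limit_min
 *	echo 100000 > /proc/sys/dev/raid/speed_limit_max
 */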

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;

static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
        {
                .ctl_name       = DEV_RAID_SPEED_LIMIT_MIN,
                .procname       = "speed_limit_min",
                .data           = &sysctl_speed_limit_min,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
        {
                .ctl_name       = DEV_RAID_SPEED_LIMIT_MAX,
                .procname       = "speed_limit_max",
                .data           = &sysctl_speed_limit_max,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
        { .ctl_name = 0 }
};

static ctl_table raid_dir_table[] = {
        {
                .ctl_name       = DEV_RAID,
                .procname       = "raid",
                .maxlen         = 0,
                .mode           = 0555,
                .child          = raid_table,
        },
        { .ctl_name = 0 }
};

static ctl_table raid_root_table[] = {
        {
                .ctl_name       = CTL_DEV,
                .procname       = "dev",
                .maxlen         = 0,
                .mode           = 0555,
                .child          = raid_dir_table,
        },
        { .ctl_name = 0 }
};

static struct block_device_operations md_fops;

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while still owning a
 * reference to the current mddev must drop it with mddev_put().
 */
#define ITERATE_MDDEV(mddev,tmp)                                        \
                                                                        \
        for (({ spin_lock(&all_mddevs_lock);                            \
                tmp = all_mddevs.next;                                  \
                mddev = NULL;});                                        \
             ({ if (tmp != &all_mddevs)                                 \
                        mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
                spin_unlock(&all_mddevs_lock);                          \
                if (mddev) mddev_put(mddev);                            \
                mddev = list_entry(tmp, mddev_t, all_mddevs);           \
                tmp != &all_mddevs;});                                  \
             ({ spin_lock(&all_mddevs_lock);                            \
                tmp = tmp->next;})                                      \
                )
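
/* Usage sketch (illustrative): visit every array and bail out early;
 * on the break path the caller still holds the reference the macro
 * took, and must drop it explicitly.
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	ITERATE_MDDEV(mddev,tmp) {
 *		if (mddev->raid_disks == 0) {	// any early-exit condition
 *			mddev_put(mddev);
 *			break;
 *		}
 *	}
 */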


static int md_fail_request (request_queue_t *q, struct bio *bio)
{
        bio_io_error(bio, bio->bi_size);
        return 0;
}

static inline mddev_t *mddev_get(mddev_t *mddev)
{
        atomic_inc(&mddev->active);
        return mddev;
}

static void mddev_put(mddev_t *mddev)
{
        if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
                return;
        if (!mddev->raid_disks && list_empty(&mddev->disks)) {
                list_del(&mddev->all_mddevs);
                blk_put_queue(mddev->queue);
                kfree(mddev);
        }
        spin_unlock(&all_mddevs_lock);
}

static mddev_t * mddev_find(dev_t unit)
{
        mddev_t *mddev, *new = NULL;

 retry:
        spin_lock(&all_mddevs_lock);
        list_for_each_entry(mddev, &all_mddevs, all_mddevs)
                if (mddev->unit == unit) {
                        mddev_get(mddev);
                        spin_unlock(&all_mddevs_lock);
                        kfree(new);
                        return mddev;
                }

        if (new) {
                list_add(&new->all_mddevs, &all_mddevs);
                spin_unlock(&all_mddevs_lock);
                return new;
        }
        spin_unlock(&all_mddevs_lock);

        new = (mddev_t *) kmalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return NULL;

        memset(new, 0, sizeof(*new));

        new->unit = unit;
        if (MAJOR(unit) == MD_MAJOR)
                new->md_minor = MINOR(unit);
        else
                new->md_minor = MINOR(unit) >> MdpMinorShift;

        init_MUTEX(&new->reconfig_sem);
        INIT_LIST_HEAD(&new->disks);
        INIT_LIST_HEAD(&new->all_mddevs);
        init_timer(&new->safemode_timer);
        atomic_set(&new->active, 1);
        spin_lock_init(&new->write_lock);
        init_waitqueue_head(&new->sb_wait);

        new->queue = blk_alloc_queue(GFP_KERNEL);
        if (!new->queue) {
                kfree(new);
                return NULL;
        }

        blk_queue_make_request(new->queue, md_fail_request);

        goto retry;
}

static inline int mddev_lock(mddev_t * mddev)
{
        return down_interruptible(&mddev->reconfig_sem);
}

static inline void mddev_lock_uninterruptible(mddev_t * mddev)
{
        down(&mddev->reconfig_sem);
}

static inline int mddev_trylock(mddev_t * mddev)
{
        return down_trylock(&mddev->reconfig_sem);
}

static inline void mddev_unlock(mddev_t * mddev)
{
        up(&mddev->reconfig_sem);

        md_wakeup_thread(mddev->thread);
}

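/* Typical caller pattern (illustrative): take the reconfiguration
 * semaphore, mutate the array, then unlock, which also wakes the md
 * thread.  mddev_lock() returns nonzero if interrupted by a signal.
 *
 *	if (mddev_lock(mddev))
 *		return -EINTR;
 *	...modify mddev state...
 *	mddev_unlock(mddev);
 */
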
mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
        mdk_rdev_t * rdev;
        struct list_head *tmp;

        ITERATE_RDEV(mddev,rdev,tmp) {
                if (rdev->desc_nr == nr)
                        return rdev;
        }
        return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
        struct list_head *tmp;
        mdk_rdev_t *rdev;

        ITERATE_RDEV(mddev,rdev,tmp) {
                if (rdev->bdev->bd_dev == dev)
                        return rdev;
        }
        return NULL;
}

static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
        sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
        return MD_NEW_SIZE_BLOCKS(size);
}

static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
{
        sector_t size;

        size = rdev->sb_offset;

        if (chunk_size)
                size &= ~((sector_t)chunk_size/1024 - 1);
        return size;
}

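/* Worked example (illustrative): with rdev->sb_offset == 1000005 (in
 * 1K blocks) and chunk_size == 64*1024 bytes, the mask is ~(64 - 1),
 * so 1000005 & ~63 == 1000000: the usable size is rounded down to a
 * whole number of 64K chunks.
 */
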
static int alloc_disk_sb(mdk_rdev_t * rdev)
{
        if (rdev->sb_page)
                MD_BUG();

        rdev->sb_page = alloc_page(GFP_KERNEL);
        if (!rdev->sb_page) {
                printk(KERN_ALERT "md: out of memory.\n");
                return -EINVAL;
        }

        return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
        if (rdev->sb_page) {
                page_cache_release(rdev->sb_page);
                rdev->sb_loaded = 0;
                rdev->sb_page = NULL;
                rdev->sb_offset = 0;
                rdev->size = 0;
        }
}


static int super_written(struct bio *bio, unsigned int bytes_done, int error)
{
        mdk_rdev_t *rdev = bio->bi_private;
        if (bio->bi_size)
                return 1;

        if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags))
                md_error(rdev->mddev, rdev);

        if (atomic_dec_and_test(&rdev->mddev->pending_writes))
                wake_up(&rdev->mddev->sb_wait);
        bio_put(bio);
        return 0;
}

void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
                   sector_t sector, int size, struct page *page)
{
        /* write first size bytes of page to sector of rdev
         * Increment mddev->pending_writes before returning
         * and decrement it on completion, waking up sb_wait
         * if zero is reached.
         * If an error occurred, call md_error
         */
        struct bio *bio = bio_alloc(GFP_NOIO, 1);

        bio->bi_bdev = rdev->bdev;
        bio->bi_sector = sector;
        bio_add_page(bio, page, size, 0);
        bio->bi_private = rdev;
        bio->bi_end_io = super_written;
        atomic_inc(&mddev->pending_writes);
        submit_bio((1<<BIO_RW)|(1<<BIO_RW_SYNC), bio);
}

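/* Caller pattern (illustrative): queue a superblock write per device,
 * then wait for the pending count to drain, as md_update_sb() does
 * further down in this file.
 *
 *	md_super_write(mddev, rdev, rdev->sb_offset<<1,
 *		       rdev->sb_size, rdev->sb_page);
 *	wait_event(mddev->sb_wait,
 *		   atomic_read(&mddev->pending_writes)==0);
 */
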
static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
{
        if (bio->bi_size)
                return 1;

        complete((struct completion*)bio->bi_private);
        return 0;
}

int sync_page_io(struct block_device *bdev, sector_t sector, int size,
                   struct page *page, int rw)
{
        struct bio *bio = bio_alloc(GFP_NOIO, 1);
        struct completion event;
        int ret;

        rw |= (1 << BIO_RW_SYNC);

        bio->bi_bdev = bdev;
        bio->bi_sector = sector;
        bio_add_page(bio, page, size, 0);
        init_completion(&event);
        bio->bi_private = &event;
        bio->bi_end_io = bi_complete;
        submit_bio(rw, bio);
        wait_for_completion(&event);

        ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_put(bio);
        return ret;
}

static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
        char b[BDEVNAME_SIZE];
        if (!rdev->sb_page) {
                MD_BUG();
                return -EINVAL;
        }
        if (rdev->sb_loaded)
                return 0;


        if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
                goto fail;
        rdev->sb_loaded = 1;
        return 0;

fail:
        printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
                bdevname(rdev->bdev,b));
        return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
        if (    (sb1->set_uuid0 == sb2->set_uuid0) &&
                (sb1->set_uuid1 == sb2->set_uuid1) &&
                (sb1->set_uuid2 == sb2->set_uuid2) &&
                (sb1->set_uuid3 == sb2->set_uuid3))

                return 1;

        return 0;
}


static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
        int ret;
        mdp_super_t *tmp1, *tmp2;

        tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
        tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

        if (!tmp1 || !tmp2) {
                /* allocation failed: conservatively report a mismatch */
                ret = 0;
                printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n");
                goto abort;
        }

        *tmp1 = *sb1;
        *tmp2 = *sb2;

        /*
         * nr_disks is not constant
         */
        tmp1->nr_disks = 0;
        tmp2->nr_disks = 0;

        if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
                ret = 0;
        else
                ret = 1;

abort:
        kfree(tmp1);
        kfree(tmp2);
        return ret;
}

static unsigned int calc_sb_csum(mdp_super_t * sb)
{
        unsigned int disk_csum, csum;

        disk_csum = sb->sb_csum;
        sb->sb_csum = 0;
        csum = csum_partial((void *)sb, MD_SB_BYTES, 0);
        sb->sb_csum = disk_csum;
        return csum;
}


/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type {
        char            *name;
        struct module   *owner;
        int             (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
        int             (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
        void            (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
};

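/* Dispatch sketch (illustrative): handlers are selected by the array's
 * major superblock version, e.g. as analyze_sbs() does below:
 *
 *	err = super_types[mddev->major_version].
 *		load_super(rdev, refdev, mddev->minor_version);
 */
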
/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
        mdp_super_t *sb;
        int ret;
        sector_t sb_offset;

        /*
         * Calculate the position of the superblock,
         * it's at the end of the disk.
         *
         * It also happens to be a multiple of 4Kb.
         */
        sb_offset = calc_dev_sboffset(rdev->bdev);
        rdev->sb_offset = sb_offset;

        ret = read_disk_sb(rdev, MD_SB_BYTES);
        if (ret) return ret;

        ret = -EINVAL;

        bdevname(rdev->bdev, b);
        sb = (mdp_super_t*)page_address(rdev->sb_page);

        if (sb->md_magic != MD_SB_MAGIC) {
                printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
                       b);
                goto abort;
        }

        if (sb->major_version != 0 ||
            sb->minor_version != 90) {
                printk(KERN_WARNING "Bad version number %d.%d on %s\n",
                        sb->major_version, sb->minor_version,
                        b);
                goto abort;
        }

        if (sb->raid_disks <= 0)
                goto abort;

        if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) {
                printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
                        b);
                goto abort;
        }

        rdev->preferred_minor = sb->md_minor;
        rdev->data_offset = 0;
        rdev->sb_size = MD_SB_BYTES;

        if (sb->level == LEVEL_MULTIPATH)
                rdev->desc_nr = -1;
        else
                rdev->desc_nr = sb->this_disk.number;

        if (refdev == 0)
                ret = 1;
        else {
                __u64 ev1, ev2;
                mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
                if (!uuid_equal(refsb, sb)) {
                        printk(KERN_WARNING "md: %s has different UUID to %s\n",
                                b, bdevname(refdev->bdev,b2));
                        goto abort;
                }
                if (!sb_equal(refsb, sb)) {
                        printk(KERN_WARNING "md: %s has same UUID"
                               " but different superblock to %s\n",
                               b, bdevname(refdev->bdev, b2));
                        goto abort;
                }
                ev1 = md_event(sb);
                ev2 = md_event(refsb);
                if (ev1 > ev2)
                        ret = 1;
                else
                        ret = 0;
        }
        rdev->size = calc_dev_size(rdev, sb->chunk_size);

 abort:
        return ret;
}

/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
        mdp_disk_t *desc;
        mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);

        rdev->raid_disk = -1;
        rdev->in_sync = 0;
        if (mddev->raid_disks == 0) {
                mddev->major_version = 0;
                mddev->minor_version = sb->minor_version;
                mddev->patch_version = sb->patch_version;
                mddev->persistent = ! sb->not_persistent;
                mddev->chunk_size = sb->chunk_size;
                mddev->ctime = sb->ctime;
                mddev->utime = sb->utime;
                mddev->level = sb->level;
                mddev->layout = sb->layout;
                mddev->raid_disks = sb->raid_disks;
                mddev->size = sb->size;
                mddev->events = md_event(sb);
                mddev->bitmap_offset = 0;
                mddev->default_bitmap_offset = MD_SB_BYTES >> 9;

                if (sb->state & (1<<MD_SB_CLEAN))
                        mddev->recovery_cp = MaxSector;
                else {
                        if (sb->events_hi == sb->cp_events_hi &&
                                sb->events_lo == sb->cp_events_lo) {
                                mddev->recovery_cp = sb->recovery_cp;
                        } else
                                mddev->recovery_cp = 0;
                }

                memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
                memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
                memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
                memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

                mddev->max_disks = MD_SB_DISKS;

                if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
                    mddev->bitmap_file == NULL) {
                        if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6) {
                                /* FIXME use a better test */
                                printk(KERN_WARNING "md: bitmaps only supported for raid1\n");
                                return -EINVAL;
                        }
                        mddev->bitmap_offset = mddev->default_bitmap_offset;
                }

        } else if (mddev->pers == NULL) {
                /* Insist on good event counter while assembling */
                __u64 ev1 = md_event(sb);
                ++ev1;
                if (ev1 < mddev->events)
                        return -EINVAL;
        } else if (mddev->bitmap) {
                /* if adding to array with a bitmap, then we can accept an
                 * older device ... but not too old.
                 */
                __u64 ev1 = md_event(sb);
                if (ev1 < mddev->bitmap->events_cleared)
                        return 0;
        } else /* just a hot-add of a new device, leave raid_disk at -1 */
                return 0;

        if (mddev->level != LEVEL_MULTIPATH) {
                rdev->faulty = 0;
                rdev->flags = 0;
                desc = sb->disks + rdev->desc_nr;

                if (desc->state & (1<<MD_DISK_FAULTY))
                        rdev->faulty = 1;
                else if (desc->state & (1<<MD_DISK_SYNC) &&
                         desc->raid_disk < mddev->raid_disks) {
                        rdev->in_sync = 1;
                        rdev->raid_disk = desc->raid_disk;
                }
                if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
                        set_bit(WriteMostly, &rdev->flags);
        } else /* MULTIPATH are always insync */
                rdev->in_sync = 1;
        return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
        mdp_super_t *sb;
        struct list_head *tmp;
        mdk_rdev_t *rdev2;
        int next_spare = mddev->raid_disks;

        /* make rdev->sb match mddev data..
         *
         * 1/ zero out disks
         * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
         * 3/ any empty disks < next_spare become removed
         *
         * disks[0] gets initialised to REMOVED because
         * we cannot be sure from other fields if it has
         * been initialised or not.
         */
        int i;
        int active=0, working=0,failed=0,spare=0,nr_disks=0;

        sb = (mdp_super_t*)page_address(rdev->sb_page);

        memset(sb, 0, sizeof(*sb));

        sb->md_magic = MD_SB_MAGIC;
        sb->major_version = mddev->major_version;
        sb->minor_version = mddev->minor_version;
        sb->patch_version = mddev->patch_version;
        sb->gvalid_words  = 0; /* ignored */
        memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
        memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
        memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
        memcpy(&sb->set_uuid3, mddev->uuid+12,4);

        sb->ctime = mddev->ctime;
        sb->level = mddev->level;
        sb->size  = mddev->size;
        sb->raid_disks = mddev->raid_disks;
        sb->md_minor = mddev->md_minor;
        sb->not_persistent = !mddev->persistent;
        sb->utime = mddev->utime;
        sb->state = 0;
        sb->events_hi = (mddev->events>>32);
        sb->events_lo = (u32)mddev->events;

        if (mddev->in_sync)
        {
                sb->recovery_cp = mddev->recovery_cp;
                sb->cp_events_hi = (mddev->events>>32);
                sb->cp_events_lo = (u32)mddev->events;
                if (mddev->recovery_cp == MaxSector)
                        sb->state = (1<< MD_SB_CLEAN);
        } else
                sb->recovery_cp = 0;

        sb->layout = mddev->layout;
        sb->chunk_size = mddev->chunk_size;

        if (mddev->bitmap && mddev->bitmap_file == NULL)
                sb->state |= (1<<MD_SB_BITMAP_PRESENT);

        sb->disks[0].state = (1<<MD_DISK_REMOVED);
        ITERATE_RDEV(mddev,rdev2,tmp) {
                mdp_disk_t *d;
                if (rdev2->raid_disk >= 0 && rdev2->in_sync && !rdev2->faulty)
                        rdev2->desc_nr = rdev2->raid_disk;
                else
                        rdev2->desc_nr = next_spare++;
                d = &sb->disks[rdev2->desc_nr];
                nr_disks++;
                d->number = rdev2->desc_nr;
                d->major = MAJOR(rdev2->bdev->bd_dev);
                d->minor = MINOR(rdev2->bdev->bd_dev);
                if (rdev2->raid_disk >= 0 && rdev2->in_sync && !rdev2->faulty)
                        d->raid_disk = rdev2->raid_disk;
                else
                        d->raid_disk = rdev2->desc_nr; /* compatibility */
                if (rdev2->faulty) {
                        d->state = (1<<MD_DISK_FAULTY);
                        failed++;
                } else if (rdev2->in_sync) {
                        d->state = (1<<MD_DISK_ACTIVE);
                        d->state |= (1<<MD_DISK_SYNC);
                        active++;
                        working++;
                } else {
                        d->state = 0;
                        spare++;
                        working++;
                }
                if (test_bit(WriteMostly, &rdev2->flags))
                        d->state |= (1<<MD_DISK_WRITEMOSTLY);
        }

        /* now set the "removed" and "faulty" bits on any missing devices */
        for (i=0 ; i < mddev->raid_disks ; i++) {
                mdp_disk_t *d = &sb->disks[i];
                if (d->state == 0 && d->number == 0) {
                        d->number = i;
                        d->raid_disk = i;
                        d->state = (1<<MD_DISK_REMOVED);
                        d->state |= (1<<MD_DISK_FAULTY);
                        failed++;
                }
        }
        sb->nr_disks = nr_disks;
        sb->active_disks = active;
        sb->working_disks = working;
        sb->failed_disks = failed;
        sb->spare_disks = spare;

        sb->this_disk = sb->disks[rdev->desc_nr];
        sb->sb_csum = calc_sb_csum(sb);
}

/*
 * version 1 superblock
 */

static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
        unsigned int disk_csum, csum;
        unsigned long long newcsum;
        int size = 256 + le32_to_cpu(sb->max_dev)*2;
        unsigned int *isuper = (unsigned int*)sb;
        int i;

        disk_csum = sb->sb_csum;
        sb->sb_csum = 0;
        newcsum = 0;
        for (i=0; size>=4; size -= 4 )
                newcsum += le32_to_cpu(*isuper++);

        if (size == 2)
                newcsum += le16_to_cpu(*(unsigned short*) isuper);

        csum = (newcsum & 0xffffffff) + (newcsum >> 32);
        sb->sb_csum = disk_csum;
        return cpu_to_le32(csum);
}

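/* Worked example (illustrative): the checksummed area is 256 bytes of
 * fixed header plus two bytes per device role, so with max_dev == 384
 * the loop above sums size = 256 + 384*2 = 1024 bytes, then folds the
 * 64-bit running sum back into 32 bits.
 */
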
static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
        struct mdp_superblock_1 *sb;
        int ret;
        sector_t sb_offset;
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
        int bmask;

        /*
         * Calculate the position of the superblock.
         * It is always aligned to a 4K boundary and
         * depending on minor_version, it can be:
         * 0: At least 8K, but less than 12K, from end of device
         * 1: At start of device
         * 2: 4K from start of device.
         */
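        /* Worked example (illustrative): a 10 GiB device has 20971520
         * sectors, so for minor_version 0 the code below computes
         * sb_offset = ((20971520 - 16) & ~7) / 2 = 10485752, placing
         * the superblock 8K before the end of the device (1K units).
         */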
        switch(minor_version) {
        case 0:
                sb_offset = rdev->bdev->bd_inode->i_size >> 9;
                sb_offset -= 8*2;
                sb_offset &= ~(sector_t)(4*2-1);
                /* convert from sectors to K */
                sb_offset /= 2;
                break;
        case 1:
                sb_offset = 0;
                break;
        case 2:
                sb_offset = 4;
                break;
        default:
                return -EINVAL;
        }
        rdev->sb_offset = sb_offset;

        /* superblock is rarely larger than 1K, but it can be larger,
         * and it is safe to read 4k, so we do that
         */
        ret = read_disk_sb(rdev, 4096);
        if (ret) return ret;


        sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

        if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
            sb->major_version != cpu_to_le32(1) ||
            le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
            le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
            (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
                return -EINVAL;

        if (calc_sb_1_csum(sb) != sb->sb_csum) {
                printk("md: invalid superblock checksum on %s\n",
                        bdevname(rdev->bdev,b));
                return -EINVAL;
        }
        if (le64_to_cpu(sb->data_size) < 10) {
                printk("md: data_size too small on %s\n",
                       bdevname(rdev->bdev,b));
                return -EINVAL;
        }
        rdev->preferred_minor = 0xffff;
        rdev->data_offset = le64_to_cpu(sb->data_offset);

        rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
        bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
        if (rdev->sb_size & bmask)
                rdev->sb_size = (rdev->sb_size | bmask)+1;

        if (refdev == 0)
                return 1;
        else {
                __u64 ev1, ev2;
                struct mdp_superblock_1 *refsb =
                        (struct mdp_superblock_1*)page_address(refdev->sb_page);

                if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
                    sb->level != refsb->level ||
                    sb->layout != refsb->layout ||
                    sb->chunksize != refsb->chunksize) {
                        printk(KERN_WARNING "md: %s has strangely different"
                                " superblock to %s\n",
                                bdevname(rdev->bdev,b),
                                bdevname(refdev->bdev,b2));
                        return -EINVAL;
                }
                ev1 = le64_to_cpu(sb->events);
                ev2 = le64_to_cpu(refsb->events);

                if (ev1 > ev2)
                        return 1;
        }
        if (minor_version)
                rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
        else
                rdev->size = rdev->sb_offset;
        if (rdev->size < le64_to_cpu(sb->data_size)/2)
                return -EINVAL;
        rdev->size = le64_to_cpu(sb->data_size)/2;
        if (le32_to_cpu(sb->chunksize))
                rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
        return 0;
}

static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
        struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

        rdev->raid_disk = -1;
        rdev->in_sync = 0;
        if (mddev->raid_disks == 0) {
                mddev->major_version = 1;
                mddev->patch_version = 0;
                mddev->persistent = 1;
                mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
                mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
                mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
                mddev->level = le32_to_cpu(sb->level);
                mddev->layout = le32_to_cpu(sb->layout);
                mddev->raid_disks = le32_to_cpu(sb->raid_disks);
                mddev->size = le64_to_cpu(sb->size)/2;
                mddev->events = le64_to_cpu(sb->events);
                mddev->bitmap_offset = 0;
                mddev->default_bitmap_offset = 1024;

                mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
                memcpy(mddev->uuid, sb->set_uuid, 16);

                mddev->max_disks = (4096-256)/2;

                if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
                    mddev->bitmap_file == NULL ) {
                        if (mddev->level != 1) {
                                printk(KERN_WARNING "md: bitmaps only supported for raid1\n");
                                return -EINVAL;
                        }
                        mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
                }
        } else if (mddev->pers == NULL) {
                /* Insist on good event counter while assembling */
                __u64 ev1 = le64_to_cpu(sb->events);
                ++ev1;
                if (ev1 < mddev->events)
                        return -EINVAL;
        } else if (mddev->bitmap) {
                /* If adding to array with a bitmap, then we can accept an
                 * older device, but not too old.
                 */
                __u64 ev1 = le64_to_cpu(sb->events);
                if (ev1 < mddev->bitmap->events_cleared)
                        return 0;
        } else /* just a hot-add of a new device, leave raid_disk at -1 */
                return 0;

        if (mddev->level != LEVEL_MULTIPATH) {
                int role;
                rdev->desc_nr = le32_to_cpu(sb->dev_number);
                role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
                switch(role) {
                case 0xffff: /* spare */
                        rdev->faulty = 0;
                        break;
                case 0xfffe: /* faulty */
                        rdev->faulty = 1;
                        break;
                default:
                        rdev->in_sync = 1;
                        rdev->faulty = 0;
                        rdev->raid_disk = role;
                        break;
                }
                rdev->flags = 0;
                if (sb->devflags & WriteMostly1)
                        set_bit(WriteMostly, &rdev->flags);
        } else /* MULTIPATH are always insync */
                rdev->in_sync = 1;

        return 0;
}

static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
        struct mdp_superblock_1 *sb;
        struct list_head *tmp;
        mdk_rdev_t *rdev2;
        int max_dev, i;
        /* make rdev->sb match mddev and rdev data. */

        sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

        sb->feature_map = 0;
        sb->pad0 = 0;
        memset(sb->pad1, 0, sizeof(sb->pad1));
        memset(sb->pad2, 0, sizeof(sb->pad2));
        memset(sb->pad3, 0, sizeof(sb->pad3));

        sb->utime = cpu_to_le64((__u64)mddev->utime);
        sb->events = cpu_to_le64(mddev->events);
        if (mddev->in_sync)
                sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
        else
                sb->resync_offset = cpu_to_le64(0);

        if (mddev->bitmap && mddev->bitmap_file == NULL) {
                sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
                sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
        }

        max_dev = 0;
        ITERATE_RDEV(mddev,rdev2,tmp)
                if (rdev2->desc_nr+1 > max_dev)
                        max_dev = rdev2->desc_nr+1;

        sb->max_dev = cpu_to_le32(max_dev);
        for (i=0; i<max_dev;i++)
                sb->dev_roles[i] = cpu_to_le16(0xfffe);

        ITERATE_RDEV(mddev,rdev2,tmp) {
                i = rdev2->desc_nr;
                if (rdev2->faulty)
                        sb->dev_roles[i] = cpu_to_le16(0xfffe);
                else if (rdev2->in_sync)
                        sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
                else
                        sb->dev_roles[i] = cpu_to_le16(0xffff);
        }

        sb->recovery_offset = cpu_to_le64(0); /* not supported yet */
        sb->sb_csum = calc_sb_1_csum(sb);
}


static struct super_type super_types[] = {
        [0] = {
                .name   = "0.90.0",
                .owner  = THIS_MODULE,
                .load_super     = super_90_load,
                .validate_super = super_90_validate,
                .sync_super     = super_90_sync,
        },
        [1] = {
                .name   = "md-1",
                .owner  = THIS_MODULE,
                .load_super     = super_1_load,
                .validate_super = super_1_validate,
                .sync_super     = super_1_sync,
        },
};

static mdk_rdev_t * match_dev_unit(mddev_t *mddev, mdk_rdev_t *dev)
{
        struct list_head *tmp;
        mdk_rdev_t *rdev;

        ITERATE_RDEV(mddev,rdev,tmp)
                if (rdev->bdev->bd_contains == dev->bdev->bd_contains)
                        return rdev;

        return NULL;
}

static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
        struct list_head *tmp;
        mdk_rdev_t *rdev;

        ITERATE_RDEV(mddev1,rdev,tmp)
                if (match_dev_unit(mddev2, rdev))
                        return 1;

        return 0;
}

static LIST_HEAD(pending_raid_disks);

static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
        mdk_rdev_t *same_pdev;
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];

        if (rdev->mddev) {
                MD_BUG();
                return -EINVAL;
        }
        same_pdev = match_dev_unit(mddev, rdev);
        if (same_pdev)
                printk(KERN_WARNING
                        "%s: WARNING: %s appears to be on the same physical"
                        " disk as %s. True\n     protection against single-disk"
                        " failure might be compromised.\n",
                        mdname(mddev), bdevname(rdev->bdev,b),
                        bdevname(same_pdev->bdev,b2));

        /* Verify rdev->desc_nr is unique.
         * If it is -1, assign a free number, else
         * check number is not in use
         */
        if (rdev->desc_nr < 0) {
                int choice = 0;
                if (mddev->pers) choice = mddev->raid_disks;
                while (find_rdev_nr(mddev, choice))
                        choice++;
                rdev->desc_nr = choice;
        } else {
                if (find_rdev_nr(mddev, rdev->desc_nr))
                        return -EBUSY;
        }

        list_add(&rdev->same_set, &mddev->disks);
        rdev->mddev = mddev;
        printk(KERN_INFO "md: bind<%s>\n", bdevname(rdev->bdev,b));
        return 0;
}

static void unbind_rdev_from_array(mdk_rdev_t * rdev)
{
        char b[BDEVNAME_SIZE];
        if (!rdev->mddev) {
                MD_BUG();
                return;
        }
        list_del_init(&rdev->same_set);
        printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
        rdev->mddev = NULL;
}

/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
{
        int err = 0;
        struct block_device *bdev;
        char b[BDEVNAME_SIZE];

        bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
        if (IS_ERR(bdev)) {
                printk(KERN_ERR "md: could not open %s.\n",
                        __bdevname(dev, b));
                return PTR_ERR(bdev);
        }
        err = bd_claim(bdev, rdev);
        if (err) {
                printk(KERN_ERR "md: could not bd_claim %s.\n",
                        bdevname(bdev, b));
                blkdev_put(bdev);
                return err;
        }
        rdev->bdev = bdev;
        return err;
}

static void unlock_rdev(mdk_rdev_t *rdev)
{
        struct block_device *bdev = rdev->bdev;
        rdev->bdev = NULL;
        if (!bdev)
                MD_BUG();
        bd_release(bdev);
        blkdev_put(bdev);
}

void md_autodetect_dev(dev_t dev);

static void export_rdev(mdk_rdev_t * rdev)
{
        char b[BDEVNAME_SIZE];
        printk(KERN_INFO "md: export_rdev(%s)\n",
                bdevname(rdev->bdev,b));
        if (rdev->mddev)
                MD_BUG();
        free_disk_sb(rdev);
        list_del_init(&rdev->same_set);
#ifndef MODULE
        md_autodetect_dev(rdev->bdev->bd_dev);
#endif
        unlock_rdev(rdev);
        kfree(rdev);
}

static void kick_rdev_from_array(mdk_rdev_t * rdev)
{
        unbind_rdev_from_array(rdev);
        export_rdev(rdev);
}

static void export_array(mddev_t *mddev)
{
        struct list_head *tmp;
        mdk_rdev_t *rdev;

        ITERATE_RDEV(mddev,rdev,tmp) {
                if (!rdev->mddev) {
                        MD_BUG();
                        continue;
                }
                kick_rdev_from_array(rdev);
        }
        if (!list_empty(&mddev->disks))
                MD_BUG();
        mddev->raid_disks = 0;
        mddev->major_version = 0;
}

static void print_desc(mdp_disk_t *desc)
{
        printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
                desc->major,desc->minor,desc->raid_disk,desc->state);
}

static void print_sb(mdp_super_t *sb)
{
        int i;

        printk(KERN_INFO
                "md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
                sb->major_version, sb->minor_version, sb->patch_version,
                sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
                sb->ctime);
        printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
                sb->level, sb->size, sb->nr_disks, sb->raid_disks,
                sb->md_minor, sb->layout, sb->chunk_size);
        printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
                " FD:%d SD:%d CSUM:%08x E:%08lx\n",
                sb->utime, sb->state, sb->active_disks, sb->working_disks,
                sb->failed_disks, sb->spare_disks,
                sb->sb_csum, (unsigned long)sb->events_lo);

        printk(KERN_INFO);
        for (i = 0; i < MD_SB_DISKS; i++) {
                mdp_disk_t *desc;

                desc = sb->disks + i;
                if (desc->number || desc->major || desc->minor ||
                    desc->raid_disk || (desc->state && (desc->state != 4))) {
                        printk("     D %2d: ", i);
                        print_desc(desc);
                }
        }
        printk(KERN_INFO "md:     THIS: ");
        print_desc(&sb->this_disk);

}

static void print_rdev(mdk_rdev_t *rdev)
{
        char b[BDEVNAME_SIZE];
        printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
                bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
                rdev->faulty, rdev->in_sync, rdev->desc_nr);
        if (rdev->sb_loaded) {
                printk(KERN_INFO "md: rdev superblock:\n");
                print_sb((mdp_super_t*)page_address(rdev->sb_page));
        } else
                printk(KERN_INFO "md: no rdev superblock!\n");
}

void md_print_devices(void)
{
        struct list_head *tmp, *tmp2;
        mdk_rdev_t *rdev;
        mddev_t *mddev;
        char b[BDEVNAME_SIZE];

        printk("\n");
        printk("md:     **********************************\n");
        printk("md:     * <COMPLETE RAID STATE PRINTOUT> *\n");
        printk("md:     **********************************\n");
        ITERATE_MDDEV(mddev,tmp) {

                if (mddev->bitmap)
                        bitmap_print_sb(mddev->bitmap);
                else
                        printk("%s: ", mdname(mddev));
                ITERATE_RDEV(mddev,rdev,tmp2)
                        printk("<%s>", bdevname(rdev->bdev,b));
                printk("\n");

                ITERATE_RDEV(mddev,rdev,tmp2)
                        print_rdev(rdev);
        }
        printk("md:     **********************************\n");
        printk("\n");
}


static void sync_sbs(mddev_t * mddev)
{
        mdk_rdev_t *rdev;
        struct list_head *tmp;

        ITERATE_RDEV(mddev,rdev,tmp) {
                super_types[mddev->major_version].
                        sync_super(mddev, rdev);
                rdev->sb_loaded = 1;
        }
}

static void md_update_sb(mddev_t * mddev)
{
        int err;
        struct list_head *tmp;
        mdk_rdev_t *rdev;
        int sync_req;

repeat:
        spin_lock(&mddev->write_lock);
        sync_req = mddev->in_sync;
        mddev->utime = get_seconds();
        mddev->events ++;

        if (!mddev->events) {
                /*
                 * oops, this 64-bit counter should never wrap.
                 * Either we are in around ~1 trillion A.C., assuming
                 * 1 reboot per second, or we have a bug:
                 */
                MD_BUG();
                mddev->events --;
        }
        mddev->sb_dirty = 2;
        sync_sbs(mddev);

        /*
         * do not write anything to disk if using
         * nonpersistent superblocks
         */
        if (!mddev->persistent) {
                mddev->sb_dirty = 0;
                spin_unlock(&mddev->write_lock);
                wake_up(&mddev->sb_wait);
                return;
        }
        spin_unlock(&mddev->write_lock);

        dprintk(KERN_INFO
                "md: updating %s RAID superblock on device (in sync %d)\n",
                mdname(mddev),mddev->in_sync);

        err = bitmap_update_sb(mddev->bitmap);
        ITERATE_RDEV(mddev,rdev,tmp) {
                char b[BDEVNAME_SIZE];
                dprintk(KERN_INFO "md: ");
                if (rdev->faulty)
                        dprintk("(skipping faulty ");

                dprintk("%s ", bdevname(rdev->bdev,b));
                if (!rdev->faulty) {
                        md_super_write(mddev,rdev,
                                       rdev->sb_offset<<1, rdev->sb_size,
                                       rdev->sb_page);
                        dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
                                bdevname(rdev->bdev,b),
                                (unsigned long long)rdev->sb_offset);

                } else
                        dprintk(")\n");
                if (mddev->level == LEVEL_MULTIPATH)
                        /* only need to write one superblock... */
                        break;
        }
        wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
        /* if there was a failure, sb_dirty was set to 1, and we re-write super */

        spin_lock(&mddev->write_lock);
        if (mddev->in_sync != sync_req || mddev->sb_dirty == 1) {
                /* have to write it out again */
                spin_unlock(&mddev->write_lock);
                goto repeat;
        }
        mddev->sb_dirty = 0;
        spin_unlock(&mddev->write_lock);
        wake_up(&mddev->sb_wait);

}

/*
 * Import a device. If 'super_format' >= 0, then sanity check the superblock
 *
 * mark the device faulty if:
 *
 *   - the device is nonexistent (zero size)
 *   - the device has no valid superblock
 *
 * a faulty rdev _never_ has rdev->sb set.
 */
static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
{
        char b[BDEVNAME_SIZE];
        int err;
        mdk_rdev_t *rdev;
        sector_t size;

        rdev = (mdk_rdev_t *) kmalloc(sizeof(*rdev), GFP_KERNEL);
        if (!rdev) {
                printk(KERN_ERR "md: could not alloc mem for new device!\n");
                return ERR_PTR(-ENOMEM);
        }
        memset(rdev, 0, sizeof(*rdev));

        if ((err = alloc_disk_sb(rdev)))
                goto abort_free;

        err = lock_rdev(rdev, newdev);
        if (err)
                goto abort_free;

        rdev->desc_nr = -1;
        rdev->faulty = 0;
        rdev->in_sync = 0;
        rdev->data_offset = 0;
        atomic_set(&rdev->nr_pending, 0);

        size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
        if (!size) {
                printk(KERN_WARNING
                        "md: %s has zero or unknown size, marking faulty!\n",
                        bdevname(rdev->bdev,b));
                err = -EINVAL;
                goto abort_free;
        }

        if (super_format >= 0) {
                err = super_types[super_format].
                        load_super(rdev, NULL, super_minor);
                if (err == -EINVAL) {
                        printk(KERN_WARNING
                                "md: %s has invalid sb, not importing!\n",
                                bdevname(rdev->bdev,b));
                        goto abort_free;
                }
                if (err < 0) {
                        printk(KERN_WARNING
                                "md: could not read %s's sb, not importing!\n",
                                bdevname(rdev->bdev,b));
                        goto abort_free;
                }
        }
        INIT_LIST_HEAD(&rdev->same_set);

        return rdev;

abort_free:
        if (rdev->sb_page) {
                if (rdev->bdev)
                        unlock_rdev(rdev);
                free_disk_sb(rdev);
        }
        kfree(rdev);
        return ERR_PTR(err);
}

/*
 * Check a full RAID array for plausibility
 */


static void analyze_sbs(mddev_t * mddev)
{
        int i;
        struct list_head *tmp;
        mdk_rdev_t *rdev, *freshest;
        char b[BDEVNAME_SIZE];

        freshest = NULL;
        ITERATE_RDEV(mddev,rdev,tmp)
                switch (super_types[mddev->major_version].
                        load_super(rdev, freshest, mddev->minor_version)) {
                case 1:
                        freshest = rdev;
                        break;
                case 0:
                        break;
                default:
                        printk(KERN_ERR
                                "md: fatal superblock inconsistency in %s"
                                " -- removing from array\n",
                                bdevname(rdev->bdev,b));
                        kick_rdev_from_array(rdev);
                }


        super_types[mddev->major_version].
                validate_super(mddev, freshest);

        i = 0;
        ITERATE_RDEV(mddev,rdev,tmp) {
                if (rdev != freshest)
                        if (super_types[mddev->major_version].
                            validate_super(mddev, rdev)) {
                                printk(KERN_WARNING "md: kicking non-fresh %s"
                                        " from array!\n",
                                        bdevname(rdev->bdev,b));
                                kick_rdev_from_array(rdev);
                                continue;
                        }
                if (mddev->level == LEVEL_MULTIPATH) {
                        rdev->desc_nr = i++;
                        rdev->raid_disk = rdev->desc_nr;
                        rdev->in_sync = 1;
                }
        }



        if (mddev->recovery_cp != MaxSector &&
            mddev->level >= 1)
                printk(KERN_ERR "md: %s: raid array is not clean"
                       " -- starting background reconstruction\n",
                       mdname(mddev));

}

1552 int mdp_major = 0;
1553
1554 static struct kobject *md_probe(dev_t dev, int *part, void *data)
1555 {
1556         static DECLARE_MUTEX(disks_sem);
1557         mddev_t *mddev = mddev_find(dev);
1558         struct gendisk *disk;
1559         int partitioned = (MAJOR(dev) != MD_MAJOR);
1560         int shift = partitioned ? MdpMinorShift : 0;
1561         int unit = MINOR(dev) >> shift;
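        /*
         * With the partitioned (mdp) major, each unit owns a block of
         * 1 << MdpMinorShift minors: e.g. with a shift of 6, minors
         * 64-127 all map to unit 1 (md_d1 and its partitions).
         */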
1562
1563         if (!mddev)
1564                 return NULL;
1565
1566         down(&disks_sem);
1567         if (mddev->gendisk) {
1568                 up(&disks_sem);
1569                 mddev_put(mddev);
1570                 return NULL;
1571         }
1572         disk = alloc_disk(1 << shift);
1573         if (!disk) {
1574                 up(&disks_sem);
1575                 mddev_put(mddev);
1576                 return NULL;
1577         }
1578         disk->major = MAJOR(dev);
1579         disk->first_minor = unit << shift;
1580         if (partitioned) {
1581                 sprintf(disk->disk_name, "md_d%d", unit);
1582                 sprintf(disk->devfs_name, "md/d%d", unit);
1583         } else {
1584                 sprintf(disk->disk_name, "md%d", unit);
1585                 sprintf(disk->devfs_name, "md/%d", unit);
1586         }
1587         disk->fops = &md_fops;
1588         disk->private_data = mddev;
1589         disk->queue = mddev->queue;
1590         add_disk(disk);
1591         mddev->gendisk = disk;
1592         up(&disks_sem);
1593         return NULL;
1594 }
1595
1596 void md_wakeup_thread(mdk_thread_t *thread);
1597
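/*
 * Timer callback: fires once the array has seen no writes for
 * safemode_delay.  Setting safemode and waking the md thread lets it
 * mark the array in_sync and write out a clean superblock.
 */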
1598 static void md_safemode_timeout(unsigned long data)
1599 {
1600         mddev_t *mddev = (mddev_t *) data;
1601
1602         mddev->safemode = 1;
1603         md_wakeup_thread(mddev->thread);
1604 }
1605
1606
1607 static int do_md_run(mddev_t * mddev)
1608 {
1609         int pnum, err;
1610         int chunk_size;
1611         struct list_head *tmp;
1612         mdk_rdev_t *rdev;
1613         struct gendisk *disk;
1614         char b[BDEVNAME_SIZE];
1615
1616         if (list_empty(&mddev->disks))
1617                 /* cannot run an array with no devices.. */
1618                 return -EINVAL;
1619
1620         if (mddev->pers)
1621                 return -EBUSY;
1622
1623         /*
1624          * Analyze all RAID superblock(s)
1625          */
1626         if (!mddev->raid_disks)
1627                 analyze_sbs(mddev);
1628
1629         chunk_size = mddev->chunk_size;
1630         pnum = level_to_pers(mddev->level);
1631
1632         if ((pnum != MULTIPATH) && (pnum != RAID1)) {
1633                 if (!chunk_size) {
1634                         /*
1635                          * 'default chunksize' in the old md code used to
1636                          * be PAGE_SIZE, baaad.
1637                          * we abort here to be on the safe side. We don't
1638                          * want to continue the bad practice.
1639                          */
1640                         printk(KERN_ERR 
1641                                 "no chunksize specified, see 'man raidtab'\n");
1642                         return -EINVAL;
1643                 }
1644                 if (chunk_size > MAX_CHUNK_SIZE) {
1645                         printk(KERN_ERR "too big chunk_size: %d > %d\n",
1646                                 chunk_size, MAX_CHUNK_SIZE);
1647                         return -EINVAL;
1648                 }
1649                 /*
1650                  * chunk-size has to be a power of 2 and a multiple of PAGE_SIZE
1651                  */
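                /*
                 * ffz(~x) is the bit number of the lowest set bit in x,
                 * so (1 << ffz(~x)) == x exactly when x has a single bit
                 * set.  E.g. 0x10000 (64k) passes: 1 << 16 == 0x10000;
                 * 0x18000 (96k) fails: 1 << 15 != 0x18000.
                 */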
1652                 if ((1 << ffz(~chunk_size)) != chunk_size) {
1653                         printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
1654                         return -EINVAL;
1655                 }
1656                 if (chunk_size < PAGE_SIZE) {
1657                         printk(KERN_ERR "too small chunk_size: %d < %ld\n",
1658                                 chunk_size, PAGE_SIZE);
1659                         return -EINVAL;
1660                 }
1661
1662                 /* devices must have minimum size of one chunk */
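                /* rdev->size is in 1K blocks while chunk_size is in
                 * bytes, hence the /1024 in the comparison below. */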
1663                 ITERATE_RDEV(mddev,rdev,tmp) {
1664                         if (rdev->faulty)
1665                                 continue;
1666                         if (rdev->size < chunk_size / 1024) {
1667                                 printk(KERN_WARNING
1668                                         "md: Dev %s smaller than chunk_size:"
1669                                         " %lluk < %dk\n",
1670                                         bdevname(rdev->bdev,b),
1671                                         (unsigned long long)rdev->size,
1672                                         chunk_size / 1024);
1673                                 return -EINVAL;
1674                         }
1675                 }
1676         }
1677
1678 #ifdef CONFIG_KMOD
1679         if (!pers[pnum])
1680         {
1681                 request_module("md-personality-%d", pnum);
1682         }
1683 #endif
1684
1685         /*
1686          * Drop all container device buffers, from now on
1687          * the only valid external interface is through the md
1688          * device.
1689          * Also find largest hardsector size
1690          */
1691         ITERATE_RDEV(mddev,rdev,tmp) {
1692                 if (rdev->faulty)
1693                         continue;
1694                 sync_blockdev(rdev->bdev);
1695                 invalidate_bdev(rdev->bdev, 0);
1696         }
1697
1698         md_probe(mddev->unit, NULL, NULL);
1699         disk = mddev->gendisk;
1700         if (!disk)
1701                 return -ENOMEM;
1702
1703         spin_lock(&pers_lock);
1704         if (!pers[pnum] || !try_module_get(pers[pnum]->owner)) {
1705                 spin_unlock(&pers_lock);
1706                 printk(KERN_WARNING "md: personality %d is not loaded!\n",
1707                        pnum);
1708                 return -EINVAL;
1709         }
1710
1711         mddev->pers = pers[pnum];
1712         spin_unlock(&pers_lock);
1713
1714         mddev->recovery = 0;
1715         mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
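        /* mddev->size is in 1K blocks; "<< 1" converts to 512-byte
         * sectors (set_capacity() below does the same for array_size). */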
1716
1717         /* before we start the array running, initialise the bitmap */
1718         err = bitmap_create(mddev);
1719         if (err)
1720                 printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
1721                         mdname(mddev), err);
1722         else
1723                 err = mddev->pers->run(mddev);
1724         if (err) {
1725                 printk(KERN_ERR "md: pers->run() failed ...\n");
1726                 module_put(mddev->pers->owner);
1727                 mddev->pers = NULL;
1728                 bitmap_destroy(mddev);
1729                 return err;
1730         }
1731         atomic_set(&mddev->writes_pending,0);
1732         mddev->safemode = 0;
1733         mddev->safemode_timer.function = md_safemode_timeout;
1734         mddev->safemode_timer.data = (unsigned long) mddev;
1735         mddev->safemode_delay = (20 * HZ)/1000 +1; /* 20 msec delay */
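        /* e.g. HZ=1000 gives 21 jiffies (~21ms), HZ=100 gives 3 jiffies
         * (30ms); the +1 keeps the delay non-zero on low-HZ builds. */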
1736         mddev->in_sync = 1;
1737         
1738         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1739         md_wakeup_thread(mddev->thread);
1740         
1741         if (mddev->sb_dirty)
1742                 md_update_sb(mddev);
1743
1744         set_capacity(disk, mddev->array_size<<1);
1745
1746         /* If we call blk_queue_make_request here, it will
1747          * re-initialise max_sectors etc which may have been
1748          * refined inside ->run().  So just set the bits we need to set.
1749          * Most initialisation happened when we called
1750          * blk_queue_make_request(..., md_fail_request)
1751          * earlier.
1752          */
1753         mddev->queue->queuedata = mddev;
1754         mddev->queue->make_request_fn = mddev->pers->make_request;
1755
1756         mddev->changed = 1;
1757         return 0;
1758 }
1759
1760 static int restart_array(mddev_t *mddev)
1761 {
1762         struct gendisk *disk = mddev->gendisk;
1763         int err;
1764
1765         /*
1766          * Complain if it has no devices
1767          */
1768         err = -ENXIO;
1769         if (list_empty(&mddev->disks))
1770                 goto out;
1771
1772         if (mddev->pers) {
1773                 err = -EBUSY;
1774                 if (!mddev->ro)
1775                         goto out;
1776
1777                 mddev->safemode = 0;
1778                 mddev->ro = 0;
1779                 set_disk_ro(disk, 0);
1780
1781                 printk(KERN_INFO "md: %s switched to read-write mode.\n",
1782                         mdname(mddev));
1783                 /*
1784                  * Kick recovery or resync if necessary
1785                  */
1786                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1787                 md_wakeup_thread(mddev->thread);
1788                 err = 0;
1789         } else {
1790                 printk(KERN_ERR "md: %s has no personality assigned.\n",
1791                         mdname(mddev));
1792                 err = -EINVAL;
1793         }
1794
1795 out:
1796         return err;
1797 }
1798
1799 static int do_md_stop(mddev_t * mddev, int ro)
1800 {
1801         int err = 0;
1802         struct gendisk *disk = mddev->gendisk;
1803
1804         if (mddev->pers) {
1805                 if (atomic_read(&mddev->active)>2) {
1806                         printk("md: %s still in use.\n",mdname(mddev));
1807                         return -EBUSY;
1808                 }
1809
1810                 if (mddev->sync_thread) {
1811                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1812                         md_unregister_thread(mddev->sync_thread);
1813                         mddev->sync_thread = NULL;
1814                 }
1815
1816                 del_timer_sync(&mddev->safemode_timer);
1817
1818                 invalidate_partition(disk, 0);
1819
1820                 if (ro) {
1821                         err  = -ENXIO;
1822                         if (mddev->ro)
1823                                 goto out;
1824                         mddev->ro = 1;
1825                 } else {
1826                         bitmap_flush(mddev);
1827                         wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
1828                         if (mddev->ro)
1829                                 set_disk_ro(disk, 0);
1830                         blk_queue_make_request(mddev->queue, md_fail_request);
1831                         mddev->pers->stop(mddev);
1832                         module_put(mddev->pers->owner);
1833                         mddev->pers = NULL;
1834                         if (mddev->ro)
1835                                 mddev->ro = 0;
1836                 }
1837                 if (!mddev->in_sync) {
1838                         /* mark array as shutdown cleanly */
1839                         mddev->in_sync = 1;
1840                         md_update_sb(mddev);
1841                 }
1842                 if (ro)
1843                         set_disk_ro(disk, 1);
1844         }
1845
1846         bitmap_destroy(mddev);
1847         if (mddev->bitmap_file) {
1848                 atomic_set(&mddev->bitmap_file->f_dentry->d_inode->i_writecount, 1);
1849                 fput(mddev->bitmap_file);
1850                 mddev->bitmap_file = NULL;
1851         }
1852         mddev->bitmap_offset = 0;
1853
1854         /*
1855          * Free resources if final stop
1856          */
1857         if (!ro) {
1858                 struct gendisk *disk;
1859                 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
1860
1861                 export_array(mddev);
1862
1863                 mddev->array_size = 0;
1864                 disk = mddev->gendisk;
1865                 if (disk)
1866                         set_capacity(disk, 0);
1867                 mddev->changed = 1;
1868         } else
1869                 printk(KERN_INFO "md: %s switched to read-only mode.\n",
1870                         mdname(mddev));
1871         err = 0;
1872 out:
1873         return err;
1874 }
1875
1876 static void autorun_array(mddev_t *mddev)
1877 {
1878         mdk_rdev_t *rdev;
1879         struct list_head *tmp;
1880         int err;
1881
1882         if (list_empty(&mddev->disks))
1883                 return;
1884
1885         printk(KERN_INFO "md: running: ");
1886
1887         ITERATE_RDEV(mddev,rdev,tmp) {
1888                 char b[BDEVNAME_SIZE];
1889                 printk("<%s>", bdevname(rdev->bdev,b));
1890         }
1891         printk("\n");
1892
1893         err = do_md_run (mddev);
1894         if (err) {
1895                 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
1896                 do_md_stop (mddev, 0);
1897         }
1898 }
1899
1900 /*
1901  * let's try to run arrays based on all disks that have arrived
1902  * until now. (those are in pending_raid_disks)
1903  *
1904  * the method: pick the first pending disk, collect all disks with
1905  * the same UUID, remove all from the pending list and put them into
1906  * the 'same_array' list. Then order this list based on superblock
1907  * update time (freshest comes first), kick out 'old' disks and
1908  * compare superblocks. If everything's fine then run it.
1909  *
1910  * If "unit" is allocated, then bump its reference count
1911  */
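/*
 * The "same UUID" test in the loop below is super_90_load(rdev,
 * rdev0, 0) succeeding: loading rdev's superblock against rdev0 as
 * the reference fails for a device from a different array.
 */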
1912 static void autorun_devices(int part)
1913 {
1914         struct list_head candidates;
1915         struct list_head *tmp;
1916         mdk_rdev_t *rdev0, *rdev;
1917         mddev_t *mddev;
1918         char b[BDEVNAME_SIZE];
1919
1920         printk(KERN_INFO "md: autorun ...\n");
1921         while (!list_empty(&pending_raid_disks)) {
1922                 dev_t dev;
1923                 rdev0 = list_entry(pending_raid_disks.next,
1924                                          mdk_rdev_t, same_set);
1925
1926                 printk(KERN_INFO "md: considering %s ...\n",
1927                         bdevname(rdev0->bdev,b));
1928                 INIT_LIST_HEAD(&candidates);
1929                 ITERATE_RDEV_PENDING(rdev,tmp)
1930                         if (super_90_load(rdev, rdev0, 0) >= 0) {
1931                                 printk(KERN_INFO "md:  adding %s ...\n",
1932                                         bdevname(rdev->bdev,b));
1933                                 list_move(&rdev->same_set, &candidates);
1934                         }
1935                 /*
1936                  * now we have a set of devices, with all of them having
1937                  * mostly sane superblocks. It's time to allocate the
1938                  * mddev.
1939                  */
1940                 if (rdev0->preferred_minor < 0 || rdev0->preferred_minor >= MAX_MD_DEVS) {
1941                         printk(KERN_INFO "md: unit number in %s is bad: %d\n",
1942                                bdevname(rdev0->bdev, b), rdev0->preferred_minor);
1943                         break;
1944                 }
1945                 if (part)
1946                         dev = MKDEV(mdp_major,
1947                                     rdev0->preferred_minor << MdpMinorShift);
1948                 else
1949                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
1950
1951                 md_probe(dev, NULL, NULL);
1952                 mddev = mddev_find(dev);
1953                 if (!mddev) {
1954                         printk(KERN_ERR 
1955                                 "md: cannot allocate memory for md drive.\n");
1956                         break;
1957                 }
1958                 if (mddev_lock(mddev)) 
1959                         printk(KERN_WARNING "md: %s locked, cannot run\n",
1960                                mdname(mddev));
1961                 else if (mddev->raid_disks || mddev->major_version
1962                          || !list_empty(&mddev->disks)) {
1963                         printk(KERN_WARNING 
1964                                 "md: %s already running, cannot run %s\n",
1965                                 mdname(mddev), bdevname(rdev0->bdev,b));
1966                         mddev_unlock(mddev);
1967                 } else {
1968                         printk(KERN_INFO "md: created %s\n", mdname(mddev));
1969                         ITERATE_RDEV_GENERIC(candidates,rdev,tmp) {
1970                                 list_del_init(&rdev->same_set);
1971                                 if (bind_rdev_to_array(rdev, mddev))
1972                                         export_rdev(rdev);
1973                         }
1974                         autorun_array(mddev);
1975                         mddev_unlock(mddev);
1976                 }
1977                 /* on success, candidates will be empty, on error
1978                  * it won't...
1979                  */
1980                 ITERATE_RDEV_GENERIC(candidates,rdev,tmp)
1981                         export_rdev(rdev);
1982                 mddev_put(mddev);
1983         }
1984         printk(KERN_INFO "md: ... autorun DONE.\n");
1985 }
1986
1987 /*
1988  * import RAID devices based on one partition
1989  * if possible, the array gets run as well.
1990  */
1991
1992 static int autostart_array(dev_t startdev)
1993 {
1994         char b[BDEVNAME_SIZE];
1995         int err = -EINVAL, i;
1996         mdp_super_t *sb = NULL;
1997         mdk_rdev_t *start_rdev = NULL, *rdev;
1998
1999         start_rdev = md_import_device(startdev, 0, 0);
2000         if (IS_ERR(start_rdev))
2001                 return err;
2002
2003
2004         /* NOTE: this can only work for 0.90.0 superblocks */
2005         sb = (mdp_super_t*)page_address(start_rdev->sb_page);
2006         if (sb->major_version != 0 ||
2007             sb->minor_version != 90 ) {
2008                 printk(KERN_WARNING "md: can only autostart 0.90.0 arrays\n");
2009                 export_rdev(start_rdev);
2010                 return err;
2011         }
2012
2013         if (start_rdev->faulty) {
2014                 printk(KERN_WARNING 
2015                         "md: can not autostart based on faulty %s!\n",
2016                         bdevname(start_rdev->bdev,b));
2017                 export_rdev(start_rdev);
2018                 return err;
2019         }
2020         list_add(&start_rdev->same_set, &pending_raid_disks);
2021
2022         for (i = 0; i < MD_SB_DISKS; i++) {
2023                 mdp_disk_t *desc = sb->disks + i;
2024                 dev_t dev = MKDEV(desc->major, desc->minor);
2025
2026                 if (!dev)
2027                         continue;
2028                 if (dev == startdev)
2029                         continue;
2030                 if (MAJOR(dev) != desc->major || MINOR(dev) != desc->minor)
2031                         continue;
2032                 rdev = md_import_device(dev, 0, 0);
2033                 if (IS_ERR(rdev))
2034                         continue;
2035
2036                 list_add(&rdev->same_set, &pending_raid_disks);
2037         }
2038
2039         /*
2040          * possibly return error codes from the autorun here
2041          */
2042         autorun_devices(0);
2043         return 0;
2044
2045 }
2046
2047
2048 static int get_version(void __user * arg)
2049 {
2050         mdu_version_t ver;
2051
2052         ver.major = MD_MAJOR_VERSION;
2053         ver.minor = MD_MINOR_VERSION;
2054         ver.patchlevel = MD_PATCHLEVEL_VERSION;
2055
2056         if (copy_to_user(arg, &ver, sizeof(ver)))
2057                 return -EFAULT;
2058
2059         return 0;
2060 }
2061
2062 static int get_array_info(mddev_t * mddev, void __user * arg)
2063 {
2064         mdu_array_info_t info;
2065         int nr,working,active,failed,spare;
2066         mdk_rdev_t *rdev;
2067         struct list_head *tmp;
2068
2069         nr=working=active=failed=spare=0;
2070         ITERATE_RDEV(mddev,rdev,tmp) {
2071                 nr++;
2072                 if (rdev->faulty)
2073                         failed++;
2074                 else {
2075                         working++;
2076                         if (rdev->in_sync)
2077                                 active++;       
2078                         else
2079                                 spare++;
2080                 }
2081         }
2082
2083         info.major_version = mddev->major_version;
2084         info.minor_version = mddev->minor_version;
2085         info.patch_version = MD_PATCHLEVEL_VERSION;
2086         info.ctime         = mddev->ctime;
2087         info.level         = mddev->level;
2088         info.size          = mddev->size;
2089         info.nr_disks      = nr;
2090         info.raid_disks    = mddev->raid_disks;
2091         info.md_minor      = mddev->md_minor;
2092         info.not_persistent= !mddev->persistent;
2093
2094         info.utime         = mddev->utime;
2095         info.state         = 0;
2096         if (mddev->in_sync)
2097                 info.state = (1<<MD_SB_CLEAN);
2098         if (mddev->bitmap && mddev->bitmap_offset)
2099                 info.state |= (1<<MD_SB_BITMAP_PRESENT);
2100         info.active_disks  = active;
2101         info.working_disks = working;
2102         info.failed_disks  = failed;
2103         info.spare_disks   = spare;
2104
2105         info.layout        = mddev->layout;
2106         info.chunk_size    = mddev->chunk_size;
2107
2108         if (copy_to_user(arg, &info, sizeof(info)))
2109                 return -EFAULT;
2110
2111         return 0;
2112 }
2113
2114 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
2115 {
2116         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
2117         char *ptr, *buf = NULL;
2118         int err = -ENOMEM;
2119
2120         file = kmalloc(sizeof(*file), GFP_KERNEL);
2121         if (!file)
2122                 goto out;
2123
2124         /* bitmap disabled, zero the first byte and copy out */
2125         if (!mddev->bitmap || !mddev->bitmap->file) {
2126                 file->pathname[0] = '\0';
2127                 goto copy_out;
2128         }
2129
2130         buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
2131         if (!buf)
2132                 goto out;
2133
2134         ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname));
2135         if (!ptr)
2136                 goto out;
2137
2138         strcpy(file->pathname, ptr);
2139
2140 copy_out:
2141         err = 0;
2142         if (copy_to_user(arg, file, sizeof(*file)))
2143                 err = -EFAULT;
2144 out:
2145         kfree(buf);
2146         kfree(file);
2147         return err;
2148 }
2149
2150 static int get_disk_info(mddev_t * mddev, void __user * arg)
2151 {
2152         mdu_disk_info_t info;
2153         unsigned int nr;
2154         mdk_rdev_t *rdev;
2155
2156         if (copy_from_user(&info, arg, sizeof(info)))
2157                 return -EFAULT;
2158
2159         nr = info.number;
2160
2161         rdev = find_rdev_nr(mddev, nr);
2162         if (rdev) {
2163                 info.major = MAJOR(rdev->bdev->bd_dev);
2164                 info.minor = MINOR(rdev->bdev->bd_dev);
2165                 info.raid_disk = rdev->raid_disk;
2166                 info.state = 0;
2167                 if (rdev->faulty)
2168                         info.state |= (1<<MD_DISK_FAULTY);
2169                 else if (rdev->in_sync) {
2170                         info.state |= (1<<MD_DISK_ACTIVE);
2171                         info.state |= (1<<MD_DISK_SYNC);
2172                 }
2173                 if (test_bit(WriteMostly, &rdev->flags))
2174                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
2175         } else {
2176                 info.major = info.minor = 0;
2177                 info.raid_disk = -1;
2178                 info.state = (1<<MD_DISK_REMOVED);
2179         }
2180
2181         if (copy_to_user(arg, &info, sizeof(info)))
2182                 return -EFAULT;
2183
2184         return 0;
2185 }
2186
2187 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
2188 {
2189         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
2190         mdk_rdev_t *rdev;
2191         dev_t dev = MKDEV(info->major,info->minor);
2192
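        /* Round-trip check: if info->major/minor do not survive being
         * packed into a dev_t by MKDEV(), they are out of range. */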
2193         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
2194                 return -EOVERFLOW;
2195
2196         if (!mddev->raid_disks) {
2197                 int err;
2198                 /* expecting a device which has a superblock */
2199                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
2200                 if (IS_ERR(rdev)) {
2201                         printk(KERN_WARNING 
2202                                 "md: md_import_device returned %ld\n",
2203                                 PTR_ERR(rdev));
2204                         return PTR_ERR(rdev);
2205                 }
2206                 if (!list_empty(&mddev->disks)) {
2207                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2208                                                         mdk_rdev_t, same_set);
2209                         int err = super_types[mddev->major_version]
2210                                 .load_super(rdev, rdev0, mddev->minor_version);
2211                         if (err < 0) {
2212                                 printk(KERN_WARNING 
2213                                         "md: %s has different UUID to %s\n",
2214                                         bdevname(rdev->bdev,b), 
2215                                         bdevname(rdev0->bdev,b2));
2216                                 export_rdev(rdev);
2217                                 return -EINVAL;
2218                         }
2219                 }
2220                 err = bind_rdev_to_array(rdev, mddev);
2221                 if (err)
2222                         export_rdev(rdev);
2223                 return err;
2224         }
2225
2226         /*
2227          * add_new_disk can be used once the array is assembled
2228          * to add "hot spares".  They must already have a superblock
2229          * written
2230          */
2231         if (mddev->pers) {
2232                 int err;
2233                 if (!mddev->pers->hot_add_disk) {
2234                         printk(KERN_WARNING 
2235                                 "%s: personality does not support diskops!\n",
2236                                mdname(mddev));
2237                         return -EINVAL;
2238                 }
2239                 if (mddev->persistent)
2240                         rdev = md_import_device(dev, mddev->major_version,
2241                                                 mddev->minor_version);
2242                 else
2243                         rdev = md_import_device(dev, -1, -1);
2244                 if (IS_ERR(rdev)) {
2245                         printk(KERN_WARNING 
2246                                 "md: md_import_device returned %ld\n",
2247                                 PTR_ERR(rdev));
2248                         return PTR_ERR(rdev);
2249                 }
2250                 /* set save_raid_disk if appropriate */
2251                 if (!mddev->persistent) {
2252                         if (info->state & (1<<MD_DISK_SYNC)  &&
2253                             info->raid_disk < mddev->raid_disks)
2254                                 rdev->raid_disk = info->raid_disk;
2255                         else
2256                                 rdev->raid_disk = -1;
2257                 } else
2258                         super_types[mddev->major_version].
2259                                 validate_super(mddev, rdev);
2260                 rdev->saved_raid_disk = rdev->raid_disk;
2261
2262                 rdev->in_sync = 0; /* just to be sure */
2263                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
2264                         set_bit(WriteMostly, &rdev->flags);
2265
2266                 rdev->raid_disk = -1;
2267                 err = bind_rdev_to_array(rdev, mddev);
2268                 if (err)
2269                         export_rdev(rdev);
2270
2271                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2272                 md_wakeup_thread(mddev->thread);
2273                 return err;
2274         }
2275
2276         /* otherwise, add_new_disk is only allowed
2277          * for major_version==0 superblocks
2278          */
2279         if (mddev->major_version != 0) {
2280                 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
2281                        mdname(mddev));
2282                 return -EINVAL;
2283         }
2284
2285         if (!(info->state & (1<<MD_DISK_FAULTY))) {
2286                 int err;
2287                 rdev = md_import_device (dev, -1, 0);
2288                 if (IS_ERR(rdev)) {
2289                         printk(KERN_WARNING 
2290                                 "md: error, md_import_device() returned %ld\n",
2291                                 PTR_ERR(rdev));
2292                         return PTR_ERR(rdev);
2293                 }
2294                 rdev->desc_nr = info->number;
2295                 if (info->raid_disk < mddev->raid_disks)
2296                         rdev->raid_disk = info->raid_disk;
2297                 else
2298                         rdev->raid_disk = -1;
2299
2300                 rdev->faulty = 0;
2301                 if (rdev->raid_disk < mddev->raid_disks)
2302                         rdev->in_sync = (info->state & (1<<MD_DISK_SYNC));
2303                 else
2304                         rdev->in_sync = 0;
2305
2306                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
2307                         set_bit(WriteMostly, &rdev->flags);
2308
2309                 err = bind_rdev_to_array(rdev, mddev);
2310                 if (err) {
2311                         export_rdev(rdev);
2312                         return err;
2313                 }
2314
2315                 if (!mddev->persistent) {
2316                         printk(KERN_INFO "md: nonpersistent superblock ...\n");
2317                         rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2318                 } else 
2319                         rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
2320                 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
2321
2322                 if (!mddev->size || (mddev->size > rdev->size))
2323                         mddev->size = rdev->size;
2324         }
2325
2326         return 0;
2327 }
2328
2329 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
2330 {
2331         char b[BDEVNAME_SIZE];
2332         mdk_rdev_t *rdev;
2333
2334         if (!mddev->pers)
2335                 return -ENODEV;
2336
2337         rdev = find_rdev(mddev, dev);
2338         if (!rdev)
2339                 return -ENXIO;
2340
2341         if (rdev->raid_disk >= 0)
2342                 goto busy;
2343
2344         kick_rdev_from_array(rdev);
2345         md_update_sb(mddev);
2346
2347         return 0;
2348 busy:
2349         printk(KERN_WARNING "md: cannot remove active disk %s from %s ... \n",
2350                 bdevname(rdev->bdev,b), mdname(mddev));
2351         return -EBUSY;
2352 }
2353
2354 static int hot_add_disk(mddev_t * mddev, dev_t dev)
2355 {
2356         char b[BDEVNAME_SIZE];
2357         int err;
2358         unsigned int size;
2359         mdk_rdev_t *rdev;
2360
2361         if (!mddev->pers)
2362                 return -ENODEV;
2363
2364         if (mddev->major_version != 0) {
2365                 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
2366                         " version-0 superblocks.\n",
2367                         mdname(mddev));
2368                 return -EINVAL;
2369         }
2370         if (!mddev->pers->hot_add_disk) {
2371                 printk(KERN_WARNING 
2372                         "%s: personality does not support diskops!\n",
2373                         mdname(mddev));
2374                 return -EINVAL;
2375         }
2376
2377         rdev = md_import_device (dev, -1, 0);
2378         if (IS_ERR(rdev)) {
2379                 printk(KERN_WARNING 
2380                         "md: error, md_import_device() returned %ld\n",
2381                         PTR_ERR(rdev));
2382                 return -EINVAL;
2383         }
2384
2385         if (mddev->persistent)
2386                 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
2387         else
2388                 rdev->sb_offset =
2389                         rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
2390
2391         size = calc_dev_size(rdev, mddev->chunk_size);
2392         rdev->size = size;
2393
2394         if (size < mddev->size) {
2395                 printk(KERN_WARNING 
2396                         "%s: disk size %llu blocks < array size %llu\n",
2397                         mdname(mddev), (unsigned long long)size,
2398                         (unsigned long long)mddev->size);
2399                 err = -ENOSPC;
2400                 goto abort_export;
2401         }
2402
2403         if (rdev->faulty) {
2404                 printk(KERN_WARNING 
2405                         "md: can not hot-add faulty %s disk to %s!\n",
2406                         bdevname(rdev->bdev,b), mdname(mddev));
2407                 err = -EINVAL;
2408                 goto abort_export;
2409         }
2410         rdev->in_sync = 0;
2411         rdev->desc_nr = -1;
2412         bind_rdev_to_array(rdev, mddev);
2413
2414         /*
2415          * The rest should better be atomic, we can have disk failures
2416          * noticed in interrupt contexts ...
2417          */
2418
2419         if (rdev->desc_nr == mddev->max_disks) {
2420                 printk(KERN_WARNING "%s: can not hot-add to full array!\n",
2421                         mdname(mddev));
2422                 err = -EBUSY;
2423                 goto abort_unbind_export;
2424         }
2425
2426         rdev->raid_disk = -1;
2427
2428         md_update_sb(mddev);
2429
2430         /*
2431          * Kick recovery, maybe this spare has to be added to the
2432          * array immediately.
2433          */
2434         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2435         md_wakeup_thread(mddev->thread);
2436
2437         return 0;
2438
2439 abort_unbind_export:
2440         unbind_rdev_from_array(rdev);
2441
2442 abort_export:
2443         export_rdev(rdev);
2444         return err;
2445 }
2446
2447 /* similar to deny_write_access, but accounts for our holding a reference
2448  * to the file ourselves */
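/*
 * A negative i_writecount means "writers denied"; this is the same
 * convention deny_write_access() uses for files being executed,
 * which is why a busy bitmap file reports -ETXTBSY here.
 */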
2449 static int deny_bitmap_write_access(struct file * file)
2450 {
2451         struct inode *inode = file->f_mapping->host;
2452
2453         spin_lock(&inode->i_lock);
2454         if (atomic_read(&inode->i_writecount) > 1) {
2455                 spin_unlock(&inode->i_lock);
2456                 return -ETXTBSY;
2457         }
2458         atomic_set(&inode->i_writecount, -1);
2459         spin_unlock(&inode->i_lock);
2460
2461         return 0;
2462 }
2463
2464 static int set_bitmap_file(mddev_t *mddev, int fd)
2465 {
2466         int err;
2467
2468         if (mddev->pers) {
2469                 if (!mddev->pers->quiesce)
2470                         return -EBUSY;
2471                 if (mddev->recovery || mddev->sync_thread)
2472                         return -EBUSY;
2473                 /* we should be able to change the bitmap.. */
2474         }
2475
2476
2477         if (fd >= 0) {
2478                 if (mddev->bitmap)
2479                         return -EEXIST; /* cannot add when bitmap is present */
2480                 mddev->bitmap_file = fget(fd);
2481
2482                 if (mddev->bitmap_file == NULL) {
2483                         printk(KERN_ERR "%s: error: failed to get bitmap file\n",
2484                                mdname(mddev));
2485                         return -EBADF;
2486                 }
2487
2488                 err = deny_bitmap_write_access(mddev->bitmap_file);
2489                 if (err) {
2490                         printk(KERN_ERR "%s: error: bitmap file is already in use\n",
2491                                mdname(mddev));
2492                         fput(mddev->bitmap_file);
2493                         mddev->bitmap_file = NULL;
2494                         return err;
2495                 }
2496                 mddev->bitmap_offset = 0; /* file overrides offset */
2497         } else if (mddev->bitmap == NULL)
2498                 return -ENOENT; /* cannot remove what isn't there */
2499         err = 0;
2500         if (mddev->pers) {
2501                 mddev->pers->quiesce(mddev, 1);
2502                 if (fd >= 0)
2503                         err = bitmap_create(mddev);
2504                 if (fd < 0 || err)
2505                         bitmap_destroy(mddev);
2506                 mddev->pers->quiesce(mddev, 0);
2507         } else if (fd < 0) {
2508                 if (mddev->bitmap_file)
2509                         fput(mddev->bitmap_file);
2510                 mddev->bitmap_file = NULL;
2511         }
2512
2513         return err;
2514 }
2515
2516 /*
2517  * set_array_info is used in two different ways
2518  * The original usage is when creating a new array.
2519  * In this usage, raid_disks is > 0 and it together with
2520  *  level, size, not_persistent,layout,chunksize determine the
2521  *  shape of the array.
2522  *  This will always create an array with a type-0.90.0 superblock.
2523  * The newer usage is when assembling an array.
2524  *  In this case raid_disks will be 0, and the major_version field is
2525  *  used to determine which style super-blocks are to be found on the devices.
2526  *  The minor and patch _version numbers are also kept in case the
2527  *  super_block handler wishes to interpret them.
2528  */
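/*
 * Roughly, from userspace (illustrative fragment, not a complete
 * program; "md_fd" is an open /dev/md* descriptor):
 *
 *   mdu_array_info_t info = { 0 };
 *   info.major_version = 1;               (assemble: raid_disks == 0)
 *   ioctl(md_fd, SET_ARRAY_INFO, &info);
 *
 * For creation, raid_disks, level, size, layout and chunk_size are
 * all filled in instead, and a 0.90.0 superblock results.
 */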
2529 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
2530 {
2531
2532         if (info->raid_disks == 0) {
2533                 /* just setting version number for superblock loading */
2534                 if (info->major_version < 0 ||
2535                     info->major_version >= sizeof(super_types)/sizeof(super_types[0]) ||
2536                     super_types[info->major_version].name == NULL) {
2537                         /* maybe try to auto-load a module? */
2538                         printk(KERN_INFO 
2539                                 "md: superblock version %d not known\n",
2540                                 info->major_version);
2541                         return -EINVAL;
2542                 }
2543                 mddev->major_version = info->major_version;
2544                 mddev->minor_version = info->minor_version;
2545                 mddev->patch_version = info->patch_version;
2546                 return 0;
2547         }
2548         mddev->major_version = MD_MAJOR_VERSION;
2549         mddev->minor_version = MD_MINOR_VERSION;
2550         mddev->patch_version = MD_PATCHLEVEL_VERSION;
2551         mddev->ctime         = get_seconds();
2552
2553         mddev->level         = info->level;
2554         mddev->size          = info->size;
2555         mddev->raid_disks    = info->raid_disks;
2556         /* don't set md_minor, it is determined by which /dev/md* was
2557          * opened
2558          */
2559         if (info->state & (1<<MD_SB_CLEAN))
2560                 mddev->recovery_cp = MaxSector;
2561         else
2562                 mddev->recovery_cp = 0;
2563         mddev->persistent    = ! info->not_persistent;
2564
2565         mddev->layout        = info->layout;
2566         mddev->chunk_size    = info->chunk_size;
2567
2568         mddev->max_disks     = MD_SB_DISKS;
2569
2570         mddev->sb_dirty      = 1;
2571
2572         /*
2573          * Generate a 128 bit UUID
2574          */
2575         get_random_bytes(mddev->uuid, 16);
2576
2577         return 0;
2578 }
2579
2580 /*
2581  * update_array_info is used to change the configuration of an
2582  * on-line array.
2583  * The version, ctime, level, size, raid_disks, not_persistent, layout and chunk_size
2584  * fields in the info are checked against the array.
2585  * Any differences that cannot be handled will cause an error.
2586  * Normally, only one change can be managed at a time.
2587  */
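/*
 * Illustrative flow: to resize, userspace does GET_ARRAY_INFO,
 * changes only the "size" field, and calls SET_ARRAY_INFO again;
 * changing e.g. size and raid_disks in one call fails with -EINVAL
 * (see the "only one change" count below).
 */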
2588 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
2589 {
2590         int rv = 0;
2591         int cnt = 0;
2592         int state = 0;
2593
2594         /* calculate expected state, ignoring low bits */
2595         if (mddev->bitmap && mddev->bitmap_offset)
2596                 state |= (1 << MD_SB_BITMAP_PRESENT);
2597
2598         if (mddev->major_version != info->major_version ||
2599             mddev->minor_version != info->minor_version ||
2600 /*          mddev->patch_version != info->patch_version || */
2601             mddev->ctime         != info->ctime         ||
2602             mddev->level         != info->level         ||
2603 /*          mddev->layout        != info->layout        || */
2604             !mddev->persistent   != info->not_persistent||
2605             mddev->chunk_size    != info->chunk_size    ||
2606             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
2607             ((state^info->state) & 0xfffffe00)
2608                 )
2609                 return -EINVAL;
2610         /* Check there is only one change */
2611         if (mddev->size != info->size) cnt++;
2612         if (mddev->raid_disks != info->raid_disks) cnt++;
2613         if (mddev->layout != info->layout) cnt++;
2614         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
2615         if (cnt == 0) return 0;
2616         if (cnt > 1) return -EINVAL;
2617
2618         if (mddev->layout != info->layout) {
2619                 /* Change layout
2620                  * we don't need to do anything at the md level, the
2621                  * personality will take care of it all.
2622                  */
2623                 if (mddev->pers->reconfig == NULL)
2624                         return -EINVAL;
2625                 else
2626                         return mddev->pers->reconfig(mddev, info->layout, -1);
2627         }
2628         if (mddev->size != info->size) {
2629                 mdk_rdev_t * rdev;
2630                 struct list_head *tmp;
2631                 if (mddev->pers->resize == NULL)
2632                         return -EINVAL;
2633                 /* The "size" is the amount of each device that is used.
2634                  * This can only make sense for arrays with redundancy.
2635                  * linear and raid0 always use whatever space is available
2636                  * We can only consider changing the size if no resync
2637                  * or reconstruction is happening, and if the new size
2638                  * is acceptable. It must fit before the sb_offset or,
2639                  * if that is <data_offset, it must fit before the
2640                  * size of each device.
2641                  * If size is zero, we find the largest size that fits.
2642                  */
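                /*
                 * E.g. with a 0.90 superblock at the end of the device,
                 * data_offset is 0 and sb_offset is in 1K blocks, so
                 * avail = sb_offset*2 sectors; with info->size == 0 the
                 * largest size that fits is used.
                 */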
2643                 if (mddev->sync_thread)
2644                         return -EBUSY;
2645                 ITERATE_RDEV(mddev,rdev,tmp) {
2646                         sector_t avail;
2647                         int fit = (info->size == 0);
2648                         if (rdev->sb_offset > rdev->data_offset)
2649                                 avail = (rdev->sb_offset*2) - rdev->data_offset;
2650                         else
2651                                 avail = get_capacity(rdev->bdev->bd_disk)
2652                                         - rdev->data_offset;
2653                         if (fit && (info->size == 0 || info->size > avail/2))
2654                                 info->size = avail/2;
2655                         if (avail < ((sector_t)info->size << 1))
2656                                 return -ENOSPC;
2657                 }
2658                 rv = mddev->pers->resize(mddev, (sector_t)info->size *2);
2659                 if (!rv) {
2660                         struct block_device *bdev;
2661
2662                         bdev = bdget_disk(mddev->gendisk, 0);
2663                         if (bdev) {
2664                                 down(&bdev->bd_inode->i_sem);
2665                                 i_size_write(bdev->bd_inode, mddev->array_size << 10);
2666                                 up(&bdev->bd_inode->i_sem);
2667                                 bdput(bdev);
2668                         }
2669                 }
2670         }
2671         if (mddev->raid_disks    != info->raid_disks) {
2672                 /* change the number of raid disks */
2673                 if (mddev->pers->reshape == NULL)
2674                         return -EINVAL;
2675                 if (info->raid_disks <= 0 ||
2676                     info->raid_disks >= mddev->max_disks)
2677                         return -EINVAL;
2678                 if (mddev->sync_thread)
2679                         return -EBUSY;
2680                 rv = mddev->pers->reshape(mddev, info->raid_disks);
2681                 if (!rv) {
2682                         struct block_device *bdev;
2683
2684                         bdev = bdget_disk(mddev->gendisk, 0);
2685                         if (bdev) {
2686                                 down(&bdev->bd_inode->i_sem);
2687                                 i_size_write(bdev->bd_inode, mddev->array_size << 10);
2688                                 up(&bdev->bd_inode->i_sem);
2689                                 bdput(bdev);
2690                         }
2691                 }
2692         }
2693         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
2694                 if (mddev->pers->quiesce == NULL)
2695                         return -EINVAL;
2696                 if (mddev->recovery || mddev->sync_thread)
2697                         return -EBUSY;
2698                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
2699                         /* add the bitmap */
2700                         if (mddev->bitmap)
2701                                 return -EEXIST;
2702                         if (mddev->default_bitmap_offset == 0)
2703                                 return -EINVAL;
2704                         mddev->bitmap_offset = mddev->default_bitmap_offset;
2705                         mddev->pers->quiesce(mddev, 1);
2706                         rv = bitmap_create(mddev);
2707                         if (rv)
2708                                 bitmap_destroy(mddev);
2709                         mddev->pers->quiesce(mddev, 0);
2710                 } else {
2711                         /* remove the bitmap */
2712                         if (!mddev->bitmap)
2713                                 return -ENOENT;
2714                         if (mddev->bitmap->file)
2715                                 return -EINVAL;
2716                         mddev->pers->quiesce(mddev, 1);
2717                         bitmap_destroy(mddev);
2718                         mddev->pers->quiesce(mddev, 0);
2719                         mddev->bitmap_offset = 0;
2720                 }
2721         }
2722         md_update_sb(mddev);
2723         return rv;
2724 }
2725
2726 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
2727 {
2728         mdk_rdev_t *rdev;
2729
2730         if (mddev->pers == NULL)
2731                 return -ENODEV;
2732
2733         rdev = find_rdev(mddev, dev);
2734         if (!rdev)
2735                 return -ENODEV;
2736
2737         md_error(mddev, rdev);
2738         return 0;
2739 }
2740
2741 static int md_ioctl(struct inode *inode, struct file *file,
2742                         unsigned int cmd, unsigned long arg)
2743 {
2744         int err = 0;
2745         void __user *argp = (void __user *)arg;
2746         struct hd_geometry __user *loc = argp;
2747         mddev_t *mddev = NULL;
2748
2749         if (!capable(CAP_SYS_ADMIN))
2750                 return -EACCES;
2751
2752         /*
2753          * Commands dealing with the RAID driver but not any
2754          * particular array:
2755          */
2756         switch (cmd)
2757         {
2758                 case RAID_VERSION:
2759                         err = get_version(argp);
2760                         goto done;
2761
2762                 case PRINT_RAID_DEBUG:
2763                         err = 0;
2764                         md_print_devices();
2765                         goto done;
2766
2767 #ifndef MODULE
2768                 case RAID_AUTORUN:
2769                         err = 0;
2770                         autostart_arrays(arg);
2771                         goto done;
2772 #endif
2773                 default:;
2774         }
2775
2776         /*
2777          * Commands creating/starting a new array:
2778          */
2779
2780         mddev = inode->i_bdev->bd_disk->private_data;
2781
2782         if (!mddev) {
2783                 BUG();
2784                 goto abort;
2785         }
2786
2787
2788         if (cmd == START_ARRAY) {
2789                 /* START_ARRAY doesn't need to lock the array as autostart_array
2790                  * does the locking, and it could even be a different array
2791                  */
2792                 static int cnt = 3;
2793                 if (cnt > 0 ) {
2794                         printk(KERN_WARNING
2795                                "md: %s(pid %d) used deprecated START_ARRAY ioctl. "
2796                                "This will not be supported beyond 2.6\n",
2797                                current->comm, current->pid);
2798                         cnt--;
2799                 }
2800                 err = autostart_array(new_decode_dev(arg));
2801                 if (err) {
2802                         printk(KERN_WARNING "md: autostart failed!\n");
2803                         goto abort;
2804                 }
2805                 goto done;
2806         }
2807
2808         err = mddev_lock(mddev);
2809         if (err) {
2810                 printk(KERN_INFO 
2811                         "md: ioctl lock interrupted, reason %d, cmd %d\n",
2812                         err, cmd);
2813                 goto abort;
2814         }
2815
2816         switch (cmd)
2817         {
2818                 case SET_ARRAY_INFO:
2819                         {
2820                                 mdu_array_info_t info;
2821                                 if (!arg)
2822                                         memset(&info, 0, sizeof(info));
2823                                 else if (copy_from_user(&info, argp, sizeof(info))) {
2824                                         err = -EFAULT;
2825                                         goto abort_unlock;
2826                                 }
2827                                 if (mddev->pers) {
2828                                         err = update_array_info(mddev, &info);
2829                                         if (err) {
2830                                                 printk(KERN_WARNING "md: couldn't update"
2831                                                        " array info. %d\n", err);
2832                                                 goto abort_unlock;
2833                                         }
2834                                         goto done_unlock;
2835                                 }
2836                                 if (!list_empty(&mddev->disks)) {
2837                                         printk(KERN_WARNING
2838                                                "md: array %s already has disks!\n",
2839                                                mdname(mddev));
2840                                         err = -EBUSY;
2841                                         goto abort_unlock;
2842                                 }
2843                                 if (mddev->raid_disks) {
2844                                         printk(KERN_WARNING
2845                                                "md: array %s already initialised!\n",
2846                                                mdname(mddev));
2847                                         err = -EBUSY;
2848                                         goto abort_unlock;
2849                                 }
2850                                 err = set_array_info(mddev, &info);
2851                                 if (err) {
2852                                         printk(KERN_WARNING "md: couldn't set"
2853                                                " array info. %d\n", err);
2854                                         goto abort_unlock;
2855                                 }
2856                         }
2857                         goto done_unlock;
2858
2859                 default:;
2860         }
2861
2862         /*
2863          * Commands querying/configuring an existing array:
2864          */
2865         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
2866          * RUN_ARRAY, and SET_BITMAP_FILE are allowed */
2867         if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
2868                         && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE) {
2869                 err = -ENODEV;
2870                 goto abort_unlock;
2871         }
2872
2873         /*
2874          * Commands even a read-only array can execute:
2875          */
2876         switch (cmd)
2877         {
2878                 case GET_ARRAY_INFO:
2879                         err = get_array_info(mddev, argp);
2880                         goto done_unlock;
2881
2882                 case GET_BITMAP_FILE:
2883                         err = get_bitmap_file(mddev, argp);
2884                         goto done_unlock;
2885
2886                 case GET_DISK_INFO:
2887                         err = get_disk_info(mddev, argp);
2888                         goto done_unlock;
2889
2890                 case RESTART_ARRAY_RW:
2891                         err = restart_array(mddev);
2892                         goto done_unlock;
2893
2894                 case STOP_ARRAY:
2895                         err = do_md_stop (mddev, 0);
2896                         goto done_unlock;
2897
2898                 case STOP_ARRAY_RO:
2899                         err = do_md_stop (mddev, 1);
2900                         goto done_unlock;
2901
2902         /*
2903          * We have a problem here: there is no easy way to give a CHS
2904          * virtual geometry. We currently pretend that we have a 2 heads
2905          * 4 sectors (with a BIG number of cylinders...). This drives
2906          * dosfs just mad... ;-)
2907          */
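        /*
         * With 2 heads and 4 sectors per track, cylinders come out as
         * capacity / (2*4), hence the /8 below; loc->cylinders is only
         * a short, so the value truncates on very large arrays.
         */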
2908                 case HDIO_GETGEO:
2909                         if (!loc) {
2910                                 err = -EINVAL;
2911                                 goto abort_unlock;
2912                         }
2913                         err = put_user (2, (char __user *) &loc->heads);
2914                         if (err)
2915                                 goto abort_unlock;
2916                         err = put_user (4, (char __user *) &loc->sectors);
2917                         if (err)
2918                                 goto abort_unlock;
2919                         err = put_user(get_capacity(mddev->gendisk)/8,
2920                                         (short __user *) &loc->cylinders);
2921                         if (err)
2922                                 goto abort_unlock;
2923                         err = put_user (get_start_sect(inode->i_bdev),
2924                                                 (long __user *) &loc->start);
2925                         goto done_unlock;
2926         }
2927
2928         /*
2929          * The remaining ioctls are changing the state of the
2930          * superblock, so we do not allow read-only arrays
2931          * here:
2932          */
2933         if (mddev->ro) {
2934                 err = -EROFS;
2935                 goto abort_unlock;
2936         }
2937
2938         switch (cmd)
2939         {
2940                 case ADD_NEW_DISK:
2941                 {
2942                         mdu_disk_info_t info;
2943                         if (copy_from_user(&info, argp, sizeof(info)))
2944                                 err = -EFAULT;
2945                         else
2946                                 err = add_new_disk(mddev, &info);
2947                         goto done_unlock;
2948                 }
2949
2950                 case HOT_REMOVE_DISK:
2951                         err = hot_remove_disk(mddev, new_decode_dev(arg));
2952                         goto done_unlock;
2953
2954                 case HOT_ADD_DISK:
2955                         err = hot_add_disk(mddev, new_decode_dev(arg));
2956                         goto done_unlock;
2957
2958                 case SET_DISK_FAULTY:
2959                         err = set_disk_faulty(mddev, new_decode_dev(arg));
2960                         goto done_unlock;
2961
2962                 case RUN_ARRAY:
2963                         err = do_md_run (mddev);
2964                         goto done_unlock;
2965
2966                 case SET_BITMAP_FILE:
2967                         err = set_bitmap_file(mddev, (int)arg);
2968                         goto done_unlock;
2969
2970                 default:
2971                         if (_IOC_TYPE(cmd) == MD_MAJOR)
2972                                 printk(KERN_WARNING "md: %s(pid %d) used"
2973                                         " obsolete MD ioctl, upgrade your"
2974                                         " software to use new ioctls.\n",
2975                                         current->comm, current->pid);
2976                         err = -EINVAL;
2977                         goto abort_unlock;
2978         }
2979
2980 done_unlock:
2981 abort_unlock:
2982         mddev_unlock(mddev);
2983
2984         return err;
2985 done:
2986         if (err)
2987                 MD_BUG();
2988 abort:
2989         return err;
2990 }
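
/*
 * Illustrative sketch (not part of the driver): exercising two of the
 * read-only ioctls above from userspace.  Assumes an array at /dev/md0;
 * GET_ARRAY_INFO/mdu_array_info_t come from <linux/raid/md_u.h> and
 * HDIO_GETGEO/struct hd_geometry from <linux/hdreg.h>.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>
#include <linux/raid/md_u.h>

int main(void)
{
	mdu_array_info_t info;
	struct hd_geometry geo;
	int fd = open("/dev/md0", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, GET_ARRAY_INFO, &info) == 0)
		printf("raid%d: %d disks, %d active, %d failed, %d spare\n",
		       info.level, info.raid_disks, info.active_disks,
		       info.failed_disks, info.spare_disks);
	if (ioctl(fd, HDIO_GETGEO, &geo) == 0)
		printf("C/H/S = %u/%u/%u\n",
		       geo.cylinders, geo.heads, geo.sectors);
	return 0;
}
#endif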
2991
2992 static int md_open(struct inode *inode, struct file *file)
2993 {
2994         /*
2995          * Succeed if we can lock the mddev, which confirms that
2996          * it isn't being stopped right now.
2997          */
2998         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
2999         int err;
3000
3001         if ((err = mddev_lock(mddev)))
3002                 goto out;
3003
3004         err = 0;
3005         mddev_get(mddev);
3006         mddev_unlock(mddev);
3007
3008         check_disk_change(inode->i_bdev);
3009  out:
3010         return err;
3011 }
3012
3013 static int md_release(struct inode *inode, struct file * file)
3014 {
3015         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
3016
3017         if (!mddev)
3018                 BUG();
3019         mddev_put(mddev);
3020
3021         return 0;
3022 }
3023
3024 static int md_media_changed(struct gendisk *disk)
3025 {
3026         mddev_t *mddev = disk->private_data;
3027
3028         return mddev->changed;
3029 }
3030
3031 static int md_revalidate(struct gendisk *disk)
3032 {
3033         mddev_t *mddev = disk->private_data;
3034
3035         mddev->changed = 0;
3036         return 0;
3037 }
3038 static struct block_device_operations md_fops =
3039 {
3040         .owner          = THIS_MODULE,
3041         .open           = md_open,
3042         .release        = md_release,
3043         .ioctl          = md_ioctl,
3044         .media_changed  = md_media_changed,
3045         .revalidate_disk= md_revalidate,
3046 };
3047
3048 static int md_thread(void * arg)
3049 {
3050         mdk_thread_t *thread = arg;
3051
3052         /*
3053          * md_thread is a 'system-thread', its priority should be very
3054          * high. We avoid resource deadlocks individually in each
3055          * raid personality. (RAID5 does preallocation) We also use RR and
3056          * the very same RT priority as kswapd, thus we will never get
3057          * into a priority inversion deadlock.
3058          *
3059          * we definitely have to have equal or higher priority than
3060          * bdflush, otherwise bdflush will deadlock if there are too
3061          * many dirty RAID5 blocks.
3062          */
3063
3064         complete(thread->event);
3065         while (!kthread_should_stop()) {
3066                 void (*run)(mddev_t *);
3067
3068                 wait_event_interruptible_timeout(thread->wqueue,
3069                                                  test_bit(THREAD_WAKEUP, &thread->flags)
3070                                                  || kthread_should_stop(),
3071                                                  thread->timeout);
3072                 try_to_freeze();
3073
3074                 clear_bit(THREAD_WAKEUP, &thread->flags);
3075
3076                 run = thread->run;
3077                 if (run)
3078                         run(thread->mddev);
3079         }
3080
3081         return 0;
3082 }
3083
3084 void md_wakeup_thread(mdk_thread_t *thread)
3085 {
3086         if (thread) {
3087                 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
3088                 set_bit(THREAD_WAKEUP, &thread->flags);
3089                 wake_up(&thread->wqueue);
3090         }
3091 }
3092
3093 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
3094                                  const char *name)
3095 {
3096         mdk_thread_t *thread;
3097         struct completion event;
3098
3099         thread = kmalloc(sizeof(mdk_thread_t), GFP_KERNEL);
3100         if (!thread)
3101                 return NULL;
3102
3103         memset(thread, 0, sizeof(mdk_thread_t));
3104         init_waitqueue_head(&thread->wqueue);
3105
3106         init_completion(&event);
3107         thread->event = &event;
3108         thread->run = run;
3109         thread->mddev = mddev;
3110         thread->name = name;
3111         thread->timeout = MAX_SCHEDULE_TIMEOUT;
3112         thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
3113         if (IS_ERR(thread->tsk)) {
3114                 kfree(thread);
3115                 return NULL;
3116         }
3117         wait_for_completion(&event);
3118         return thread;
3119 }
3120
3121 void md_unregister_thread(mdk_thread_t *thread)
3122 {
3123         dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
3124
3125         kthread_stop(thread->tsk);
3126         kfree(thread);
3127 }
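
/*
 * Illustrative sketch (not part of the driver): how a personality wires
 * itself to the thread API above.  mypersd and "%s_mypers" are
 * hypothetical stand-ins; md.c itself passes md_do_sync with "%s_resync",
 * and the "%s" expands to mdname(mddev) when the kthread is created.
 */
#if 0
	mddev->thread = md_register_thread(mypersd, mddev, "%s_mypers");
	if (!mddev->thread)
		return -ENOMEM;
	/* later, e.g. from an I/O completion handler: */
	md_wakeup_thread(mddev->thread);
	/* and on shutdown: */
	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
#endif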
3128
3129 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
3130 {
3131         if (!mddev) {
3132                 MD_BUG();
3133                 return;
3134         }
3135
3136         if (!rdev || rdev->faulty)
3137                 return;
3138 /*
3139         dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
3140                 mdname(mddev),
3141                 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
3142                 __builtin_return_address(0),__builtin_return_address(1),
3143                 __builtin_return_address(2),__builtin_return_address(3));
3144 */
3145         if (!mddev->pers->error_handler)
3146                 return;
3147         mddev->pers->error_handler(mddev,rdev);
3148         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3149         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3150         md_wakeup_thread(mddev->thread);
3151 }
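
/*
 * Illustrative sketch (not part of the driver): a personality's I/O
 * completion path reporting a failed member device.  'uptodate' and
 * 'rdev' are hypothetical stand-ins for the personality's per-request
 * state.
 */
#if 0
	if (!uptodate)
		/* marks rdev faulty and wakes the recovery machinery */
		md_error(mddev, rdev);
#endif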
3152
3153 /* seq_file implementation /proc/mdstat */
3154
3155 static void status_unused(struct seq_file *seq)
3156 {
3157         int i = 0;
3158         mdk_rdev_t *rdev;
3159         struct list_head *tmp;
3160
3161         seq_printf(seq, "unused devices: ");
3162
3163         ITERATE_RDEV_PENDING(rdev,tmp) {
3164                 char b[BDEVNAME_SIZE];
3165                 i++;
3166                 seq_printf(seq, "%s ",
3167                               bdevname(rdev->bdev,b));
3168         }
3169         if (!i)
3170                 seq_printf(seq, "<none>");
3171
3172         seq_printf(seq, "\n");
3173 }
3174
3175
3176 static void status_resync(struct seq_file *seq, mddev_t * mddev)
3177 {
3178         unsigned long max_blocks, resync, res, dt, db, rt;
3179
3180         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
3181
3182         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3183                 max_blocks = mddev->resync_max_sectors >> 1;
3184         else
3185                 max_blocks = mddev->size;
3186
3187         /*
3188          * Should not happen.
3189          */
3190         if (!max_blocks) {
3191                 MD_BUG();
3192                 return;
3193         }
3194         res = (resync/1024)*1000/(max_blocks/1024 + 1);
3195         {
3196                 int i, x = res/50, y = 20-x;
3197                 seq_printf(seq, "[");
3198                 for (i = 0; i < x; i++)
3199                         seq_printf(seq, "=");
3200                 seq_printf(seq, ">");
3201                 for (i = 0; i < y; i++)
3202                         seq_printf(seq, ".");
3203                 seq_printf(seq, "] ");
3204         }
3205         seq_printf(seq, " %s =%3lu.%lu%% (%lu/%lu)",
3206                       (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
3207                        "resync" : "recovery"),
3208                       res/10, res % 10, resync, max_blocks);
3209
3210         /*
3211          * We do not want to overflow, so the order of operands and
3212          * the * 100 / 100 trick are important. We do a +1 to be
3213          * safe against division by zero. We only estimate anyway.
3214          *
3215          * dt: time from mark until now
3216          * db: blocks written from mark until now
3217          * rt: remaining time
3218          */
3219         dt = ((jiffies - mddev->resync_mark) / HZ);
3220         if (!dt) dt++;
3221         db = resync - (mddev->resync_mark_cnt/2);
3222         rt = (dt * ((max_blocks-resync) / (db/100+1)))/100;
3223
3224         seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
3225
3226         seq_printf(seq, " speed=%ldK/sec", db/dt);
3227 }
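
/*
 * Worked example of the arithmetic above (illustrative numbers): with
 * max_blocks = 1000000 and resync = 250000, res = (250000/1024)*1000 /
 * (1000000/1024 + 1) = 244000/977 = 249 per-mille, so the bar gets
 * res/50 = 4 '=' characters and the line shows "24.9%".  With dt = 100s
 * and db = 50000 blocks, speed prints as db/dt = 500K/sec and
 * rt = (100 * (750000/501))/100 = 1497s, printed as "24.9min".
 */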
3228
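/*
 * The iterator below hands seq_file two sentinel cookies in addition to
 * real arrays: (void*)1 stands for the "Personalities :" header line and
 * (void*)2 for the trailing "unused devices:" line; anything else is a
 * live mddev that md_seq_start()/md_seq_next() pinned with mddev_get()
 * and that md_seq_next()/md_seq_stop() must drop with mddev_put().
 */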
3229 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
3230 {
3231         struct list_head *tmp;
3232         loff_t l = *pos;
3233         mddev_t *mddev;
3234
3235         if (l >= 0x10000)
3236                 return NULL;
3237         if (!l--)
3238                 /* header */
3239                 return (void*)1;
3240
3241         spin_lock(&all_mddevs_lock);
3242         list_for_each(tmp,&all_mddevs)
3243                 if (!l--) {
3244                         mddev = list_entry(tmp, mddev_t, all_mddevs);
3245                         mddev_get(mddev);
3246                         spin_unlock(&all_mddevs_lock);
3247                         return mddev;
3248                 }
3249         spin_unlock(&all_mddevs_lock);
3250         if (!l--)
3251                 return (void*)2;/* tail */
3252         return NULL;
3253 }
3254
3255 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3256 {
3257         struct list_head *tmp;
3258         mddev_t *next_mddev, *mddev = v;
3259         
3260         ++*pos;
3261         if (v == (void*)2)
3262                 return NULL;
3263
3264         spin_lock(&all_mddevs_lock);
3265         if (v == (void*)1)
3266                 tmp = all_mddevs.next;
3267         else
3268                 tmp = mddev->all_mddevs.next;
3269         if (tmp != &all_mddevs)
3270                 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
3271         else {
3272                 next_mddev = (void*)2;
3273                 *pos = 0x10000;
3274         }               
3275         spin_unlock(&all_mddevs_lock);
3276
3277         if (v != (void*)1)
3278                 mddev_put(mddev);
3279         return next_mddev;
3280
3281 }
3282
3283 static void md_seq_stop(struct seq_file *seq, void *v)
3284 {
3285         mddev_t *mddev = v;
3286
3287         if (mddev && v != (void*)1 && v != (void*)2)
3288                 mddev_put(mddev);
3289 }
3290
3291 static int md_seq_show(struct seq_file *seq, void *v)
3292 {
3293         mddev_t *mddev = v;
3294         sector_t size;
3295         struct list_head *tmp2;
3296         mdk_rdev_t *rdev;
3297         int i;
3298         struct bitmap *bitmap;
3299
3300         if (v == (void*)1) {
3301                 seq_printf(seq, "Personalities : ");
3302                 spin_lock(&pers_lock);
3303                 for (i = 0; i < MAX_PERSONALITY; i++)
3304                         if (pers[i])
3305                                 seq_printf(seq, "[%s] ", pers[i]->name);
3306
3307                 spin_unlock(&pers_lock);
3308                 seq_printf(seq, "\n");
3309                 return 0;
3310         }
3311         if (v == (void*)2) {
3312                 status_unused(seq);
3313                 return 0;
3314         }
3315
3316         if (mddev_lock(mddev)!=0) 
3317                 return -EINTR;
3318         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
3319                 seq_printf(seq, "%s : %sactive", mdname(mddev),
3320                                                 mddev->pers ? "" : "in");
3321                 if (mddev->pers) {
3322                         if (mddev->ro)
3323                                 seq_printf(seq, " (read-only)");
3324                         seq_printf(seq, " %s", mddev->pers->name);
3325                 }
3326
3327                 size = 0;
3328                 ITERATE_RDEV(mddev,rdev,tmp2) {
3329                         char b[BDEVNAME_SIZE];
3330                         seq_printf(seq, " %s[%d]",
3331                                 bdevname(rdev->bdev,b), rdev->desc_nr);
3332                         if (test_bit(WriteMostly, &rdev->flags))
3333                                 seq_printf(seq, "(W)");
3334                         if (rdev->faulty) {
3335                                 seq_printf(seq, "(F)");
3336                                 continue;
3337                         }
3338                         size += rdev->size;
3339                 }
3340
3341                 if (!list_empty(&mddev->disks)) {
3342                         if (mddev->pers)
3343                                 seq_printf(seq, "\n      %llu blocks",
3344                                         (unsigned long long)mddev->array_size);
3345                         else
3346                                 seq_printf(seq, "\n      %llu blocks",
3347                                         (unsigned long long)size);
3348                 }
3349                 if (mddev->persistent) {
3350                         if (mddev->major_version != 0 ||
3351                             mddev->minor_version != 90) {
3352                                 seq_printf(seq," super %d.%d",
3353                                            mddev->major_version,
3354                                            mddev->minor_version);
3355                         }
3356                 } else
3357                         seq_printf(seq, " super non-persistent");
3358
3359                 if (mddev->pers) {
3360                         mddev->pers->status (seq, mddev);
3361                         seq_printf(seq, "\n      ");
3362                         if (mddev->curr_resync > 2) {
3363                                 status_resync (seq, mddev);
3364                                 seq_printf(seq, "\n      ");
3365                         } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
3366                                 seq_printf(seq, "       resync=DELAYED\n      ");
3367                 } else
3368                         seq_printf(seq, "\n       ");
3369
3370                 if ((bitmap = mddev->bitmap)) {
3371                         unsigned long chunk_kb;
3372                         unsigned long flags;
3373                         spin_lock_irqsave(&bitmap->lock, flags);
3374                         chunk_kb = bitmap->chunksize >> 10;
3375                         seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
3376                                 "%lu%s chunk",
3377                                 bitmap->pages - bitmap->missing_pages,
3378                                 bitmap->pages,
3379                                 (bitmap->pages - bitmap->missing_pages)
3380                                         << (PAGE_SHIFT - 10),
3381                                 chunk_kb ? chunk_kb : bitmap->chunksize,
3382                                 chunk_kb ? "KB" : "B");
3383                         if (bitmap->file) {
3384                                 seq_printf(seq, ", file: ");
3385                                 seq_path(seq, bitmap->file->f_vfsmnt,
3386                                          bitmap->file->f_dentry," \t\n");
3387                         }
3388
3389                         seq_printf(seq, "\n");
3390                         spin_unlock_irqrestore(&bitmap->lock, flags);
3391                 }
3392
3393                 seq_printf(seq, "\n");
3394         }
3395         mddev_unlock(mddev);
3396         
3397         return 0;
3398 }
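
/*
 * Illustrative /proc/mdstat entry produced by md_seq_show() (device
 * names and sizes invented).  The "super 1.2" tag only appears for
 * persistent superblocks other than the default 0.90; non-persistent
 * arrays show "super non-persistent" instead:
 *
 *   md0 : active raid1 sdb1[1] sda1[0]
 *         1048512 blocks super 1.2 [2/2] [UU]
 *         bitmap: 3/128 pages [12KB], 4KB chunk
 */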
3399
3400 static struct seq_operations md_seq_ops = {
3401         .start  = md_seq_start,
3402         .next   = md_seq_next,
3403         .stop   = md_seq_stop,
3404         .show   = md_seq_show,
3405 };
3406
3407 static int md_seq_open(struct inode *inode, struct file *file)
3408 {
3409         int error;
3410
3411         error = seq_open(file, &md_seq_ops);
3412         return error;
3413 }
3414
3415 static struct file_operations md_seq_fops = {
3416         .open           = md_seq_open,
3417         .read           = seq_read,
3418         .llseek         = seq_lseek,
3419         .release        = seq_release,
3420 };
3421
3422 int register_md_personality(int pnum, mdk_personality_t *p)
3423 {
3424         if (pnum >= MAX_PERSONALITY) {
3425                 printk(KERN_ERR
3426                        "md: tried to install personality %s as nr %d, but max is %d\n",
3427                        p->name, pnum, MAX_PERSONALITY-1);
3428                 return -EINVAL;
3429         }
3430
3431         spin_lock(&pers_lock);
3432         if (pers[pnum]) {
3433                 spin_unlock(&pers_lock);
3434                 return -EBUSY;
3435         }
3436
3437         pers[pnum] = p;
3438         printk(KERN_INFO "md: %s personality registered as nr %d\n", p->name, pnum);
3439         spin_unlock(&pers_lock);
3440         return 0;
3441 }
3442
3443 int unregister_md_personality(int pnum)
3444 {
3445         if (pnum >= MAX_PERSONALITY || !pers[pnum])
3446                 return -EINVAL;
3447
3448         printk(KERN_INFO "md: %s personality unregistered\n", pers[pnum]->name);
3449         spin_lock(&pers_lock);
3450         pers[pnum] = NULL;
3451         spin_unlock(&pers_lock);
3452         return 0;
3453 }
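
/*
 * Illustrative sketch (not part of the driver): a personality module
 * pairing the two calls above from its init/exit hooks.  RAID1 and
 * raid1_personality are borrowed from drivers/md/raid1.c as an example
 * of the pnum constant and descriptor a personality supplies.
 */
#if 0
static int __init raid_init(void)
{
	return register_md_personality(RAID1, &raid1_personality);
}

static void __exit raid_exit(void)
{
	unregister_md_personality(RAID1);
}
#endif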
3454
3455 static int is_mddev_idle(mddev_t *mddev)
3456 {
3457         mdk_rdev_t * rdev;
3458         struct list_head *tmp;
3459         int idle;
3460         unsigned long curr_events;
3461
3462         idle = 1;
3463         ITERATE_RDEV(mddev,rdev,tmp) {
3464                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
3465                 curr_events = disk_stat_read(disk, read_sectors) + 
3466                                 disk_stat_read(disk, write_sectors) - 
3467                                 atomic_read(&disk->sync_io);
3468                 /* Allow some slack between the values of curr_events and last_events,
3469                  * as there are some uninteresting races.  Note: the following is an
3470                  * unsigned comparison, which treats drift within [-32, 32] sectors as idle.
3471                  */
3472                 if ((curr_events - rdev->last_events + 32) > 64) {
3473                         rdev->last_events = curr_events;
3474                         idle = 0;
3475                 }
3476         }
3477         return idle;
3478 }
3479
3480 void md_done_sync(mddev_t *mddev, int blocks, int ok)
3481 {
3482         /* another "blocks" (512byte) blocks have been synced */
3483         atomic_sub(blocks, &mddev->recovery_active);
3484         wake_up(&mddev->recovery_wait);
3485         if (!ok) {
3486                 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
3487                 md_wakeup_thread(mddev->thread);
3488                 /* stop recovery, signal do_sync */
3489         }
3490 }
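
/*
 * Illustrative sketch (not part of the driver): a personality's resync
 * completion handler crediting the finished sectors back through the
 * call above; 'nr_sectors' and 'uptodate' are hypothetical stand-ins.
 */
#if 0
	md_done_sync(mddev, nr_sectors, uptodate);
#endif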
3491
3492
3493 /* md_write_start(mddev, bi)
3494  * If we need to update some array metadata (e.g. 'active' flag
3495  * in superblock) before writing, schedule a superblock update
3496  * and wait for it to complete.
3497  */
3498 void md_write_start(mddev_t *mddev, struct bio *bi)
3499 {
3500         if (bio_data_dir(bi) != WRITE)
3501                 return;
3502
3503         atomic_inc(&mddev->writes_pending);
3504         if (mddev->in_sync) {
3505                 spin_lock(&mddev->write_lock);
3506                 if (mddev->in_sync) {
3507                         mddev->in_sync = 0;
3508                         mddev->sb_dirty = 1;
3509                         md_wakeup_thread(mddev->thread);
3510                 }
3511                 spin_unlock(&mddev->write_lock);
3512         }
3513         wait_event(mddev->sb_wait, mddev->sb_dirty==0);
3514 }
3515
3516 void md_write_end(mddev_t *mddev)
3517 {
3518         if (atomic_dec_and_test(&mddev->writes_pending)) {
3519                 if (mddev->safemode == 2)
3520                         md_wakeup_thread(mddev->thread);
3521                 else
3522                         mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
3523         }
3524 }
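
/*
 * Illustrative sketch (not part of the driver): a personality's request
 * path bracketing writes with the two calls above, so the superblock is
 * marked active before the first write goes down and safe-mode can mark
 * the array clean once writes drain.  submit_write() is a hypothetical
 * stand-in for the personality's actual I/O submission.
 */
#if 0
	md_write_start(mddev, bio);     /* may block while the sb is written */
	submit_write(mddev, bio);
	/* ... and from the write-completion handler: */
	md_write_end(mddev);
#endif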
3525
3526 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
3527
3528 #define SYNC_MARKS      10
3529 #define SYNC_MARK_STEP  (3*HZ)
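/*
 * Resync speed is averaged over a sliding window of SYNC_MARKS marks
 * taken every SYNC_MARK_STEP, i.e. roughly the last 10 * 3 = 30 seconds.
 */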
3530 static void md_do_sync(mddev_t *mddev)
3531 {
3532         mddev_t *mddev2;
3533         unsigned int currspeed = 0,
3534                  window;
3535         sector_t max_sectors,j, io_sectors;
3536         unsigned long mark[SYNC_MARKS];
3537         sector_t mark_cnt[SYNC_MARKS];
3538         int last_mark,m;
3539         struct list_head *tmp;
3540         sector_t last_check;
3541         int skipped = 0;
3542
3543         /* just in case the thread restarts... */
3544         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
3545                 return;
3546
3547         /* we overload curr_resync somewhat here.
3548          * 0 == not engaged in resync at all
3549          * 2 == checking that there is no conflict with another sync
3550          * 1 == like 2, but have yielded to allow conflicting resync to
3551          *              commence
3552          * other == active in resync - this many blocks
3553          *
3554          * Before starting a resync we must have set curr_resync to
3555          * 2, and then checked that every "conflicting" array has curr_resync
3556          * less than ours.  When we find one that is the same or higher
3557          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
3558          * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
3559          * This will mean we have to start checking from the beginning again.
3560          *
3561          */
3562
3563         do {
3564                 mddev->curr_resync = 2;
3565
3566         try_again:
3567                 if (signal_pending(current)) {
3568                         flush_signals(current);
3569                         goto skip;
3570                 }
3571                 ITERATE_MDDEV(mddev2,tmp) {
3572                         if (mddev2 == mddev)
3573                                 continue;
3574                         if (mddev2->curr_resync && 
3575                             match_mddev_units(mddev,mddev2)) {
3576                                 DEFINE_WAIT(wq);
3577                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
3578                                         /* arbitrarily yield */
3579                                         mddev->curr_resync = 1;
3580                                         wake_up(&resync_wait);
3581                                 }
3582                                 if (mddev > mddev2 && mddev->curr_resync == 1)
3583                                         /* no need to wait here, we can wait the next
3584                                          * time 'round when curr_resync == 2
3585                                          */
3586                                         continue;
3587                                 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
3588                                 if (!signal_pending(current)
3589                                     && mddev2->curr_resync >= mddev->curr_resync) {
3590                                         printk(KERN_INFO "md: delaying resync of %s"
3591                                                " until %s has finished resync (they"
3592                                                " share one or more physical units)\n",
3593                                                mdname(mddev), mdname(mddev2));
3594                                         mddev_put(mddev2);
3595                                         schedule();
3596                                         finish_wait(&resync_wait, &wq);
3597                                         goto try_again;
3598                                 }
3599                                 finish_wait(&resync_wait, &wq);
3600                         }
3601                 }
3602         } while (mddev->curr_resync < 2);
3603
3604         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3605                 /* resync follows the size requested by the personality,
3606                  * which defaults to physical size, but can be virtual size
3607                  */
3608                 max_sectors = mddev->resync_max_sectors;
3609         else
3610                 /* recovery follows the physical size of devices */
3611                 max_sectors = mddev->size << 1;
3612
3613         printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev));
3614         printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
3615                 " %d KB/sec/disc.\n", sysctl_speed_limit_min);
3616         printk(KERN_INFO "md: using maximum available idle IO bandwidth "
3617                "(but not more than %d KB/sec) for reconstruction.\n",
3618                sysctl_speed_limit_max);
3619
3620         is_mddev_idle(mddev); /* this also initializes IO event counters */
3621         /* we don't use the checkpoint if there's a bitmap */
3622         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && !mddev->bitmap)
3623                 j = mddev->recovery_cp;
3624         else
3625                 j = 0;
3626         io_sectors = 0;
3627         for (m = 0; m < SYNC_MARKS; m++) {
3628                 mark[m] = jiffies;
3629                 mark_cnt[m] = io_sectors;
3630         }
3631         last_mark = 0;
3632         mddev->resync_mark = mark[last_mark];
3633         mddev->resync_mark_cnt = mark_cnt[last_mark];
3634
3635         /*
3636          * Tune reconstruction:
3637          */
3638         window = 32*(PAGE_SIZE/512);
3639         printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
3640                 window/2,(unsigned long long) max_sectors/2);
3641
3642         atomic_set(&mddev->recovery_active, 0);
3643         init_waitqueue_head(&mddev->recovery_wait);
3644         last_check = 0;
3645
3646         if (j>2) {
3647                 printk(KERN_INFO 
3648                         "md: resuming recovery of %s from checkpoint.\n",
3649                         mdname(mddev));
3650                 mddev->curr_resync = j;
3651         }
3652
3653         while (j < max_sectors) {
3654                 sector_t sectors;
3655
3656                 skipped = 0;
3657                 sectors = mddev->pers->sync_request(mddev, j, &skipped,
3658                                             currspeed < sysctl_speed_limit_min);
3659                 if (sectors == 0) {
3660                         set_bit(MD_RECOVERY_ERR, &mddev->recovery);
3661                         goto out;
3662                 }
3663
3664                 if (!skipped) { /* actual IO requested */
3665                         io_sectors += sectors;
3666                         atomic_add(sectors, &mddev->recovery_active);
3667                 }
3668
3669                 j += sectors;
3670                 if (j>1) mddev->curr_resync = j;
3671
3672
3673                 if (last_check + window > io_sectors || j == max_sectors)
3674                         continue;
3675
3676                 last_check = io_sectors;
3677
3678                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
3679                     test_bit(MD_RECOVERY_ERR, &mddev->recovery))
3680                         break;
3681
3682         repeat:
3683                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
3684                         /* step marks */
3685                         int next = (last_mark+1) % SYNC_MARKS;
3686
3687                         mddev->resync_mark = mark[next];
3688                         mddev->resync_mark_cnt = mark_cnt[next];
3689                         mark[next] = jiffies;
3690                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
3691                         last_mark = next;
3692                 }
3693
3694
3695                 if (signal_pending(current)) {
3696                         /*
3697                          * got a signal, exit.
3698                          */
3699                         printk(KERN_INFO 
3700                                 "md: md_do_sync() got signal ... exiting\n");
3701                         flush_signals(current);
3702                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3703                         goto out;
3704                 }
3705
3706                 /*
3707                  * this loop exits only when we are slower than the
3708                  * 'hard' speed limit, or the system was IO-idle for
3709                  * a jiffy.
3710                  * the system might be non-idle CPU-wise, but we only care
3711                  * about not overloading the IO subsystem (things like an
3712                  * e2fsck being run on the RAID array should execute fast).
3713                  */
3714                 mddev->queue->unplug_fn(mddev->queue);
3715                 cond_resched();
3716
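		/*
		 * currspeed is KB/sec since the last mark: io_sectors are
		 * 512-byte sectors, so /2 gives KiB; the +1 terms guard
		 * against division by zero and a zero estimate.
		 */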
3717                 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
3718                         /((jiffies-mddev->resync_mark)/HZ +1) +1;
3719
3720                 if (currspeed > sysctl_speed_limit_min) {
3721                         if ((currspeed > sysctl_speed_limit_max) ||
3722                                         !is_mddev_idle(mddev)) {
3723                                 msleep_interruptible(250);
3724                                 goto repeat;
3725                         }
3726                 }
3727         }
3728         printk(KERN_INFO "md: %s: sync done.\n",mdname(mddev));
3729         /*
3730          * this also signals 'finished resyncing' to md_stop
3731          */
3732  out:
3733         mddev->queue->unplug_fn(mddev->queue);
3734
3735         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
3736
3737         /* tell personality that we are finished */
3738         mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
3739
3740         if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
3741             mddev->curr_resync > 2 &&
3742             mddev->curr_resync >= mddev->recovery_cp) {
3743                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
3744                         printk(KERN_INFO 
3745                                 "md: checkpointing recovery of %s.\n",
3746                                 mdname(mddev));
3747                         mddev->recovery_cp = mddev->curr_resync;
3748                 } else
3749                         mddev->recovery_cp = MaxSector;
3750         }
3751
3752  skip:
3753         mddev->curr_resync = 0;
3754         wake_up(&resync_wait);
3755         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
3756         md_wakeup_thread(mddev->thread);
3757 }
3758
3759
3760 /*
3761  * This routine is regularly called by all per-raid-array threads to
3762  * deal with generic issues like resync and super-block update.
3763  * Raid personalities that don't have a thread (linear/raid0) do not
3764  * need this as they never do any recovery or update the superblock.
3765  *
3766  * It does not do any resync itself, but rather "forks" off other threads
3767  * to do that as needed.
3768  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
3769  * "->recovery" and create a thread at ->sync_thread.
3770  * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
3771  * and wakes up this thread, which will reap it and finish up.
3772  * This thread also removes any faulty devices (with nr_pending == 0).
3773  *
3774  * The overall approach is:
3775  *  1/ if the superblock needs updating, update it.
3776  *  2/ If a recovery thread is running, don't do anything else.
3777  *  3/ If recovery has finished, clean up, possibly marking spares active.
3778  *  4/ If there are any faulty devices, remove them.
3779  *  5/ If array is degraded, try to add spare devices
3780  *  6/ If array has spares or is not in-sync, start a resync thread.
3781  */
3782 void md_check_recovery(mddev_t *mddev)
3783 {
3784         mdk_rdev_t *rdev;
3785         struct list_head *rtmp;
3786
3787
3788         if (mddev->bitmap)
3789                 bitmap_daemon_work(mddev->bitmap);
3790
3791         if (mddev->ro)
3792                 return;
3793
3794         if (signal_pending(current)) {
3795                 if (mddev->pers->sync_request) {
3796                         printk(KERN_INFO "md: %s in immediate safe mode\n",
3797                                mdname(mddev));
3798                         mddev->safemode = 2;
3799                 }
3800                 flush_signals(current);
3801         }
3802
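	/*
	 * Bail out quickly unless there is real work: a dirty superblock,
	 * a requested or completed recovery, or a safe-mode transition to
	 * process.
	 */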
3803         if ( ! (
3804                 mddev->sb_dirty ||
3805                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
3806                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
3807                 (mddev->safemode == 1) ||
3808                 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
3809                  && !mddev->in_sync && mddev->recovery_cp == MaxSector)
3810                 ))
3811                 return;
3812
3813         if (mddev_trylock(mddev)==0) {
3814                 int spares =0;
3815
3816                 spin_lock(&mddev->write_lock);
3817                 if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
3818                     !mddev->in_sync && mddev->recovery_cp == MaxSector) {
3819                         mddev->in_sync = 1;
3820                         mddev->sb_dirty = 1;
3821                 }
3822                 if (mddev->safemode == 1)
3823                         mddev->safemode = 0;
3824                 spin_unlock(&mddev->write_lock);
3825
3826                 if (mddev->sb_dirty)
3827                         md_update_sb(mddev);
3828
3829
3830                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
3831                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
3832                         /* resync/recovery still happening */
3833                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3834                         goto unlock;
3835                 }
3836                 if (mddev->sync_thread) {
3837                         /* resync has finished, collect result */
3838                         md_unregister_thread(mddev->sync_thread);
3839                         mddev->sync_thread = NULL;
3840                         if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
3841                             !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
3842                                 /* success...*/
3843                                 /* activate any spares */
3844                                 mddev->pers->spare_active(mddev);
3845                         }
3846                         md_update_sb(mddev);
3847
3848                         /* if array is no-longer degraded, then any saved_raid_disk
3849                          * information must be scrapped
3850                          */
3851                         if (!mddev->degraded)
3852                                 ITERATE_RDEV(mddev,rdev,rtmp)
3853                                         rdev->saved_raid_disk = -1;
3854
3855                         mddev->recovery = 0;
3856                         /* flag recovery needed just to double check */
3857                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3858                         goto unlock;
3859                 }
3860                 if (mddev->recovery)
3861                         /* probably just the RECOVERY_NEEDED flag */
3862                         mddev->recovery = 0;
3863
3864                 /* no recovery is running.
3865                  * remove any failed drives, then
3866                  * add spares if possible.
3867                  * Spares are also removed and re-added, to allow
3868                  * the personality to fail the re-add.
3869                  */
3870                 ITERATE_RDEV(mddev,rdev,rtmp)
3871                         if (rdev->raid_disk >= 0 &&
3872                             (rdev->faulty || ! rdev->in_sync) &&
3873                             atomic_read(&rdev->nr_pending)==0) {
3874                                 if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0)
3875                                         rdev->raid_disk = -1;
3876                         }
3877
3878                 if (mddev->degraded) {
3879                         ITERATE_RDEV(mddev,rdev,rtmp)
3880                                 if (rdev->raid_disk < 0
3881                                     && !rdev->faulty) {
3882                                         if (mddev->pers->hot_add_disk(mddev,rdev))
3883                                                 spares++;
3884                                         else
3885                                                 break;
3886                                 }
3887                 }
3888
3889                 if (!spares && (mddev->recovery_cp == MaxSector )) {
3890                         /* nothing we can do ... */
3891                         goto unlock;
3892                 }
3893                 if (mddev->pers->sync_request) {
3894                         set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3895                         if (!spares)
3896                                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3897                         if (spares && mddev->bitmap && ! mddev->bitmap->file) {
3898                                 /* We are adding a device or devices to an array
3899                                  * which has the bitmap stored on all devices.
3900                                  * So make sure all bitmap pages get written
3901                                  */
3902                                 bitmap_write_all(mddev->bitmap);
3903                         }
3904                         mddev->sync_thread = md_register_thread(md_do_sync,
3905                                                                 mddev,
3906                                                                 "%s_resync");
3907                         if (!mddev->sync_thread) {
3908                                 printk(KERN_ERR "%s: could not start resync"
3909                                         " thread...\n", 
3910                                         mdname(mddev));
3911                                 /* leave the spares where they are, it shouldn't hurt */
3912                                 mddev->recovery = 0;
3913                         } else {
3914                                 md_wakeup_thread(mddev->sync_thread);
3915                         }
3916                 }
3917         unlock:
3918                 mddev_unlock(mddev);
3919         }
3920 }
3921
3922 static int md_notify_reboot(struct notifier_block *this,
3923                             unsigned long code, void *x)
3924 {
3925         struct list_head *tmp;
3926         mddev_t *mddev;
3927
3928         if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
3929
3930                 printk(KERN_INFO "md: stopping all md devices.\n");
3931
3932                 ITERATE_MDDEV(mddev,tmp)
3933                         if (mddev_trylock(mddev)==0)
3934                                 do_md_stop (mddev, 1);
3935                 /*
3936                  * certain more exotic SCSI devices are known to be
3937                  * volatile wrt too early system reboots. While the
3938                  * right place to handle this issue is the given
3939                  * driver, we do want to have a safe RAID driver ...
3940                  */
3941                 mdelay(1000*1);
3942         }
3943         return NOTIFY_DONE;
3944 }
3945
3946 static struct notifier_block md_notifier = {
3947         .notifier_call  = md_notify_reboot,
3948         .next           = NULL,
3949         .priority       = INT_MAX, /* before any real devices */
3950 };
3951
3952 static void md_geninit(void)
3953 {
3954         struct proc_dir_entry *p;
3955
3956         dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
3957
3958         p = create_proc_entry("mdstat", S_IRUGO, NULL);
3959         if (p)
3960                 p->proc_fops = &md_seq_fops;
3961 }
3962
3963 static int __init md_init(void)
3964 {
3965         int minor;
3966
3967         printk(KERN_INFO "md: md driver %d.%d.%d MAX_MD_DEVS=%d,"
3968                         " MD_SB_DISKS=%d\n",
3969                         MD_MAJOR_VERSION, MD_MINOR_VERSION,
3970                         MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS);
3971         printk(KERN_INFO "md: bitmap version %d.%d\n", BITMAP_MAJOR,
3972                         BITMAP_MINOR);
3973
3974         if (register_blkdev(MAJOR_NR, "md"))
3975                 return -1;
3976         if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
3977                 unregister_blkdev(MAJOR_NR, "md");
3978                 return -1;
3979         }
3980         devfs_mk_dir("md");
3981         blk_register_region(MKDEV(MAJOR_NR, 0), MAX_MD_DEVS, THIS_MODULE,
3982                                 md_probe, NULL, NULL);
3983         blk_register_region(MKDEV(mdp_major, 0), MAX_MD_DEVS<<MdpMinorShift, THIS_MODULE,
3984                             md_probe, NULL, NULL);
3985
3986         for (minor=0; minor < MAX_MD_DEVS; ++minor)
3987                 devfs_mk_bdev(MKDEV(MAJOR_NR, minor),
3988                                 S_IFBLK|S_IRUSR|S_IWUSR,
3989                                 "md/%d", minor);
3990
3991         for (minor=0; minor < MAX_MD_DEVS; ++minor)
3992                 devfs_mk_bdev(MKDEV(mdp_major, minor<<MdpMinorShift),
3993                               S_IFBLK|S_IRUSR|S_IWUSR,
3994                               "md/mdp%d", minor);
3995
3996
3997         register_reboot_notifier(&md_notifier);
3998         raid_table_header = register_sysctl_table(raid_root_table, 1);
3999
4000         md_geninit();
4001         return 0;
4002 }
4003
4004
4005 #ifndef MODULE
4006
4007 /*
4008  * Searches all registered partitions for autorun RAID arrays
4009  * at boot time.
4010  */
4011 static dev_t detected_devices[128];
4012 static int dev_cnt;
4013
4014 void md_autodetect_dev(dev_t dev)
4015 {
4016         if (dev_cnt >= 0 && dev_cnt < 128)
4017                 detected_devices[dev_cnt++] = dev;
4018 }
4019
4020
4021 static void autostart_arrays(int part)
4022 {
4023         mdk_rdev_t *rdev;
4024         int i;
4025
4026         printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
4027
4028         for (i = 0; i < dev_cnt; i++) {
4029                 dev_t dev = detected_devices[i];
4030
4031                 rdev = md_import_device(dev,0, 0);
4032                 if (IS_ERR(rdev))
4033                         continue;
4034
4035                 if (rdev->faulty) {
4036                         MD_BUG();
4037                         continue;
4038                 }
4039                 list_add(&rdev->same_set, &pending_raid_disks);
4040         }
4041         dev_cnt = 0;
4042
4043         autorun_devices(part);
4044 }
4045
4046 #endif
4047
4048 static __exit void md_exit(void)
4049 {
4050         mddev_t *mddev;
4051         struct list_head *tmp;
4052         int i;
4053         blk_unregister_region(MKDEV(MAJOR_NR,0), MAX_MD_DEVS);
4054         blk_unregister_region(MKDEV(mdp_major,0), MAX_MD_DEVS << MdpMinorShift);
4055         for (i=0; i < MAX_MD_DEVS; i++)
4056                 devfs_remove("md/%d", i);
4057         for (i=0; i < MAX_MD_DEVS; i++)
4058                 devfs_remove("md/d%d", i);
         for (i=0; i < MAX_MD_DEVS; i++)
                 devfs_remove("md/mdp%d", i);
4059
4060         devfs_remove("md");
4061
4062         unregister_blkdev(MAJOR_NR,"md");
4063         unregister_blkdev(mdp_major, "mdp");
4064         unregister_reboot_notifier(&md_notifier);
4065         unregister_sysctl_table(raid_table_header);
4066         remove_proc_entry("mdstat", NULL);
4067         ITERATE_MDDEV(mddev,tmp) {
4068                 struct gendisk *disk = mddev->gendisk;
4069                 if (!disk)
4070                         continue;
4071                 export_array(mddev);
4072                 del_gendisk(disk);
4073                 put_disk(disk);
4074                 mddev->gendisk = NULL;
4075                 mddev_put(mddev);
4076         }
4077 }
4078
4079 module_init(md_init)
4080 module_exit(md_exit)
4081
4082 EXPORT_SYMBOL(register_md_personality);
4083 EXPORT_SYMBOL(unregister_md_personality);
4084 EXPORT_SYMBOL(md_error);
4085 EXPORT_SYMBOL(md_done_sync);
4086 EXPORT_SYMBOL(md_write_start);
4087 EXPORT_SYMBOL(md_write_end);
4088 EXPORT_SYMBOL(md_register_thread);
4089 EXPORT_SYMBOL(md_unregister_thread);
4090 EXPORT_SYMBOL(md_wakeup_thread);
4091 EXPORT_SYMBOL(md_print_devices);
4092 EXPORT_SYMBOL(md_check_recovery);
4093 MODULE_LICENSE("GPL");
4094 MODULE_ALIAS("md");
4095 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);