/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/module.h>
#include <linux/config.h>
#include <linux/kthread.h>
#include <linux/linkage.h>
#include <linux/raid/md.h>
#include <linux/raid/bitmap.h>
#include <linux/sysctl.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/suspend.h>
#include <linux/poll.h>

#include <linux/init.h>

#include <linux/file.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

#include <asm/unaligned.h>

#define MAJOR_NR MD_MAJOR
#define MD_DRIVER

/* 63 partitions with the alternate major number (mdp) */
#define MdpMinorShift 6

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))


#ifndef MODULE
static void autostart_arrays (int part);
#endif

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_{min,max}
 * or /sys/block/mdX/md/sync_speed_{min,max}.
 */
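/* For example, to tune a running array (a sketch; md0 and the values
 * are stand-ins for whatever array and limits you actually want):
 *
 *	echo 5000   > /proc/sys/dev/raid/speed_limit_min
 *	echo 100000 > /sys/block/md0/md/sync_speed_max
 *
 * The per-array sysfs values, when non-zero, take precedence over the
 * global sysctl limits; see speed_min()/speed_max() below.
 */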

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(mddev_t *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MIN,
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MAX,
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_dir_table[] = {
	{
		.ctl_name	= DEV_RAID,
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_table,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_root_table[] = {
	{
		.ctl_name	= CTL_DEV,
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ .ctl_name = 0 }
};
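/* These three tables chain together to publish
 * /proc/sys/dev/raid/speed_limit_{min,max}: raid_root_table points at
 * raid_dir_table ("raid"), which points at raid_table (the two limit
 * entries). raid_root_table is the one actually registered, via
 * register_sysctl_table() in init code outside this excerpt.
 */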

static struct block_device_operations md_fops;

static int start_readonly;

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
static void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}

/*
 * Enables iteration over all existing md arrays;
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * Iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while still holding
 * a reference to the current mddev must mddev_put it.
 */
#define ITERATE_MDDEV(mddev,tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)
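/* A usage sketch (the variable names are the caller's own):
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	ITERATE_MDDEV(mddev,tmp) {
 *		...		(mddev is pinned by a reference here)
 *	}
 *
 * The macro drops all_mddevs_lock and takes a reference on the current
 * mddev before each execution of the body, releasing it on the next
 * pass, so the body may sleep; md_print_devices() below is a real
 * caller.
 */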


static int md_fail_request (request_queue_t *q, struct bio *bio)
{
	bio_io_error(bio, bio->bi_size);
	return 0;
}

static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_put(mddev_t *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks)) {
		list_del(&mddev->all_mddevs);
		blk_put_queue(mddev->queue);
		kobject_unregister(&mddev->kobj);
	}
	spin_unlock(&all_mddevs_lock);
}

static mddev_t * mddev_find(dev_t unit)
{
	mddev_t *mddev, *new = NULL;

 retry:
	spin_lock(&all_mddevs_lock);
	list_for_each_entry(mddev, &all_mddevs, all_mddevs)
		if (mddev->unit == unit) {
			mddev_get(mddev);
			spin_unlock(&all_mddevs_lock);
			kfree(new);
			return mddev;
		}

	if (new) {
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	init_MUTEX(&new->reconfig_sem);
	INIT_LIST_HEAD(&new->disks);
	INIT_LIST_HEAD(&new->all_mddevs);
	init_timer(&new->safemode_timer);
	atomic_set(&new->active, 1);
	spin_lock_init(&new->write_lock);
	init_waitqueue_head(&new->sb_wait);

	new->queue = blk_alloc_queue(GFP_KERNEL);
	if (!new->queue) {
		kfree(new);
		return NULL;
	}

	blk_queue_make_request(new->queue, md_fail_request);

	goto retry;
}

static inline int mddev_lock(mddev_t * mddev)
{
	return down_interruptible(&mddev->reconfig_sem);
}

static inline void mddev_lock_uninterruptible(mddev_t * mddev)
{
	down(&mddev->reconfig_sem);
}

static inline int mddev_trylock(mddev_t * mddev)
{
	return down_trylock(&mddev->reconfig_sem);
}

static inline void mddev_unlock(mddev_t * mddev)
{
	up(&mddev->reconfig_sem);

	md_wakeup_thread(mddev->thread);
}
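/* A typical reconfiguration path looks like this (a sketch):
 *
 *	if (mddev_lock(mddev))
 *		return -EINTR;		(interrupted while waiting)
 *	...modify array state...
 *	mddev_unlock(mddev);		(also wakes the array's thread)
 *
 * Note that mddev_trylock() keeps down_trylock() semantics: it returns
 * non-zero when the semaphore was NOT acquired.
 */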

static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
	mdk_rdev_t * rdev;
	struct list_head *tmp;

	ITERATE_RDEV(mddev,rdev,tmp) {
		if (rdev->desc_nr == nr)
			return rdev;
	}
	return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	ITERATE_RDEV(mddev,rdev,tmp) {
		if (rdev->bdev->bd_dev == dev)
			return rdev;
	}
	return NULL;
}

static struct mdk_personality *find_pers(int level, char *clevel)
{
	struct mdk_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}

static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
	sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
	return MD_NEW_SIZE_BLOCKS(size);
}

static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
{
	sector_t size;

	size = rdev->sb_offset;

	if (chunk_size)
		size &= ~((sector_t)chunk_size/1024 - 1);
	return size;
}

static int alloc_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -EINVAL;
	}

	return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_offset = 0;
		rdev->size = 0;
	}
}


static int super_written(struct bio *bio, unsigned int bytes_done, int error)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;
	if (bio->bi_size)
		return 1;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags))
		md_error(mddev, rdev);

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
	return 0;
}

static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error)
{
	struct bio *bio2 = bio->bi_private;
	mdk_rdev_t *rdev = bio2->bi_private;
	mddev_t *mddev = rdev->mddev;
	if (bio->bi_size)
		return 1;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
	    error == -EOPNOTSUPP) {
		unsigned long flags;
		/* barriers don't appear to be supported :-( */
		set_bit(BarriersNotsupp, &rdev->flags);
		mddev->barriers_work = 0;
		spin_lock_irqsave(&mddev->write_lock, flags);
		bio2->bi_next = mddev->biolist;
		mddev->biolist = bio2;
		spin_unlock_irqrestore(&mddev->write_lock, flags);
		wake_up(&mddev->sb_wait);
		bio_put(bio);
		return 0;
	}
	bio_put(bio2);
	bio->bi_private = rdev;
	return super_written(bio, bytes_done, error);
}

void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
		   sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 *
	 * As we might need to resubmit the request if BIO_RW_BARRIER
	 * causes ENOTSUPP, we allocate a spare bio...
	 */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);

	bio->bi_bdev = rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;
	bio->bi_rw = rw;

	atomic_inc(&mddev->pending_writes);
	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
		struct bio *rbio;
		rw |= (1<<BIO_RW_BARRIER);
		rbio = bio_clone(bio, GFP_NOIO);
		rbio->bi_private = bio;
		rbio->bi_end_io = super_written_barrier;
		submit_bio(rw, rbio);
	} else
		submit_bio(rw, bio);
}

void md_super_wait(mddev_t *mddev)
{
	/* wait for all superblock writes that were scheduled to complete.
	 * if any had to be retried (due to BARRIER problems), retry them
	 */
	DEFINE_WAIT(wq);
	for(;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		while (mddev->biolist) {
			struct bio *bio;
			spin_lock_irq(&mddev->write_lock);
			bio = mddev->biolist;
			mddev->biolist = bio->bi_next;
			bio->bi_next = NULL;
			spin_unlock_irq(&mddev->write_lock);
			submit_bio(bio->bi_rw, bio);
		}
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}

static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
{
	if (bio->bi_size)
		return 1;

	complete((struct completion*)bio->bi_private);
	return 0;
}

int sync_page_io(struct block_device *bdev, sector_t sector, int size,
		   struct page *page, int rw)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	struct completion event;
	int ret;

	rw |= (1 << BIO_RW_SYNC);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);
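/* sync_page_io() returns 1 on success and 0 on failure, so callers
 * such as read_disk_sb() below treat a zero return as an unreadable
 * device.
 */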

static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}
	if (rdev->sb_loaded)
		return 0;


	if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
		bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	if (	(sb1->set_uuid0 == sb2->set_uuid0) &&
		(sb1->set_uuid1 == sb2->set_uuid1) &&
		(sb1->set_uuid2 == sb2->set_uuid2) &&
		(sb1->set_uuid3 == sb2->set_uuid3))

		return 1;

	return 0;
}


static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
		ret = 0;
	else
		ret = 1;

abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}

static unsigned int calc_sb_csum(mdp_super_t * sb)
{
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	csum = csum_partial((void *)sb, MD_SB_BYTES, 0);
	sb->sb_csum = disk_csum;
	return csum;
}
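/* The 0.90 checksum covers the full MD_SB_BYTES superblock with the
 * sb_csum field temporarily zeroed. Callers compare checksums through
 * csum_fold() (see super_90_load() below) rather than comparing the
 * raw csum_partial() results directly.
 */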


/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
	char		*name;
	struct module	*owner;
	int		(*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
	int		(*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	void		(*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
};

/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;
	sector_t sb_offset;

	/*
	 * Calculate the position of the superblock,
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	sb_offset = calc_dev_sboffset(rdev->bdev);
	rdev->sb_offset = sb_offset;

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = (mdp_super_t*)page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version != 90) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (refdev == 0)
		ret = 1;
	else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->size = calc_dev_size(rdev, sb->chunk_size);

	if (rdev->size < sb->size && sb->level > 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}

/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);

	rdev->raid_disk = -1;
	rdev->flags = 0;
	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->persistent = ! sb->not_persistent;
		mddev->chunk_size = sb->chunk_size;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->size = sb->size;
		mddev->events = md_event(sb);
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = MD_SB_BYTES >> 9;

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
				sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_file == NULL) {
			if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
			    && mddev->level != 10) {
				/* FIXME use a better test */
				printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
				return -EINVAL;
			}
			mddev->bitmap_offset = mddev->default_bitmap_offset;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling */
		__u64 ev1 = md_event(sb);
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		__u64 ev1 = md_event(sb);
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else /* just a hot-add of a new device, leave raid_disk at -1 */
		return 0;

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) &&
			 desc->raid_disk < mddev->raid_disks) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH devices are always in sync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_super_t *sb;
	struct list_head *tmp;
	mdk_rdev_t *rdev2;
	int next_spare = mddev->raid_disks;


	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = (mdp_super_t*)page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->minor_version = mddev->minor_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size  = mddev->size;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = !mddev->persistent;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->in_sync)
	{
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_size;

	if (mddev->bitmap && mddev->bitmap_file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	ITERATE_RDEV(mddev,rdev2,tmp) {
		mdp_disk_t *d;
		int desc_nr;
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags)) {
			d->state = (1<<MD_DISK_FAULTY);
			failed++;
		} else if (test_bit(In_sync, &rdev2->flags)) {
			d->state = (1<<MD_DISK_ACTIVE);
			d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}

/*
 * version 1 superblock
 */

static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	unsigned int disk_csum, csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	unsigned int *isuper = (unsigned int*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(unsigned short*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}
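/* The version-1 checksum covers the 256-byte fixed header plus two
 * bytes of dev_roles[] per device (size = 256 + max_dev*2): the
 * little-endian 32-bit words are summed into a 64-bit accumulator,
 * which is then folded back to 32 bits with an end-around carry.
 */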

static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_offset;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_offset = rdev->bdev->bd_inode->i_size >> 9;
		sb_offset -= 8*2;
		sb_offset &= ~(sector_t)(4*2-1);
		/* convert from sectors to K */
		sb_offset /= 2;
		break;
	case 1:
		sb_offset = 0;
		break;
	case 2:
		sb_offset = 4;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_offset = sb_offset;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;


	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask)+1;

	if (refdev == 0)
		ret = 1;
	else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb =
			(struct mdp_superblock_1*)page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version)
		rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
	else
		rdev->size = rdev->sb_offset;
	if (rdev->size < le64_to_cpu(sb->data_size)/2)
		return -EINVAL;
	rdev->size = le64_to_cpu(sb->data_size)/2;
	if (le32_to_cpu(sb->chunksize))
		rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);

	if (le32_to_cpu(sb->size) > rdev->size*2)
		return -EINVAL;
	return ret;
}

static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	rdev->raid_disk = -1;
	rdev->flags = 0;
	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->persistent = 1;
		mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->size = le64_to_cpu(sb->size)/2;
		mddev->events = le64_to_cpu(sb->events);
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = 1024 >> 9;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_file == NULL ) {
			if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
			    && mddev->level != 10) {
				printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
				return -EINVAL;
			}
			mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
		}
	} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling */
		__u64 ev1 = le64_to_cpu(sb->events);
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		__u64 ev1 = le64_to_cpu(sb->events);
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else /* just a hot-add of a new device, leave raid_disk at -1 */
		return 0;

	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		rdev->desc_nr = le32_to_cpu(sb->dev_number);
		role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH devices are always in sync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}

static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb;
	struct list_head *tmp;
	mdk_rdev_t *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad2, 0, sizeof(sb->pad2));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = atomic_read(&rdev->corrected_errors);

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->size<<1);

	if (mddev->bitmap && mddev->bitmap_file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	max_dev = 0;
	ITERATE_RDEV(mddev,rdev2,tmp)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	sb->max_dev = cpu_to_le32(max_dev);
	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	ITERATE_RDEV(mddev,rdev2,tmp) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->recovery_offset = cpu_to_le64(0); /* not supported yet */
	sb->sb_csum = calc_sb_1_csum(sb);
}


static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	= super_90_load,
		.validate_super	= super_90_validate,
		.sync_super	= super_90_sync,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	= super_1_load,
		.validate_super	= super_1_validate,
		.sync_super	= super_1_sync,
	},
};
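/* super_types[] is indexed by mddev->major_version: slot 0 holds the
 * 0.90.0 handlers and slot 1 the version-1 handlers. sync_sbs() below
 * dispatches through this table.
 */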

static mdk_rdev_t * match_dev_unit(mddev_t *mddev, mdk_rdev_t *dev)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	ITERATE_RDEV(mddev,rdev,tmp)
		if (rdev->bdev->bd_contains == dev->bdev->bd_contains)
			return rdev;

	return NULL;
}

static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	ITERATE_RDEV(mddev1,rdev,tmp)
		if (match_dev_unit(mddev2, rdev))
			return 1;

	return 0;
}

static LIST_HEAD(pending_raid_disks);

static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
	mdk_rdev_t *same_pdev;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	struct kobject *ko;
	char *s;

	if (rdev->mddev) {
		MD_BUG();
		return -EINVAL;
	}
	/* make sure rdev->size exceeds mddev->size */
	if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
		if (mddev->pers)
			/* Cannot change size, so fail */
			return -ENOSPC;
		else
			mddev->size = rdev->size;
	}
	same_pdev = match_dev_unit(mddev, rdev);
	if (same_pdev)
		printk(KERN_WARNING
			"%s: WARNING: %s appears to be on the same physical"
			" disk as %s. True\n     protection against single-disk"
			" failure might be compromised.\n",
			mdname(mddev), bdevname(rdev->bdev,b),
			bdevname(same_pdev->bdev,b2));

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers) choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
	bdevname(rdev->bdev,b);
	if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0)
		return -ENOMEM;
	while ( (s=strchr(rdev->kobj.k_name, '/')) != NULL)
		*s = '!';

	list_add(&rdev->same_set, &mddev->disks);
	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	rdev->kobj.parent = &mddev->kobj;
	kobject_add(&rdev->kobj);

	if (rdev->bdev->bd_part)
		ko = &rdev->bdev->bd_part->kobj;
	else
		ko = &rdev->bdev->bd_disk->kobj;
	sysfs_create_link(&rdev->kobj, ko, "block");
	return 0;
}
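/* On success the rdev sits on mddev->disks and appears in sysfs as
 * .../mdX/md/dev-<bdevname> with a "block" symlink to the underlying
 * device; any '/' in a device name is rewritten to '!' first.
 */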
1303
1304 static void unbind_rdev_from_array(mdk_rdev_t * rdev)
1305 {
1306         char b[BDEVNAME_SIZE];
1307         if (!rdev->mddev) {
1308                 MD_BUG();
1309                 return;
1310         }
1311         list_del_init(&rdev->same_set);
1312         printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
1313         rdev->mddev = NULL;
1314         sysfs_remove_link(&rdev->kobj, "block");
1315         kobject_del(&rdev->kobj);
1316 }
1317
1318 /*
1319  * prevent the device from being mounted, repartitioned or
1320  * otherwise reused by a RAID array (or any other kernel
1321  * subsystem), by bd_claiming the device.
1322  */
1323 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
1324 {
1325         int err = 0;
1326         struct block_device *bdev;
1327         char b[BDEVNAME_SIZE];
1328
1329         bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
1330         if (IS_ERR(bdev)) {
1331                 printk(KERN_ERR "md: could not open %s.\n",
1332                         __bdevname(dev, b));
1333                 return PTR_ERR(bdev);
1334         }
1335         err = bd_claim(bdev, rdev);
1336         if (err) {
1337                 printk(KERN_ERR "md: could not bd_claim %s.\n",
1338                         bdevname(bdev, b));
1339                 blkdev_put(bdev);
1340                 return err;
1341         }
1342         rdev->bdev = bdev;
1343         return err;
1344 }
1345
1346 static void unlock_rdev(mdk_rdev_t *rdev)
1347 {
1348         struct block_device *bdev = rdev->bdev;
1349         rdev->bdev = NULL;
1350         if (!bdev)
1351                 MD_BUG();
1352         bd_release(bdev);
1353         blkdev_put(bdev);
1354 }
1355
1356 void md_autodetect_dev(dev_t dev);
1357
1358 static void export_rdev(mdk_rdev_t * rdev)
1359 {
1360         char b[BDEVNAME_SIZE];
1361         printk(KERN_INFO "md: export_rdev(%s)\n",
1362                 bdevname(rdev->bdev,b));
1363         if (rdev->mddev)
1364                 MD_BUG();
1365         free_disk_sb(rdev);
1366         list_del_init(&rdev->same_set);
1367 #ifndef MODULE
1368         md_autodetect_dev(rdev->bdev->bd_dev);
1369 #endif
1370         unlock_rdev(rdev);
1371         kobject_put(&rdev->kobj);
1372 }
1373
1374 static void kick_rdev_from_array(mdk_rdev_t * rdev)
1375 {
1376         unbind_rdev_from_array(rdev);
1377         export_rdev(rdev);
1378 }
1379
1380 static void export_array(mddev_t *mddev)
1381 {
1382         struct list_head *tmp;
1383         mdk_rdev_t *rdev;
1384
1385         ITERATE_RDEV(mddev,rdev,tmp) {
1386                 if (!rdev->mddev) {
1387                         MD_BUG();
1388                         continue;
1389                 }
1390                 kick_rdev_from_array(rdev);
1391         }
1392         if (!list_empty(&mddev->disks))
1393                 MD_BUG();
1394         mddev->raid_disks = 0;
1395         mddev->major_version = 0;
1396 }
1397
1398 static void print_desc(mdp_disk_t *desc)
1399 {
1400         printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
1401                 desc->major,desc->minor,desc->raid_disk,desc->state);
1402 }
1403
1404 static void print_sb(mdp_super_t *sb)
1405 {
1406         int i;
1407
1408         printk(KERN_INFO 
1409                 "md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
1410                 sb->major_version, sb->minor_version, sb->patch_version,
1411                 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
1412                 sb->ctime);
1413         printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
1414                 sb->level, sb->size, sb->nr_disks, sb->raid_disks,
1415                 sb->md_minor, sb->layout, sb->chunk_size);
1416         printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
1417                 " FD:%d SD:%d CSUM:%08x E:%08lx\n",
1418                 sb->utime, sb->state, sb->active_disks, sb->working_disks,
1419                 sb->failed_disks, sb->spare_disks,
1420                 sb->sb_csum, (unsigned long)sb->events_lo);
1421
1422         printk(KERN_INFO);
1423         for (i = 0; i < MD_SB_DISKS; i++) {
1424                 mdp_disk_t *desc;
1425
1426                 desc = sb->disks + i;
1427                 if (desc->number || desc->major || desc->minor ||
1428                     desc->raid_disk || (desc->state && (desc->state != 4))) {
1429                         printk("     D %2d: ", i);
1430                         print_desc(desc);
1431                 }
1432         }
1433         printk(KERN_INFO "md:     THIS: ");
1434         print_desc(&sb->this_disk);
1435
1436 }
1437
1438 static void print_rdev(mdk_rdev_t *rdev)
1439 {
1440         char b[BDEVNAME_SIZE];
1441         printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
1442                 bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
1443                 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
1444                 rdev->desc_nr);
1445         if (rdev->sb_loaded) {
1446                 printk(KERN_INFO "md: rdev superblock:\n");
1447                 print_sb((mdp_super_t*)page_address(rdev->sb_page));
1448         } else
1449                 printk(KERN_INFO "md: no rdev superblock!\n");
1450 }
1451
1452 void md_print_devices(void)
1453 {
1454         struct list_head *tmp, *tmp2;
1455         mdk_rdev_t *rdev;
1456         mddev_t *mddev;
1457         char b[BDEVNAME_SIZE];
1458
1459         printk("\n");
1460         printk("md:     **********************************\n");
1461         printk("md:     * <COMPLETE RAID STATE PRINTOUT> *\n");
1462         printk("md:     **********************************\n");
1463         ITERATE_MDDEV(mddev,tmp) {
1464
1465                 if (mddev->bitmap)
1466                         bitmap_print_sb(mddev->bitmap);
1467                 else
1468                         printk("%s: ", mdname(mddev));
1469                 ITERATE_RDEV(mddev,rdev,tmp2)
1470                         printk("<%s>", bdevname(rdev->bdev,b));
1471                 printk("\n");
1472
1473                 ITERATE_RDEV(mddev,rdev,tmp2)
1474                         print_rdev(rdev);
1475         }
1476         printk("md:     **********************************\n");
1477         printk("\n");
1478 }
1479
1480
1481 static void sync_sbs(mddev_t * mddev)
1482 {
1483         mdk_rdev_t *rdev;
1484         struct list_head *tmp;
1485
1486         ITERATE_RDEV(mddev,rdev,tmp) {
1487                 super_types[mddev->major_version].
1488                         sync_super(mddev, rdev);
1489                 rdev->sb_loaded = 1;
1490         }
1491 }
1492
1493 static void md_update_sb(mddev_t * mddev)
1494 {
1495         int err;
1496         struct list_head *tmp;
1497         mdk_rdev_t *rdev;
1498         int sync_req;
1499
1500 repeat:
1501         spin_lock_irq(&mddev->write_lock);
1502         sync_req = mddev->in_sync;
1503         mddev->utime = get_seconds();
1504         mddev->events ++;
1505
1506         if (!mddev->events) {
1507                 /*
1508                  * oops, this 64-bit counter should never wrap.
1509                  * Either we are in around ~1 trillion A.C., assuming
1510                  * 1 reboot per second, or we have a bug:
1511                  */
1512                 MD_BUG();
1513                 mddev->events --;
1514         }
1515         mddev->sb_dirty = 2;
1516         sync_sbs(mddev);
1517
1518         /*
1519          * do not write anything to disk if using
1520          * nonpersistent superblocks
1521          */
1522         if (!mddev->persistent) {
1523                 mddev->sb_dirty = 0;
1524                 spin_unlock_irq(&mddev->write_lock);
1525                 wake_up(&mddev->sb_wait);
1526                 return;
1527         }
1528         spin_unlock_irq(&mddev->write_lock);
1529
1530         dprintk(KERN_INFO 
1531                 "md: updating %s RAID superblock on device (in sync %d)\n",
1532                 mdname(mddev),mddev->in_sync);
1533
1534         err = bitmap_update_sb(mddev->bitmap);
1535         ITERATE_RDEV(mddev,rdev,tmp) {
1536                 char b[BDEVNAME_SIZE];
1537                 dprintk(KERN_INFO "md: ");
1538                 if (test_bit(Faulty, &rdev->flags))
1539                         dprintk("(skipping faulty ");
1540
1541                 dprintk("%s ", bdevname(rdev->bdev,b));
1542                 if (!test_bit(Faulty, &rdev->flags)) {
1543                         md_super_write(mddev,rdev,
1544                                        rdev->sb_offset<<1, rdev->sb_size,
1545                                        rdev->sb_page);
1546                         dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
1547                                 bdevname(rdev->bdev,b),
1548                                 (unsigned long long)rdev->sb_offset);
1549
1550                 } else
1551                         dprintk(")\n");
1552                 if (mddev->level == LEVEL_MULTIPATH)
1553                         /* only need to write one superblock... */
1554                         break;
1555         }
1556         md_super_wait(mddev);
1557         /* if there was a failure, sb_dirty was set to 1, and we re-write super */
1558
1559         spin_lock_irq(&mddev->write_lock);
1560         if (mddev->in_sync != sync_req || mddev->sb_dirty == 1) {
1561                 /* have to write it out again */
1562                 spin_unlock_irq(&mddev->write_lock);
1563                 goto repeat;
1564         }
1565         mddev->sb_dirty = 0;
1566         spin_unlock_irq(&mddev->write_lock);
1567         wake_up(&mddev->sb_wait);
1568
1569 }
1570
1571 /* words written to sysfs files may, or may not, be \n terminated.
1572  * We want to accept either case. For this we use cmd_match.
1573  */
1574 static int cmd_match(const char *cmd, const char *str)
1575 {
1576         /* See if cmd, written into a sysfs file, matches
1577          * str.  They must either be the same, or cmd can
1578          * have a trailing newline
1579          */
1580         while (*cmd && *str && *cmd == *str) {
1581                 cmd++;
1582                 str++;
1583         }
1584         if (*cmd == '\n')
1585                 cmd++;
1586         if (*str || *cmd)
1587                 return 0;
1588         return 1;
1589 }
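/*
 * Examples: cmd_match("check\n", "check") and cmd_match("check", "check")
 * both return 1; cmd_match("checkpoint", "check") returns 0.
 */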
1590
1591 struct rdev_sysfs_entry {
1592         struct attribute attr;
1593         ssize_t (*show)(mdk_rdev_t *, char *);
1594         ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
1595 };
1596
1597 static ssize_t
1598 state_show(mdk_rdev_t *rdev, char *page)
1599 {
1600         char *sep = "";
1601         int len=0;
1602
1603         if (test_bit(Faulty, &rdev->flags)) {
1604                 len+= sprintf(page+len, "%sfaulty",sep);
1605                 sep = ",";
1606         }
1607         if (test_bit(In_sync, &rdev->flags)) {
1608                 len += sprintf(page+len, "%sin_sync",sep);
1609                 sep = ",";
1610         }
1611         if (!test_bit(Faulty, &rdev->flags) &&
1612             !test_bit(In_sync, &rdev->flags)) {
1613                 len += sprintf(page+len, "%sspare", sep);
1614                 sep = ",";
1615         }
1616         return len+sprintf(page+len, "\n");
1617 }
1618
1619 static struct rdev_sysfs_entry
1620 rdev_state = __ATTR_RO(state);
1621
1622 static ssize_t
1623 super_show(mdk_rdev_t *rdev, char *page)
1624 {
1625         if (rdev->sb_loaded && rdev->sb_size) {
1626                 memcpy(page, page_address(rdev->sb_page), rdev->sb_size);
1627                 return rdev->sb_size;
1628         } else
1629                 return 0;
1630 }
1631 static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super);
1632
1633 static ssize_t
1634 errors_show(mdk_rdev_t *rdev, char *page)
1635 {
1636         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
1637 }
1638
1639 static ssize_t
1640 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1641 {
1642         char *e;
1643         unsigned long n = simple_strtoul(buf, &e, 10);
1644         if (*buf && (*e == 0 || *e == '\n')) {
1645                 atomic_set(&rdev->corrected_errors, n);
1646                 return len;
1647         }
1648         return -EINVAL;
1649 }
1650 static struct rdev_sysfs_entry rdev_errors =
1651 __ATTR(errors, 0644, errors_show, errors_store);
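/*
 * Example (illustrative path): reset the corrected-error count with
 *   echo 0 > /sys/block/md0/md/dev-sda1/errors
 */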
1652
1653 static ssize_t
1654 slot_show(mdk_rdev_t *rdev, char *page)
1655 {
1656         if (rdev->raid_disk < 0)
1657                 return sprintf(page, "none\n");
1658         else
1659                 return sprintf(page, "%d\n", rdev->raid_disk);
1660 }
1661
1662 static ssize_t
1663 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1664 {
1665         char *e;
1666         int slot = simple_strtoul(buf, &e, 10);
1667         if (strncmp(buf, "none", 4)==0)
1668                 slot = -1;
1669         else if (e==buf || (*e && *e!= '\n'))
1670                 return -EINVAL;
1671         if (rdev->mddev->pers)
1672                 /* Cannot set slot in active array (yet) */
1673                 return -EBUSY;
1674         if (slot >= rdev->mddev->raid_disks)
1675                 return -ENOSPC;
1676         rdev->raid_disk = slot;
1677         /* assume it is working */
1678         rdev->flags = 0;
1679         set_bit(In_sync, &rdev->flags);
1680         return len;
1681 }
1682
1683
1684 static struct rdev_sysfs_entry rdev_slot =
1685 __ATTR(slot, 0644, slot_show, slot_store);
1686
1687 static ssize_t
1688 offset_show(mdk_rdev_t *rdev, char *page)
1689 {
1690         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
1691 }
1692
1693 static ssize_t
1694 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1695 {
1696         char *e;
1697         unsigned long long offset = simple_strtoull(buf, &e, 10);
1698         if (e==buf || (*e && *e != '\n'))
1699                 return -EINVAL;
1700         if (rdev->mddev->pers)
1701                 return -EBUSY;
1702         rdev->data_offset = offset;
1703         return len;
1704 }
1705
1706 static struct rdev_sysfs_entry rdev_offset =
1707 __ATTR(offset, 0644, offset_show, offset_store);
1708
1709 static ssize_t
1710 rdev_size_show(mdk_rdev_t *rdev, char *page)
1711 {
1712         return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
1713 }
1714
1715 static ssize_t
1716 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1717 {
1718         char *e;
1719         unsigned long long size = simple_strtoull(buf, &e, 10);
1720         if (e==buf || (*e && *e != '\n'))
1721                 return -EINVAL;
1722         if (rdev->mddev->pers)
1723                 return -EBUSY;
1724         rdev->size = size;
1725         if (size < rdev->mddev->size || rdev->mddev->size == 0)
1726                 rdev->mddev->size = size;
1727         return len;
1728 }
1729
1730 static struct rdev_sysfs_entry rdev_size =
1731 __ATTR(size, 0644, rdev_size_show, rdev_size_store);
1732
1733 static struct attribute *rdev_default_attrs[] = {
1734         &rdev_state.attr,
1735         &rdev_super.attr,
1736         &rdev_errors.attr,
1737         &rdev_slot.attr,
1738         &rdev_offset.attr,
1739         &rdev_size.attr,
1740         NULL,
1741 };
1742 static ssize_t
1743 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
1744 {
1745         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
1746         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
1747
1748         if (!entry->show)
1749                 return -EIO;
1750         return entry->show(rdev, page);
1751 }
1752
1753 static ssize_t
1754 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
1755               const char *page, size_t length)
1756 {
1757         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
1758         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
1759
1760         if (!entry->store)
1761                 return -EIO;
1762         return entry->store(rdev, page, length);
1763 }
1764
1765 static void rdev_free(struct kobject *ko)
1766 {
1767         mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
1768         kfree(rdev);
1769 }
1770 static struct sysfs_ops rdev_sysfs_ops = {
1771         .show           = rdev_attr_show,
1772         .store          = rdev_attr_store,
1773 };
1774 static struct kobj_type rdev_ktype = {
1775         .release        = rdev_free,
1776         .sysfs_ops      = &rdev_sysfs_ops,
1777         .default_attrs  = rdev_default_attrs,
1778 };
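/*
 * Once rdev->kobj is registered, the rdev must be released through
 * kobject_put(); rdev_free() then runs when the last reference drops.
 */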
1779
1780 /*
1781  * Import a device. If 'super_format' >= 0, then sanity check the superblock
1782  *
1783  * mark the device faulty if:
1784  *
1785  *   - the device is nonexistent (zero size)
1786  *   - the device has no valid superblock
1787  *
1788  * a faulty rdev _never_ has its superblock loaded.
1789  */
1790 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
1791 {
1792         char b[BDEVNAME_SIZE];
1793         int err;
1794         mdk_rdev_t *rdev;
1795         sector_t size;
1796
1797         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
1798         if (!rdev) {
1799                 printk(KERN_ERR "md: could not alloc mem for new device!\n");
1800                 return ERR_PTR(-ENOMEM);
1801         }
1802
1803         if ((err = alloc_disk_sb(rdev)))
1804                 goto abort_free;
1805
1806         err = lock_rdev(rdev, newdev);
1807         if (err)
1808                 goto abort_free;
1809
1810         rdev->kobj.parent = NULL;
1811         rdev->kobj.ktype = &rdev_ktype;
1812         kobject_init(&rdev->kobj);
1813
1814         rdev->desc_nr = -1;
1815         rdev->flags = 0;
1816         rdev->data_offset = 0;
1817         atomic_set(&rdev->nr_pending, 0);
1818         atomic_set(&rdev->read_errors, 0);
1819         atomic_set(&rdev->corrected_errors, 0);
1820
1821         size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
1822         if (!size) {
1823                 printk(KERN_WARNING 
1824                         "md: %s has zero or unknown size, marking faulty!\n",
1825                         bdevname(rdev->bdev,b));
1826                 err = -EINVAL;
1827                 goto abort_free;
1828         }
1829
1830         if (super_format >= 0) {
1831                 err = super_types[super_format].
1832                         load_super(rdev, NULL, super_minor);
1833                 if (err == -EINVAL) {
1834                         printk(KERN_WARNING 
1835                                 "md: %s has invalid sb, not importing!\n",
1836                                 bdevname(rdev->bdev,b));
1837                         goto abort_free;
1838                 }
1839                 if (err < 0) {
1840                         printk(KERN_WARNING 
1841                                 "md: could not read %s's sb, not importing!\n",
1842                                 bdevname(rdev->bdev,b));
1843                         goto abort_free;
1844                 }
1845         }
1846         INIT_LIST_HEAD(&rdev->same_set);
1847
1848         return rdev;
1849
1850 abort_free:
1851         if (rdev->sb_page) {
1852                 if (rdev->bdev)
1853                         unlock_rdev(rdev);
1854                 free_disk_sb(rdev);
1855         }
1856         kfree(rdev);
1857         return ERR_PTR(err);
1858 }
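/*
 * Returns ERR_PTR() on failure, so callers must test with IS_ERR().
 * On success the rdev holds its block device (via lock_rdev) but is
 * not yet bound to any array.
 */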
1859
1860 /*
1861  * Check a full RAID array for plausibility
1862  */
1863
1864
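/*
 * load_super() returns 1 if the rdev is fresher than the reference (or
 * no reference was given), 0 if it is not fresher, and a negative errno
 * if its superblock is invalid.
 */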
1865 static void analyze_sbs(mddev_t * mddev)
1866 {
1867         int i;
1868         struct list_head *tmp;
1869         mdk_rdev_t *rdev, *freshest;
1870         char b[BDEVNAME_SIZE];
1871
1872         freshest = NULL;
1873         ITERATE_RDEV(mddev,rdev,tmp)
1874                 switch (super_types[mddev->major_version].
1875                         load_super(rdev, freshest, mddev->minor_version)) {
1876                 case 1:
1877                         freshest = rdev;
1878                         break;
1879                 case 0:
1880                         break;
1881                 default:
1882                         printk(KERN_ERR
1883                                 "md: fatal superblock inconsistency in %s"
1884                                 " -- removing from array\n", 
1885                                 bdevname(rdev->bdev,b));
1886                         kick_rdev_from_array(rdev);
1887                 }
1888
1889
1890         super_types[mddev->major_version].
1891                 validate_super(mddev, freshest);
1892
1893         i = 0;
1894         ITERATE_RDEV(mddev,rdev,tmp) {
1895                 if (rdev != freshest)
1896                         if (super_types[mddev->major_version].
1897                             validate_super(mddev, rdev)) {
1898                                 printk(KERN_WARNING "md: kicking non-fresh %s"
1899                                         " from array!\n",
1900                                         bdevname(rdev->bdev,b));
1901                                 kick_rdev_from_array(rdev);
1902                                 continue;
1903                         }
1904                 if (mddev->level == LEVEL_MULTIPATH) {
1905                         rdev->desc_nr = i++;
1906                         rdev->raid_disk = rdev->desc_nr;
1907                         set_bit(In_sync, &rdev->flags);
1908                 }
1909         }
1910
1911
1912
1913         if (mddev->recovery_cp != MaxSector &&
1914             mddev->level >= 1)
1915                 printk(KERN_ERR "md: %s: raid array is not clean"
1916                        " -- starting background reconstruction\n",
1917                        mdname(mddev));
1918
1919 }
1920
1921 static ssize_t
1922 level_show(mddev_t *mddev, char *page)
1923 {
1924         struct mdk_personality *p = mddev->pers;
1925         if (p)
1926                 return sprintf(page, "%s\n", p->name);
1927         else if (mddev->clevel[0])
1928                 return sprintf(page, "%s\n", mddev->clevel);
1929         else if (mddev->level != LEVEL_NONE)
1930                 return sprintf(page, "%d\n", mddev->level);
1931         else
1932                 return 0;
1933 }
1934
1935 static ssize_t
1936 level_store(mddev_t *mddev, const char *buf, size_t len)
1937 {
1938         int rv = len;
1939         if (mddev->pers)
1940                 return -EBUSY;
1941         if (len == 0)
1942                 return 0;
1943         if (len >= sizeof(mddev->clevel))
1944                 return -ENOSPC;
1945         strncpy(mddev->clevel, buf, len);
1946         if (mddev->clevel[len-1] == '\n')
1947                 len--;
1948         mddev->clevel[len] = 0;
1949         mddev->level = LEVEL_NONE;
1950         return rv;
1951 }
1952
1953 static struct md_sysfs_entry md_level =
1954 __ATTR(level, 0644, level_show, level_store);
1955
1956 static ssize_t
1957 raid_disks_show(mddev_t *mddev, char *page)
1958 {
1959         if (mddev->raid_disks == 0)
1960                 return 0;
1961         return sprintf(page, "%d\n", mddev->raid_disks);
1962 }
1963
1964 static int update_raid_disks(mddev_t *mddev, int raid_disks);
1965
1966 static ssize_t
1967 raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
1968 {
1969         /* can only set raid_disks if array is not yet active */
1970         char *e;
1971         int rv = 0;
1972         unsigned long n = simple_strtoul(buf, &e, 10);
1973
1974         if (!*buf || (*e && *e != '\n'))
1975                 return -EINVAL;
1976
1977         if (mddev->pers)
1978                 rv = update_raid_disks(mddev, n);
1979         else
1980                 mddev->raid_disks = n;
1981         return rv ? rv : len;
1982 }
1983 static struct md_sysfs_entry md_raid_disks =
1984 __ATTR(raid_disks, 0644, raid_disks_show, raid_disks_store);
1985
1986 static ssize_t
1987 chunk_size_show(mddev_t *mddev, char *page)
1988 {
1989         return sprintf(page, "%d\n", mddev->chunk_size);
1990 }
1991
1992 static ssize_t
1993 chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
1994 {
1995         /* can only set chunk_size if array is not yet active */
1996         char *e;
1997         unsigned long n = simple_strtoul(buf, &e, 10);
1998
1999         if (mddev->pers)
2000                 return -EBUSY;
2001         if (!*buf || (*e && *e != '\n'))
2002                 return -EINVAL;
2003
2004         mddev->chunk_size = n;
2005         return len;
2006 }
2007 static struct md_sysfs_entry md_chunk_size =
2008 __ATTR(chunk_size, 0644, chunk_size_show, chunk_size_store);
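/*
 * Note: chunk_size is in bytes.  It is only validated (power of two,
 * >= PAGE_SIZE, <= MAX_CHUNK_SIZE) when the array is started by
 * do_md_run(), not here.
 */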
2009
2010 static ssize_t
2011 null_show(mddev_t *mddev, char *page)
2012 {
2013         return -EINVAL;
2014 }
2015
2016 static ssize_t
2017 new_dev_store(mddev_t *mddev, const char *buf, size_t len)
2018 {
2019         /* buf must be "%d:%d" (optionally "\n"-terminated), giving major and minor numbers */
2020         /* The new device is added to the array.
2021          * If the array has a persistent superblock, we read the
2022          * superblock to initialise info and check validity.
2023          * Otherwise, only checking done is that in bind_rdev_to_array,
2024          * which mainly checks size.
2025          */
2026         char *e;
2027         int major = simple_strtoul(buf, &e, 10);
2028         int minor;
2029         dev_t dev;
2030         mdk_rdev_t *rdev;
2031         int err;
2032
2033         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
2034                 return -EINVAL;
2035         minor = simple_strtoul(e+1, &e, 10);
2036         if (*e && *e != '\n')
2037                 return -EINVAL;
2038         dev = MKDEV(major, minor);
2039         if (major != MAJOR(dev) ||
2040             minor != MINOR(dev))
2041                 return -EOVERFLOW;
2042
2043
2044         if (mddev->persistent) {
2045                 rdev = md_import_device(dev, mddev->major_version,
2046                                         mddev->minor_version);
2047                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
2048                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2049                                                        mdk_rdev_t, same_set);
2050                         err = super_types[mddev->major_version]
2051                                 .load_super(rdev, rdev0, mddev->minor_version);
2052                         if (err < 0)
2053                                 goto out;
2054                 }
2055         } else
2056                 rdev = md_import_device(dev, -1, -1);
2057
2058         if (IS_ERR(rdev))
2059                 return PTR_ERR(rdev);
2060         err = bind_rdev_to_array(rdev, mddev);
2061  out:
2062         if (err)
2063                 export_rdev(rdev);
2064         return err ? err : len;
2065 }
2066
2067 static struct md_sysfs_entry md_new_device =
2068 __ATTR(new_dev, 0200, null_show, new_dev_store);
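/*
 * Example (illustrative): add the block device with major:minor 8:16:
 *   echo 8:16 > /sys/block/md0/md/new_dev
 */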
2069
2070 static ssize_t
2071 size_show(mddev_t *mddev, char *page)
2072 {
2073         return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
2074 }
2075
2076 static int update_size(mddev_t *mddev, unsigned long size);
2077
2078 static ssize_t
2079 size_store(mddev_t *mddev, const char *buf, size_t len)
2080 {
2081         /* If array is inactive, we can reduce the component size, but
2082          * not increase it (except from 0).
2083          * If array is active, we can try an on-line resize
2084          */
2085         char *e;
2086         int err = 0;
2087         unsigned long long size = simple_strtoull(buf, &e, 10);
2088         if (!*buf || *buf == '\n' ||
2089             (*e && *e != '\n'))
2090                 return -EINVAL;
2091
2092         if (mddev->pers) {
2093                 err = update_size(mddev, size);
2094                 md_update_sb(mddev);
2095         } else {
2096                 if (mddev->size == 0 ||
2097                     mddev->size > size)
2098                         mddev->size = size;
2099                 else
2100                         err = -ENOSPC;
2101         }
2102         return err ? err : len;
2103 }
2104
2105 static struct md_sysfs_entry md_size =
2106 __ATTR(component_size, 0644, size_show, size_store);
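/* Note: component_size, like mddev->size, is in units of 1K blocks. */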
2107
2108
2109 /* Metadata version.
2110  * This is either 'none' for arrays with externally managed metadata,
2111  * or N.M for internally known formats
2112  */
2113 static ssize_t
2114 metadata_show(mddev_t *mddev, char *page)
2115 {
2116         if (mddev->persistent)
2117                 return sprintf(page, "%d.%d\n",
2118                                mddev->major_version, mddev->minor_version);
2119         else
2120                 return sprintf(page, "none\n");
2121 }
2122
2123 static ssize_t
2124 metadata_store(mddev_t *mddev, const char *buf, size_t len)
2125 {
2126         int major, minor;
2127         char *e;
2128         if (!list_empty(&mddev->disks))
2129                 return -EBUSY;
2130
2131         if (cmd_match(buf, "none")) {
2132                 mddev->persistent = 0;
2133                 mddev->major_version = 0;
2134                 mddev->minor_version = 90;
2135                 return len;
2136         }
2137         major = simple_strtoul(buf, &e, 10);
2138         if (e==buf || *e != '.')
2139                 return -EINVAL;
2140         buf = e+1;
2141         minor = simple_strtoul(buf, &e, 10);
2142         if (e==buf || *e != '\n')
2143                 return -EINVAL;
2144         if (major >= ARRAY_SIZE(super_types) ||
2145             super_types[major].name == NULL)
2146                 return -ENOENT;
2147         mddev->major_version = major;
2148         mddev->minor_version = minor;
2149         mddev->persistent = 1;
2150         return len;
2151 }
2152
2153 static struct md_sysfs_entry md_metadata =
2154 __ATTR(metadata_version, 0644, metadata_show, metadata_store);
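/*
 * Examples (illustrative):
 *   echo 0.90 > metadata_version    # use in-kernel v0.90 superblocks
 *   echo none > metadata_version    # superblocks are not persistent
 */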
2155
2156 static ssize_t
2157 action_show(mddev_t *mddev, char *page)
2158 {
2159         char *type = "idle";
2160         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2161             test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) {
2162                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2163                         if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2164                                 type = "resync";
2165                         else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2166                                 type = "check";
2167                         else
2168                                 type = "repair";
2169                 } else
2170                         type = "recover";
2171         }
2172         return sprintf(page, "%s\n", type);
2173 }
2174
2175 static ssize_t
2176 action_store(mddev_t *mddev, const char *page, size_t len)
2177 {
2178         if (!mddev->pers || !mddev->pers->sync_request)
2179                 return -EINVAL;
2180
2181         if (cmd_match(page, "idle")) {
2182                 if (mddev->sync_thread) {
2183                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2184                         md_unregister_thread(mddev->sync_thread);
2185                         mddev->sync_thread = NULL;
2186                         mddev->recovery = 0;
2187                 }
2188         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2189                    test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
2190                 return -EBUSY;
2191         else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
2192                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2193         else {
2194                 if (cmd_match(page, "check"))
2195                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
2196                 else if (!cmd_match(page, "repair"))
2197                         return -EINVAL;
2198                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
2199                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
2200         }
2201         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2202         md_wakeup_thread(mddev->thread);
2203         return len;
2204 }
2205
2206 static ssize_t
2207 mismatch_cnt_show(mddev_t *mddev, char *page)
2208 {
2209         return sprintf(page, "%llu\n",
2210                        (unsigned long long) mddev->resync_mismatches);
2211 }
2212
2213 static struct md_sysfs_entry
2214 md_scan_mode = __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
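/*
 * Example (illustrative): start a background consistency check with
 *   echo check > /sys/block/md0/md/sync_action
 * "repair" also rewrites mismatches; "idle" aborts a running pass.
 */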
2215
2216
2217 static struct md_sysfs_entry
2218 md_mismatches = __ATTR_RO(mismatch_cnt);
2219
2220 static ssize_t
2221 sync_min_show(mddev_t *mddev, char *page)
2222 {
2223         return sprintf(page, "%d (%s)\n", speed_min(mddev),
2224                        mddev->sync_speed_min ? "local": "system");
2225 }
2226
2227 static ssize_t
2228 sync_min_store(mddev_t *mddev, const char *buf, size_t len)
2229 {
2230         int min;
2231         char *e;
2232         if (strncmp(buf, "system", 6)==0) {
2233                 mddev->sync_speed_min = 0;
2234                 return len;
2235         }
2236         min = simple_strtoul(buf, &e, 10);
2237         if (buf == e || (*e && *e != '\n') || min <= 0)
2238                 return -EINVAL;
2239         mddev->sync_speed_min = min;
2240         return len;
2241 }
2242
2243 static struct md_sysfs_entry md_sync_min =
2244 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
2245
2246 static ssize_t
2247 sync_max_show(mddev_t *mddev, char *page)
2248 {
2249         return sprintf(page, "%d (%s)\n", speed_max(mddev),
2250                        mddev->sync_speed_max ? "local": "system");
2251 }
2252
2253 static ssize_t
2254 sync_max_store(mddev_t *mddev, const char *buf, size_t len)
2255 {
2256         int max;
2257         char *e;
2258         if (strncmp(buf, "system", 6)==0) {
2259                 mddev->sync_speed_max = 0;
2260                 return len;
2261         }
2262         max = simple_strtoul(buf, &e, 10);
2263         if (buf == e || (*e && *e != '\n') || max <= 0)
2264                 return -EINVAL;
2265         mddev->sync_speed_max = max;
2266         return len;
2267 }
2268
2269 static struct md_sysfs_entry md_sync_max =
2270 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
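/*
 * Writing "system" to sync_speed_{min,max} clears the per-array limit
 * so that the global sysctl default applies again.
 */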
2271
2272
2273 static ssize_t
2274 sync_speed_show(mddev_t *mddev, char *page)
2275 {
2276         unsigned long resync, dt, db;
2277         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
2278         dt = ((jiffies - mddev->resync_mark) / HZ);
2279         if (!dt) dt++;
2280         db = resync - (mddev->resync_mark_cnt);
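        /* db counts 512-byte sectors, so /2 converts the rate to KiB/sec */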
2281         return sprintf(page, "%ld\n", db/dt/2); /* K/sec */
2282 }
2283
2284 static struct md_sysfs_entry
2285 md_sync_speed = __ATTR_RO(sync_speed);
2286
2287 static ssize_t
2288 sync_completed_show(mddev_t *mddev, char *page)
2289 {
2290         unsigned long max_blocks, resync;
2291
2292         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2293                 max_blocks = mddev->resync_max_sectors;
2294         else
2295                 max_blocks = mddev->size << 1;
2296
2297         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
2298         return sprintf(page, "%lu / %lu\n", resync, max_blocks);
2299 }
2300
2301 static struct md_sysfs_entry
2302 md_sync_completed = __ATTR_RO(sync_completed);
2303
2304 static struct attribute *md_default_attrs[] = {
2305         &md_level.attr,
2306         &md_raid_disks.attr,
2307         &md_chunk_size.attr,
2308         &md_size.attr,
2309         &md_metadata.attr,
2310         &md_new_device.attr,
2311         NULL,
2312 };
2313
2314 static struct attribute *md_redundancy_attrs[] = {
2315         &md_scan_mode.attr,
2316         &md_mismatches.attr,
2317         &md_sync_min.attr,
2318         &md_sync_max.attr,
2319         &md_sync_speed.attr,
2320         &md_sync_completed.attr,
2321         NULL,
2322 };
2323 static struct attribute_group md_redundancy_group = {
2324         .name = NULL,
2325         .attrs = md_redundancy_attrs,
2326 };
2327
2328
2329 static ssize_t
2330 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2331 {
2332         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
2333         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
2334         ssize_t rv;
2335
2336         if (!entry->show)
2337                 return -EIO;
2338         mddev_lock(mddev);
2339         rv = entry->show(mddev, page);
2340         mddev_unlock(mddev);
2341         return rv;
2342 }
2343
2344 static ssize_t
2345 md_attr_store(struct kobject *kobj, struct attribute *attr,
2346               const char *page, size_t length)
2347 {
2348         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
2349         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
2350         ssize_t rv;
2351
2352         if (!entry->store)
2353                 return -EIO;
2354         mddev_lock(mddev);
2355         rv = entry->store(mddev, page, length);
2356         mddev_unlock(mddev);
2357         return rv;
2358 }
2359
2360 static void md_free(struct kobject *ko)
2361 {
2362         mddev_t *mddev = container_of(ko, mddev_t, kobj);
2363         kfree(mddev);
2364 }
2365
2366 static struct sysfs_ops md_sysfs_ops = {
2367         .show   = md_attr_show,
2368         .store  = md_attr_store,
2369 };
2370 static struct kobj_type md_ktype = {
2371         .release        = md_free,
2372         .sysfs_ops      = &md_sysfs_ops,
2373         .default_attrs  = md_default_attrs,
2374 };
2375
2376 int mdp_major = 0;
2377
2378 static struct kobject *md_probe(dev_t dev, int *part, void *data)
2379 {
2380         static DECLARE_MUTEX(disks_sem);
2381         mddev_t *mddev = mddev_find(dev);
2382         struct gendisk *disk;
2383         int partitioned = (MAJOR(dev) != MD_MAJOR);
2384         int shift = partitioned ? MdpMinorShift : 0;
2385         int unit = MINOR(dev) >> shift;
2386
2387         if (!mddev)
2388                 return NULL;
2389
2390         down(&disks_sem);
2391         if (mddev->gendisk) {
2392                 up(&disks_sem);
2393                 mddev_put(mddev);
2394                 return NULL;
2395         }
2396         disk = alloc_disk(1 << shift);
2397         if (!disk) {
2398                 up(&disks_sem);
2399                 mddev_put(mddev);
2400                 return NULL;
2401         }
2402         disk->major = MAJOR(dev);
2403         disk->first_minor = unit << shift;
2404         if (partitioned) {
2405                 sprintf(disk->disk_name, "md_d%d", unit);
2406                 sprintf(disk->devfs_name, "md/d%d", unit);
2407         } else {
2408                 sprintf(disk->disk_name, "md%d", unit);
2409                 sprintf(disk->devfs_name, "md/%d", unit);
2410         }
2411         disk->fops = &md_fops;
2412         disk->private_data = mddev;
2413         disk->queue = mddev->queue;
2414         add_disk(disk);
2415         mddev->gendisk = disk;
2416         up(&disks_sem);
2417         mddev->kobj.parent = &disk->kobj;
2418         mddev->kobj.k_name = NULL;
2419         snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md");
2420         mddev->kobj.ktype = &md_ktype;
2421         kobject_register(&mddev->kobj);
2422         return NULL;
2423 }
2424
2425 void md_wakeup_thread(mdk_thread_t *thread);
2426
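/*
 * Safemode: after a period with no writes, ask the md thread to mark
 * the array clean.  This timer handler makes that request.
 */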
2427 static void md_safemode_timeout(unsigned long data)
2428 {
2429         mddev_t *mddev = (mddev_t *) data;
2430
2431         mddev->safemode = 1;
2432         md_wakeup_thread(mddev->thread);
2433 }
2434
2435 static int start_dirty_degraded;
2436
2437 static int do_md_run(mddev_t * mddev)
2438 {
2439         int err;
2440         int chunk_size;
2441         struct list_head *tmp;
2442         mdk_rdev_t *rdev;
2443         struct gendisk *disk;
2444         struct mdk_personality *pers;
2445         char b[BDEVNAME_SIZE];
2446
2447         if (list_empty(&mddev->disks))
2448                 /* cannot run an array with no devices.. */
2449                 return -EINVAL;
2450
2451         if (mddev->pers)
2452                 return -EBUSY;
2453
2454         /*
2455          * Analyze all RAID superblock(s)
2456          */
2457         if (!mddev->raid_disks)
2458                 analyze_sbs(mddev);
2459
2460         chunk_size = mddev->chunk_size;
2461
2462         if (chunk_size) {
2463                 if (chunk_size > MAX_CHUNK_SIZE) {
2464                         printk(KERN_ERR "too big chunk_size: %d > %d\n",
2465                                 chunk_size, MAX_CHUNK_SIZE);
2466                         return -EINVAL;
2467                 }
2468                 /*
2469                  * chunk-size has to be a power of 2 and at least PAGE_SIZE
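                 * ((1 << ffz(~n)) isolates the lowest set bit of n and so
                 * equals n exactly when n is a power of two.)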
2470                  */
2471                 if ( (1 << ffz(~chunk_size)) != chunk_size) {
2472                         printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
2473                         return -EINVAL;
2474                 }
2475                 if (chunk_size < PAGE_SIZE) {
2476                         printk(KERN_ERR "too small chunk_size: %d < %ld\n",
2477                                 chunk_size, PAGE_SIZE);
2478                         return -EINVAL;
2479                 }
2480
2481                 /* devices must have minimum size of one chunk */
2482                 ITERATE_RDEV(mddev,rdev,tmp) {
2483                         if (test_bit(Faulty, &rdev->flags))
2484                                 continue;
2485                         if (rdev->size < chunk_size / 1024) {
2486                                 printk(KERN_WARNING
2487                                         "md: Dev %s smaller than chunk_size:"
2488                                         " %lluk < %dk\n",
2489                                         bdevname(rdev->bdev,b),
2490                                         (unsigned long long)rdev->size,
2491                                         chunk_size / 1024);
2492                                 return -EINVAL;
2493                         }
2494                 }
2495         }
2496
2497 #ifdef CONFIG_KMOD
2498         if (mddev->level != LEVEL_NONE)
2499                 request_module("md-level-%d", mddev->level);
2500         else if (mddev->clevel[0])
2501                 request_module("md-%s", mddev->clevel);
2502 #endif
2503
2504         /*
2505          * Drop all container device buffers, from now on
2506          * the only valid external interface is through the md
2507          * device.
2508          * Also find largest hardsector size
2509          */
2510         ITERATE_RDEV(mddev,rdev,tmp) {
2511                 if (test_bit(Faulty, &rdev->flags))
2512                         continue;
2513                 sync_blockdev(rdev->bdev);
2514                 invalidate_bdev(rdev->bdev, 0);
2515         }
2516
2517         md_probe(mddev->unit, NULL, NULL);
2518         disk = mddev->gendisk;
2519         if (!disk)
2520                 return -ENOMEM;
2521
2522         spin_lock(&pers_lock);
2523         pers = find_pers(mddev->level, mddev->clevel);
2524         if (!pers || !try_module_get(pers->owner)) {
2525                 spin_unlock(&pers_lock);
2526                 if (mddev->level != LEVEL_NONE)
2527                         printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
2528                                mddev->level);
2529                 else
2530                         printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
2531                                mddev->clevel);
2532                 return -EINVAL;
2533         }
2534         mddev->pers = pers;
2535         spin_unlock(&pers_lock);
2536         mddev->level = pers->level;
2537         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
2538
2539         mddev->recovery = 0;
2540         mddev->resync_max_sectors = mddev->size << 1; /* may be overridden by personality */
2541         mddev->barriers_work = 1;
2542         mddev->ok_start_degraded = start_dirty_degraded;
2543
2544         if (start_readonly)
2545                 mddev->ro = 2; /* read-only, but switch on first write */
2546
2547         err = mddev->pers->run(mddev);
2548         if (!err && mddev->pers->sync_request) {
2549                 err = bitmap_create(mddev);
2550                 if (err) {
2551                         printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
2552                                mdname(mddev), err);
2553                         mddev->pers->stop(mddev);
2554                 }
2555         }
2556         if (err) {
2557                 printk(KERN_ERR "md: pers->run() failed ...\n");
2558                 module_put(mddev->pers->owner);
2559                 mddev->pers = NULL;
2560                 bitmap_destroy(mddev);
2561                 return err;
2562         }
2563         if (mddev->pers->sync_request)
2564                 sysfs_create_group(&mddev->kobj, &md_redundancy_group);
2565         else if (mddev->ro == 2) /* auto-readonly not meaningful */
2566                 mddev->ro = 0;
2567
2568         atomic_set(&mddev->writes_pending,0);
2569         mddev->safemode = 0;
2570         mddev->safemode_timer.function = md_safemode_timeout;
2571         mddev->safemode_timer.data = (unsigned long) mddev;
2572         mddev->safemode_delay = (20 * HZ)/1000 +1; /* 20 msec delay */
2573         mddev->in_sync = 1;
2574
2575         ITERATE_RDEV(mddev,rdev,tmp)
2576                 if (rdev->raid_disk >= 0) {
2577                         char nm[20];
2578                         sprintf(nm, "rd%d", rdev->raid_disk);
2579                         sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
2580                 }
2581         
2582         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2583         md_wakeup_thread(mddev->thread);
2584         
2585         if (mddev->sb_dirty)
2586                 md_update_sb(mddev);
2587
2588         set_capacity(disk, mddev->array_size<<1);
2589
2590         /* If we call blk_queue_make_request here, it will
2591          * re-initialise max_sectors etc which may have been
2592          * refined inside ->run().  So just set the bits we need to set.
2593          * Most initialisation happened when we called
2594          * blk_queue_make_request(..., md_fail_request)
2595          * earlier.
2596          */
2597         mddev->queue->queuedata = mddev;
2598         mddev->queue->make_request_fn = mddev->pers->make_request;
2599
2600         mddev->changed = 1;
2601         md_new_event(mddev);
2602         return 0;
2603 }
2604
2605 static int restart_array(mddev_t *mddev)
2606 {
2607         struct gendisk *disk = mddev->gendisk;
2608         int err;
2609
2610         /*
2611          * Complain if it has no devices
2612          */
2613         err = -ENXIO;
2614         if (list_empty(&mddev->disks))
2615                 goto out;
2616
2617         if (mddev->pers) {
2618                 err = -EBUSY;
2619                 if (!mddev->ro)
2620                         goto out;
2621
2622                 mddev->safemode = 0;
2623                 mddev->ro = 0;
2624                 set_disk_ro(disk, 0);
2625
2626                 printk(KERN_INFO "md: %s switched to read-write mode.\n",
2627                         mdname(mddev));
2628                 /*
2629                  * Kick recovery or resync if necessary
2630                  */
2631                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2632                 md_wakeup_thread(mddev->thread);
2633                 err = 0;
2634         } else {
2635                 printk(KERN_ERR "md: %s has no personality assigned.\n",
2636                         mdname(mddev));
2637                 err = -EINVAL;
2638         }
2639
2640 out:
2641         return err;
2642 }
2643
2644 static int do_md_stop(mddev_t * mddev, int ro)
2645 {
2646         int err = 0;
2647         struct gendisk *disk = mddev->gendisk;
2648
2649         if (mddev->pers) {
2650                 if (atomic_read(&mddev->active)>2) {
2651                         printk(KERN_WARNING "md: %s still in use.\n", mdname(mddev));
2652                         return -EBUSY;
2653                 }
2654
2655                 if (mddev->sync_thread) {
2656                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2657                         md_unregister_thread(mddev->sync_thread);
2658                         mddev->sync_thread = NULL;
2659                 }
2660
2661                 del_timer_sync(&mddev->safemode_timer);
2662
2663                 invalidate_partition(disk, 0);
2664
2665                 if (ro) {
2666                         err  = -ENXIO;
2667                         if (mddev->ro==1)
2668                                 goto out;
2669                         mddev->ro = 1;
2670                 } else {
2671                         bitmap_flush(mddev);
2672                         md_super_wait(mddev);
2673                         if (mddev->ro)
2674                                 set_disk_ro(disk, 0);
2675                         blk_queue_make_request(mddev->queue, md_fail_request);
2676                         mddev->pers->stop(mddev);
2677                         if (mddev->pers->sync_request)
2678                                 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
2679
2680                         module_put(mddev->pers->owner);
2681                         mddev->pers = NULL;
2682                         if (mddev->ro)
2683                                 mddev->ro = 0;
2684                 }
2685                 if (!mddev->in_sync) {
2686                         /* mark array as shutdown cleanly */
2687                         mddev->in_sync = 1;
2688                         md_update_sb(mddev);
2689                 }
2690                 if (ro)
2691                         set_disk_ro(disk, 1);
2692         }
2693
2694         /*
2695          * Free resources if final stop
2696          */
2697         if (!ro) {
2698                 mdk_rdev_t *rdev;
2699                 struct list_head *tmp;
2700                 struct gendisk *disk;
2701                 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
2702
2703                 bitmap_destroy(mddev);
2704                 if (mddev->bitmap_file) {
2705                         atomic_set(&mddev->bitmap_file->f_dentry->d_inode->i_writecount, 1);
2706                         fput(mddev->bitmap_file);
2707                         mddev->bitmap_file = NULL;
2708                 }
2709                 mddev->bitmap_offset = 0;
2710
2711                 ITERATE_RDEV(mddev,rdev,tmp)
2712                         if (rdev->raid_disk >= 0) {
2713                                 char nm[20];
2714                                 sprintf(nm, "rd%d", rdev->raid_disk);
2715                                 sysfs_remove_link(&mddev->kobj, nm);
2716                         }
2717
2718                 export_array(mddev);
2719
2720                 mddev->array_size = 0;
2721                 disk = mddev->gendisk;
2722                 if (disk)
2723                         set_capacity(disk, 0);
2724                 mddev->changed = 1;
2725         } else
2726                 printk(KERN_INFO "md: %s switched to read-only mode.\n",
2727                         mdname(mddev));
2728         err = 0;
2729         md_new_event(mddev);
2730 out:
2731         return err;
2732 }
2733
2734 static void autorun_array(mddev_t *mddev)
2735 {
2736         mdk_rdev_t *rdev;
2737         struct list_head *tmp;
2738         int err;
2739
2740         if (list_empty(&mddev->disks))
2741                 return;
2742
2743         printk(KERN_INFO "md: running: ");
2744
2745         ITERATE_RDEV(mddev,rdev,tmp) {
2746                 char b[BDEVNAME_SIZE];
2747                 printk("<%s>", bdevname(rdev->bdev,b));
2748         }
2749         printk("\n");
2750
2751         err = do_md_run (mddev);
2752         if (err) {
2753                 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
2754                 do_md_stop (mddev, 0);
2755         }
2756 }
2757
2758 /*
2759  * let's try to run arrays based on all disks that have arrived
2760  * until now. (those are in pending_raid_disks)
2761  *
2762  * the method: pick the first pending disk, collect all disks with
2763  * the same UUID, remove all from the pending list and put them into
2764  * the 'same_array' list. Then order this list based on superblock
2765  * update time (freshest comes first), kick out 'old' disks and
2766  * compare superblocks. If everything's fine then run it.
2767  *
2768  * If "unit" is allocated, then bump its reference count
2769  */
2770 static void autorun_devices(int part)
2771 {
2772         struct list_head candidates;
2773         struct list_head *tmp;
2774         mdk_rdev_t *rdev0, *rdev;
2775         mddev_t *mddev;
2776         char b[BDEVNAME_SIZE];
2777
2778         printk(KERN_INFO "md: autorun ...\n");
2779         while (!list_empty(&pending_raid_disks)) {
2780                 dev_t dev;
2781                 rdev0 = list_entry(pending_raid_disks.next,
2782                                          mdk_rdev_t, same_set);
2783
2784                 printk(KERN_INFO "md: considering %s ...\n",
2785                         bdevname(rdev0->bdev,b));
2786                 INIT_LIST_HEAD(&candidates);
2787                 ITERATE_RDEV_PENDING(rdev,tmp)
2788                         if (super_90_load(rdev, rdev0, 0) >= 0) {
2789                                 printk(KERN_INFO "md:  adding %s ...\n",
2790                                         bdevname(rdev->bdev,b));
2791                                 list_move(&rdev->same_set, &candidates);
2792                         }
2793                 /*
2794                  * now we have a set of devices, with all of them having
2795                  * mostly sane superblocks. It's time to allocate the
2796                  * mddev.
2797                  */
2798                 if (rdev0->preferred_minor < 0 || rdev0->preferred_minor >= MAX_MD_DEVS) {
2799                         printk(KERN_INFO "md: unit number in %s is bad: %d\n",
2800                                bdevname(rdev0->bdev, b), rdev0->preferred_minor);
2801                         break;
2802                 }
2803                 if (part)
2804                         dev = MKDEV(mdp_major,
2805                                     rdev0->preferred_minor << MdpMinorShift);
2806                 else
2807                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
2808
2809                 md_probe(dev, NULL, NULL);
2810                 mddev = mddev_find(dev);
2811                 if (!mddev) {
2812                         printk(KERN_ERR 
2813                                 "md: cannot allocate memory for md drive.\n");
2814                         break;
2815                 }
2816                 if (mddev_lock(mddev)) 
2817                         printk(KERN_WARNING "md: %s locked, cannot run\n",
2818                                mdname(mddev));
2819                 else if (mddev->raid_disks || mddev->major_version
2820                          || !list_empty(&mddev->disks)) {
2821                         printk(KERN_WARNING 
2822                                 "md: %s already running, cannot run %s\n",
2823                                 mdname(mddev), bdevname(rdev0->bdev,b));
2824                         mddev_unlock(mddev);
2825                 } else {
2826                         printk(KERN_INFO "md: created %s\n", mdname(mddev));
2827                         ITERATE_RDEV_GENERIC(candidates,rdev,tmp) {
2828                                 list_del_init(&rdev->same_set);
2829                                 if (bind_rdev_to_array(rdev, mddev))
2830                                         export_rdev(rdev);
2831                         }
2832                         autorun_array(mddev);
2833                         mddev_unlock(mddev);
2834                 }
2835                 /* on success, candidates will be empty, on error
2836                  * it won't...
2837                  */
2838                 ITERATE_RDEV_GENERIC(candidates,rdev,tmp)
2839                         export_rdev(rdev);
2840                 mddev_put(mddev);
2841         }
2842         printk(KERN_INFO "md: ... autorun DONE.\n");
2843 }
2844
2845 /*
2846  * import RAID devices based on one partition
2847  * if possible, the array gets run as well.
2848  */
2849
2850 static int autostart_array(dev_t startdev)
2851 {
2852         char b[BDEVNAME_SIZE];
2853         int err = -EINVAL, i;
2854         mdp_super_t *sb = NULL;
2855         mdk_rdev_t *start_rdev = NULL, *rdev;
2856
2857         start_rdev = md_import_device(startdev, 0, 0);
2858         if (IS_ERR(start_rdev))
2859                 return err;
2860
2861
2862         /* NOTE: this can only work for 0.90.0 superblocks */
2863         sb = (mdp_super_t*)page_address(start_rdev->sb_page);
2864         if (sb->major_version != 0 ||
2865             sb->minor_version != 90 ) {
2866                 printk(KERN_WARNING "md: can only autostart 0.90.0 arrays\n");
2867                 export_rdev(start_rdev);
2868                 return err;
2869         }
2870
2871         if (test_bit(Faulty, &start_rdev->flags)) {
2872                 printk(KERN_WARNING 
2873                         "md: can not autostart based on faulty %s!\n",
2874                         bdevname(start_rdev->bdev,b));
2875                 export_rdev(start_rdev);
2876                 return err;
2877         }
2878         list_add(&start_rdev->same_set, &pending_raid_disks);
2879
2880         for (i = 0; i < MD_SB_DISKS; i++) {
2881                 mdp_disk_t *desc = sb->disks + i;
2882                 dev_t dev = MKDEV(desc->major, desc->minor);
2883
2884                 if (!dev)
2885                         continue;
2886                 if (dev == startdev)
2887                         continue;
2888                 if (MAJOR(dev) != desc->major || MINOR(dev) != desc->minor)
2889                         continue;
2890                 rdev = md_import_device(dev, 0, 0);
2891                 if (IS_ERR(rdev))
2892                         continue;
2893
2894                 list_add(&rdev->same_set, &pending_raid_disks);
2895         }
2896
2897         /*
2898          * TODO: autorun_devices() cannot report failure, so just return 0
2899          */
2900         autorun_devices(0);
2901         return 0;
2902
2903 }
2904
2905
2906 static int get_version(void __user * arg)
2907 {
2908         mdu_version_t ver;
2909
2910         ver.major = MD_MAJOR_VERSION;
2911         ver.minor = MD_MINOR_VERSION;
2912         ver.patchlevel = MD_PATCHLEVEL_VERSION;
2913
2914         if (copy_to_user(arg, &ver, sizeof(ver)))
2915                 return -EFAULT;
2916
2917         return 0;
2918 }
2919
2920 static int get_array_info(mddev_t * mddev, void __user * arg)
2921 {
2922         mdu_array_info_t info;
2923         int nr,working,active,failed,spare;
2924         mdk_rdev_t *rdev;
2925         struct list_head *tmp;
2926
2927         nr=working=active=failed=spare=0;
2928         ITERATE_RDEV(mddev,rdev,tmp) {
2929                 nr++;
2930                 if (test_bit(Faulty, &rdev->flags))
2931                         failed++;
2932                 else {
2933                         working++;
2934                         if (test_bit(In_sync, &rdev->flags))
2935                                 active++;       
2936                         else
2937                                 spare++;
2938                 }
2939         }
2940
2941         info.major_version = mddev->major_version;
2942         info.minor_version = mddev->minor_version;
2943         info.patch_version = MD_PATCHLEVEL_VERSION;
2944         info.ctime         = mddev->ctime;
2945         info.level         = mddev->level;
2946         info.size          = mddev->size;
2947         if (info.size != mddev->size) /* overflow */
2948                 info.size = -1;
2949         info.nr_disks      = nr;
2950         info.raid_disks    = mddev->raid_disks;
2951         info.md_minor      = mddev->md_minor;
2952         info.not_persistent= !mddev->persistent;
2953
2954         info.utime         = mddev->utime;
2955         info.state         = 0;
2956         if (mddev->in_sync)
2957                 info.state = (1<<MD_SB_CLEAN);
2958         if (mddev->bitmap && mddev->bitmap_offset)
2959                 info.state |= (1<<MD_SB_BITMAP_PRESENT);
2960         info.active_disks  = active;
2961         info.working_disks = working;
2962         info.failed_disks  = failed;
2963         info.spare_disks   = spare;
2964
2965         info.layout        = mddev->layout;
2966         info.chunk_size    = mddev->chunk_size;
2967
2968         if (copy_to_user(arg, &info, sizeof(info)))
2969                 return -EFAULT;
2970
2971         return 0;
2972 }
2973
2974 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
2975 {
2976         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
2977         char *ptr, *buf = NULL;
2978         int err = -ENOMEM;
2979
2980         file = kmalloc(sizeof(*file), GFP_KERNEL);
2981         if (!file)
2982                 goto out;
2983
2984         /* bitmap disabled, zero the first byte and copy out */
2985         if (!mddev->bitmap || !mddev->bitmap->file) {
2986                 file->pathname[0] = '\0';
2987                 goto copy_out;
2988         }
2989
2990         buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
2991         if (!buf)
2992                 goto out;
2993
2994         ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname));
2995         if (!ptr)
2996                 goto out;
2997
2998         strcpy(file->pathname, ptr);
2999
3000 copy_out:
3001         err = 0;
3002         if (copy_to_user(arg, file, sizeof(*file)))
3003                 err = -EFAULT;
3004 out:
3005         kfree(buf);
3006         kfree(file);
3007         return err;
3008 }
3009
3010 static int get_disk_info(mddev_t * mddev, void __user * arg)
3011 {
3012         mdu_disk_info_t info;
3013         unsigned int nr;
3014         mdk_rdev_t *rdev;
3015
3016         if (copy_from_user(&info, arg, sizeof(info)))
3017                 return -EFAULT;
3018
3019         nr = info.number;
3020
3021         rdev = find_rdev_nr(mddev, nr);
3022         if (rdev) {
3023                 info.major = MAJOR(rdev->bdev->bd_dev);
3024                 info.minor = MINOR(rdev->bdev->bd_dev);
3025                 info.raid_disk = rdev->raid_disk;
3026                 info.state = 0;
3027                 if (test_bit(Faulty, &rdev->flags))
3028                         info.state |= (1<<MD_DISK_FAULTY);
3029                 else if (test_bit(In_sync, &rdev->flags)) {
3030                         info.state |= (1<<MD_DISK_ACTIVE);
3031                         info.state |= (1<<MD_DISK_SYNC);
3032                 }
3033                 if (test_bit(WriteMostly, &rdev->flags))
3034                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
3035         } else {
3036                 info.major = info.minor = 0;
3037                 info.raid_disk = -1;
3038                 info.state = (1<<MD_DISK_REMOVED);
3039         }
3040
3041         if (copy_to_user(arg, &info, sizeof(info)))
3042                 return -EFAULT;
3043
3044         return 0;
3045 }
3046
3047 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
3048 {
3049         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
3050         mdk_rdev_t *rdev;
3051         dev_t dev = MKDEV(info->major,info->minor);
3052
3053         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
3054                 return -EOVERFLOW;
3055
3056         if (!mddev->raid_disks) {
3057                 int err;
3058                 /* expecting a device which has a superblock */
3059                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
3060                 if (IS_ERR(rdev)) {
3061                         printk(KERN_WARNING 
3062                                 "md: md_import_device returned %ld\n",
3063                                 PTR_ERR(rdev));
3064                         return PTR_ERR(rdev);
3065                 }
3066                 if (!list_empty(&mddev->disks)) {
3067                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
3068                                                         mdk_rdev_t, same_set);
3069                         int err = super_types[mddev->major_version]
3070                                 .load_super(rdev, rdev0, mddev->minor_version);
3071                         if (err < 0) {
3072                                 printk(KERN_WARNING 
3073                                         "md: %s has different UUID to %s\n",
3074                                         bdevname(rdev->bdev,b), 
3075                                         bdevname(rdev0->bdev,b2));
3076                                 export_rdev(rdev);
3077                                 return -EINVAL;
3078                         }
3079                 }
3080                 err = bind_rdev_to_array(rdev, mddev);
3081                 if (err)
3082                         export_rdev(rdev);
3083                 return err;
3084         }
3085
3086         /*
3087          * add_new_disk can be used once the array is assembled
3088          * to add "hot spares".  They must already have a superblock
3089          * written
3090          */
3091         if (mddev->pers) {
3092                 int err;
3093                 if (!mddev->pers->hot_add_disk) {
3094                         printk(KERN_WARNING 
3095                                 "%s: personality does not support diskops!\n",
3096                                mdname(mddev));
3097                         return -EINVAL;
3098                 }
3099                 if (mddev->persistent)
3100                         rdev = md_import_device(dev, mddev->major_version,
3101                                                 mddev->minor_version);
3102                 else
3103                         rdev = md_import_device(dev, -1, -1);
3104                 if (IS_ERR(rdev)) {
3105                         printk(KERN_WARNING 
3106                                 "md: md_import_device returned %ld\n",
3107                                 PTR_ERR(rdev));
3108                         return PTR_ERR(rdev);
3109                 }
3110                 /* set save_raid_disk if appropriate */
3111                 if (!mddev->persistent) {
3112                         if (info->state & (1<<MD_DISK_SYNC)  &&
3113                             info->raid_disk < mddev->raid_disks)
3114                                 rdev->raid_disk = info->raid_disk;
3115                         else
3116                                 rdev->raid_disk = -1;
3117                 } else
3118                         super_types[mddev->major_version].
3119                                 validate_super(mddev, rdev);
3120                 rdev->saved_raid_disk = rdev->raid_disk;
3121
3122                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
3123                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
3124                         set_bit(WriteMostly, &rdev->flags);
3125
3126                 rdev->raid_disk = -1;
3127                 err = bind_rdev_to_array(rdev, mddev);
3128                 if (err)
3129                         export_rdev(rdev);
3130
3131                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3132                 md_wakeup_thread(mddev->thread);
3133                 return err;
3134         }
3135
3136         /* otherwise, add_new_disk is only allowed
3137          * for major_version==0 superblocks
3138          */
3139         if (mddev->major_version != 0) {
3140                 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
3141                        mdname(mddev));
3142                 return -EINVAL;
3143         }
3144
3145         if (!(info->state & (1<<MD_DISK_FAULTY))) {
3146                 int err;
3147                 rdev = md_import_device(dev, -1, 0);
3148                 if (IS_ERR(rdev)) {
3149                         printk(KERN_WARNING 
3150                                 "md: error, md_import_device() returned %ld\n",
3151                                 PTR_ERR(rdev));
3152                         return PTR_ERR(rdev);
3153                 }
3154                 rdev->desc_nr = info->number;
3155                 if (info->raid_disk < mddev->raid_disks)
3156                         rdev->raid_disk = info->raid_disk;
3157                 else
3158                         rdev->raid_disk = -1;
3159
3160                 rdev->flags = 0;
3161
3162                 if (rdev->raid_disk < mddev->raid_disks)
3163                         if (info->state & (1<<MD_DISK_SYNC))
3164                                 set_bit(In_sync, &rdev->flags);
3165
3166                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
3167                         set_bit(WriteMostly, &rdev->flags);
3168
3169                 if (!mddev->persistent) {
3170                         printk(KERN_INFO "md: nonpersistent superblock ...\n");
3171                         rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
3172                 } else 
3173                         rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
3174                 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
3175
3176                 err = bind_rdev_to_array(rdev, mddev);
3177                 if (err) {
3178                         export_rdev(rdev);
3179                         return err;
3180                 }
3181         }
3182
3183         return 0;
3184 }
3185
3186 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
3187 {
3188         char b[BDEVNAME_SIZE];
3189         mdk_rdev_t *rdev;
3190
3191         if (!mddev->pers)
3192                 return -ENODEV;
3193
3194         rdev = find_rdev(mddev, dev);
3195         if (!rdev)
3196                 return -ENXIO;
3197
3198         if (rdev->raid_disk >= 0)
3199                 goto busy;
3200
3201         kick_rdev_from_array(rdev);
3202         md_update_sb(mddev);
3203         md_new_event(mddev);
3204
3205         return 0;
3206 busy:
3207         printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
3208                 bdevname(rdev->bdev,b), mdname(mddev));
3209         return -EBUSY;
3210 }
3211
3212 static int hot_add_disk(mddev_t * mddev, dev_t dev)
3213 {
3214         char b[BDEVNAME_SIZE];
3215         int err;
3216         unsigned int size;
3217         mdk_rdev_t *rdev;
3218
3219         if (!mddev->pers)
3220                 return -ENODEV;
3221
3222         if (mddev->major_version != 0) {
3223                 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
3224                         " version-0 superblocks.\n",
3225                         mdname(mddev));
3226                 return -EINVAL;
3227         }
3228         if (!mddev->pers->hot_add_disk) {
3229                 printk(KERN_WARNING 
3230                         "%s: personality does not support diskops!\n",
3231                         mdname(mddev));
3232                 return -EINVAL;
3233         }
3234
3235         rdev = md_import_device(dev, -1, 0);
3236         if (IS_ERR(rdev)) {
3237                 printk(KERN_WARNING 
3238                         "md: error, md_import_device() returned %ld\n",
3239                         PTR_ERR(rdev));
3240                 return -EINVAL;
3241         }
3242
3243         if (mddev->persistent)
3244                 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
3245         else
3246                 rdev->sb_offset =
3247                         rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
3248
3249         size = calc_dev_size(rdev, mddev->chunk_size);
3250         rdev->size = size;
3251
3252         if (test_bit(Faulty, &rdev->flags)) {
3253                 printk(KERN_WARNING 
3254                 "md: cannot hot-add faulty %s disk to %s!\n",
3255                         bdevname(rdev->bdev,b), mdname(mddev));
3256                 err = -EINVAL;
3257                 goto abort_export;
3258         }
3259         clear_bit(In_sync, &rdev->flags);
3260         rdev->desc_nr = -1;
3261         err = bind_rdev_to_array(rdev, mddev);
3262         if (err)
3263                 goto abort_export;
3264
3265         /*
3266          * The rest should really be atomic, since disk failures can be
3267          * noticed from interrupt context ...
3268          */
3269
3270         if (rdev->desc_nr == mddev->max_disks) {
3271                 printk(KERN_WARNING "%s: cannot hot-add to full array!\n",
3272                         mdname(mddev));
3273                 err = -EBUSY;
3274                 goto abort_unbind_export;
3275         }
3276
3277         rdev->raid_disk = -1;
3278
3279         md_update_sb(mddev);
3280
3281         /*
3282          * Kick recovery, maybe this spare has to be added to the
3283          * array immediately.
3284          */
3285         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3286         md_wakeup_thread(mddev->thread);
3287         md_new_event(mddev);
3288         return 0;
3289
3290 abort_unbind_export:
3291         unbind_rdev_from_array(rdev);
3292
3293 abort_export:
3294         export_rdev(rdev);
3295         return err;
3296 }
3297
3298 /* similar to deny_write_access, but accounts for our holding a reference
3299  * to the file ourselves */
3300 static int deny_bitmap_write_access(struct file * file)
3301 {
3302         struct inode *inode = file->f_mapping->host;
3303
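        /* i_writecount > 0 counts write openers, and a negative value
         * (as set by deny_write_access()) refuses new ones.  Our own
         * reference to the bitmap file accounts for one writer, hence
         * the "> 1" rather than "> 0" test below.
         */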
3304         spin_lock(&inode->i_lock);
3305         if (atomic_read(&inode->i_writecount) > 1) {
3306                 spin_unlock(&inode->i_lock);
3307                 return -ETXTBSY;
3308         }
3309         atomic_set(&inode->i_writecount, -1);
3310         spin_unlock(&inode->i_lock);
3311
3312         return 0;
3313 }
3314
3315 static int set_bitmap_file(mddev_t *mddev, int fd)
3316 {
3317         int err;
3318
3319         if (mddev->pers) {
3320                 if (!mddev->pers->quiesce)
3321                         return -EBUSY;
3322                 if (mddev->recovery || mddev->sync_thread)
3323                         return -EBUSY;
3324                 /* we should be able to change the bitmap.. */
3325         }
3326
3327
3328         if (fd >= 0) {
3329                 if (mddev->bitmap)
3330                         return -EEXIST; /* cannot add when bitmap is present */
3331                 mddev->bitmap_file = fget(fd);
3332
3333                 if (mddev->bitmap_file == NULL) {
3334                         printk(KERN_ERR "%s: error: failed to get bitmap file\n",
3335                                mdname(mddev));
3336                         return -EBADF;
3337                 }
3338
3339                 err = deny_bitmap_write_access(mddev->bitmap_file);
3340                 if (err) {
3341                         printk(KERN_ERR "%s: error: bitmap file is already in use\n",
3342                                mdname(mddev));
3343                         fput(mddev->bitmap_file);
3344                         mddev->bitmap_file = NULL;
3345                         return err;
3346                 }
3347                 mddev->bitmap_offset = 0; /* file overrides offset */
3348         } else if (mddev->bitmap == NULL)
3349                 return -ENOENT; /* cannot remove what isn't there */
3350         err = 0;
3351         if (mddev->pers) {
3352                 mddev->pers->quiesce(mddev, 1);
3353                 if (fd >= 0)
3354                         err = bitmap_create(mddev);
3355                 if (fd < 0 || err)
3356                         bitmap_destroy(mddev);
3357                 mddev->pers->quiesce(mddev, 0);
3358         } else if (fd < 0) {
3359                 if (mddev->bitmap_file)
3360                         fput(mddev->bitmap_file);
3361                 mddev->bitmap_file = NULL;
3362         }
3363
3364         return err;
3365 }
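
/*
 * Illustrative userspace sequence (not part of the driver; fd names are
 * hypothetical): SET_BITMAP_FILE attaches an external bitmap file when
 * given an open descriptor, and detaches the current one when given -1.
 *
 *	int bmfd = open("/mnt/safe/md0-bitmap", O_RDWR);
 *
 *	ioctl(md_fd, SET_BITMAP_FILE, bmfd);	attach (fd >= 0)
 *	...
 *	ioctl(md_fd, SET_BITMAP_FILE, -1);	detach (fd < 0)
 */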
3366
3367 /*
3368  * set_array_info is used two different ways
3369  * The original usage is when creating a new array.
3370  * In this usage, raid_disks is > 0 and, together with
3371  *  level, size, not_persistent, layout and chunk_size, determines the
3372  *  shape of the array.
3373  *  This will always create an array with a type-0.90.0 superblock.
3374  * The newer usage is when assembling an array.
3375  *  In this case raid_disks will be 0, and the major_version field is
3376  *  used to determine which style of super-blocks are to be found on
3377  *  the devices.  The minor and patch _version numbers are also kept
3378  *  in case the super_block handler wishes to interpret them.
3379  */
3380 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
3381 {
3382
3383         if (info->raid_disks == 0) {
3384                 /* just setting version number for superblock loading */
3385                 if (info->major_version < 0 ||
3386                     info->major_version >= ARRAY_SIZE(super_types) ||
3387                     super_types[info->major_version].name == NULL) {
3388                         /* maybe try to auto-load a module? */
3389                         printk(KERN_INFO 
3390                                 "md: superblock version %d not known\n",
3391                                 info->major_version);
3392                         return -EINVAL;
3393                 }
3394                 mddev->major_version = info->major_version;
3395                 mddev->minor_version = info->minor_version;
3396                 mddev->patch_version = info->patch_version;
3397                 return 0;
3398         }
3399         mddev->major_version = MD_MAJOR_VERSION;
3400         mddev->minor_version = MD_MINOR_VERSION;
3401         mddev->patch_version = MD_PATCHLEVEL_VERSION;
3402         mddev->ctime         = get_seconds();
3403
3404         mddev->level         = info->level;
3405         mddev->clevel[0]     = 0;
3406         mddev->size          = info->size;
3407         mddev->raid_disks    = info->raid_disks;
3408         /* don't set md_minor, it is determined by which /dev/md* was
3409          * opened
3410          */
3411         if (info->state & (1<<MD_SB_CLEAN))
3412                 mddev->recovery_cp = MaxSector;
3413         else
3414                 mddev->recovery_cp = 0;
3415         mddev->persistent    = ! info->not_persistent;
3416
3417         mddev->layout        = info->layout;
3418         mddev->chunk_size    = info->chunk_size;
3419
3420         mddev->max_disks     = MD_SB_DISKS;
3421
3422         mddev->sb_dirty      = 1;
3423
3424         mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
3425         mddev->bitmap_offset = 0;
3426
3427         /*
3428          * Generate a 128 bit UUID
3429          */
3430         get_random_bytes(mddev->uuid, 16);
3431
3432         return 0;
3433 }
3434
3435 static int update_size(mddev_t *mddev, unsigned long size)
3436 {
3437         mdk_rdev_t * rdev;
3438         int rv;
3439         struct list_head *tmp;
3440
3441         if (mddev->pers->resize == NULL)
3442                 return -EINVAL;
3443         /* The "size" is the amount of each device that is used.
3444          * This can only make sense for arrays with redundancy.
3445          * linear and raid0 always use whatever space is available.
3446          * We can only consider changing the size if no resync
3447          * or reconstruction is happening, and if the new size
3448          * is acceptable. It must fit before the sb_offset or,
3449          * if that is <data_offset, it must fit before the
3450          * size of each device.
3451          * If size is zero, we find the largest size that fits.
3452          */
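        /* Unit note: "size" is in 1K blocks, while avail, data_offset
         * and get_capacity() are in 512-byte sectors; hence the avail/2
         * and size << 1 conversions below, and the size*2 handed to
         * ->resize().
         */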
3453         if (mddev->sync_thread)
3454                 return -EBUSY;
3455         ITERATE_RDEV(mddev,rdev,tmp) {
3456                 sector_t avail;
3457                 int fit = (size == 0);
3458                 if (rdev->sb_offset > rdev->data_offset)
3459                         avail = (rdev->sb_offset*2) - rdev->data_offset;
3460                 else
3461                         avail = get_capacity(rdev->bdev->bd_disk)
3462                                 - rdev->data_offset;
3463                 if (fit && (size == 0 || size > avail/2))
3464                         size = avail/2;
3465                 if (avail < ((sector_t)size << 1))
3466                         return -ENOSPC;
3467         }
3468         rv = mddev->pers->resize(mddev, (sector_t)size *2);
3469         if (!rv) {
3470                 struct block_device *bdev;
3471
3472                 bdev = bdget_disk(mddev->gendisk, 0);
3473                 if (bdev) {
3474                         mutex_lock(&bdev->bd_inode->i_mutex);
3475                         i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10);
3476                         mutex_unlock(&bdev->bd_inode->i_mutex);
3477                         bdput(bdev);
3478                 }
3479         }
3480         return rv;
3481 }
3482
3483 static int update_raid_disks(mddev_t *mddev, int raid_disks)
3484 {
3485         int rv;
3486         /* change the number of raid disks */
3487         if (mddev->pers->reshape == NULL)
3488                 return -EINVAL;
3489         if (raid_disks <= 0 ||
3490             raid_disks >= mddev->max_disks)
3491                 return -EINVAL;
3492         if (mddev->sync_thread)
3493                 return -EBUSY;
3494         rv = mddev->pers->reshape(mddev, raid_disks);
3495         return rv;
3496 }
3497
3498
3499 /*
3500  * update_array_info is used to change the configuration of an
3501  * on-line array.
3502  * The version, ctime, level, size, raid_disks, not_persistent, layout
3503  * and chunk_size fields in the info are checked against the array.
3504  * Any differences that cannot be handled will cause an error.
3505  * Normally, only one change can be managed at a time.
3506  */
3507 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
3508 {
3509         int rv = 0;
3510         int cnt = 0;
3511         int state = 0;
3512
3513         /* calculate expected state, ignoring low bits */
3514         if (mddev->bitmap && mddev->bitmap_offset)
3515                 state |= (1 << MD_SB_BITMAP_PRESENT);
3516
3517         if (mddev->major_version != info->major_version ||
3518             mddev->minor_version != info->minor_version ||
3519 /*          mddev->patch_version != info->patch_version || */
3520             mddev->ctime         != info->ctime         ||
3521             mddev->level         != info->level         ||
3522 /*          mddev->layout        != info->layout        || */
3523             !mddev->persistent   != info->not_persistent||
3524             mddev->chunk_size    != info->chunk_size    ||
3525             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
3526             ((state^info->state) & 0xfffffe00)
3527                 )
3528                 return -EINVAL;
3529         /* Check there is only one change */
3530         if (info->size >= 0 && mddev->size != info->size) cnt++;
3531         if (mddev->raid_disks != info->raid_disks) cnt++;
3532         if (mddev->layout != info->layout) cnt++;
3533         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
3534         if (cnt == 0) return 0;
3535         if (cnt > 1) return -EINVAL;
3536
3537         if (mddev->layout != info->layout) {
3538                 /* Change layout
3539                  * we don't need to do anything at the md level, the
3540                  * personality will take care of it all.
3541                  */
3542                 if (mddev->pers->reconfig == NULL)
3543                         return -EINVAL;
3544                 else
3545                         return mddev->pers->reconfig(mddev, info->layout, -1);
3546         }
3547         if (info->size >= 0 && mddev->size != info->size)
3548                 rv = update_size(mddev, info->size);
3549
3550         if (mddev->raid_disks    != info->raid_disks)
3551                 rv = update_raid_disks(mddev, info->raid_disks);
3552
3553         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
3554                 if (mddev->pers->quiesce == NULL)
3555                         return -EINVAL;
3556                 if (mddev->recovery || mddev->sync_thread)
3557                         return -EBUSY;
3558                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
3559                         /* add the bitmap */
3560                         if (mddev->bitmap)
3561                                 return -EEXIST;
3562                         if (mddev->default_bitmap_offset == 0)
3563                                 return -EINVAL;
3564                         mddev->bitmap_offset = mddev->default_bitmap_offset;
3565                         mddev->pers->quiesce(mddev, 1);
3566                         rv = bitmap_create(mddev);
3567                         if (rv)
3568                                 bitmap_destroy(mddev);
3569                         mddev->pers->quiesce(mddev, 0);
3570                 } else {
3571                         /* remove the bitmap */
3572                         if (!mddev->bitmap)
3573                                 return -ENOENT;
3574                         if (mddev->bitmap->file)
3575                                 return -EINVAL;
3576                         mddev->pers->quiesce(mddev, 1);
3577                         bitmap_destroy(mddev);
3578                         mddev->pers->quiesce(mddev, 0);
3579                         mddev->bitmap_offset = 0;
3580                 }
3581         }
3582         md_update_sb(mddev);
3583         return rv;
3584 }
3585
3586 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
3587 {
3588         mdk_rdev_t *rdev;
3589
3590         if (mddev->pers == NULL)
3591                 return -ENODEV;
3592
3593         rdev = find_rdev(mddev, dev);
3594         if (!rdev)
3595                 return -ENODEV;
3596
3597         md_error(mddev, rdev);
3598         return 0;
3599 }
3600
3601 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
3602 {
3603         mddev_t *mddev = bdev->bd_disk->private_data;
3604
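        /* Invented geometry: 2 heads x 4 sectors = 8 sectors per
         * cylinder, so cylinders = capacity (in sectors) / 8.  See the
         * CHS comment in md_ioctl() below.
         */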
3605         geo->heads = 2;
3606         geo->sectors = 4;
3607         geo->cylinders = get_capacity(mddev->gendisk) / 8;
3608         return 0;
3609 }
3610
3611 static int md_ioctl(struct inode *inode, struct file *file,
3612                         unsigned int cmd, unsigned long arg)
3613 {
3614         int err = 0;
3615         void __user *argp = (void __user *)arg;
3616         mddev_t *mddev = NULL;
3617
3618         if (!capable(CAP_SYS_ADMIN))
3619                 return -EACCES;
3620
3621         /*
3622          * Commands dealing with the RAID driver but not any
3623          * particular array:
3624          */
3625         switch (cmd)
3626         {
3627                 case RAID_VERSION:
3628                         err = get_version(argp);
3629                         goto done;
3630
3631                 case PRINT_RAID_DEBUG:
3632                         err = 0;
3633                         md_print_devices();
3634                         goto done;
3635
3636 #ifndef MODULE
3637                 case RAID_AUTORUN:
3638                         err = 0;
3639                         autostart_arrays(arg);
3640                         goto done;
3641 #endif
3642                 default:;
3643         }
3644
3645         /*
3646          * Commands creating/starting a new array:
3647          */
3648
3649         mddev = inode->i_bdev->bd_disk->private_data;
3650
3651         if (!mddev) {
3652                 BUG();
3653                 goto abort;
3654         }
3655
3656
3657         if (cmd == START_ARRAY) {
3658                 /* START_ARRAY doesn't need to lock the array as autostart_array
3659                  * does the locking, and it could even be a different array
3660                  */
3661                 static int cnt = 3;
3662                 if (cnt > 0) {
3663                         printk(KERN_WARNING
3664                                "md: %s(pid %d) used deprecated START_ARRAY ioctl. "
3665                                "This will not be supported beyond July 2006\n",
3666                                current->comm, current->pid);
3667                         cnt--;
3668                 }
3669                 err = autostart_array(new_decode_dev(arg));
3670                 if (err) {
3671                         printk(KERN_WARNING "md: autostart failed!\n");
3672                         goto abort;
3673                 }
3674                 goto done;
3675         }
3676
3677         err = mddev_lock(mddev);
3678         if (err) {
3679                 printk(KERN_INFO 
3680                         "md: ioctl lock interrupted, reason %d, cmd %d\n",
3681                         err, cmd);
3682                 goto abort;
3683         }
3684
3685         switch (cmd)
3686         {
3687                 case SET_ARRAY_INFO:
3688                         {
3689                                 mdu_array_info_t info;
3690                                 if (!arg)
3691                                         memset(&info, 0, sizeof(info));
3692                                 else if (copy_from_user(&info, argp, sizeof(info))) {
3693                                         err = -EFAULT;
3694                                         goto abort_unlock;
3695                                 }
3696                                 if (mddev->pers) {
3697                                         err = update_array_info(mddev, &info);
3698                                         if (err) {
3699                                                 printk(KERN_WARNING "md: couldn't update"
3700                                                        " array info. %d\n", err);
3701                                                 goto abort_unlock;
3702                                         }
3703                                         goto done_unlock;
3704                                 }
3705                                 if (!list_empty(&mddev->disks)) {
3706                                         printk(KERN_WARNING
3707                                                "md: array %s already has disks!\n",
3708                                                mdname(mddev));
3709                                         err = -EBUSY;
3710                                         goto abort_unlock;
3711                                 }
3712                                 if (mddev->raid_disks) {
3713                                         printk(KERN_WARNING
3714                                                "md: array %s already initialised!\n",
3715                                                mdname(mddev));
3716                                         err = -EBUSY;
3717                                         goto abort_unlock;
3718                                 }
3719                                 err = set_array_info(mddev, &info);
3720                                 if (err) {
3721                                         printk(KERN_WARNING "md: couldn't set"
3722                                                " array info. %d\n", err);
3723                                         goto abort_unlock;
3724                                 }
3725                         }
3726                         goto done_unlock;
3727
3728                 default:;
3729         }
3730
3731         /*
3732          * Commands querying/configuring an existing array:
3733          */
3734         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
3735          * RUN_ARRAY, and SET_BITMAP_FILE are allowed */
3736         if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
3737                         && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE) {
3738                 err = -ENODEV;
3739                 goto abort_unlock;
3740         }
3741
3742         /*
3743          * Commands even a read-only array can execute:
3744          */
3745         switch (cmd)
3746         {
3747                 case GET_ARRAY_INFO:
3748                         err = get_array_info(mddev, argp);
3749                         goto done_unlock;
3750
3751                 case GET_BITMAP_FILE:
3752                         err = get_bitmap_file(mddev, argp);
3753                         goto done_unlock;
3754
3755                 case GET_DISK_INFO:
3756                         err = get_disk_info(mddev, argp);
3757                         goto done_unlock;
3758
3759                 case RESTART_ARRAY_RW:
3760                         err = restart_array(mddev);
3761                         goto done_unlock;
3762
3763                 case STOP_ARRAY:
3764                         err = do_md_stop(mddev, 0);
3765                         goto done_unlock;
3766
3767                 case STOP_ARRAY_RO:
3768                         err = do_md_stop(mddev, 1);
3769                         goto done_unlock;
3770
3771         /*
3772          * We have a problem here : there is no easy way to give a CHS
3773          * virtual geometry. We currently pretend that we have a 2 heads
3774          * 4 sectors (with a BIG number of cylinders...). This drives
3775          * dosfs just mad... ;-)
3776          */
3777         }
3778
3779         /*
3780          * The remaining ioctls are changing the state of the
3781          * superblock, so we do not allow them on read-only arrays.
3782          * However non-MD ioctls (e.g. get-size) will still come through
3783          * here and hit the 'default' below, so only disallow
3784          * 'md' ioctls, and switch to rw mode if started auto-readonly.
3785          */
3786         if (_IOC_TYPE(cmd) == MD_MAJOR &&
3787             mddev->ro && mddev->pers) {
3788                 if (mddev->ro == 2) {
3789                         mddev->ro = 0;
3790                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3791                         md_wakeup_thread(mddev->thread);
3792
3793                 } else {
3794                         err = -EROFS;
3795                         goto abort_unlock;
3796                 }
3797         }
3798
3799         switch (cmd)
3800         {
3801                 case ADD_NEW_DISK:
3802                 {
3803                         mdu_disk_info_t info;
3804                         if (copy_from_user(&info, argp, sizeof(info)))
3805                                 err = -EFAULT;
3806                         else
3807                                 err = add_new_disk(mddev, &info);
3808                         goto done_unlock;
3809                 }
3810
3811                 case HOT_REMOVE_DISK:
3812                         err = hot_remove_disk(mddev, new_decode_dev(arg));
3813                         goto done_unlock;
3814
3815                 case HOT_ADD_DISK:
3816                         err = hot_add_disk(mddev, new_decode_dev(arg));
3817                         goto done_unlock;
3818
3819                 case SET_DISK_FAULTY:
3820                         err = set_disk_faulty(mddev, new_decode_dev(arg));
3821                         goto done_unlock;
3822
3823                 case RUN_ARRAY:
3824                         err = do_md_run(mddev);
3825                         goto done_unlock;
3826
3827                 case SET_BITMAP_FILE:
3828                         err = set_bitmap_file(mddev, (int)arg);
3829                         goto done_unlock;
3830
3831                 default:
3832                         if (_IOC_TYPE(cmd) == MD_MAJOR)
3833                                 printk(KERN_WARNING "md: %s(pid %d) used"
3834                                         " obsolete MD ioctl, upgrade your"
3835                                         " software to use new ioctls.\n",
3836                                         current->comm, current->pid);
3837                         err = -EINVAL;
3838                         goto abort_unlock;
3839         }
3840
3841 done_unlock:
3842 abort_unlock:
3843         mddev_unlock(mddev);
3844
3845         return err;
3846 done:
3847         if (err)
3848                 MD_BUG();
3849 abort:
3850         return err;
3851 }
3852
3853 static int md_open(struct inode *inode, struct file *file)
3854 {
3855         /*
3856          * Succeed if we can lock the mddev, which confirms that
3857          * it isn't being stopped right now.
3858          */
3859         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
3860         int err;
3861
3862         if ((err = mddev_lock(mddev)))
3863                 goto out;
3864
3865         err = 0;
3866         mddev_get(mddev);
3867         mddev_unlock(mddev);
3868
3869         check_disk_change(inode->i_bdev);
3870  out:
3871         return err;
3872 }
3873
3874 static int md_release(struct inode *inode, struct file * file)
3875 {
3876         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
3877
3878         BUG_ON(!mddev);
3879
3880         mddev_put(mddev);
3881
3882         return 0;
3883 }
3884
3885 static int md_media_changed(struct gendisk *disk)
3886 {
3887         mddev_t *mddev = disk->private_data;
3888
3889         return mddev->changed;
3890 }
3891
3892 static int md_revalidate(struct gendisk *disk)
3893 {
3894         mddev_t *mddev = disk->private_data;
3895
3896         mddev->changed = 0;
3897         return 0;
3898 }
3899 static struct block_device_operations md_fops =
3900 {
3901         .owner          = THIS_MODULE,
3902         .open           = md_open,
3903         .release        = md_release,
3904         .ioctl          = md_ioctl,
3905         .getgeo         = md_getgeo,
3906         .media_changed  = md_media_changed,
3907         .revalidate_disk= md_revalidate,
3908 };
3909
3910 static int md_thread(void * arg)
3911 {
3912         mdk_thread_t *thread = arg;
3913
3914         /*
3915          * md_thread is a 'system-thread', its priority should be very
3916          * high. We avoid resource deadlocks individually in each
3917          * raid personality. (RAID5 does preallocation) We also use RR and
3918          * the very same RT priority as kswapd, thus we will never get
3919          * into a priority inversion deadlock.
3920          *
3921          * we definitely have to have equal or higher priority than
3922          * bdflush, otherwise bdflush will deadlock if there are too
3923          * many dirty RAID5 blocks.
3924          */
3925
3926         allow_signal(SIGKILL);
3927         while (!kthread_should_stop()) {
3928
3929                 /* We need to wait INTERRUPTIBLE so that
3930                  * we don't add to the load-average.
3931                  * That means we need to be sure no signals are
3932                  * pending
3933                  */
3934                 if (signal_pending(current))
3935                         flush_signals(current);
3936
3937                 wait_event_interruptible_timeout
3938                         (thread->wqueue,
3939                          test_bit(THREAD_WAKEUP, &thread->flags)
3940                          || kthread_should_stop(),
3941                          thread->timeout);
3942                 try_to_freeze();
3943
3944                 clear_bit(THREAD_WAKEUP, &thread->flags);
3945
3946                 thread->run(thread->mddev);
3947         }
3948
3949         return 0;
3950 }
3951
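/*
 * Wake-up protocol: md_wakeup_thread() sets THREAD_WAKEUP and then wakes
 * the queue; md_thread() clears the bit again before calling ->run(), so
 * a wakeup that arrives while ->run() is executing is not lost.
 */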
3952 void md_wakeup_thread(mdk_thread_t *thread)
3953 {
3954         if (thread) {
3955                 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
3956                 set_bit(THREAD_WAKEUP, &thread->flags);
3957                 wake_up(&thread->wqueue);
3958         }
3959 }
3960
3961 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
3962                                  const char *name)
3963 {
3964         mdk_thread_t *thread;
3965
3966         thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
3967         if (!thread)
3968                 return NULL;
3969
3970         init_waitqueue_head(&thread->wqueue);
3971
3972         thread->run = run;
3973         thread->mddev = mddev;
3974         thread->timeout = MAX_SCHEDULE_TIMEOUT;
3975         thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
3976         if (IS_ERR(thread->tsk)) {
3977                 kfree(thread);
3978                 return NULL;
3979         }
3980         return thread;
3981 }
3982
3983 void md_unregister_thread(mdk_thread_t *thread)
3984 {
3985         dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
3986
3987         kthread_stop(thread->tsk);
3988         kfree(thread);
3989 }
3990
3991 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
3992 {
3993         if (!mddev) {
3994                 MD_BUG();
3995                 return;
3996         }
3997
3998         if (!rdev || test_bit(Faulty, &rdev->flags))
3999                 return;
4000 /*
4001         dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
4002                 mdname(mddev),
4003                 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
4004                 __builtin_return_address(0),__builtin_return_address(1),
4005                 __builtin_return_address(2),__builtin_return_address(3));
4006 */
4007         if (!mddev->pers->error_handler)
4008                 return;
4009         mddev->pers->error_handler(mddev,rdev);
4010         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4011         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4012         md_wakeup_thread(mddev->thread);
4013         md_new_event(mddev);
4014 }
4015
4016 /* seq_file implementation /proc/mdstat */
4017
4018 static void status_unused(struct seq_file *seq)
4019 {
4020         int i = 0;
4021         mdk_rdev_t *rdev;
4022         struct list_head *tmp;
4023
4024         seq_printf(seq, "unused devices: ");
4025
4026         ITERATE_RDEV_PENDING(rdev,tmp) {
4027                 char b[BDEVNAME_SIZE];
4028                 i++;
4029                 seq_printf(seq, "%s ",
4030                               bdevname(rdev->bdev,b));
4031         }
4032         if (!i)
4033                 seq_printf(seq, "<none>");
4034
4035         seq_printf(seq, "\n");
4036 }
4037
4038
4039 static void status_resync(struct seq_file *seq, mddev_t * mddev)
4040 {
4041         unsigned long max_blocks, resync, res, dt, db, rt;
4042
4043         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
4044
4045         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
4046                 max_blocks = mddev->resync_max_sectors >> 1;
4047         else
4048                 max_blocks = mddev->size;
4049
4050         /*
4051          * Should not happen.
4052          */
4053         if (!max_blocks) {
4054                 MD_BUG();
4055                 return;
4056         }
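        /*
         * res is progress in per-mille (0-1000): res/50 fills 0-20 cells
         * of the bar below, and res/10 with res%10 prints the percentage.
         */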
4057         res = (resync/1024)*1000/(max_blocks/1024 + 1);
4058         {
4059                 int i, x = res/50, y = 20-x;
4060                 seq_printf(seq, "[");
4061                 for (i = 0; i < x; i++)
4062                         seq_printf(seq, "=");
4063                 seq_printf(seq, ">");
4064                 for (i = 0; i < y; i++)
4065                         seq_printf(seq, ".");
4066                 seq_printf(seq, "] ");
4067         }
4068         seq_printf(seq, " %s =%3lu.%lu%% (%lu/%lu)",
4069                       (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
4070                        "resync" : "recovery"),
4071                       res/10, res % 10, resync, max_blocks);
4072
4073         /*
4074          * We do not want to overflow, so the order of operands and
4075          * the * 100 / 100 trick are important. We do a +1 to be
4076          * safe against division by zero. We only estimate anyway.
4077          *
4078          * dt: time from mark until now
4079          * db: blocks written from mark until now
4080          * rt: remaining time
4081          */
4082         dt = ((jiffies - mddev->resync_mark) / HZ);
4083         if (!dt) dt++;
4084         db = resync - (mddev->resync_mark_cnt/2);
4085         rt = (dt * ((max_blocks-resync) / (db/100+1)))/100;
4086
4087         seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
4088
4089         seq_printf(seq, " speed=%ldK/sec", db/dt);
4090 }
4091
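/*
 * The mdstat seq_file iterator uses two sentinel cursors: (void*)1 for
 * the "Personalities" header and (void*)2 for the trailing "unused
 * devices" line.  Anything else is a live mddev, taken with mddev_get()
 * under all_mddevs_lock and dropped again in md_seq_next()/md_seq_stop().
 */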
4092 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
4093 {
4094         struct list_head *tmp;
4095         loff_t l = *pos;
4096         mddev_t *mddev;
4097
4098         if (l >= 0x10000)
4099                 return NULL;
4100         if (!l--)
4101                 /* header */
4102                 return (void*)1;
4103
4104         spin_lock(&all_mddevs_lock);
4105         list_for_each(tmp,&all_mddevs)
4106                 if (!l--) {
4107                         mddev = list_entry(tmp, mddev_t, all_mddevs);
4108                         mddev_get(mddev);
4109                         spin_unlock(&all_mddevs_lock);
4110                         return mddev;
4111                 }
4112         spin_unlock(&all_mddevs_lock);
4113         if (!l--)
4114                 return (void*)2;/* tail */
4115         return NULL;
4116 }
4117
4118 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4119 {
4120         struct list_head *tmp;
4121         mddev_t *next_mddev, *mddev = v;
4122         
4123         ++*pos;
4124         if (v == (void*)2)
4125                 return NULL;
4126
4127         spin_lock(&all_mddevs_lock);
4128         if (v == (void*)1)
4129                 tmp = all_mddevs.next;
4130         else
4131                 tmp = mddev->all_mddevs.next;
4132         if (tmp != &all_mddevs)
4133                 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
4134         else {
4135                 next_mddev = (void*)2;
4136                 *pos = 0x10000;
4137         }               
4138         spin_unlock(&all_mddevs_lock);
4139
4140         if (v != (void*)1)
4141                 mddev_put(mddev);
4142         return next_mddev;
4143
4144 }
4145
4146 static void md_seq_stop(struct seq_file *seq, void *v)
4147 {
4148         mddev_t *mddev = v;
4149
4150         if (mddev && v != (void*)1 && v != (void*)2)
4151                 mddev_put(mddev);
4152 }
4153
4154 struct mdstat_info {
4155         int event;
4156 };
4157
4158 static int md_seq_show(struct seq_file *seq, void *v)
4159 {
4160         mddev_t *mddev = v;
4161         sector_t size;
4162         struct list_head *tmp2;
4163         mdk_rdev_t *rdev;
4164         struct mdstat_info *mi = seq->private;
4165         struct bitmap *bitmap;
4166
4167         if (v == (void*)1) {
4168                 struct mdk_personality *pers;
4169                 seq_printf(seq, "Personalities : ");
4170                 spin_lock(&pers_lock);
4171                 list_for_each_entry(pers, &pers_list, list)
4172                         seq_printf(seq, "[%s] ", pers->name);
4173
4174                 spin_unlock(&pers_lock);
4175                 seq_printf(seq, "\n");
4176                 mi->event = atomic_read(&md_event_count);
4177                 return 0;
4178         }
4179         if (v == (void*)2) {
4180                 status_unused(seq);
4181                 return 0;
4182         }
4183
4184         if (mddev_lock(mddev) != 0)
4185                 return -EINTR;
4186         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
4187                 seq_printf(seq, "%s : %sactive", mdname(mddev),
4188                                                 mddev->pers ? "" : "in");
4189                 if (mddev->pers) {
4190                         if (mddev->ro==1)
4191                                 seq_printf(seq, " (read-only)");
4192                         if (mddev->ro==2)
4193                                 seq_printf(seq, "(auto-read-only)");
4194                         seq_printf(seq, " %s", mddev->pers->name);
4195                 }
4196
4197                 size = 0;
4198                 ITERATE_RDEV(mddev,rdev,tmp2) {
4199                         char b[BDEVNAME_SIZE];
4200                         seq_printf(seq, " %s[%d]",
4201                                 bdevname(rdev->bdev,b), rdev->desc_nr);
4202                         if (test_bit(WriteMostly, &rdev->flags))
4203                                 seq_printf(seq, "(W)");
4204                         if (test_bit(Faulty, &rdev->flags)) {
4205                                 seq_printf(seq, "(F)");
4206                                 continue;
4207                         } else if (rdev->raid_disk < 0)
4208                                 seq_printf(seq, "(S)"); /* spare */
4209                         size += rdev->size;
4210                 }
4211
4212                 if (!list_empty(&mddev->disks)) {
4213                         if (mddev->pers)
4214                                 seq_printf(seq, "\n      %llu blocks",
4215                                         (unsigned long long)mddev->array_size);
4216                         else
4217                                 seq_printf(seq, "\n      %llu blocks",
4218                                         (unsigned long long)size);
4219                 }
4220                 if (mddev->persistent) {
4221                         if (mddev->major_version != 0 ||
4222                             mddev->minor_version != 90) {
4223                                 seq_printf(seq," super %d.%d",
4224                                            mddev->major_version,
4225                                            mddev->minor_version);
4226                         }
4227                 } else
4228                         seq_printf(seq, " super non-persistent");
4229
4230                 if (mddev->pers) {
4231                         mddev->pers->status(seq, mddev);
4232                         seq_printf(seq, "\n      ");
4233                         if (mddev->pers->sync_request) {
4234                                 if (mddev->curr_resync > 2) {
4235                                         status_resync(seq, mddev);
4236                                         seq_printf(seq, "\n      ");
4237                                 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
4238                                         seq_printf(seq, "\tresync=DELAYED\n      ");
4239                                 else if (mddev->recovery_cp < MaxSector)
4240                                         seq_printf(seq, "\tresync=PENDING\n      ");
4241                         }
4242                 } else
4243                         seq_printf(seq, "\n       ");
4244
4245                 if ((bitmap = mddev->bitmap)) {
4246                         unsigned long chunk_kb;
4247                         unsigned long flags;
4248                         spin_lock_irqsave(&bitmap->lock, flags);
4249                         chunk_kb = bitmap->chunksize >> 10;
4250                         seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
4251                                 "%lu%s chunk",
4252                                 bitmap->pages - bitmap->missing_pages,
4253                                 bitmap->pages,
4254                                 (bitmap->pages - bitmap->missing_pages)
4255                                         << (PAGE_SHIFT - 10),
4256                                 chunk_kb ? chunk_kb : bitmap->chunksize,
4257                                 chunk_kb ? "KB" : "B");
4258                         if (bitmap->file) {
4259                                 seq_printf(seq, ", file: ");
4260                                 seq_path(seq, bitmap->file->f_vfsmnt,
4261                                          bitmap->file->f_dentry," \t\n");
4262                         }
4263
4264                         seq_printf(seq, "\n");
4265                         spin_unlock_irqrestore(&bitmap->lock, flags);
4266                 }
4267
4268                 seq_printf(seq, "\n");
4269         }
4270         mddev_unlock(mddev);
4271         
4272         return 0;
4273 }
4274
4275 static struct seq_operations md_seq_ops = {
4276         .start  = md_seq_start,
4277         .next   = md_seq_next,
4278         .stop   = md_seq_stop,
4279         .show   = md_seq_show,
4280 };
4281
4282 static int md_seq_open(struct inode *inode, struct file *file)
4283 {
4284         int error;
4285         struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
4286         if (mi == NULL)
4287                 return -ENOMEM;
4288
4289         error = seq_open(file, &md_seq_ops);
4290         if (error)
4291                 kfree(mi);
4292         else {
4293                 struct seq_file *p = file->private_data;
4294                 p->private = mi;
4295                 mi->event = atomic_read(&md_event_count);
4296         }
4297         return error;
4298 }
4299
4300 static int md_seq_release(struct inode *inode, struct file *file)
4301 {
4302         struct seq_file *m = file->private_data;
4303         struct mdstat_info *mi = m->private;
4304         m->private = NULL;
4305         kfree(mi);
4306         return seq_release(inode, file);
4307 }
4308
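/*
 * Poll support for /proc/mdstat: the file is always readable, and
 * POLLERR|POLLPRI is raised once md_event_count has advanced past the
 * value sampled at open (or at the last header show), letting monitors
 * sleep until the array state changes.
 */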
4309 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
4310 {
4311         struct seq_file *m = filp->private_data;
4312         struct mdstat_info *mi = m->private;
4313         int mask;
4314
4315         poll_wait(filp, &md_event_waiters, wait);
4316
4317         /* always allow read */
4318         mask = POLLIN | POLLRDNORM;
4319
4320         if (mi->event != atomic_read(&md_event_count))
4321                 mask |= POLLERR | POLLPRI;
4322         return mask;
4323 }
4324
4325 static struct file_operations md_seq_fops = {
4326         .open           = md_seq_open,
4327         .read           = seq_read,
4328         .llseek         = seq_lseek,
4329         .release        = md_seq_release,
4330         .poll           = mdstat_poll,
4331 };
4332
4333 int register_md_personality(struct mdk_personality *p)
4334 {
4335         spin_lock(&pers_lock);
4336         list_add_tail(&p->list, &pers_list);
4337         printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
4338         spin_unlock(&pers_lock);
4339         return 0;
4340 }
4341
4342 int unregister_md_personality(struct mdk_personality *p)
4343 {
4344         printk(KERN_INFO "md: %s personality unregistered\n", p->name);
4345         spin_lock(&pers_lock);
4346         list_del_init(&p->list);
4347         spin_unlock(&pers_lock);
4348         return 0;
4349 }
4350
4351 static int is_mddev_idle(mddev_t *mddev)
4352 {
4353         mdk_rdev_t * rdev;
4354         struct list_head *tmp;
4355         int idle;
4356         unsigned long curr_events;
4357
4358         idle = 1;
4359         ITERATE_RDEV(mddev,rdev,tmp) {
4360                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
4361                 curr_events = disk_stat_read(disk, sectors[0]) + 
4362                                 disk_stat_read(disk, sectors[1]) - 
4363                                 atomic_read(&disk->sync_io);
4364                 /* The difference between curr_events and last_events
4365                  * will be affected by any new non-sync IO (making
4366                  * curr_events bigger) and any difference in the amount of
4367                  * in-flight sync IO (making curr_events bigger or smaller)
4368                  * The amount in-flight is currently limited to
4369                  * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6
4370                  * which is at most 4096 sectors.
4371                  * These numbers are fairly fragile and should be made
4372                  * more robust, probably by enforcing the
4373                  * 'window size' that md_do_sync sort-of uses.
4374                  *
4375                  * Note: the following is an unsigned comparison.
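                 * "curr_events - last_events + 4096 > 8192" fires when
                 * the delta exceeds 4096 in either direction, since a
                 * negative delta wraps around to a huge unsigned value.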
4376                  */
4377                 if ((curr_events - rdev->last_events + 4096) > 8192) {
4378                         rdev->last_events = curr_events;
4379                         idle = 0;
4380                 }
4381         }
4382         return idle;
4383 }
4384
4385 void md_done_sync(mddev_t *mddev, int blocks, int ok)
4386 {
4387         /* another "blocks" (512byte) blocks have been synced */
4388         atomic_sub(blocks, &mddev->recovery_active);
4389         wake_up(&mddev->recovery_wait);
4390         if (!ok) {
4391                 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
4392                 md_wakeup_thread(mddev->thread);
4393                 /* stop recovery, signal do_sync ... */
4394         }
4395 }
4396
4397
4398 /* md_write_start(mddev, bi)
4399  * If we need to update some array metadata (e.g. 'active' flag
4400  * in superblock) before writing, schedule a superblock update
4401  * and wait for it to complete.
4402  */
4403 void md_write_start(mddev_t *mddev, struct bio *bi)
4404 {
4405         if (bio_data_dir(bi) != WRITE)
4406                 return;
4407
4408         BUG_ON(mddev->ro == 1);
4409         if (mddev->ro == 2) {
4410                 /* need to switch to read/write */
4411                 mddev->ro = 0;
4412                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4413                 md_wakeup_thread(mddev->thread);
4414         }
4415         atomic_inc(&mddev->writes_pending);
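        /* in_sync is tested twice, first locklessly and then under
         * write_lock, so the common already-dirty case pays no locking
         * cost.
         */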
4416         if (mddev->in_sync) {
4417                 spin_lock_irq(&mddev->write_lock);
4418                 if (mddev->in_sync) {
4419                         mddev->in_sync = 0;
4420                         mddev->sb_dirty = 1;
4421                         md_wakeup_thread(mddev->thread);
4422                 }
4423                 spin_unlock_irq(&mddev->write_lock);
4424         }
4425         wait_event(mddev->sb_wait, mddev->sb_dirty==0);
4426 }
4427
4428 void md_write_end(mddev_t *mddev)
4429 {
4430         if (atomic_dec_and_test(&mddev->writes_pending)) {
4431                 if (mddev->safemode == 2)
4432                         md_wakeup_thread(mddev->thread);
4433                 else
4434                         mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
4435         }
4436 }
4437
4438 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
4439
4440 #define SYNC_MARKS      10
4441 #define SYNC_MARK_STEP  (3*HZ)
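/*
 * mark[]/mark_cnt[] below form a rolling window of SYNC_MARKS
 * checkpoints taken every SYNC_MARK_STEP jiffies (10 x 3s, roughly a
 * 30 second window), from which resync_mark/resync_mark_cnt and hence
 * the speed reported by status_resync() are derived.
 */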
4442 static void md_do_sync(mddev_t *mddev)
4443 {
4444         mddev_t *mddev2;
4445         unsigned int currspeed = 0,
4446                  window;
4447         sector_t max_sectors,j, io_sectors;
4448         unsigned long mark[SYNC_MARKS];
4449         sector_t mark_cnt[SYNC_MARKS];
4450         int last_mark,m;
4451         struct list_head *tmp;
4452         sector_t last_check;
4453         int skipped = 0;
4454
4455         /* just in case the thread restarts... */
4456         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
4457                 return;
4458
4459         /* we overload curr_resync somewhat here.
4460          * 0 == not engaged in resync at all
4461          * 2 == checking that there is no conflict with another sync
4462          * 1 == like 2, but have yielded to allow conflicting resync to
4463          *              commence
4464          * other == active in resync - this many blocks
4465          *
4466          * Before starting a resync we must have set curr_resync to
4467          * 2, and then checked that every "conflicting" array has curr_resync
4468          * less than ours.  When we find one that is the same or higher
4469          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
4470          * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
4471          * This will mean we have to start checking from the beginning again.
4472          *
4473          */
4474
4475         do {
4476                 mddev->curr_resync = 2;
4477
4478         try_again:
4479                 if (kthread_should_stop()) {
4480                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4481                         goto skip;
4482                 }
4483                 ITERATE_MDDEV(mddev2,tmp) {
4484                         if (mddev2 == mddev)
4485                                 continue;
4486                         if (mddev2->curr_resync && 
4487                             match_mddev_units(mddev,mddev2)) {
4488                                 DEFINE_WAIT(wq);
4489                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
4490                                         /* arbitrarily yield */
4491                                         mddev->curr_resync = 1;
4492                                         wake_up(&resync_wait);
4493                                 }
4494                                 if (mddev > mddev2 && mddev->curr_resync == 1)
4495                                         /* no need to wait here, we can wait the next
4496                                          * time 'round when curr_resync == 2
4497                                          */
4498                                         continue;
4499                                 prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
4500                                 if (!kthread_should_stop() &&
4501                                     mddev2->curr_resync >= mddev->curr_resync) {
4502                                         printk(KERN_INFO "md: delaying resync of %s"
4503                                                " until %s has finished resync (they"
4504                                                " share one or more physical units)\n",
4505                                                mdname(mddev), mdname(mddev2));
4506                                         mddev_put(mddev2);
4507                                         schedule();
4508                                         finish_wait(&resync_wait, &wq);
4509                                         goto try_again;
4510                                 }
4511                                 finish_wait(&resync_wait, &wq);
4512                         }
4513                 }
4514         } while (mddev->curr_resync < 2);
4515
4516         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
4517                 /* resync follows the size requested by the personality,
4518                  * which defaults to physical size, but can be virtual size
4519                  */
4520                 max_sectors = mddev->resync_max_sectors;
4521                 mddev->resync_mismatches = 0;
4522         } else
4523                 /* recovery follows the physical size of devices */
4524                 max_sectors = mddev->size << 1;
4525
4526         printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev));
4527         printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
4528                 " %d KB/sec/disc.\n", speed_min(mddev));
4529         printk(KERN_INFO "md: using maximum available idle IO bandwidth "
4530                "(but not more than %d KB/sec) for reconstruction.\n",
4531                speed_max(mddev));
4532
4533         is_mddev_idle(mddev); /* this also initializes IO event counters */
4534         /* we don't use the checkpoint if there's a bitmap */
4535         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && !mddev->bitmap
4536             && ! test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
4537                 j = mddev->recovery_cp;
4538         else
4539                 j = 0;
4540         io_sectors = 0;
4541         for (m = 0; m < SYNC_MARKS; m++) {
4542                 mark[m] = jiffies;
4543                 mark_cnt[m] = io_sectors;
4544         }
4545         last_mark = 0;
4546         mddev->resync_mark = mark[last_mark];
4547         mddev->resync_mark_cnt = mark_cnt[last_mark];
4548
4549         /*
4550          * Tune reconstruction:
4551          */
4552         window = 32*(PAGE_SIZE/512);
4553         printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
4554                 window/2,(unsigned long long) max_sectors/2);
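        /*
         * Worked example, assuming 4K pages: window = 32 * (4096/512)
         * = 256 sectors = 128 KB, so the printk above reports a "128k
         * window".
         */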
4555
4556         atomic_set(&mddev->recovery_active, 0);
4557         init_waitqueue_head(&mddev->recovery_wait);
4558         last_check = 0;
4559
4560         if (j>2) {
4561                 printk(KERN_INFO 
4562                         "md: resuming recovery of %s from checkpoint.\n",
4563                         mdname(mddev));
4564                 mddev->curr_resync = j;
4565         }
4566
4567         while (j < max_sectors) {
4568                 sector_t sectors;
4569
4570                 skipped = 0;
4571                 sectors = mddev->pers->sync_request(mddev, j, &skipped,
4572                                             currspeed < speed_min(mddev));
4573                 if (sectors == 0) {
4574                         set_bit(MD_RECOVERY_ERR, &mddev->recovery);
4575                         goto out;
4576                 }
4577
4578                 if (!skipped) { /* actual IO requested */
4579                         io_sectors += sectors;
4580                         atomic_add(sectors, &mddev->recovery_active);
4581                 }
4582
4583                 j += sectors;
4584                 if (j>1) mddev->curr_resync = j;
4585                 if (last_check == 0)
4586                         /* this is the earliest that the rebuild will be
4587                          * visible in /proc/mdstat
4588                          */
4589                         md_new_event(mddev);
4590
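                /*
                 * The test below batches everything that follows: the
                 * bookkeeping and throttling run at most once per 'window'
                 * sectors of completed IO (and are skipped on the final
                 * pass); otherwise we loop straight back to sync_request().
                 */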
4591                 if (last_check + window > io_sectors || j == max_sectors)
4592                         continue;
4593
4594                 last_check = io_sectors;
4595
4596                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
4597                     test_bit(MD_RECOVERY_ERR, &mddev->recovery))
4598                         break;
4599
4600         repeat:
4601                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
4602                         /* step marks */
4603                         int next = (last_mark+1) % SYNC_MARKS;
4604
4605                         mddev->resync_mark = mark[next];
4606                         mddev->resync_mark_cnt = mark_cnt[next];
4607                         mark[next] = jiffies;
4608                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
4609                         last_mark = next;
4610                 }
4611
4612
4613                 if (kthread_should_stop()) {
4614                         /*
4615                          * told to stop via kthread_stop(), exit.
4616                          */
4617                         printk(KERN_INFO 
4618                                 "md: md_do_sync() got signal ... exiting\n");
4619                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4620                         goto out;
4621                 }
4622
4623                 /*
4624                  * this loop exits only when we are slower than
4625                  * the 'hard' speed limit, or the system was IO-idle for
4626                  * a jiffy.
4627                  * the system might be non-idle CPU-wise, but we only care
4628                  * about not overloading the IO subsystem. (things like an
4629                  * e2fsck being done on the RAID array should execute fast)
4630                  */
4631                 mddev->queue->unplug_fn(mddev->queue);
4632                 cond_resched();
4633
4634                 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
4635                         /((jiffies-mddev->resync_mark)/HZ +1) +1;
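                /*
                 * Units check for the expression above:
                 * (io_sectors - resync_mark_cnt) is 512-byte sectors done
                 * since the oldest mark, so /2 converts to KB; dividing by
                 * the elapsed seconds gives KB/sec.  The two +1 terms guard
                 * against division by zero and keep the reported speed
                 * nonzero right after a mark.  E.g. 20480 sectors over 10
                 * seconds: 10240/(10+1) + 1 == 931 KB/sec.
                 */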
4636
4637                 if (currspeed > speed_min(mddev)) {
4638                         if ((currspeed > speed_max(mddev)) ||
4639                                         !is_mddev_idle(mddev)) {
4640                                 msleep(500);
4641                                 goto repeat;
4642                         }
4643                 }
4644         }
4645         printk(KERN_INFO "md: %s: sync done.\n",mdname(mddev));
4646         /*
4647          * this also signals 'finished resyncing' to md_stop
4648          */
4649  out:
4650         mddev->queue->unplug_fn(mddev->queue);
4651
4652         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
4653
4654         /* tell personality that we are finished */
4655         mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
4656
4657         if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
4658             mddev->curr_resync > 2 &&
4659             mddev->curr_resync >= mddev->recovery_cp) {
4660                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4661                         printk(KERN_INFO 
4662                                 "md: checkpointing recovery of %s.\n",
4663                                 mdname(mddev));
4664                         mddev->recovery_cp = mddev->curr_resync;
4665                 } else
4666                         mddev->recovery_cp = MaxSector;
4667         }
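        /*
         * recovery_cp is the resync checkpoint: sectors below it are
         * known to be in sync, and MaxSector means "fully synced".  A
         * clean finish records MaxSector above; an interrupted one
         * records curr_resync so a later run can resume from there.
         */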
4668
4669  skip:
4670         mddev->curr_resync = 0;
4671         wake_up(&resync_wait);
4672         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
4673         md_wakeup_thread(mddev->thread);
4674 }
4675
4676
4677 /*
4678  * This routine is regularly called by all per-raid-array threads to
4679  * deal with generic issues like resync and super-block update.
4680  * RAID personalities that don't have a thread (linear/raid0) do not
4681  * need this as they never do any recovery or update the superblock.
4682  *
4683  * It does not do any resync itself, but rather "forks" off other threads
4684  * to do that as needed.
4685  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
4686  * "->recovery" and create a thread at ->sync_thread.
4687  * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
4688  * and wakes up this thread, which will reap that thread and finish up.
4689  * This thread also removes any faulty devices (with nr_pending == 0).
4690  *
4691  * The overall approach is:
4692  *  1/ If the superblock needs updating, update it.
4693  *  2/ If a recovery thread is running, don't do anything else.
4694  *  3/ If recovery has finished, clean up, possibly marking spares active.
4695  *  4/ If there are any faulty devices, remove them.
4696  *  5/ If the array is degraded, try to add spare devices.
4697  *  6/ If array has spares or is not in-sync, start a resync thread.
4698  */
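/*
 * Rough flag lifecycle, summarizing the code below rather than adding
 * behavior: error and write paths elsewhere in this file set
 * MD_RECOVERY_NEEDED and wake mddev->thread; this function then sets
 * MD_RECOVERY_RUNNING and registers md_do_sync() as ->sync_thread;
 * md_do_sync() sets MD_RECOVERY_DONE (plus _ERR or _INTR on failure)
 * and wakes mddev->thread again, which re-enters here to reap
 * ->sync_thread and clear ->recovery.
 */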
4699 void md_check_recovery(mddev_t *mddev)
4700 {
4701         mdk_rdev_t *rdev;
4702         struct list_head *rtmp;
4703
4704
4705         if (mddev->bitmap)
4706                 bitmap_daemon_work(mddev->bitmap);
4707
4708         if (mddev->ro)
4709                 return;
4710
4711         if (signal_pending(current)) {
4712                 if (mddev->pers->sync_request) {
4713                         printk(KERN_INFO "md: %s in immediate safe mode\n",
4714                                mdname(mddev));
4715                         mddev->safemode = 2;
4716                 }
4717                 flush_signals(current);
4718         }
4719
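        /*
         * safemode convention used below: 0 == off; 1 == the safemode
         * timer fired, so mark the array clean once writes drain;
         * 2 == "immediate safe mode" requested by a signal above, i.e.
         * the same but without waiting for the timer.
         */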
4720         if ( ! (
4721                 mddev->sb_dirty ||
4722                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
4723                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
4724                 (mddev->safemode == 1) ||
4725                 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
4726                  && !mddev->in_sync && mddev->recovery_cp == MaxSector)
4727                 ))
4728                 return;
4729
4730         if (mddev_trylock(mddev)==0) {
4731                 int spares =0;
4732
4733                 spin_lock_irq(&mddev->write_lock);
4734                 if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
4735                     !mddev->in_sync && mddev->recovery_cp == MaxSector) {
4736                         mddev->in_sync = 1;
4737                         mddev->sb_dirty = 1;
4738                 }
4739                 if (mddev->safemode == 1)
4740                         mddev->safemode = 0;
4741                 spin_unlock_irq(&mddev->write_lock);
4742
4743                 if (mddev->sb_dirty)
4744                         md_update_sb(mddev);
4745
4746
4747                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
4748                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
4749                         /* resync/recovery still happening */
4750                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4751                         goto unlock;
4752                 }
4753                 if (mddev->sync_thread) {
4754                         /* resync has finished, collect result */
4755                         md_unregister_thread(mddev->sync_thread);
4756                         mddev->sync_thread = NULL;
4757                         if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
4758                             !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4759                                 /* success...*/
4760                                 /* activate any spares */
4761                                 mddev->pers->spare_active(mddev);
4762                         }
4763                         md_update_sb(mddev);
4764
4765                         /* if the array is no longer degraded, then any saved_raid_disk
4766                          * information must be scrapped
4767                          */
4768                         if (!mddev->degraded)
4769                                 ITERATE_RDEV(mddev,rdev,rtmp)
4770                                         rdev->saved_raid_disk = -1;
4771
4772                         mddev->recovery = 0;
4773                         /* flag recovery needed just to double check */
4774                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4775                         md_new_event(mddev);
4776                         goto unlock;
4777                 }
4778                 /* Clear some bits that don't mean anything, but
4779                  * might be left set
4780                  */
4781                 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4782                 clear_bit(MD_RECOVERY_ERR, &mddev->recovery);
4783                 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
4784                 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
4785
4786                 /* no recovery is running.
4787                  * remove any failed drives, then
4788                  * add spares if possible.
4789                  * Spares are also removed and re-added, to allow
4790                  * the personality to fail the re-add.
4791                  */
4792                 ITERATE_RDEV(mddev,rdev,rtmp)
4793                         if (rdev->raid_disk >= 0 &&
4794                             (test_bit(Faulty, &rdev->flags) || ! test_bit(In_sync, &rdev->flags)) &&
4795                             atomic_read(&rdev->nr_pending)==0) {
4796                                 if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) {
4797                                         char nm[20];
4798                                         sprintf(nm,"rd%d", rdev->raid_disk);
4799                                         sysfs_remove_link(&mddev->kobj, nm);
4800                                         rdev->raid_disk = -1;
4801                                 }
4802                         }
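                /*
                 * The "rd%d" links above live under the array's md kobject,
                 * so removing raid disk 1 of md0, for example, deletes
                 * /sys/block/md0/md/rd1.
                 */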
4803
4804                 if (mddev->degraded) {
4805                         ITERATE_RDEV(mddev,rdev,rtmp)
4806                                 if (rdev->raid_disk < 0
4807                                     && !test_bit(Faulty, &rdev->flags)) {
4808                                         if (mddev->pers->hot_add_disk(mddev,rdev)) {
4809                                                 char nm[20];
4810                                                 sprintf(nm, "rd%d", rdev->raid_disk);
4811                                                 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
4812                                                 spares++;
4813                                                 md_new_event(mddev);
4814                                         } else
4815                                                 break;
4816                                 }
4817                 }
4818
4819                 if (spares) {
4820                         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4821                         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4822                 } else if (mddev->recovery_cp < MaxSector) {
4823                         set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4824                 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
4825                         /* nothing to be done ... */
4826                         goto unlock;
4827
4828                 if (mddev->pers->sync_request) {
4829                         set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4830                         if (spares && mddev->bitmap && ! mddev->bitmap->file) {
4831                                 /* We are adding a device or devices to an array
4832                                  * which has the bitmap stored on all devices.
4833                                  * So make sure all bitmap pages get written
4834                                  */
4835                                 bitmap_write_all(mddev->bitmap);
4836                         }
4837                         mddev->sync_thread = md_register_thread(md_do_sync,
4838                                                                 mddev,
4839                                                                 "%s_resync");
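                        /*
                         * "%s_resync" is expanded with mdname() by
                         * md_register_thread(), so for md0 the kthread
                         * appears as "md0_resync".
                         */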
4840                         if (!mddev->sync_thread) {
4841                                 printk(KERN_ERR "%s: could not start resync"
4842                                         " thread...\n", 
4843                                         mdname(mddev));
4844                                 /* leave the spares where they are, it shouldn't hurt */
4845                                 mddev->recovery = 0;
4846                         } else
4847                                 md_wakeup_thread(mddev->sync_thread);
4848                         md_new_event(mddev);
4849                 }
4850         unlock:
4851                 mddev_unlock(mddev);
4852         }
4853 }
4854
4855 static int md_notify_reboot(struct notifier_block *this,
4856                             unsigned long code, void *x)
4857 {
4858         struct list_head *tmp;
4859         mddev_t *mddev;
4860
4861         if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
4862
4863                 printk(KERN_INFO "md: stopping all md devices.\n");
4864
4865                 ITERATE_MDDEV(mddev,tmp)
4866                         if (mddev_trylock(mddev)==0)
4867                                 do_md_stop (mddev, 1);
4868                 /*
4869                  * certain more exotic SCSI devices are known to be
4870                  * volatile with respect to too-early system reboots. While the
4871                  * right place to handle this issue is the individual
4872                  * driver, we do want to have a safe RAID driver ...
4873                  */
4874                 mdelay(1000*1);
4875         }
4876         return NOTIFY_DONE;
4877 }
4878
4879 static struct notifier_block md_notifier = {
4880         .notifier_call  = md_notify_reboot,
4881         .next           = NULL,
4882         .priority       = INT_MAX, /* before any real devices */
4883 };
4884
4885 static void md_geninit(void)
4886 {
4887         struct proc_dir_entry *p;
4888
4889         dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
4890
4891         p = create_proc_entry("mdstat", S_IRUGO, NULL);
4892         if (p)
4893                 p->proc_fops = &md_seq_fops;
4894 }
4895
4896 static int __init md_init(void)
4897 {
4898         int minor;
4899
4900         printk(KERN_INFO "md: md driver %d.%d.%d MAX_MD_DEVS=%d,"
4901                         " MD_SB_DISKS=%d\n",
4902                         MD_MAJOR_VERSION, MD_MINOR_VERSION,
4903                         MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS);
4904         printk(KERN_INFO "md: bitmap version %d.%d\n", BITMAP_MAJOR_HI,
4905                         BITMAP_MINOR);
4906
4907         if (register_blkdev(MAJOR_NR, "md"))
4908                 return -1;
4909         if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
4910                 unregister_blkdev(MAJOR_NR, "md");
4911                 return -1;
4912         }
4913         devfs_mk_dir("md");
4914         blk_register_region(MKDEV(MAJOR_NR, 0), MAX_MD_DEVS, THIS_MODULE,
4915                                 md_probe, NULL, NULL);
4916         blk_register_region(MKDEV(mdp_major, 0), MAX_MD_DEVS<<MdpMinorShift, THIS_MODULE,
4917                             md_probe, NULL, NULL);
4918
4919         for (minor=0; minor < MAX_MD_DEVS; ++minor)
4920                 devfs_mk_bdev(MKDEV(MAJOR_NR, minor),
4921                                 S_IFBLK|S_IRUSR|S_IWUSR,
4922                                 "md/%d", minor);
4923
4924         for (minor=0; minor < MAX_MD_DEVS; ++minor)
4925                 devfs_mk_bdev(MKDEV(mdp_major, minor<<MdpMinorShift),
4926                               S_IFBLK|S_IRUSR|S_IWUSR,
4927                               "md/mdp%d", minor);
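        /*
         * With MdpMinorShift == 6 each mdp array owns 64 minors: minor 0
         * is the whole device and 1..63 are its partitions, so mdp1
         * starts at minor 64, mdp2 at minor 128, and so on.
         */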
4928
4929
4930         register_reboot_notifier(&md_notifier);
4931         raid_table_header = register_sysctl_table(raid_root_table, 1);
4932
4933         md_geninit();
4934         return (0);
4935 }
4936
4937
4938 #ifndef MODULE
4939
4940 /*
4941  * Searches all registered partitions for autorun RAID arrays
4942  * at boot time.
4943  */
4944 static dev_t detected_devices[128];
4945 static int dev_cnt;
4946
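/*
 * Called during partition scanning for partitions tagged for RAID
 * autodetect (type 0xfd on msdos disk labels); the devices noted here
 * are assembled later by autostart_arrays().
 */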
4947 void md_autodetect_dev(dev_t dev)
4948 {
4949         if (dev_cnt >= 0 && dev_cnt < 128)
4950                 detected_devices[dev_cnt++] = dev;
4951 }
4952
4953
4954 static void autostart_arrays(int part)
4955 {
4956         mdk_rdev_t *rdev;
4957         int i;
4958
4959         printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
4960
4961         for (i = 0; i < dev_cnt; i++) {
4962                 dev_t dev = detected_devices[i];
4963
4964                 rdev = md_import_device(dev,0, 0);
4965                 if (IS_ERR(rdev))
4966                         continue;
4967
4968                 if (test_bit(Faulty, &rdev->flags)) {
4969                         MD_BUG();
4970                         continue;
4971                 }
4972                 list_add(&rdev->same_set, &pending_raid_disks);
4973         }
4974         dev_cnt = 0;
4975
4976         autorun_devices(part);
4977 }
4978
4979 #endif
4980
4981 static __exit void md_exit(void)
4982 {
4983         mddev_t *mddev;
4984         struct list_head *tmp;
4985         int i;
4986         blk_unregister_region(MKDEV(MAJOR_NR,0), MAX_MD_DEVS);
4987         blk_unregister_region(MKDEV(mdp_major,0), MAX_MD_DEVS << MdpMinorShift);
4988         for (i=0; i < MAX_MD_DEVS; i++)
4989                 devfs_remove("md/%d", i);
4990         for (i=0; i < MAX_MD_DEVS; i++)
4991                 devfs_remove("md/mdp%d", i); /* name matches md_init() */
4992
4993         devfs_remove("md");
4994
4995         unregister_blkdev(MAJOR_NR,"md");
4996         unregister_blkdev(mdp_major, "mdp");
4997         unregister_reboot_notifier(&md_notifier);
4998         unregister_sysctl_table(raid_table_header);
4999         remove_proc_entry("mdstat", NULL);
5000         ITERATE_MDDEV(mddev,tmp) {
5001                 struct gendisk *disk = mddev->gendisk;
5002                 if (!disk)
5003                         continue;
5004                 export_array(mddev);
5005                 del_gendisk(disk);
5006                 put_disk(disk);
5007                 mddev->gendisk = NULL;
5008                 mddev_put(mddev);
5009         }
5010 }
5011
5012 module_init(md_init)
5013 module_exit(md_exit)
5014
5015 static int get_ro(char *buffer, struct kernel_param *kp)
5016 {
5017         return sprintf(buffer, "%d", start_readonly);
5018 }
5019 static int set_ro(const char *val, struct kernel_param *kp)
5020 {
5021         char *e;
5022         int num = simple_strtoul(val, &e, 10);
5023         if (*val && (*e == '\0' || *e == '\n')) {
5024                 start_readonly = num;
5025                 return 0;
5026         }
5027         return -EINVAL;
5028 }
5029
5030 module_param_call(start_ro, set_ro, get_ro, NULL, 0600);
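/*
 * Usage sketch for start_ro: with md built as a module (md-mod),
 * "modprobe md-mod start_ro=1" sets it at load time, and the 0600 mode
 * above also makes it writable at run time via
 * /sys/module/md_mod/parameters/start_ro.
 */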
5031 module_param(start_dirty_degraded, int, 0644);
5032
5033
5034 EXPORT_SYMBOL(register_md_personality);
5035 EXPORT_SYMBOL(unregister_md_personality);
5036 EXPORT_SYMBOL(md_error);
5037 EXPORT_SYMBOL(md_done_sync);
5038 EXPORT_SYMBOL(md_write_start);
5039 EXPORT_SYMBOL(md_write_end);
5040 EXPORT_SYMBOL(md_register_thread);
5041 EXPORT_SYMBOL(md_unregister_thread);
5042 EXPORT_SYMBOL(md_wakeup_thread);
5043 EXPORT_SYMBOL(md_print_devices);
5044 EXPORT_SYMBOL(md_check_recovery);
5045 MODULE_LICENSE("GPL");
5046 MODULE_ALIAS("md");
5047 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);