/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *
 * RAID-5 management functions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/raid/raid5.h>
#include <linux/highmem.h>
#include <linux/bitops.h>
#include <asm/atomic.h>

#include <linux/raid/bitmap.h>
/*
 * Stripe cache
 */

#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define IO_THRESHOLD		1
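
/*
 * A worked example of the geometry above (a sketch, assuming 4K pages):
 * with PAGE_SIZE = 4096, STRIPE_SIZE is 4096 bytes, STRIPE_SHIFT is
 * 12 - 9 = 3, and STRIPE_SECTORS is 4096 >> 9 = 8, so each stripe_head
 * covers eight 512-byte sectors on every member device.
 */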
#define HASH_PAGES		1
#define HASH_PAGES_ORDER	0
#define NR_HASH			(HASH_PAGES * PAGE_SIZE / sizeof(struct stripe_head *))
#define HASH_MASK		(NR_HASH - 1)
#define stripe_hash(conf, sect)	((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])
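
/*
 * Hash example (a sketch, assuming 4K pages and 32-bit pointers): NR_HASH
 * is 4096 / 4 = 1024 buckets and HASH_MASK is 1023, so a stripe starting
 * at sector 16384 lands in bucket (16384 >> 3) & 1023 = 0.
 */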
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This macro is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device
 */
#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
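
/*
 * For instance (a sketch, with STRIPE_SECTORS == 8): a bio at bi_sector 8
 * with bi_size 4096 ends exactly at sector 16, so when walking the list
 * for the stripe+device at sector 8, 8 + 8 < 8 + 8 is false and
 * r5_next_bio() yields NULL even if bi_next is set: the next bio already
 * belongs to a different stripe.
 */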
/*
 * The following can be used to debug the driver
 */
#define RAID5_DEBUG	0
#define RAID5_PARANOIA	1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
#else
# define CHECK_DEVLOCK()
#endif

#define PRINTK(x...) ((void)(RAID5_DEBUG && printk(x)))

static void print_raid5_conf (raid5_conf_t *conf);
static inline void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
    if (atomic_dec_and_test(&sh->count)) {
        if (!list_empty(&sh->lru))
            BUG();
        if (atomic_read(&conf->active_stripes)==0)
            BUG();
        if (test_bit(STRIPE_HANDLE, &sh->state)) {
            if (test_bit(STRIPE_DELAYED, &sh->state))
                list_add_tail(&sh->lru, &conf->delayed_list);
            else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
                     conf->seq_write == sh->bm_seq)
                list_add_tail(&sh->lru, &conf->bitmap_list);
            else {
                clear_bit(STRIPE_BIT_DELAY, &sh->state);
                list_add_tail(&sh->lru, &conf->handle_list);
            }
            md_wakeup_thread(conf->mddev->thread);
        } else {
            if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
                atomic_dec(&conf->preread_active_stripes);
                if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
                    md_wakeup_thread(conf->mddev->thread);
            }
            list_add_tail(&sh->lru, &conf->inactive_list);
            atomic_dec(&conf->active_stripes);
            if (!conf->inactive_blocked ||
                atomic_read(&conf->active_stripes) < (NR_STRIPES*3/4))
                wake_up(&conf->wait_for_stripe);
        }
    }
}
static void release_stripe(struct stripe_head *sh)
{
    raid5_conf_t *conf = sh->raid_conf;
    unsigned long flags;

    spin_lock_irqsave(&conf->device_lock, flags);
    __release_stripe(conf, sh);
    spin_unlock_irqrestore(&conf->device_lock, flags);
}
static void remove_hash(struct stripe_head *sh)
{
    PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);

    if (sh->hash_pprev) {
        if (sh->hash_next)
            sh->hash_next->hash_pprev = sh->hash_pprev;
        *sh->hash_pprev = sh->hash_next;
        sh->hash_pprev = NULL;
    }
}
static __inline__ void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
    struct stripe_head **shp = &stripe_hash(conf, sh->sector);

    PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);

    CHECK_DEVLOCK();
    if ((sh->hash_next = *shp) != NULL)
        (*shp)->hash_pprev = &sh->hash_next;
    *shp = sh;
    sh->hash_pprev = shp;
}
/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
{
    struct stripe_head *sh = NULL;
    struct list_head *first;

    CHECK_DEVLOCK();
    if (list_empty(&conf->inactive_list))
        goto out;
    first = conf->inactive_list.next;
    sh = list_entry(first, struct stripe_head, lru);
    list_del_init(first);
    remove_hash(sh);
    atomic_inc(&conf->active_stripes);
out:
    return sh;
}
static void shrink_buffers(struct stripe_head *sh, int num)
{
    struct page *p;
    int i;

    for (i=0; i<num ; i++) {
        p = sh->dev[i].page;
        if (!p)
            continue;
        sh->dev[i].page = NULL;
        page_cache_release(p);
    }
}

static int grow_buffers(struct stripe_head *sh, int num)
{
    int i;

    for (i=0; i<num; i++) {
        struct page *page;

        if (!(page = alloc_page(GFP_KERNEL))) {
            return 1;
        }
        sh->dev[i].page = page;
    }
    return 0;
}
static void raid5_build_block (struct stripe_head *sh, int i);

static inline void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
{
    raid5_conf_t *conf = sh->raid_conf;
    int disks = conf->raid_disks, i;

    if (atomic_read(&sh->count) != 0)
        BUG();
    if (test_bit(STRIPE_HANDLE, &sh->state))
        BUG();

    CHECK_DEVLOCK();
    PRINTK("init_stripe called, stripe %llu\n",
        (unsigned long long)sh->sector);

    remove_hash(sh);

    sh->sector = sector;
    sh->pd_idx = pd_idx;
    sh->state = 0;

    for (i=disks; i--; ) {
        struct r5dev *dev = &sh->dev[i];

        if (dev->toread || dev->towrite || dev->written ||
            test_bit(R5_LOCKED, &dev->flags)) {
            printk("sector=%llx i=%d %p %p %p %d\n",
                   (unsigned long long)sh->sector, i, dev->toread,
                   dev->towrite, dev->written,
                   test_bit(R5_LOCKED, &dev->flags));
            BUG();
        }
        dev->flags = 0;
        raid5_build_block(sh, i);
    }
    insert_hash(conf, sh);
}
static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector)
{
    struct stripe_head *sh;

    CHECK_DEVLOCK();
    PRINTK("__find_stripe, sector %llu\n", (unsigned long long)sector);
    for (sh = stripe_hash(conf, sector); sh; sh = sh->hash_next)
        if (sh->sector == sector)
            return sh;
    PRINTK("__stripe %llu not in cache\n", (unsigned long long)sector);
    return NULL;
}
static void unplug_slaves(mddev_t *mddev);
static void raid5_unplug_device(request_queue_t *q);

static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector,
                         int pd_idx, int noblock)
{
    struct stripe_head *sh;

    PRINTK("get_stripe, sector %llu\n", (unsigned long long)sector);

    spin_lock_irq(&conf->device_lock);

    do {
        wait_event_lock_irq(conf->wait_for_stripe,
                    conf->quiesce == 0,
                    conf->device_lock, /* nothing */);
        sh = __find_stripe(conf, sector);
        if (!sh) {
            if (!conf->inactive_blocked)
                sh = get_free_stripe(conf);
            if (noblock && sh == NULL)
                break;
            if (!sh) {
                conf->inactive_blocked = 1;
                wait_event_lock_irq(conf->wait_for_stripe,
                            !list_empty(&conf->inactive_list) &&
                            (atomic_read(&conf->active_stripes) < (NR_STRIPES *3/4)
                             || !conf->inactive_blocked),
                            conf->device_lock,
                            unplug_slaves(conf->mddev);
                    );
                conf->inactive_blocked = 0;
            } else
                init_stripe(sh, sector, pd_idx);
        } else {
            if (atomic_read(&sh->count)) {
                if (!list_empty(&sh->lru))
                    BUG();
            } else {
                if (!test_bit(STRIPE_HANDLE, &sh->state))
                    atomic_inc(&conf->active_stripes);
                if (list_empty(&sh->lru))
                    BUG();
                list_del_init(&sh->lru);
            }
        }
    } while (sh == NULL);

    if (sh)
        atomic_inc(&sh->count);

    spin_unlock_irq(&conf->device_lock);

    return sh;
}
static int grow_one_stripe(raid5_conf_t *conf)
{
    struct stripe_head *sh;
    sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
    if (!sh)
        return 0;
    memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
    sh->raid_conf = conf;
    spin_lock_init(&sh->lock);

    if (grow_buffers(sh, conf->raid_disks)) {
        shrink_buffers(sh, conf->raid_disks);
        kmem_cache_free(conf->slab_cache, sh);
        return 0;
    }
    /* we just created an active stripe so... */
    atomic_set(&sh->count, 1);
    atomic_inc(&conf->active_stripes);
    INIT_LIST_HEAD(&sh->lru);
    release_stripe(sh);
    return 1;
}
static int grow_stripes(raid5_conf_t *conf, int num)
{
    kmem_cache_t *sc;
    int devs = conf->raid_disks;

    sprintf(conf->cache_name, "raid5/%s", mdname(conf->mddev));

    sc = kmem_cache_create(conf->cache_name,
                   sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
                   0, 0, NULL, NULL);
    if (!sc)
        return 1;
    conf->slab_cache = sc;
    while (num--) {
        if (!grow_one_stripe(conf))
            return 1;
    }
    return 0;
}
static int drop_one_stripe(raid5_conf_t *conf)
{
    struct stripe_head *sh;

    spin_lock_irq(&conf->device_lock);
    sh = get_free_stripe(conf);
    spin_unlock_irq(&conf->device_lock);
    if (!sh)
        return 0;
    if (atomic_read(&sh->count))
        BUG();
    shrink_buffers(sh, conf->raid_disks);
    kmem_cache_free(conf->slab_cache, sh);
    atomic_dec(&conf->active_stripes);
    return 1;
}

static void shrink_stripes(raid5_conf_t *conf)
{
    while (drop_one_stripe(conf))
        ;

    kmem_cache_destroy(conf->slab_cache);
    conf->slab_cache = NULL;
}
static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done,
                   int error)
{
    struct stripe_head *sh = bi->bi_private;
    raid5_conf_t *conf = sh->raid_conf;
    int disks = conf->raid_disks, i;
    int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);

    if (bi->bi_size)
        return 1;

    for (i=0 ; i<disks; i++)
        if (bi == &sh->dev[i].req)
            break;

    PRINTK("end_read_request %llu/%d, count: %d, uptodate %d.\n",
        (unsigned long long)sh->sector, i, atomic_read(&sh->count),
        uptodate);
    if (i == disks) {
        BUG();
        return 0;
    }

    if (uptodate) {
#if 0
        struct bio *bio;
        unsigned long flags;
        spin_lock_irqsave(&conf->device_lock, flags);
        /* we can return a buffer if we bypassed the cache or
         * if the top buffer is not in highmem.  If there are
         * multiple buffers, leave the extra work to
         * handle_stripe
         */
        buffer = sh->bh_read[i];
        if (buffer &&
            (!PageHighMem(buffer->b_page)
             || buffer->b_page == bh->b_page )
            ) {
            sh->bh_read[i] = buffer->b_reqnext;
            buffer->b_reqnext = NULL;
        } else
            buffer = NULL;
        spin_unlock_irqrestore(&conf->device_lock, flags);
        if (sh->bh_page[i]==bh->b_page)
            set_buffer_uptodate(bh);
        if (buffer) {
            if (buffer->b_page != bh->b_page)
                memcpy(buffer->b_data, bh->b_data, bh->b_size);
            buffer->b_end_io(buffer, 1);
        }
#else
        set_bit(R5_UPTODATE, &sh->dev[i].flags);
#endif
        if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
            printk("R5: read error corrected!!\n");
            clear_bit(R5_ReadError, &sh->dev[i].flags);
            clear_bit(R5_ReWrite, &sh->dev[i].flags);
        }
        if (atomic_read(&conf->disks[i].rdev->read_errors))
            atomic_set(&conf->disks[i].rdev->read_errors, 0);
    } else {
        int retry = 0;
        clear_bit(R5_UPTODATE, &sh->dev[i].flags);
        atomic_inc(&conf->disks[i].rdev->read_errors);
        if (conf->mddev->degraded)
            printk("R5: read error not correctable.\n");
        else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
            /* Oh, no!!! */
            printk("R5: read error NOT corrected!!\n");
        else if (atomic_read(&conf->disks[i].rdev->read_errors)
             > conf->max_nr_stripes)
            printk("raid5: Too many read errors, failing device.\n");
        else
            retry = 1;
        if (retry)
            set_bit(R5_ReadError, &sh->dev[i].flags);
        else {
            clear_bit(R5_ReadError, &sh->dev[i].flags);
            clear_bit(R5_ReWrite, &sh->dev[i].flags);
            md_error(conf->mddev, conf->disks[i].rdev);
        }
    }
    rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
#if 0
    /* must restore b_page before unlocking buffer... */
    if (sh->bh_page[i] != bh->b_page) {
        bh->b_page = sh->bh_page[i];
        bh->b_data = page_address(bh->b_page);
        clear_buffer_uptodate(bh);
    }
#endif
    clear_bit(R5_LOCKED, &sh->dev[i].flags);
    set_bit(STRIPE_HANDLE, &sh->state);
    release_stripe(sh);
    return 0;
}
static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
                    int error)
{
    struct stripe_head *sh = bi->bi_private;
    raid5_conf_t *conf = sh->raid_conf;
    int disks = conf->raid_disks, i;
    unsigned long flags;
    int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);

    if (bi->bi_size)
        return 1;

    for (i=0 ; i<disks; i++)
        if (bi == &sh->dev[i].req)
            break;

    PRINTK("end_write_request %llu/%d, count %d, uptodate: %d.\n",
        (unsigned long long)sh->sector, i, atomic_read(&sh->count),
        uptodate);
    if (i == disks) {
        BUG();
        return 0;
    }

    spin_lock_irqsave(&conf->device_lock, flags);
    if (!uptodate)
        md_error(conf->mddev, conf->disks[i].rdev);

    rdev_dec_pending(conf->disks[i].rdev, conf->mddev);

    clear_bit(R5_LOCKED, &sh->dev[i].flags);
    set_bit(STRIPE_HANDLE, &sh->state);
    __release_stripe(conf, sh);
    spin_unlock_irqrestore(&conf->device_lock, flags);
    return 0;
}
static sector_t compute_blocknr(struct stripe_head *sh, int i);

static void raid5_build_block (struct stripe_head *sh, int i)
{
    struct r5dev *dev = &sh->dev[i];

    bio_init(&dev->req);
    dev->req.bi_io_vec = &dev->vec;
    dev->req.bi_vcnt++;
    dev->req.bi_max_vecs++;
    dev->vec.bv_page = dev->page;
    dev->vec.bv_len = STRIPE_SIZE;
    dev->vec.bv_offset = 0;

    dev->req.bi_sector = sh->sector;
    dev->req.bi_private = sh;

    dev->flags = 0;
    dev->sector = compute_blocknr(sh, i);
}
static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{
    char b[BDEVNAME_SIZE];
    raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
    PRINTK("raid5: error called\n");

    if (!test_bit(Faulty, &rdev->flags)) {
        mddev->sb_dirty = 1;
        if (test_bit(In_sync, &rdev->flags)) {
            conf->working_disks--;
            mddev->degraded++;
            conf->failed_disks++;
            clear_bit(In_sync, &rdev->flags);
            /*
             * if recovery was running, make sure it aborts.
             */
            set_bit(MD_RECOVERY_ERR, &mddev->recovery);
        }
        set_bit(Faulty, &rdev->flags);
        printk (KERN_ALERT
            "raid5: Disk failure on %s, disabling device."
            " Operation continuing on %d devices\n",
            bdevname(rdev->bdev,b), conf->working_disks);
    }
}
/*
 * Input: a 'big' sector number,
 * Output: index of the data and parity disk, and the sector # in them.
 */
static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks,
            unsigned int data_disks, unsigned int * dd_idx,
            unsigned int * pd_idx, raid5_conf_t *conf)
{
    long stripe;
    unsigned long chunk_number;
    unsigned int chunk_offset;
    sector_t new_sector;
    int sectors_per_chunk = conf->chunk_size >> 9;

    /* First compute the information on this sector */

    /*
     * Compute the chunk number and the sector offset inside the chunk
     */
    chunk_offset = sector_div(r_sector, sectors_per_chunk);
    chunk_number = r_sector;
    BUG_ON(r_sector != chunk_number);

    /*
     * Compute the stripe number
     */
    stripe = chunk_number / data_disks;

    /*
     * Compute the data disk and parity disk indexes inside the stripe
     */
    *dd_idx = chunk_number % data_disks;

    /*
     * Select the parity disk based on the user selected algorithm.
     */
    if (conf->level == 4)
        *pd_idx = data_disks;
    else switch (conf->algorithm) {
        case ALGORITHM_LEFT_ASYMMETRIC:
            *pd_idx = data_disks - stripe % raid_disks;
            if (*dd_idx >= *pd_idx)
                (*dd_idx)++;
            break;
        case ALGORITHM_RIGHT_ASYMMETRIC:
            *pd_idx = stripe % raid_disks;
            if (*dd_idx >= *pd_idx)
                (*dd_idx)++;
            break;
        case ALGORITHM_LEFT_SYMMETRIC:
            *pd_idx = data_disks - stripe % raid_disks;
            *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
            break;
        case ALGORITHM_RIGHT_SYMMETRIC:
            *pd_idx = stripe % raid_disks;
            *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
            break;
        default:
            printk("raid5: unsupported algorithm %d\n",
                conf->algorithm);
    }

    /*
     * Finally, compute the new sector number
     */
    new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
    return new_sector;
}
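
/*
 * Worked example of the mapping above (a sketch): raid_disks = 4,
 * data_disks = 3, 64K chunks (sectors_per_chunk = 128), and
 * ALGORITHM_LEFT_SYMMETRIC.  For r_sector = 1000: chunk_offset = 104,
 * chunk_number = 7, stripe = 7 / 3 = 2, raw *dd_idx = 7 % 3 = 1.
 * Parity lands on *pd_idx = 3 - 2 % 4 = 1 and the data index rotates to
 * (1 + 1 + 1) % 4 = 3, with new_sector = 2 * 128 + 104 = 360 on that disk.
 */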
static sector_t compute_blocknr(struct stripe_head *sh, int i)
{
    raid5_conf_t *conf = sh->raid_conf;
    int raid_disks = conf->raid_disks, data_disks = raid_disks - 1;
    sector_t new_sector = sh->sector, check;
    int sectors_per_chunk = conf->chunk_size >> 9;
    sector_t stripe;
    int chunk_offset;
    int chunk_number, dummy1, dummy2, dd_idx = i;
    sector_t r_sector;

    chunk_offset = sector_div(new_sector, sectors_per_chunk);
    stripe = new_sector;
    BUG_ON(new_sector != stripe);

    switch (conf->algorithm) {
        case ALGORITHM_LEFT_ASYMMETRIC:
        case ALGORITHM_RIGHT_ASYMMETRIC:
            if (i > sh->pd_idx)
                i--;
            break;
        case ALGORITHM_LEFT_SYMMETRIC:
        case ALGORITHM_RIGHT_SYMMETRIC:
            if (i < sh->pd_idx)
                i += raid_disks;
            i -= (sh->pd_idx + 1);
            break;
        default:
            printk("raid5: unsupported algorithm %d\n",
                conf->algorithm);
    }

    chunk_number = stripe * data_disks + i;
    r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;

    check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf);
    if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) {
        printk("compute_blocknr: map not correct\n");
        return 0;
    }
    return r_sector;
}
/*
 * Copy data between a page in the stripe cache, and a bio.
 * There are no alignment or size guarantees between the page or the
 * bio except that there is some overlap.
 * All iovecs in the bio must be considered.
 */
static void copy_data(int frombio, struct bio *bio,
             struct page *page,
             sector_t sector)
{
    char *pa = page_address(page);
    struct bio_vec *bvl;
    int i;
    int page_offset;

    if (bio->bi_sector >= sector)
        page_offset = (signed)(bio->bi_sector - sector) * 512;
    else
        page_offset = (signed)(sector - bio->bi_sector) * -512;
    bio_for_each_segment(bvl, bio, i) {
        int len = bio_iovec_idx(bio,i)->bv_len;
        int clen;
        int b_offset = 0;

        if (page_offset < 0) {
            b_offset = -page_offset;
            page_offset += b_offset;
            len -= b_offset;
        }

        if (len > 0 && page_offset + len > STRIPE_SIZE)
            clen = STRIPE_SIZE - page_offset;
        else
            clen = len;

        if (clen > 0) {
            char *ba = __bio_kmap_atomic(bio, i, KM_USER0);
            if (frombio)
                memcpy(pa+page_offset, ba+b_offset, clen);
            else
                memcpy(ba+b_offset, pa+page_offset, clen);
            __bio_kunmap_atomic(ba, KM_USER0);
        }
        if (clen < len) /* hit end of page */
            break;
        page_offset += len;
    }
}
#define check_xor()	do {						\
			   if (count == MAX_XOR_BLOCKS) {	\
				xor_block(count, STRIPE_SIZE, ptr);	\
				count = 1;			\
			   }					\
			} while(0)
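
/*
 * Note on check_xor() (a sketch of the intent): ptr[0] is always the
 * destination page, so once MAX_XOR_BLOCKS pointers are queued the
 * partial result is folded into ptr[0] by xor_block() and count resets
 * to 1, keeping ptr[0] as the running accumulator for the next batch.
 */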
static void compute_block(struct stripe_head *sh, int dd_idx)
{
    raid5_conf_t *conf = sh->raid_conf;
    int i, count, disks = conf->raid_disks;
    void *ptr[MAX_XOR_BLOCKS], *p;

    PRINTK("compute_block, stripe %llu, idx %d\n",
        (unsigned long long)sh->sector, dd_idx);

    ptr[0] = page_address(sh->dev[dd_idx].page);
    memset(ptr[0], 0, STRIPE_SIZE);
    count = 1;
    for (i = disks ; i--; ) {
        if (i == dd_idx)
            continue;
        p = page_address(sh->dev[i].page);
        if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
            ptr[count++] = p;
        else
            printk("compute_block() %d, stripe %llu, %d"
                " not present\n", dd_idx,
                (unsigned long long)sh->sector, i);

        check_xor();
    }
    if (count != 1)
        xor_block(count, STRIPE_SIZE, ptr);
    set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags);
}
static void compute_parity(struct stripe_head *sh, int method)
{
    raid5_conf_t *conf = sh->raid_conf;
    int i, pd_idx = sh->pd_idx, disks = conf->raid_disks, count;
    void *ptr[MAX_XOR_BLOCKS];
    struct bio *chosen;

    PRINTK("compute_parity, stripe %llu, method %d\n",
        (unsigned long long)sh->sector, method);

    count = 1;
    ptr[0] = page_address(sh->dev[pd_idx].page);
    switch(method) {
    case READ_MODIFY_WRITE:
        if (!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags))
            BUG();
        for (i=disks ; i-- ;) {
            if (i==pd_idx)
                continue;
            if (sh->dev[i].towrite &&
                test_bit(R5_UPTODATE, &sh->dev[i].flags)) {
                ptr[count++] = page_address(sh->dev[i].page);
                chosen = sh->dev[i].towrite;
                sh->dev[i].towrite = NULL;

                if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
                    wake_up(&conf->wait_for_overlap);

                if (sh->dev[i].written) BUG();
                sh->dev[i].written = chosen;
                check_xor();
            }
        }
        break;
    case RECONSTRUCT_WRITE:
        memset(ptr[0], 0, STRIPE_SIZE);
        for (i= disks; i-- ;)
            if (i!=pd_idx && sh->dev[i].towrite) {
                chosen = sh->dev[i].towrite;
                sh->dev[i].towrite = NULL;

                if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
                    wake_up(&conf->wait_for_overlap);

                if (sh->dev[i].written) BUG();
                sh->dev[i].written = chosen;
            }
        break;
    case CHECK_PARITY:
        break;
    }
    if (count>1) {
        xor_block(count, STRIPE_SIZE, ptr);
        count = 1;
    }

    for (i = disks; i--;)
        if (sh->dev[i].written) {
            sector_t sector = sh->dev[i].sector;
            struct bio *wbi = sh->dev[i].written;
            while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
                copy_data(1, wbi, sh->dev[i].page, sector);
                wbi = r5_next_bio(wbi, sector);
            }

            set_bit(R5_LOCKED, &sh->dev[i].flags);
            set_bit(R5_UPTODATE, &sh->dev[i].flags);
        }

    switch(method) {
    case RECONSTRUCT_WRITE:
    case CHECK_PARITY:
        for (i=disks; i--;)
            if (i != pd_idx) {
                ptr[count++] = page_address(sh->dev[i].page);
                check_xor();
            }
        break;
    case READ_MODIFY_WRITE:
        for (i = disks; i--;)
            if (sh->dev[i].written) {
                ptr[count++] = page_address(sh->dev[i].page);
                check_xor();
            }
    }
    if (count != 1)
        xor_block(count, STRIPE_SIZE, ptr);

    if (method != CHECK_PARITY) {
        set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
        set_bit(R5_LOCKED,   &sh->dev[pd_idx].flags);
    } else
        clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
}
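
/*
 * In short (a sketch of the three methods): READ_MODIFY_WRITE xors the
 * old contents of the blocks being rewritten out of the existing parity
 * and then xors the new data back in; RECONSTRUCT_WRITE rebuilds parity
 * from scratch as the xor of all data blocks; CHECK_PARITY xors every
 * block, including parity, into the parity page, which therefore ends up
 * all zeroes exactly when parity was consistent (handle_stripe tests
 * this below with a memcmp against an all-zero page).
 */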
/*
 * Each stripe/dev can have one or more bion attached.
 * toread/towrite point to the first in a chain.
 * The bi_next chain must be in order.
 */
static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
{
    struct bio **bip;
    raid5_conf_t *conf = sh->raid_conf;
    int firstwrite=0;

    PRINTK("adding bh b#%llu to stripe s#%llu\n",
        (unsigned long long)bi->bi_sector,
        (unsigned long long)sh->sector);

    spin_lock(&sh->lock);
    spin_lock_irq(&conf->device_lock);
    if (forwrite) {
        bip = &sh->dev[dd_idx].towrite;
        if (*bip == NULL && sh->dev[dd_idx].written == NULL)
            firstwrite = 1;
    } else
        bip = &sh->dev[dd_idx].toread;
    while (*bip && (*bip)->bi_sector < bi->bi_sector) {
        if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
            goto overlap;
        bip = & (*bip)->bi_next;
    }
    if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
        goto overlap;

    if (*bip && bi->bi_next && (*bip) != bi->bi_next)
        BUG();
    if (*bip)
        bi->bi_next = *bip;
    *bip = bi;
    bi->bi_phys_segments ++;
    spin_unlock_irq(&conf->device_lock);
    spin_unlock(&sh->lock);

    PRINTK("added bi b#%llu to stripe s#%llu, disk %d.\n",
        (unsigned long long)bi->bi_sector,
        (unsigned long long)sh->sector, dd_idx);

    if (conf->mddev->bitmap && firstwrite) {
        sh->bm_seq = conf->seq_write;
        bitmap_startwrite(conf->mddev->bitmap, sh->sector,
                  STRIPE_SECTORS, 0);
        set_bit(STRIPE_BIT_DELAY, &sh->state);
    }

    if (forwrite) {
        /* check if page is covered */
        sector_t sector = sh->dev[dd_idx].sector;
        for (bi=sh->dev[dd_idx].towrite;
             sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
                 bi && bi->bi_sector <= sector;
             bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
            if (bi->bi_sector + (bi->bi_size>>9) >= sector)
                sector = bi->bi_sector + (bi->bi_size>>9);
        }
        if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
            set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
    }
    return 1;
 overlap:
    set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
    spin_unlock_irq(&conf->device_lock);
    spin_unlock(&sh->lock);
    return 0;
}
/*
 * handle_stripe - do things to a stripe.
 *
 * We lock the stripe and then examine the state of various bits
 * to see what needs to be done.
 * Possible results:
 *    return some read requests which now have data
 *    return some write requests which are safely on disc
 *    schedule a read on some buffers
 *    schedule a write of some buffers
 *    return confirmation of parity correctness
 *
 * Parity calculations are done inside the stripe lock
 * buffers are taken off read_list or write_list, and bh_cache buffers
 * get BH_Lock set before the stripe lock is released.
 *
 */
static void handle_stripe(struct stripe_head *sh)
{
    raid5_conf_t *conf = sh->raid_conf;
    int disks = conf->raid_disks;
    struct bio *return_bi= NULL;
    struct bio *bi;
    int i;
    int syncing;
    int locked=0, uptodate=0, to_read=0, to_write=0, failed=0, written=0;
    int non_overwrite = 0;
    int failed_num=0;
    struct r5dev *dev;

    PRINTK("handling stripe %llu, cnt=%d, pd_idx=%d\n",
        (unsigned long long)sh->sector, atomic_read(&sh->count),
        sh->pd_idx);

    spin_lock(&sh->lock);
    clear_bit(STRIPE_HANDLE, &sh->state);
    clear_bit(STRIPE_DELAYED, &sh->state);

    syncing = test_bit(STRIPE_SYNCING, &sh->state);
    /* Now to look around and see what can be done */

    for (i=disks; i--; ) {
        mdk_rdev_t *rdev;
        dev = &sh->dev[i];
        clear_bit(R5_Insync, &dev->flags);
        clear_bit(R5_Syncio, &dev->flags);

        PRINTK("check %d: state 0x%lx read %p write %p written %p\n",
            i, dev->flags, dev->toread, dev->towrite, dev->written);
        /* maybe we can reply to a read */
        if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) {
            struct bio *rbi, *rbi2;
            PRINTK("Return read for disc %d\n", i);
            spin_lock_irq(&conf->device_lock);
            rbi = dev->toread;
            dev->toread = NULL;
            if (test_and_clear_bit(R5_Overlap, &dev->flags))
                wake_up(&conf->wait_for_overlap);
            spin_unlock_irq(&conf->device_lock);
            while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
                copy_data(0, rbi, dev->page, dev->sector);
                rbi2 = r5_next_bio(rbi, dev->sector);
                spin_lock_irq(&conf->device_lock);
                if (--rbi->bi_phys_segments == 0) {
                    rbi->bi_next = return_bi;
                    return_bi = rbi;
                }
                spin_unlock_irq(&conf->device_lock);
                rbi = rbi2;
            }
        }

        /* now count some things */
        if (test_bit(R5_LOCKED, &dev->flags)) locked++;
        if (test_bit(R5_UPTODATE, &dev->flags)) uptodate++;

        if (dev->toread) to_read++;
        if (dev->towrite) {
            to_write++;
            if (!test_bit(R5_OVERWRITE, &dev->flags))
                non_overwrite++;
        }
        if (dev->written) written++;
        rdev = conf->disks[i].rdev; /* FIXME, should I be looking rdev */
        if (!rdev || !test_bit(In_sync, &rdev->flags)) {
            /* The ReadError flag will just be confusing now */
            clear_bit(R5_ReadError, &dev->flags);
            clear_bit(R5_ReWrite, &dev->flags);
        }
        if (!rdev || !test_bit(In_sync, &rdev->flags)
            || test_bit(R5_ReadError, &dev->flags)) {
            failed++;
            failed_num = i;
        } else
            set_bit(R5_Insync, &dev->flags);
    }
    PRINTK("locked=%d uptodate=%d to_read=%d"
        " to_write=%d failed=%d failed_num=%d\n",
        locked, uptodate, to_read, to_write, failed, failed_num);
    /* check if the array has lost two devices and, if so, some requests might
     * need to be failed
     */
    if (failed > 1 && to_read+to_write+written) {
        for (i=disks; i--; ) {
            int bitmap_end = 0;

            if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
                mdk_rdev_t *rdev = conf->disks[i].rdev;
                if (rdev && test_bit(In_sync, &rdev->flags))
                    /* multiple read failures in one stripe */
                    md_error(conf->mddev, rdev);
            }

            spin_lock_irq(&conf->device_lock);
            /* fail all writes first */
            bi = sh->dev[i].towrite;
            sh->dev[i].towrite = NULL;
            if (bi) { to_write--; bitmap_end = 1; }

            if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
                wake_up(&conf->wait_for_overlap);

            while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
                struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
                clear_bit(BIO_UPTODATE, &bi->bi_flags);
                if (--bi->bi_phys_segments == 0) {
                    md_write_end(conf->mddev);
                    bi->bi_next = return_bi;
                    return_bi = bi;
                }
                bi = nextbi;
            }
            /* and fail all 'written' */
            bi = sh->dev[i].written;
            sh->dev[i].written = NULL;
            if (bi) bitmap_end = 1;
            while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
                struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
                clear_bit(BIO_UPTODATE, &bi->bi_flags);
                if (--bi->bi_phys_segments == 0) {
                    md_write_end(conf->mddev);
                    bi->bi_next = return_bi;
                    return_bi = bi;
                }
                bi = bi2;
            }

            /* fail any reads if this device is non-operational */
            if (!test_bit(R5_Insync, &sh->dev[i].flags) ||
                test_bit(R5_ReadError, &sh->dev[i].flags)) {
                bi = sh->dev[i].toread;
                sh->dev[i].toread = NULL;
                if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
                    wake_up(&conf->wait_for_overlap);
                if (bi) to_read--;
                while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
                    struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
                    clear_bit(BIO_UPTODATE, &bi->bi_flags);
                    if (--bi->bi_phys_segments == 0) {
                        bi->bi_next = return_bi;
                        return_bi = bi;
                    }
                    bi = nextbi;
                }
            }
            spin_unlock_irq(&conf->device_lock);
            if (bitmap_end)
                bitmap_endwrite(conf->mddev->bitmap, sh->sector,
                        STRIPE_SECTORS, 0, 0);
        }
    }
    if (failed > 1 && syncing) {
        md_done_sync(conf->mddev, STRIPE_SECTORS,0);
        clear_bit(STRIPE_SYNCING, &sh->state);
        syncing = 0;
    }
    /* might be able to return some write requests if the parity block
     * is safe, or on a failed drive
     */
    dev = &sh->dev[sh->pd_idx];
    if ( written &&
         ( (test_bit(R5_Insync, &dev->flags) && !test_bit(R5_LOCKED, &dev->flags) &&
        test_bit(R5_UPTODATE, &dev->flags))
           || (failed == 1 && failed_num == sh->pd_idx))
        ) {
        /* any written block on an uptodate or failed drive can be returned.
         * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
         * never LOCKED, so we don't need to test 'failed' directly.
         */
        for (i=disks; i--; )
            if (sh->dev[i].written) {
                dev = &sh->dev[i];
                if (!test_bit(R5_LOCKED, &dev->flags) &&
                     test_bit(R5_UPTODATE, &dev->flags) ) {
                    /* We can return any write requests */
                    struct bio *wbi, *wbi2;
                    int bitmap_end = 0;
                    PRINTK("Return write for disc %d\n", i);
                    spin_lock_irq(&conf->device_lock);
                    wbi = dev->written;
                    dev->written = NULL;
                    while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
                        wbi2 = r5_next_bio(wbi, dev->sector);
                        if (--wbi->bi_phys_segments == 0) {
                            md_write_end(conf->mddev);
                            wbi->bi_next = return_bi;
                            return_bi = wbi;
                        }
                        wbi = wbi2;
                    }
                    if (dev->towrite == NULL)
                        bitmap_end = 1;
                    spin_unlock_irq(&conf->device_lock);
                    if (bitmap_end)
                        bitmap_endwrite(conf->mddev->bitmap, sh->sector,
                                STRIPE_SECTORS,
                                !test_bit(STRIPE_DEGRADED, &sh->state), 0);
                }
            }
    }
    /* Now we might consider reading some blocks, either to check/generate
     * parity, or to satisfy requests
     * or to load a block that is being partially written.
     */
    if (to_read || non_overwrite || (syncing && (uptodate < disks))) {
        for (i=disks; i--;) {
            dev = &sh->dev[i];
            if (!test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
                (dev->toread ||
                 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
                 syncing ||
                 (failed && (sh->dev[failed_num].toread ||
                     (sh->dev[failed_num].towrite && !test_bit(R5_OVERWRITE, &sh->dev[failed_num].flags))))
                    )
                ) {
                /* we would like to get this block, possibly
                 * by computing it, but we might not be able to
                 */
                if (uptodate == disks-1) {
                    PRINTK("Computing block %d\n", i);
                    compute_block(sh, i);
                    uptodate++;
                } else if (test_bit(R5_Insync, &dev->flags)) {
                    set_bit(R5_LOCKED, &dev->flags);
                    set_bit(R5_Wantread, &dev->flags);
#if 0
                    /* if I am just reading this block and we don't have
                       a failed drive, or any pending writes then sidestep the cache */
                    if (sh->bh_read[i] && !sh->bh_read[i]->b_reqnext &&
                        ! syncing && !failed && !to_write) {
                        sh->bh_cache[i]->b_page = sh->bh_read[i]->b_page;
                        sh->bh_cache[i]->b_data = sh->bh_read[i]->b_data;
                    }
#endif
                    locked++;
                    PRINTK("Reading block %d (sync=%d)\n",
                        i, syncing);
                    if (syncing)
                        md_sync_acct(conf->disks[i].rdev->bdev,
                                 STRIPE_SECTORS);
                }
            }
        }
        set_bit(STRIPE_HANDLE, &sh->state);
    }
    /* now to consider writing and what else, if anything should be read */
    if (to_write) {
        int rmw=0, rcw=0;
        for (i=disks ; i--;) {
            /* would I have to read this buffer for read_modify_write */
            dev = &sh->dev[i];
            if ((dev->towrite || i == sh->pd_idx) &&
                (!test_bit(R5_LOCKED, &dev->flags)
#if 0
                 || sh->bh_page[i]!=bh->b_page
#endif
                    ) &&
                !test_bit(R5_UPTODATE, &dev->flags)) {
                if (test_bit(R5_Insync, &dev->flags)
/*                  && !(!mddev->insync && i == sh->pd_idx) */
                    )
                    rmw++;
                else rmw += 2*disks;  /* cannot read it */
            }
            /* Would I have to read this buffer for reconstruct_write */
            if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
                (!test_bit(R5_LOCKED, &dev->flags)
#if 0
                 || sh->bh_page[i] != bh->b_page
#endif
                    ) &&
                !test_bit(R5_UPTODATE, &dev->flags)) {
                if (test_bit(R5_Insync, &dev->flags)) rcw++;
                else rcw += 2*disks;
            }
        }
        PRINTK("for sector %llu, rmw=%d rcw=%d\n",
            (unsigned long long)sh->sector, rmw, rcw);
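        /*
         * Illustration of the weighting above (a sketch): with 5 disks
         * and a full overwrite of one data block, read-modify-write needs
         * the old data block plus the old parity (rmw = 2) while
         * reconstruct-write needs the three untouched data blocks
         * (rcw = 3), so rmw wins below; a needed block on a failed disk
         * costs 2*disks, which effectively forces the other strategy.
         */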
        set_bit(STRIPE_HANDLE, &sh->state);
        if (rmw < rcw && rmw > 0)
            /* prefer read-modify-write, but need to get some data */
            for (i=disks; i--;) {
                dev = &sh->dev[i];
                if ((dev->towrite || i == sh->pd_idx) &&
                    !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
                    test_bit(R5_Insync, &dev->flags)) {
                    if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                    {
                        PRINTK("Read_old block %d for r-m-w\n", i);
                        set_bit(R5_LOCKED, &dev->flags);
                        set_bit(R5_Wantread, &dev->flags);
                        locked++;
                    } else {
                        set_bit(STRIPE_DELAYED, &sh->state);
                        set_bit(STRIPE_HANDLE, &sh->state);
                    }
                }
            }
        if (rcw <= rmw && rcw > 0)
            /* want reconstruct write, but need to get some data */
            for (i=disks; i--;) {
                dev = &sh->dev[i];
                if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
                    !test_bit(R5_LOCKED, &dev->flags) && !test_bit(R5_UPTODATE, &dev->flags) &&
                    test_bit(R5_Insync, &dev->flags)) {
                    if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                    {
                        PRINTK("Read_old block %d for Reconstruct\n", i);
                        set_bit(R5_LOCKED, &dev->flags);
                        set_bit(R5_Wantread, &dev->flags);
                        locked++;
                    } else {
                        set_bit(STRIPE_DELAYED, &sh->state);
                        set_bit(STRIPE_HANDLE, &sh->state);
                    }
                }
            }
        /* now if nothing is locked, and if we have enough data, we can start a write request */
        if (locked == 0 && (rcw == 0 ||rmw == 0) &&
            !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
            PRINTK("Computing parity...\n");
            compute_parity(sh, rcw==0 ? RECONSTRUCT_WRITE : READ_MODIFY_WRITE);
            /* now every locked buffer is ready to be written */
            for (i=disks; i--;)
                if (test_bit(R5_LOCKED, &sh->dev[i].flags)) {
                    PRINTK("Writing block %d\n", i);
                    locked++;
                    set_bit(R5_Wantwrite, &sh->dev[i].flags);
                    if (!test_bit(R5_Insync, &sh->dev[i].flags)
                        || (i==sh->pd_idx && failed == 0))
                        set_bit(STRIPE_INSYNC, &sh->state);
                }
            if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
                atomic_dec(&conf->preread_active_stripes);
                if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
                    md_wakeup_thread(conf->mddev->thread);
            }
        }
    }
    /* maybe we need to check and possibly fix the parity for this stripe
     * Any reads will already have been scheduled, so we just see if enough data
     * is available
     */
    if (syncing && locked == 0 &&
        !test_bit(STRIPE_INSYNC, &sh->state) && failed <= 1) {
        set_bit(STRIPE_HANDLE, &sh->state);
        if (failed == 0) {
            char *pagea;
            if (uptodate != disks)
                BUG();
            compute_parity(sh, CHECK_PARITY);
            uptodate--;
            pagea = page_address(sh->dev[sh->pd_idx].page);
            if ((*(u32*)pagea) == 0 &&
                !memcmp(pagea, pagea+4, STRIPE_SIZE-4)) {
                /* parity is correct (on disc, not in buffer any more) */
                set_bit(STRIPE_INSYNC, &sh->state);
            } else {
                conf->mddev->resync_mismatches += STRIPE_SECTORS;
                if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
                    /* don't try to repair!! */
                    set_bit(STRIPE_INSYNC, &sh->state);
            }
        }
        if (!test_bit(STRIPE_INSYNC, &sh->state)) {
            if (failed==0)
                failed_num = sh->pd_idx;
            /* should be able to compute the missing block and write it to spare */
            if (!test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)) {
                if (uptodate+1 != disks)
                    BUG();
                compute_block(sh, failed_num);
                uptodate++;
            }
            if (uptodate != disks)
                BUG();
            dev = &sh->dev[failed_num];
            set_bit(R5_LOCKED, &dev->flags);
            set_bit(R5_Wantwrite, &dev->flags);
            clear_bit(STRIPE_DEGRADED, &sh->state);
            locked++;
            set_bit(STRIPE_INSYNC, &sh->state);
            set_bit(R5_Syncio, &dev->flags);
        }
    }
    if (syncing && locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
        md_done_sync(conf->mddev, STRIPE_SECTORS,1);
        clear_bit(STRIPE_SYNCING, &sh->state);
    }
    /* If the failed drive is just a ReadError, then we might need to progress
     * the repair/check process
     */
    if (failed == 1 && ! conf->mddev->ro &&
        test_bit(R5_ReadError, &sh->dev[failed_num].flags)
        && !test_bit(R5_LOCKED, &sh->dev[failed_num].flags)
        && test_bit(R5_UPTODATE, &sh->dev[failed_num].flags)
        ) {
        dev = &sh->dev[failed_num];
        if (!test_bit(R5_ReWrite, &dev->flags)) {
            set_bit(R5_Wantwrite, &dev->flags);
            set_bit(R5_ReWrite, &dev->flags);
            set_bit(R5_LOCKED, &dev->flags);
        } else {
            /* let's read it back */
            set_bit(R5_Wantread, &dev->flags);
            set_bit(R5_LOCKED, &dev->flags);
        }
    }
    spin_unlock(&sh->lock);

    while ((bi=return_bi)) {
        int bytes = bi->bi_size;

        return_bi = bi->bi_next;
        bi->bi_next = NULL;
        bi->bi_size = 0;
        bi->bi_end_io(bi, bytes, 0);
    }
    for (i=disks; i-- ;) {
        int rw;
        struct bio *bi;
        mdk_rdev_t *rdev;
        if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
            rw = 1;
        else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
            rw = 0;
        else
            continue;

        bi = &sh->dev[i].req;

        bi->bi_rw = rw;
        if (rw)
            bi->bi_end_io = raid5_end_write_request;
        else
            bi->bi_end_io = raid5_end_read_request;

        rcu_read_lock();
        rdev = rcu_dereference(conf->disks[i].rdev);
        if (rdev && test_bit(Faulty, &rdev->flags))
            rdev = NULL;
        if (rdev)
            atomic_inc(&rdev->nr_pending);
        rcu_read_unlock();

        if (rdev) {
            if (test_bit(R5_Syncio, &sh->dev[i].flags))
                md_sync_acct(rdev->bdev, STRIPE_SECTORS);

            bi->bi_bdev = rdev->bdev;
            PRINTK("for %llu schedule op %ld on disc %d\n",
                (unsigned long long)sh->sector, bi->bi_rw, i);
            atomic_inc(&sh->count);
            bi->bi_sector = sh->sector + rdev->data_offset;
            bi->bi_flags = 1 << BIO_UPTODATE;
            bi->bi_vcnt = 1;
            bi->bi_max_vecs = 1;
            bi->bi_idx = 0;
            bi->bi_io_vec = &sh->dev[i].vec;
            bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
            bi->bi_io_vec[0].bv_offset = 0;
            bi->bi_size = STRIPE_SIZE;
            bi->bi_next = NULL;
            generic_make_request(bi);
        } else {
            if (rw == 1)
                set_bit(STRIPE_DEGRADED, &sh->state);
            PRINTK("skip op %ld on disc %d for sector %llu\n",
                bi->bi_rw, i, (unsigned long long)sh->sector);
            clear_bit(R5_LOCKED, &sh->dev[i].flags);
            set_bit(STRIPE_HANDLE, &sh->state);
        }
    }
}
static inline void raid5_activate_delayed(raid5_conf_t *conf)
{
    if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
        while (!list_empty(&conf->delayed_list)) {
            struct list_head *l = conf->delayed_list.next;
            struct stripe_head *sh;
            sh = list_entry(l, struct stripe_head, lru);
            list_del_init(l);
            clear_bit(STRIPE_DELAYED, &sh->state);
            if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                atomic_inc(&conf->preread_active_stripes);
            list_add_tail(&sh->lru, &conf->handle_list);
        }
    }
}

static inline void activate_bit_delay(raid5_conf_t *conf)
{
    /* device_lock is held */
    struct list_head head;
    list_add(&head, &conf->bitmap_list);
    list_del_init(&conf->bitmap_list);
    while (!list_empty(&head)) {
        struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
        list_del_init(&sh->lru);
        atomic_inc(&sh->count);
        __release_stripe(conf, sh);
    }
}
static void unplug_slaves(mddev_t *mddev)
{
    raid5_conf_t *conf = mddev_to_conf(mddev);
    int i;

    rcu_read_lock();
    for (i=0; i<mddev->raid_disks; i++) {
        mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
        if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
            request_queue_t *r_queue = bdev_get_queue(rdev->bdev);

            atomic_inc(&rdev->nr_pending);
            rcu_read_unlock();

            if (r_queue->unplug_fn)
                r_queue->unplug_fn(r_queue);

            rdev_dec_pending(rdev, mddev);
            rcu_read_lock();
        }
    }
    rcu_read_unlock();
}
static void raid5_unplug_device(request_queue_t *q)
{
    mddev_t *mddev = q->queuedata;
    raid5_conf_t *conf = mddev_to_conf(mddev);
    unsigned long flags;

    spin_lock_irqsave(&conf->device_lock, flags);

    if (blk_remove_plug(q)) {
        conf->seq_flush++;
        raid5_activate_delayed(conf);
    }
    md_wakeup_thread(mddev->thread);

    spin_unlock_irqrestore(&conf->device_lock, flags);

    unplug_slaves(mddev);
}
static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
                 sector_t *error_sector)
{
    mddev_t *mddev = q->queuedata;
    raid5_conf_t *conf = mddev_to_conf(mddev);
    int i, ret = 0;

    rcu_read_lock();
    for (i=0; i<mddev->raid_disks && ret == 0; i++) {
        mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
        if (rdev && !test_bit(Faulty, &rdev->flags)) {
            struct block_device *bdev = rdev->bdev;
            request_queue_t *r_queue = bdev_get_queue(bdev);

            if (!r_queue->issue_flush_fn)
                ret = -EOPNOTSUPP;
            else {
                atomic_inc(&rdev->nr_pending);
                rcu_read_unlock();
                ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
                                  error_sector);
                rdev_dec_pending(rdev, mddev);
                rcu_read_lock();
            }
        }
    }
    rcu_read_unlock();
    return ret;
}
static inline void raid5_plug_device(raid5_conf_t *conf)
{
    spin_lock_irq(&conf->device_lock);
    blk_plug_device(conf->mddev->queue);
    spin_unlock_irq(&conf->device_lock);
}
static int make_request (request_queue_t *q, struct bio * bi)
{
    mddev_t *mddev = q->queuedata;
    raid5_conf_t *conf = mddev_to_conf(mddev);
    const unsigned int raid_disks = conf->raid_disks;
    const unsigned int data_disks = raid_disks - 1;
    unsigned int dd_idx, pd_idx;
    sector_t new_sector;
    sector_t logical_sector, last_sector;
    struct stripe_head *sh;
    const int rw = bio_data_dir(bi);

    if (unlikely(bio_barrier(bi))) {
        bio_endio(bi, bi->bi_size, -EOPNOTSUPP);
        return 0;
    }

    md_write_start(mddev, bi);

    disk_stat_inc(mddev->gendisk, ios[rw]);
    disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi));

    logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
    last_sector = bi->bi_sector + (bi->bi_size>>9);
    bi->bi_next = NULL;
    bi->bi_phys_segments = 1;   /* over-loaded to count active stripes */

    for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
        DEFINE_WAIT(w);

        new_sector = raid5_compute_sector(logical_sector,
            raid_disks, data_disks, &dd_idx, &pd_idx, conf);

        PRINTK("raid5: make_request, sector %llu logical %llu\n",
            (unsigned long long)new_sector,
            (unsigned long long)logical_sector);

    retry:
        prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
        sh = get_active_stripe(conf, new_sector, pd_idx, (bi->bi_rw&RWA_MASK));
        if (sh) {
            if (!add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
                /* Add failed due to overlap.  Flush everything
                 * and wait a while
                 */
                raid5_unplug_device(mddev->queue);
                release_stripe(sh);
                schedule();
                goto retry;
            }
            finish_wait(&conf->wait_for_overlap, &w);
            raid5_plug_device(conf);
            handle_stripe(sh);
            release_stripe(sh);
        } else {
            /* cannot get stripe for read-ahead, just give-up */
            clear_bit(BIO_UPTODATE, &bi->bi_flags);
            finish_wait(&conf->wait_for_overlap, &w);
            break;
        }
    }
    spin_lock_irq(&conf->device_lock);
    if (--bi->bi_phys_segments == 0) {
        int bytes = bi->bi_size;

        if ( bio_data_dir(bi) == WRITE )
            md_write_end(mddev);
        bi->bi_size = 0;
        bi->bi_end_io(bi, bytes, 0);
    }
    spin_unlock_irq(&conf->device_lock);
    return 0;
}
/* FIXME go_faster isn't used */
static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
    raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
    struct stripe_head *sh;
    int sectors_per_chunk = conf->chunk_size >> 9;
    sector_t x;
    unsigned long stripe;
    int chunk_offset;
    int dd_idx, pd_idx;
    sector_t first_sector;
    int raid_disks = conf->raid_disks;
    int data_disks = raid_disks-1;
    sector_t max_sector = mddev->size << 1;
    int sync_blocks;

    if (sector_nr >= max_sector) {
        /* just being told to finish up .. nothing much to do */
        unplug_slaves(mddev);

        if (mddev->curr_resync < max_sector) /* aborted */
            bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
                    &sync_blocks, 1);
        else /* completed sync */
            conf->fullsync = 0;
        bitmap_close_sync(mddev->bitmap);

        return 0;
    }
    /* if there is 1 or more failed drives and we are trying
     * to resync, then assert that we are finished, because there is
     * nothing we can do.
     */
    if (mddev->degraded >= 1 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
        sector_t rv = (mddev->size << 1) - sector_nr;
        *skipped = 1;
        return rv;
    }
    if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
        !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
        !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
        /* we can skip this block, and probably more */
        sync_blocks /= STRIPE_SECTORS;
        *skipped = 1;
        return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
    }

    x = sector_nr;
    chunk_offset = sector_div(x, sectors_per_chunk);
    stripe = x;
    BUG_ON(x != stripe);

    first_sector = raid5_compute_sector((sector_t)stripe*data_disks*sectors_per_chunk
        + chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf);
    sh = get_active_stripe(conf, sector_nr, pd_idx, 1);
    if (sh == NULL) {
        sh = get_active_stripe(conf, sector_nr, pd_idx, 0);
        /* make sure we don't swamp the stripe cache if someone else
         * is trying to get access
         */
        schedule_timeout_uninterruptible(1);
    }
    bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 0);
    spin_lock(&sh->lock);
    set_bit(STRIPE_SYNCING, &sh->state);
    clear_bit(STRIPE_INSYNC, &sh->state);
    spin_unlock(&sh->lock);

    handle_stripe(sh);
    release_stripe(sh);

    return STRIPE_SECTORS;
}
/*
 * This is our raid5 kernel thread.
 *
 * We scan the hash table for stripes which can be handled now.
 * During the scan, completed stripes are saved for us by the interrupt
 * handler, so that they will not have to wait for our next wakeup.
 */
static void raid5d (mddev_t *mddev)
{
    struct stripe_head *sh;
    raid5_conf_t *conf = mddev_to_conf(mddev);
    int handled;

    PRINTK("+++ raid5d active\n");

    md_check_recovery(mddev);

    handled = 0;
    spin_lock_irq(&conf->device_lock);
    while (1) {
        struct list_head *first;

        if (conf->seq_flush - conf->seq_write > 0) {
            int seq = conf->seq_flush;
            bitmap_unplug(mddev->bitmap);
            conf->seq_write = seq;
            activate_bit_delay(conf);
        }

        if (list_empty(&conf->handle_list) &&
            atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD &&
            !blk_queue_plugged(mddev->queue) &&
            !list_empty(&conf->delayed_list))
            raid5_activate_delayed(conf);

        if (list_empty(&conf->handle_list))
            break;

        first = conf->handle_list.next;
        sh = list_entry(first, struct stripe_head, lru);

        list_del_init(first);
        atomic_inc(&sh->count);
        if (atomic_read(&sh->count)!= 1)
            BUG();
        spin_unlock_irq(&conf->device_lock);

        handled++;
        handle_stripe(sh);
        release_stripe(sh);

        spin_lock_irq(&conf->device_lock);
    }
    PRINTK("%d stripes handled\n", handled);

    spin_unlock_irq(&conf->device_lock);

    unplug_slaves(mddev);

    PRINTK("--- raid5d inactive\n");
}
static ssize_t
raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
{
    raid5_conf_t *conf = mddev_to_conf(mddev);
    if (conf)
        return sprintf(page, "%d\n", conf->max_nr_stripes);
    else
        return 0;
}

static ssize_t
raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
{
    raid5_conf_t *conf = mddev_to_conf(mddev);
    char *end;
    int new;
    if (len >= PAGE_SIZE)
        return -EINVAL;
    if (!conf)
        return -ENODEV;

    new = simple_strtoul(page, &end, 10);
    if (!*page || (*end && *end != '\n') )
        return -EINVAL;
    if (new <= 16 || new > 32768)
        return -EINVAL;
    while (new < conf->max_nr_stripes) {
        if (drop_one_stripe(conf))
            conf->max_nr_stripes--;
        else
            break;
    }
    while (new > conf->max_nr_stripes) {
        if (grow_one_stripe(conf))
            conf->max_nr_stripes++;
        else
            break;
    }
    return len;
}
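
/*
 * Usage sketch (assuming the usual md sysfs layout):
 *   echo 512 > /sys/block/md0/md/stripe_cache_size
 * grows the cache one stripe_head at a time until max_nr_stripes reaches
 * 512; values of 16 or less, or greater than 32768, are rejected with
 * -EINVAL.
 */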
static struct md_sysfs_entry
raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
                raid5_show_stripe_cache_size,
                raid5_store_stripe_cache_size);

static ssize_t
stripe_cache_active_show(mddev_t *mddev, char *page)
{
    raid5_conf_t *conf = mddev_to_conf(mddev);
    if (conf)
        return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
    else
        return 0;
}

static struct md_sysfs_entry
raid5_stripecache_active = __ATTR_RO(stripe_cache_active);

static struct attribute *raid5_attrs[] = {
    &raid5_stripecache_size.attr,
    &raid5_stripecache_active.attr,
    NULL,
};
static struct attribute_group raid5_attrs_group = {
    .name = NULL,
    .attrs = raid5_attrs,
};
static int run(mddev_t *mddev)
{
    raid5_conf_t *conf;
    int raid_disk, memory;
    mdk_rdev_t *rdev;
    struct disk_info *disk;
    struct list_head *tmp;

    if (mddev->level != 5 && mddev->level != 4) {
        printk("raid5: %s: raid level not set to 4/5 (%d)\n", mdname(mddev), mddev->level);
        return -EIO;
    }

    mddev->private = kmalloc (sizeof (raid5_conf_t)
                  + mddev->raid_disks * sizeof(struct disk_info),
                  GFP_KERNEL);
    if ((conf = mddev->private) == NULL)
        goto abort;
    memset (conf, 0, sizeof (*conf) + mddev->raid_disks * sizeof(struct disk_info) );
    conf->mddev = mddev;

    if ((conf->stripe_hashtbl = (struct stripe_head **) __get_free_pages(GFP_ATOMIC, HASH_PAGES_ORDER)) == NULL)
        goto abort;
    memset(conf->stripe_hashtbl, 0, HASH_PAGES * PAGE_SIZE);

    spin_lock_init(&conf->device_lock);
    init_waitqueue_head(&conf->wait_for_stripe);
    init_waitqueue_head(&conf->wait_for_overlap);
    INIT_LIST_HEAD(&conf->handle_list);
    INIT_LIST_HEAD(&conf->delayed_list);
    INIT_LIST_HEAD(&conf->bitmap_list);
    INIT_LIST_HEAD(&conf->inactive_list);
    atomic_set(&conf->active_stripes, 0);
    atomic_set(&conf->preread_active_stripes, 0);

    PRINTK("raid5: run(%s) called.\n", mdname(mddev));

    ITERATE_RDEV(mddev,rdev,tmp) {
        raid_disk = rdev->raid_disk;
        if (raid_disk >= mddev->raid_disks
            || raid_disk < 0)
            continue;
        disk = conf->disks + raid_disk;

        disk->rdev = rdev;

        if (test_bit(In_sync, &rdev->flags)) {
            char b[BDEVNAME_SIZE];
            printk(KERN_INFO "raid5: device %s operational as raid"
                " disk %d\n", bdevname(rdev->bdev,b),
                raid_disk);
            conf->working_disks++;
        }
    }

    conf->raid_disks = mddev->raid_disks;
    /*
     * 0 for a fully functional array, 1 for a degraded array.
     */
    mddev->degraded = conf->failed_disks = conf->raid_disks - conf->working_disks;
    conf->mddev = mddev;
    conf->chunk_size = mddev->chunk_size;
    conf->level = mddev->level;
    conf->algorithm = mddev->layout;
    conf->max_nr_stripes = NR_STRIPES;

    /* device size must be a multiple of chunk size */
    mddev->size &= ~(mddev->chunk_size/1024 -1);
    mddev->resync_max_sectors = mddev->size << 1;

    if (!conf->chunk_size || conf->chunk_size % 4) {
        printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
            conf->chunk_size, mdname(mddev));
        goto abort;
    }
    if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) {
        printk(KERN_ERR
            "raid5: unsupported parity algorithm %d for %s\n",
            conf->algorithm, mdname(mddev));
        goto abort;
    }
    if (mddev->degraded > 1) {
        printk(KERN_ERR "raid5: not enough operational devices for %s"
            " (%d/%d failed)\n",
            mdname(mddev), conf->failed_disks, conf->raid_disks);
        goto abort;
    }

    if (mddev->degraded == 1 &&
        mddev->recovery_cp != MaxSector) {
        printk(KERN_ERR
            "raid5: cannot start dirty degraded array for %s\n",
            mdname(mddev));
        goto abort;
    }

    {
        mddev->thread = md_register_thread(raid5d, mddev, "%s_raid5");
        if (!mddev->thread) {
            printk(KERN_ERR
                "raid5: couldn't allocate thread for %s\n",
                mdname(mddev));
            goto abort;
        }
    }

    memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
         conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
    if (grow_stripes(conf, conf->max_nr_stripes)) {
        printk(KERN_ERR
            "raid5: couldn't allocate %dkB for buffers\n", memory);
        shrink_stripes(conf);
        md_unregister_thread(mddev->thread);
        goto abort;
    } else
        printk(KERN_INFO "raid5: allocated %dkB for %s\n",
            memory, mdname(mddev));

    if (mddev->degraded == 0)
        printk("raid5: raid level %d set %s active with %d out of %d"
            " devices, algorithm %d\n", conf->level, mdname(mddev),
            mddev->raid_disks-mddev->degraded, mddev->raid_disks,
            conf->algorithm);
    else
        printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
            " out of %d devices, algorithm %d\n", conf->level,
            mdname(mddev), mddev->raid_disks - mddev->degraded,
            mddev->raid_disks, conf->algorithm);

    print_raid5_conf(conf);

    /* read-ahead size must cover two whole stripes, which is
     * 2 * (n-1) * chunksize where 'n' is the number of raid devices
     */
    {
        int stripe = (mddev->raid_disks-1) * mddev->chunk_size
            / PAGE_SIZE;
        if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
            mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
    }

    /* Ok, everything is just fine now */
    sysfs_create_group(&mddev->kobj, &raid5_attrs_group);

    if (mddev->bitmap)
        mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;

    mddev->queue->unplug_fn = raid5_unplug_device;
    mddev->queue->issue_flush_fn = raid5_issue_flush;

    mddev->array_size = mddev->size * (mddev->raid_disks - 1);
    return 0;
abort:
    if (conf) {
        print_raid5_conf(conf);
        if (conf->stripe_hashtbl)
            free_pages((unsigned long) conf->stripe_hashtbl,
                            HASH_PAGES_ORDER);
        kfree(conf);
    }
    mddev->private = NULL;
    printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
    return -EIO;
}
static int stop(mddev_t *mddev)
{
    raid5_conf_t *conf = (raid5_conf_t *) mddev->private;

    md_unregister_thread(mddev->thread);
    mddev->thread = NULL;
    shrink_stripes(conf);
    free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER);
    blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
    sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
    kfree(conf);
    mddev->private = NULL;
    return 0;
}
#if RAID5_DEBUG
static void print_sh (struct stripe_head *sh)
{
    int i;

    printk("sh %llu, pd_idx %d, state %ld.\n",
        (unsigned long long)sh->sector, sh->pd_idx, sh->state);
    printk("sh %llu, count %d.\n",
        (unsigned long long)sh->sector, atomic_read(&sh->count));
    printk("sh %llu, ", (unsigned long long)sh->sector);
    for (i = 0; i < sh->raid_conf->raid_disks; i++) {
        printk("(cache%d: %p %ld) ",
            i, sh->dev[i].page, sh->dev[i].flags);
    }
    printk("\n");
}

static void printall (raid5_conf_t *conf)
{
    struct stripe_head *sh;
    int i;

    spin_lock_irq(&conf->device_lock);
    for (i = 0; i < NR_HASH; i++) {
        sh = conf->stripe_hashtbl[i];
        for (; sh; sh = sh->hash_next) {
            if (sh->raid_conf != conf)
                continue;
            print_sh(sh);
        }
    }
    spin_unlock_irq(&conf->device_lock);
}
#endif
static void status (struct seq_file *seq, mddev_t *mddev)
{
    raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
    int i;

    seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout);
    seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->working_disks);
    for (i = 0; i < conf->raid_disks; i++)
        seq_printf (seq, "%s",
            conf->disks[i].rdev &&
            test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
    seq_printf (seq, "]");
#if RAID5_DEBUG
#define D(x) \
    seq_printf (seq, "<"#x":%d>", atomic_read(&conf->x))
    printall(conf);
#endif
}
static void print_raid5_conf (raid5_conf_t *conf)
{
    int i;
    struct disk_info *tmp;

    printk("RAID5 conf printout:\n");
    if (!conf) {
        printk("(conf==NULL)\n");
        return;
    }
    printk(" --- rd:%d wd:%d fd:%d\n", conf->raid_disks,
         conf->working_disks, conf->failed_disks);

    for (i = 0; i < conf->raid_disks; i++) {
        char b[BDEVNAME_SIZE];
        tmp = conf->disks + i;
        if (tmp->rdev)
            printk(" disk %d, o:%d, dev:%s\n",
                i, !test_bit(Faulty, &tmp->rdev->flags),
                bdevname(tmp->rdev->bdev,b));
    }
}
static int raid5_spare_active(mddev_t *mddev)
{
    int i;
    raid5_conf_t *conf = mddev->private;
    struct disk_info *tmp;

    for (i = 0; i < conf->raid_disks; i++) {
        tmp = conf->disks + i;
        if (tmp->rdev
            && !test_bit(Faulty, &tmp->rdev->flags)
            && !test_bit(In_sync, &tmp->rdev->flags)) {
            mddev->degraded--;
            conf->failed_disks--;
            conf->working_disks++;
            set_bit(In_sync, &tmp->rdev->flags);
        }
    }
    print_raid5_conf(conf);
    return 0;
}
static int raid5_remove_disk(mddev_t *mddev, int number)
{
    raid5_conf_t *conf = mddev->private;
    int err = 0;
    mdk_rdev_t *rdev;
    struct disk_info *p = conf->disks + number;

    print_raid5_conf(conf);
    rdev = p->rdev;
    if (rdev) {
        if (test_bit(In_sync, &rdev->flags) ||
            atomic_read(&rdev->nr_pending)) {
            err = -EBUSY;
            goto abort;
        }
        p->rdev = NULL;
        synchronize_rcu();
        if (atomic_read(&rdev->nr_pending)) {
            /* lost the race, try later */
            err = -EBUSY;
            p->rdev = rdev;
        }
    }
abort:

    print_raid5_conf(conf);
    return err;
}
static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
    raid5_conf_t *conf = mddev->private;
    int found = 0;
    int disk;
    struct disk_info *p;

    if (mddev->degraded > 1)
        /* no point adding a device */
        return 0;

    /*
     * find the disk ...
     */
    for (disk=0; disk < mddev->raid_disks; disk++)
        if ((p=conf->disks + disk)->rdev == NULL) {
            clear_bit(In_sync, &rdev->flags);
            rdev->raid_disk = disk;
            found = 1;
            if (rdev->saved_raid_disk != disk)
                conf->fullsync = 1;
            rcu_assign_pointer(p->rdev, rdev);
            break;
        }
    print_raid5_conf(conf);
    return found;
}
static int raid5_resize(mddev_t *mddev, sector_t sectors)
{
    /* no resync is happening, and there is enough space
     * on all devices, so we can resize.
     * We need to make sure resync covers any new space.
     * If the array is shrinking we should possibly wait until
     * any io in the removed space completes, but it hardly seems
     * worth it.
     */
    sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
    mddev->array_size = (sectors * (mddev->raid_disks-1))>>1;
    set_capacity(mddev->gendisk, mddev->array_size << 1);
    mddev->changed = 1;
    if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) {
        mddev->recovery_cp = mddev->size << 1;
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
    }
    mddev->size = sectors /2;
    mddev->resync_max_sectors = sectors;
    return 0;
}
static void raid5_quiesce(mddev_t *mddev, int state)
{
    raid5_conf_t *conf = mddev_to_conf(mddev);

    switch(state) {
    case 1: /* stop all writes */
        spin_lock_irq(&conf->device_lock);
        conf->quiesce = 1;
        wait_event_lock_irq(conf->wait_for_stripe,
                    atomic_read(&conf->active_stripes) == 0,
                    conf->device_lock, /* nothing */);
        spin_unlock_irq(&conf->device_lock);
        break;

    case 0: /* re-enable writes */
        spin_lock_irq(&conf->device_lock);
        conf->quiesce = 0;
        wake_up(&conf->wait_for_stripe);
        spin_unlock_irq(&conf->device_lock);
        break;
    }
    if (mddev->thread) {
        if (mddev->bitmap)
            mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
        else
            mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
        md_wakeup_thread(mddev->thread);
    }
}
static mdk_personality_t raid5_personality=
{
    .name		= "raid5",
    .owner		= THIS_MODULE,
    .make_request	= make_request,
    .run		= run,
    .stop		= stop,
    .status		= status,
    .error_handler	= error,
    .hot_add_disk	= raid5_add_disk,
    .hot_remove_disk= raid5_remove_disk,
    .spare_active	= raid5_spare_active,
    .sync_request	= sync_request,
    .resize		= raid5_resize,
    .quiesce	= raid5_quiesce,
};
static int __init raid5_init (void)
{
    return register_md_personality (RAID5, &raid5_personality);
}

static void raid5_exit (void)
{
    unregister_md_personality (RAID5);
}

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-4"); /* RAID5 */