drivers/md/raid10.c
1 /*
2  * raid10.c : Multiple Devices driver for Linux
3  *
4  * Copyright (C) 2000-2004 Neil Brown
5  *
6  * RAID-10 support for md.
7  *
8  * Based on code in raid1.c.  See raid1.c for further copyright information.
9  *
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  * You should have received a copy of the GNU General Public License
17  * (for example /usr/src/linux/COPYING); if not, write to the Free
18  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19  */
20
21 #include "dm-bio-list.h"
22 #include <linux/raid/raid10.h>
23 #include <linux/raid/bitmap.h>
24
25 /*
26  * RAID10 provides a combination of RAID0 and RAID1 functionality.
27  * The layout of data is defined by
28  *    chunk_size
29  *    raid_disks
30  *    near_copies (stored in low byte of layout)
31  *    far_copies (stored in second byte of layout)
32  *    far_offset (stored in bit 16 of layout)
33  *
34  * The data to be stored is divided into chunks using chunksize.
35  * Each device is divided into far_copies sections.
36  * In each section, chunks are laid out in a style similar to raid0, but
37  * near_copies copies of each chunk are stored (each on a different drive).
38  * The starting device for each section is offset near_copies from the starting
39  * device of the previous section.
40  * Thus there are (near_copies*far_copies) copies of each chunk, and each is on a
41  * different drive.
42  * near_copies and far_copies must be at least one, and their product is at most
43  * raid_disks.
44  *
45  * If far_offset is true, then the far_copies are handled a bit differently.
46  * The copies are still in different stripes, but instead of being very far apart
47  * on disk, they are in adjacent stripes.
48  */
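/*
 * A worked example of the geometry above (illustrative): with raid_disks=4,
 * near_copies=2, far_copies=1 (mdadm's "n2" layout), chunk 0 is stored on
 * devices 0 and 1, chunk 1 on devices 2 and 3, chunk 2 on devices 0 and 1
 * one chunk further in, and so on.  With raid_disks=2, near_copies=1,
 * far_copies=2 ("f2"), chunk 0 starts device 0 and is repeated in the second
 * half of device 1, while chunk 1 starts device 1 and is repeated in the
 * second half of device 0.
 */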
49
50 /*
51  * Number of guaranteed r10bios in case of extreme VM load:
52  */
53 #define NR_RAID10_BIOS 256
54
55 static void unplug_slaves(mddev_t *mddev);
56
57 static void allow_barrier(conf_t *conf);
58 static void lower_barrier(conf_t *conf);
59
60 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
61 {
62         conf_t *conf = data;
63         r10bio_t *r10_bio;
64         int size = offsetof(struct r10bio_s, devs[conf->copies]);
65
66         /* allocate an r10bio with room for conf->copies entries in the devs array */
67         r10_bio = kzalloc(size, gfp_flags);
68         if (!r10_bio)
69                 unplug_slaves(conf->mddev);
70
71         return r10_bio;
72 }
73
74 static void r10bio_pool_free(void *r10_bio, void *data)
75 {
76         kfree(r10_bio);
77 }
78
79 /* Maximum size of each resync request */
80 #define RESYNC_BLOCK_SIZE (64*1024)
81 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
82 /* amount of memory to reserve for resync requests */
83 #define RESYNC_WINDOW (1024*1024)
84 /* maximum number of concurrent requests, memory permitting */
85 #define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
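/*
 * With the values above, RESYNC_WINDOW reserves 1M (16 blocks of 64K) and
 * RESYNC_DEPTH evaluates to 32M/64K = 512 concurrent resync requests.
 */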
86
87 /*
88  * When performing a resync, we need to read and compare, so
89  * we need as many pages as there are copies.
90  * When performing a recovery, we need 2 bios, one for read,
91  * one for write (we recover only one drive per r10buf)
92  *
93  */
94 static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
95 {
96         conf_t *conf = data;
97         struct page *page;
98         r10bio_t *r10_bio;
99         struct bio *bio;
100         int i, j;
101         int nalloc;
102
103         r10_bio = r10bio_pool_alloc(gfp_flags, conf);
104         if (!r10_bio) {
105                 unplug_slaves(conf->mddev);
106                 return NULL;
107         }
108
109         if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
110                 nalloc = conf->copies; /* resync */
111         else
112                 nalloc = 2; /* recovery */
113
114         /*
115          * Allocate bios.
116          */
117         for (j = nalloc ; j-- ; ) {
118                 bio = bio_alloc(gfp_flags, RESYNC_PAGES);
119                 if (!bio)
120                         goto out_free_bio;
121                 r10_bio->devs[j].bio = bio;
122         }
123         /*
124          * Allocate RESYNC_PAGES data pages and attach them
125          * where needed.
126          */
127         for (j = 0 ; j < nalloc; j++) {
128                 bio = r10_bio->devs[j].bio;
129                 for (i = 0; i < RESYNC_PAGES; i++) {
130                         page = alloc_page(gfp_flags);
131                         if (unlikely(!page))
132                                 goto out_free_pages;
133
134                         bio->bi_io_vec[i].bv_page = page;
135                 }
136         }
137
138         return r10_bio;
139
140 out_free_pages:
141         for ( ; i > 0 ; i--)
142                 safe_put_page(bio->bi_io_vec[i-1].bv_page);
143         while (j--)
144                 for (i = 0; i < RESYNC_PAGES ; i++)
145                         safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
146         j = -1;
147 out_free_bio:
148         while ( ++j < nalloc )
149                 bio_put(r10_bio->devs[j].bio);
150         r10bio_pool_free(r10_bio, conf);
151         return NULL;
152 }
153
154 static void r10buf_pool_free(void *__r10_bio, void *data)
155 {
156         int i;
157         conf_t *conf = data;
158         r10bio_t *r10bio = __r10_bio;
159         int j;
160
161         for (j=0; j < conf->copies; j++) {
162                 struct bio *bio = r10bio->devs[j].bio;
163                 if (bio) {
164                         for (i = 0; i < RESYNC_PAGES; i++) {
165                                 safe_put_page(bio->bi_io_vec[i].bv_page);
166                                 bio->bi_io_vec[i].bv_page = NULL;
167                         }
168                         bio_put(bio);
169                 }
170         }
171         r10bio_pool_free(r10bio, conf);
172 }
173
174 static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
175 {
176         int i;
177
178         for (i = 0; i < conf->copies; i++) {
179                 struct bio **bio = & r10_bio->devs[i].bio;
180                 if (*bio && *bio != IO_BLOCKED)
181                         bio_put(*bio);
182                 *bio = NULL;
183         }
184 }
185
186 static void free_r10bio(r10bio_t *r10_bio)
187 {
188         conf_t *conf = mddev_to_conf(r10_bio->mddev);
189
190         /*
191          * Wake up any possible resync thread that waits for the device
192          * to go idle.
193          */
194         allow_barrier(conf);
195
196         put_all_bios(conf, r10_bio);
197         mempool_free(r10_bio, conf->r10bio_pool);
198 }
199
200 static void put_buf(r10bio_t *r10_bio)
201 {
202         conf_t *conf = mddev_to_conf(r10_bio->mddev);
203
204         mempool_free(r10_bio, conf->r10buf_pool);
205
206         lower_barrier(conf);
207 }
208
209 static void reschedule_retry(r10bio_t *r10_bio)
210 {
211         unsigned long flags;
212         mddev_t *mddev = r10_bio->mddev;
213         conf_t *conf = mddev_to_conf(mddev);
214
215         spin_lock_irqsave(&conf->device_lock, flags);
216         list_add(&r10_bio->retry_list, &conf->retry_list);
217         conf->nr_queued ++;
218         spin_unlock_irqrestore(&conf->device_lock, flags);
219
220         /* wake up frozen array... */
221         wake_up(&conf->wait_barrier);
222
223         md_wakeup_thread(mddev->thread);
224 }
225
226 /*
227  * raid_end_bio_io() is called when we have finished servicing a mirrored
228  * operation and are ready to return a success/failure code to the buffer
229  * cache layer.
230  */
231 static void raid_end_bio_io(r10bio_t *r10_bio)
232 {
233         struct bio *bio = r10_bio->master_bio;
234
235         bio_endio(bio,
236                 test_bit(R10BIO_Uptodate, &r10_bio->state) ? 0 : -EIO);
237         free_r10bio(r10_bio);
238 }
239
240 /*
241  * Update disk head position estimator based on IRQ completion info.
242  */
243 static inline void update_head_pos(int slot, r10bio_t *r10_bio)
244 {
245         conf_t *conf = mddev_to_conf(r10_bio->mddev);
246
247         conf->mirrors[r10_bio->devs[slot].devnum].head_position =
248                 r10_bio->devs[slot].addr + (r10_bio->sectors);
249 }
250
251 static void raid10_end_read_request(struct bio *bio, int error)
252 {
253         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
254         r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
255         int slot, dev;
256         conf_t *conf = mddev_to_conf(r10_bio->mddev);
257
258
259         slot = r10_bio->read_slot;
260         dev = r10_bio->devs[slot].devnum;
261         /*
262          * this branch is our 'one mirror IO has finished' event handler:
263          */
264         update_head_pos(slot, r10_bio);
265
266         if (uptodate) {
267                 /*
268                  * Set R10BIO_Uptodate in our master bio, so that
269                  * we will return a good error code to the higher
270                  * levels even if IO on some other mirrored buffer fails.
271                  *
272                  * The 'master' represents the composite IO operation to
273                  * user-side. So if something waits for IO, then it will
274                  * wait for the 'master' bio.
275                  */
276                 set_bit(R10BIO_Uptodate, &r10_bio->state);
277                 raid_end_bio_io(r10_bio);
278         } else {
279                 /*
280                  * oops, read error:
281                  */
282                 char b[BDEVNAME_SIZE];
283                 if (printk_ratelimit())
284                         printk(KERN_ERR "raid10: %s: rescheduling sector %llu\n",
285                                bdevname(conf->mirrors[dev].rdev->bdev,b), (unsigned long long)r10_bio->sector);
286                 reschedule_retry(r10_bio);
287         }
288
289         rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
290 }
291
292 static void raid10_end_write_request(struct bio *bio, int error)
293 {
294         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
295         r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
296         int slot, dev;
297         conf_t *conf = mddev_to_conf(r10_bio->mddev);
298
299         for (slot = 0; slot < conf->copies; slot++)
300                 if (r10_bio->devs[slot].bio == bio)
301                         break;
302         dev = r10_bio->devs[slot].devnum;
303
304         /*
305          * this branch is our 'one mirror IO has finished' event handler:
306          */
307         if (!uptodate) {
308                 md_error(r10_bio->mddev, conf->mirrors[dev].rdev);
309                 /* an I/O failed, we can't clear the bitmap */
310                 set_bit(R10BIO_Degraded, &r10_bio->state);
311         } else
312                 /*
313                  * Set R10BIO_Uptodate in our master bio, so that
314                  * we will return a good error code to the higher
315                  * levels even if IO on some other mirrored buffer fails.
316                  *
317                  * The 'master' represents the composite IO operation to
318                  * user-side. So if something waits for IO, then it will
319                  * wait for the 'master' bio.
320                  */
321                 set_bit(R10BIO_Uptodate, &r10_bio->state);
322
323         update_head_pos(slot, r10_bio);
324
325         /*
326          *
327          * Let's see if all mirrored write operations have finished
328          * already.
329          */
330         if (atomic_dec_and_test(&r10_bio->remaining)) {
331                 /* clear the bitmap if all writes complete successfully */
332                 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
333                                 r10_bio->sectors,
334                                 !test_bit(R10BIO_Degraded, &r10_bio->state),
335                                 0);
336                 md_write_end(r10_bio->mddev);
337                 raid_end_bio_io(r10_bio);
338         }
339
340         rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
341 }
342
343
344 /*
345  * RAID10 layout manager
346  * As well as the chunksize and raid_disks count, there are two
347  * parameters: near_copies and far_copies.
348  * near_copies * far_copies must be <= raid_disks.
349  * Normally one of these will be 1.
350  * If both are 1, we get raid0.
351  * If near_copies == raid_disks, we get raid1.
352  *
353  * Chunks are laid out in raid0 style with near_copies copies of the
354  * first chunk, followed by near_copies copies of the next chunk and
355  * so on.
356  * If far_copies > 1, then after 1/far_copies of the array has been assigned
357  * as described above, we start again with a device offset of near_copies.
358  * So we effectively have another copy of the whole array further down all
359  * the drives, but with blocks on different drives.
360  * With this layout, a block is never stored twice on the same device.
361  *
362  * raid10_find_phys finds the sector offset of a given virtual sector
363  * on each device that it is on.
364  *
365  * raid10_find_virt does the reverse mapping, from a device and a
366  * sector offset to a virtual address
367  */
368
369 static void raid10_find_phys(conf_t *conf, r10bio_t *r10bio)
370 {
371         int n,f;
372         sector_t sector;
373         sector_t chunk;
374         sector_t stripe;
375         int dev;
376
377         int slot = 0;
378
379         /* now calculate first sector/dev */
380         chunk = r10bio->sector >> conf->chunk_shift;
381         sector = r10bio->sector & conf->chunk_mask;
382
383         chunk *= conf->near_copies;
384         stripe = chunk;
385         dev = sector_div(stripe, conf->raid_disks);
386         if (conf->far_offset)
387                 stripe *= conf->far_copies;
388
389         sector += stripe << conf->chunk_shift;
390
391         /* and calculate all the others */
392         for (n=0; n < conf->near_copies; n++) {
393                 int d = dev;
394                 sector_t s = sector;
395                 r10bio->devs[slot].addr = sector;
396                 r10bio->devs[slot].devnum = d;
397                 slot++;
398
399                 for (f = 1; f < conf->far_copies; f++) {
400                         d += conf->near_copies;
401                         if (d >= conf->raid_disks)
402                                 d -= conf->raid_disks;
403                         s += conf->stride;
404                         r10bio->devs[slot].devnum = d;
405                         r10bio->devs[slot].addr = s;
406                         slot++;
407                 }
408                 dev++;
409                 if (dev >= conf->raid_disks) {
410                         dev = 0;
411                         sector += (conf->chunk_mask + 1);
412                 }
413         }
414         BUG_ON(slot != conf->copies);
415 }
416
417 static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
418 {
419         sector_t offset, chunk, vchunk;
420
421         offset = sector & conf->chunk_mask;
422         if (conf->far_offset) {
423                 int fc;
424                 chunk = sector >> conf->chunk_shift;
425                 fc = sector_div(chunk, conf->far_copies);
426                 dev -= fc * conf->near_copies;
427                 if (dev < 0)
428                         dev += conf->raid_disks;
429         } else {
430                 while (sector >= conf->stride) {
431                         sector -= conf->stride;
432                         if (dev < conf->near_copies)
433                                 dev += conf->raid_disks - conf->near_copies;
434                         else
435                                 dev -= conf->near_copies;
436                 }
437                 chunk = sector >> conf->chunk_shift;
438         }
439         vchunk = chunk * conf->raid_disks + dev;
440         sector_div(vchunk, conf->near_copies);
441         return (vchunk << conf->chunk_shift) + offset;
442 }
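/*
 * A minimal user-space sketch of the near-copy mapping performed by
 * raid10_find_phys() (illustrative only, not part of the driver): the
 * hypothetical helper below handles just the far_copies == 1 case and
 * assumes the chunk size, expressed in sectors, is a power of two.
 */
#if 0	/* example only, never compiled */
#include <stdio.h>

static void near_copies_of(unsigned long long vsect, int raid_disks,
			   int near_copies, int chunk_sectors)
{
	unsigned long long chunk = vsect / chunk_sectors;
	unsigned long long offset = vsect % chunk_sectors;
	unsigned long long stripe = chunk * near_copies / raid_disks;
	int dev = (chunk * near_copies) % raid_disks;
	int n;

	for (n = 0; n < near_copies; n++) {
		printf("copy %d: device %d, sector %llu\n",
		       n, dev, stripe * chunk_sectors + offset);
		if (++dev == raid_disks) {
			dev = 0;
			stripe++;	/* wrapped, move one chunk further in */
		}
	}
}
#endif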
443
444 /**
445  *      raid10_mergeable_bvec -- tell bio layer if two requests can be merged
446  *      @q: request queue
447  *      @bvm: properties of new bio
448  *      @biovec: the request that could be merged to it.
449  *
450  *      Return amount of bytes we can accept at this offset
451  *      If near_copies == raid_disks, there are no striping issues,
452  *      but in that case, the function isn't called at all.
453  */
454 static int raid10_mergeable_bvec(struct request_queue *q,
455                                  struct bvec_merge_data *bvm,
456                                  struct bio_vec *biovec)
457 {
458         mddev_t *mddev = q->queuedata;
459         sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
460         int max;
461         unsigned int chunk_sectors = mddev->chunk_size >> 9;
462         unsigned int bio_sectors = bvm->bi_size >> 9;
463
464         max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
465         if (max < 0) max = 0; /* bio_add cannot handle a negative return */
466         if (max <= biovec->bv_len && bio_sectors == 0)
467                 return biovec->bv_len;
468         else
469                 return max;
470 }
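/*
 * Worked example (illustrative): with 64K chunks (chunk_sectors = 128), a
 * bio already holding 4 sectors that starts 120 sectors into a chunk can
 * accept at most (128 - (120 + 4)) << 9 = 2048 more bytes before it would
 * cross the chunk boundary.
 */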
471
472 /*
473  * This routine returns the disk from which the requested read should
474  * be done. There is a per-array 'next expected sequential IO' sector
475  * number - if this matches on the next IO then we use the last disk.
476  * There is also a per-disk 'last known head position' sector that is
477  * maintained from IRQ contexts, both the normal and the resync IO
478  * completion handlers update this position correctly. If there is no
479  * perfect sequential match then we pick the disk whose head is closest.
480  *
481  * If there are 2 mirrors in the same 2 devices, performance degrades
482  * because the head position is tracked per mirror, not per device.
483  *
484  * The rdev for the device selected will have nr_pending incremented.
485  */
486
487 /*
488  * FIXME: possibly should rethink readbalancing and do it differently
489  * depending on near_copies / far_copies geometry.
490  */
491 static int read_balance(conf_t *conf, r10bio_t *r10_bio)
492 {
493         const unsigned long this_sector = r10_bio->sector;
494         int disk, slot, nslot;
495         const int sectors = r10_bio->sectors;
496         sector_t new_distance, current_distance;
497         mdk_rdev_t *rdev;
498
499         raid10_find_phys(conf, r10_bio);
500         rcu_read_lock();
501         /*
502          * Check if we can balance. We can balance on the whole
503          * device if no resync is going on (recovery is ok), or below
504          * the resync window. We take the first readable disk when
505          * above the resync window.
506          */
507         if (conf->mddev->recovery_cp < MaxSector
508             && (this_sector + sectors >= conf->next_resync)) {
509                 /* make sure that disk is operational */
510                 slot = 0;
511                 disk = r10_bio->devs[slot].devnum;
512
513                 while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
514                        r10_bio->devs[slot].bio == IO_BLOCKED ||
515                        !test_bit(In_sync, &rdev->flags)) {
516                         slot++;
517                         if (slot == conf->copies) {
518                                 slot = 0;
519                                 disk = -1;
520                                 break;
521                         }
522                         disk = r10_bio->devs[slot].devnum;
523                 }
524                 goto rb_out;
525         }
526
527
528         /* make sure the disk is operational */
529         slot = 0;
530         disk = r10_bio->devs[slot].devnum;
531         while ((rdev=rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
532                r10_bio->devs[slot].bio == IO_BLOCKED ||
533                !test_bit(In_sync, &rdev->flags)) {
534                 slot ++;
535                 if (slot == conf->copies) {
536                         disk = -1;
537                         goto rb_out;
538                 }
539                 disk = r10_bio->devs[slot].devnum;
540         }
541
542
543         current_distance = abs(r10_bio->devs[slot].addr -
544                                conf->mirrors[disk].head_position);
545
546         /* Find the disk whose head is closest,
547          * or - for far > 1 - find the closest to partition beginning */
548
549         for (nslot = slot; nslot < conf->copies; nslot++) {
550                 int ndisk = r10_bio->devs[nslot].devnum;
551
552
553                 if ((rdev=rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL ||
554                     r10_bio->devs[nslot].bio == IO_BLOCKED ||
555                     !test_bit(In_sync, &rdev->flags))
556                         continue;
557
558                 /* This optimisation is debatable, and completely destroys
559                  * sequential read speed for 'far copies' arrays.  So only
560                  * keep it for 'near' arrays, and review those later.
561                  */
562                 if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending)) {
563                         disk = ndisk;
564                         slot = nslot;
565                         break;
566                 }
567
568                 /* for far > 1 always use the lowest address */
569                 if (conf->far_copies > 1)
570                         new_distance = r10_bio->devs[nslot].addr;
571                 else
572                         new_distance = abs(r10_bio->devs[nslot].addr -
573                                            conf->mirrors[ndisk].head_position);
574                 if (new_distance < current_distance) {
575                         current_distance = new_distance;
576                         disk = ndisk;
577                         slot = nslot;
578                 }
579         }
580
581 rb_out:
582         r10_bio->read_slot = slot;
583 /*      conf->next_seq_sect = this_sector + sectors;*/
584
585         if (disk >= 0 && (rdev=rcu_dereference(conf->mirrors[disk].rdev))!= NULL)
586                 atomic_inc(&conf->mirrors[disk].rdev->nr_pending);
587         else
588                 disk = -1;
589         rcu_read_unlock();
590
591         return disk;
592 }
593
594 static void unplug_slaves(mddev_t *mddev)
595 {
596         conf_t *conf = mddev_to_conf(mddev);
597         int i;
598
599         rcu_read_lock();
600         for (i=0; i<mddev->raid_disks; i++) {
601                 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
602                 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
603                         struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
604
605                         atomic_inc(&rdev->nr_pending);
606                         rcu_read_unlock();
607
608                         blk_unplug(r_queue);
609
610                         rdev_dec_pending(rdev, mddev);
611                         rcu_read_lock();
612                 }
613         }
614         rcu_read_unlock();
615 }
616
617 static void raid10_unplug(struct request_queue *q)
618 {
619         mddev_t *mddev = q->queuedata;
620
621         unplug_slaves(q->queuedata);
622         md_wakeup_thread(mddev->thread);
623 }
624
625 static int raid10_congested(void *data, int bits)
626 {
627         mddev_t *mddev = data;
628         conf_t *conf = mddev_to_conf(mddev);
629         int i, ret = 0;
630
631         rcu_read_lock();
632         for (i = 0; i < mddev->raid_disks && ret == 0; i++) {
633                 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
634                 if (rdev && !test_bit(Faulty, &rdev->flags)) {
635                         struct request_queue *q = bdev_get_queue(rdev->bdev);
636
637                         ret |= bdi_congested(&q->backing_dev_info, bits);
638                 }
639         }
640         rcu_read_unlock();
641         return ret;
642 }
643
644 static int flush_pending_writes(conf_t *conf)
645 {
646         /* Any writes that have been queued but are awaiting
647          * bitmap updates get flushed here.
648          * We return 1 if any requests were actually submitted.
649          */
650         int rv = 0;
651
652         spin_lock_irq(&conf->device_lock);
653
654         if (conf->pending_bio_list.head) {
655                 struct bio *bio;
656                 bio = bio_list_get(&conf->pending_bio_list);
657                 blk_remove_plug(conf->mddev->queue);
658                 spin_unlock_irq(&conf->device_lock);
659                 /* flush any pending bitmap writes to disk
660                  * before proceeding w/ I/O */
661                 bitmap_unplug(conf->mddev->bitmap);
662
663                 while (bio) { /* submit pending writes */
664                         struct bio *next = bio->bi_next;
665                         bio->bi_next = NULL;
666                         generic_make_request(bio);
667                         bio = next;
668                 }
669                 rv = 1;
670         } else
671                 spin_unlock_irq(&conf->device_lock);
672         return rv;
673 }
674 /* Barriers....
675  * Sometimes we need to suspend IO while we do something else,
676  * either some resync/recovery, or reconfigure the array.
677  * To do this we raise a 'barrier'.
678  * The 'barrier' is a counter that can be raised multiple times
679  * to count how many activities are happening which preclude
680  * normal IO.
681  * We can only raise the barrier if there is no pending IO.
682  * i.e. if nr_pending == 0.
683  * We choose only to raise the barrier if no-one is waiting for the
684  * barrier to go down.  This means that as soon as an IO request
685  * is ready, no other operations which require a barrier will start
686  * until the IO request has had a chance.
687  *
688  * So: regular IO calls 'wait_barrier'.  When that returns there
689  *    is no background IO happening.  It must arrange to call
690  *    allow_barrier when it has finished its IO.
691  * background IO calls must call raise_barrier.  Once that returns
692  *    there is no normal IO happening.  It must arrange to call
693  *    lower_barrier when the particular background IO completes.
694  */
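/*
 * How the two sides pair up in this file (a sketch): a normal request takes
 * a reference with wait_barrier() before it is issued and drops it via
 * allow_barrier() when the r10bio is freed (free_r10bio), while background
 * resync/recovery raises the barrier with raise_barrier() and drops it via
 * lower_barrier() when the r10buf is returned (put_buf).
 */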
695
696 static void raise_barrier(conf_t *conf, int force)
697 {
698         BUG_ON(force && !conf->barrier);
699         spin_lock_irq(&conf->resync_lock);
700
701         /* Wait until no block IO is waiting (unless 'force') */
702         wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
703                             conf->resync_lock,
704                             raid10_unplug(conf->mddev->queue));
705
706         /* block any new IO from starting */
707         conf->barrier++;
708
709         /* Now wait for all pending IO to complete */
710         wait_event_lock_irq(conf->wait_barrier,
711                             !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
712                             conf->resync_lock,
713                             raid10_unplug(conf->mddev->queue));
714
715         spin_unlock_irq(&conf->resync_lock);
716 }
717
718 static void lower_barrier(conf_t *conf)
719 {
720         unsigned long flags;
721         spin_lock_irqsave(&conf->resync_lock, flags);
722         conf->barrier--;
723         spin_unlock_irqrestore(&conf->resync_lock, flags);
724         wake_up(&conf->wait_barrier);
725 }
726
727 static void wait_barrier(conf_t *conf)
728 {
729         spin_lock_irq(&conf->resync_lock);
730         if (conf->barrier) {
731                 conf->nr_waiting++;
732                 wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
733                                     conf->resync_lock,
734                                     raid10_unplug(conf->mddev->queue));
735                 conf->nr_waiting--;
736         }
737         conf->nr_pending++;
738         spin_unlock_irq(&conf->resync_lock);
739 }
740
741 static void allow_barrier(conf_t *conf)
742 {
743         unsigned long flags;
744         spin_lock_irqsave(&conf->resync_lock, flags);
745         conf->nr_pending--;
746         spin_unlock_irqrestore(&conf->resync_lock, flags);
747         wake_up(&conf->wait_barrier);
748 }
749
750 static void freeze_array(conf_t *conf)
751 {
752         /* stop syncio and normal IO and wait for everything to
753          * go quiet.
754          * We increment barrier and nr_waiting, and then
755          * wait until nr_pending matches nr_queued+1
756          * This is called in the context of one normal IO request
757          * that has failed. Thus any sync request that might be pending
758          * will be blocked by nr_pending, and we need to wait for
759          * pending IO requests to complete or be queued for re-try.
760          * Thus the number queued (nr_queued) plus this request (1)
761          * must match the number of pending IOs (nr_pending) before
762          * we continue.
763          */
764         spin_lock_irq(&conf->resync_lock);
765         conf->barrier++;
766         conf->nr_waiting++;
767         wait_event_lock_irq(conf->wait_barrier,
768                             conf->nr_pending == conf->nr_queued+1,
769                             conf->resync_lock,
770                             ({ flush_pending_writes(conf);
771                                raid10_unplug(conf->mddev->queue); }));
772         spin_unlock_irq(&conf->resync_lock);
773 }
774
775 static void unfreeze_array(conf_t *conf)
776 {
777         /* reverse the effect of the freeze */
778         spin_lock_irq(&conf->resync_lock);
779         conf->barrier--;
780         conf->nr_waiting--;
781         wake_up(&conf->wait_barrier);
782         spin_unlock_irq(&conf->resync_lock);
783 }
784
785 static int make_request(struct request_queue *q, struct bio * bio)
786 {
787         mddev_t *mddev = q->queuedata;
788         conf_t *conf = mddev_to_conf(mddev);
789         mirror_info_t *mirror;
790         r10bio_t *r10_bio;
791         struct bio *read_bio;
792         int cpu;
793         int i;
794         int chunk_sects = conf->chunk_mask + 1;
795         const int rw = bio_data_dir(bio);
796         const int do_sync = bio_sync(bio);
797         struct bio_list bl;
798         unsigned long flags;
799         mdk_rdev_t *blocked_rdev;
800
801         if (unlikely(bio_barrier(bio))) {
802                 bio_endio(bio, -EOPNOTSUPP);
803                 return 0;
804         }
805
806         /* If this request crosses a chunk boundary, we need to
807          * split it.  This will only happen for 1 PAGE (or less) requests.
808          */
809         if (unlikely( (bio->bi_sector & conf->chunk_mask) + (bio->bi_size >> 9)
810                       > chunk_sects &&
811                     conf->near_copies < conf->raid_disks)) {
812                 struct bio_pair *bp;
813                 /* Sanity check -- queue functions should prevent this happening */
814                 if (bio->bi_vcnt != 1 ||
815                     bio->bi_idx != 0)
816                         goto bad_map;
817                 /* This is a one page bio that upper layers
818                  * refuse to split for us, so we need to split it.
819                  */
820                 bp = bio_split(bio,
821                                chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
822                 if (make_request(q, &bp->bio1))
823                         generic_make_request(&bp->bio1);
824                 if (make_request(q, &bp->bio2))
825                         generic_make_request(&bp->bio2);
826
827                 bio_pair_release(bp);
828                 return 0;
829         bad_map:
830                 printk("raid10_make_request bug: can't convert block across chunks"
831                        " or bigger than %dk %llu %d\n", chunk_sects/2,
832                        (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
833
834                 bio_io_error(bio);
835                 return 0;
836         }
837
838         md_write_start(mddev, bio);
839
840         /*
841          * Register the new request and wait if the reconstruction
842          * thread has put up a bar for new requests.
843          * Continue immediately if no resync is active currently.
844          */
845         wait_barrier(conf);
846
847         cpu = part_stat_lock();
848         part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
849         part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
850                       bio_sectors(bio));
851         part_stat_unlock();
852
853         r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
854
855         r10_bio->master_bio = bio;
856         r10_bio->sectors = bio->bi_size >> 9;
857
858         r10_bio->mddev = mddev;
859         r10_bio->sector = bio->bi_sector;
860         r10_bio->state = 0;
861
862         if (rw == READ) {
863                 /*
864                  * read balancing logic:
865                  */
866                 int disk = read_balance(conf, r10_bio);
867                 int slot = r10_bio->read_slot;
868                 if (disk < 0) {
869                         raid_end_bio_io(r10_bio);
870                         return 0;
871                 }
872                 mirror = conf->mirrors + disk;
873
874                 read_bio = bio_clone(bio, GFP_NOIO);
875
876                 r10_bio->devs[slot].bio = read_bio;
877
878                 read_bio->bi_sector = r10_bio->devs[slot].addr +
879                         mirror->rdev->data_offset;
880                 read_bio->bi_bdev = mirror->rdev->bdev;
881                 read_bio->bi_end_io = raid10_end_read_request;
882                 read_bio->bi_rw = READ | do_sync;
883                 read_bio->bi_private = r10_bio;
884
885                 generic_make_request(read_bio);
886                 return 0;
887         }
888
889         /*
890          * WRITE:
891          */
892         /* first select target devices under rcu_lock and
893          * inc refcount on their rdev.  Record them by setting
894          * bios[x] to bio
895          */
896         raid10_find_phys(conf, r10_bio);
897  retry_write:
898         blocked_rdev = NULL;
899         rcu_read_lock();
900         for (i = 0;  i < conf->copies; i++) {
901                 int d = r10_bio->devs[i].devnum;
902                 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
903                 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
904                         atomic_inc(&rdev->nr_pending);
905                         blocked_rdev = rdev;
906                         break;
907                 }
908                 if (rdev && !test_bit(Faulty, &rdev->flags)) {
909                         atomic_inc(&rdev->nr_pending);
910                         r10_bio->devs[i].bio = bio;
911                 } else {
912                         r10_bio->devs[i].bio = NULL;
913                         set_bit(R10BIO_Degraded, &r10_bio->state);
914                 }
915         }
916         rcu_read_unlock();
917
918         if (unlikely(blocked_rdev)) {
919                 /* Have to wait for this device to get unblocked, then retry */
920                 int j;
921                 int d;
922
923                 for (j = 0; j < i; j++)
924                         if (r10_bio->devs[j].bio) {
925                                 d = r10_bio->devs[j].devnum;
926                                 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
927                         }
928                 allow_barrier(conf);
929                 md_wait_for_blocked_rdev(blocked_rdev, mddev);
930                 wait_barrier(conf);
931                 goto retry_write;
932         }
933
934         atomic_set(&r10_bio->remaining, 0);
935
936         bio_list_init(&bl);
937         for (i = 0; i < conf->copies; i++) {
938                 struct bio *mbio;
939                 int d = r10_bio->devs[i].devnum;
940                 if (!r10_bio->devs[i].bio)
941                         continue;
942
943                 mbio = bio_clone(bio, GFP_NOIO);
944                 r10_bio->devs[i].bio = mbio;
945
946                 mbio->bi_sector = r10_bio->devs[i].addr+
947                         conf->mirrors[d].rdev->data_offset;
948                 mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
949                 mbio->bi_end_io = raid10_end_write_request;
950                 mbio->bi_rw = WRITE | do_sync;
951                 mbio->bi_private = r10_bio;
952
953                 atomic_inc(&r10_bio->remaining);
954                 bio_list_add(&bl, mbio);
955         }
956
957         if (unlikely(!atomic_read(&r10_bio->remaining))) {
958                 /* the array is dead */
959                 md_write_end(mddev);
960                 raid_end_bio_io(r10_bio);
961                 return 0;
962         }
963
964         bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);
965         spin_lock_irqsave(&conf->device_lock, flags);
966         bio_list_merge(&conf->pending_bio_list, &bl);
967         blk_plug_device(mddev->queue);
968         spin_unlock_irqrestore(&conf->device_lock, flags);
969
970         /* In case raid10d snuck in to freeze_array */
971         wake_up(&conf->wait_barrier);
972
973         if (do_sync)
974                 md_wakeup_thread(mddev->thread);
975
976         return 0;
977 }
978
979 static void status(struct seq_file *seq, mddev_t *mddev)
980 {
981         conf_t *conf = mddev_to_conf(mddev);
982         int i;
983
984         if (conf->near_copies < conf->raid_disks)
985                 seq_printf(seq, " %dK chunks", mddev->chunk_size/1024);
986         if (conf->near_copies > 1)
987                 seq_printf(seq, " %d near-copies", conf->near_copies);
988         if (conf->far_copies > 1) {
989                 if (conf->far_offset)
990                         seq_printf(seq, " %d offset-copies", conf->far_copies);
991                 else
992                         seq_printf(seq, " %d far-copies", conf->far_copies);
993         }
994         seq_printf(seq, " [%d/%d] [", conf->raid_disks,
995                                         conf->raid_disks - mddev->degraded);
996         for (i = 0; i < conf->raid_disks; i++)
997                 seq_printf(seq, "%s",
998                               conf->mirrors[i].rdev &&
999                               test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
1000         seq_printf(seq, "]");
1001 }
1002
1003 static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1004 {
1005         char b[BDEVNAME_SIZE];
1006         conf_t *conf = mddev_to_conf(mddev);
1007
1008         /*
1009          * If it is not operational, then we have already marked it as dead
1010          * else if it is the last working disk, ignore the error, let the
1011          * next level up know.
1012          * else mark the drive as failed
1013          */
1014         if (test_bit(In_sync, &rdev->flags)
1015             && conf->raid_disks-mddev->degraded == 1)
1016                 /*
1017                  * Don't fail the drive, just return an IO error.
1018                  * The test should really be more sophisticated than
1019                  * "working_disks == 1", but it isn't critical, and
1020                  * can wait until we do more sophisticated "is the drive
1021                  * really dead" tests...
1022                  */
1023                 return;
1024         if (test_and_clear_bit(In_sync, &rdev->flags)) {
1025                 unsigned long flags;
1026                 spin_lock_irqsave(&conf->device_lock, flags);
1027                 mddev->degraded++;
1028                 spin_unlock_irqrestore(&conf->device_lock, flags);
1029                 /*
1030                  * if recovery is running, make sure it aborts.
1031                  */
1032                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1033         }
1034         set_bit(Faulty, &rdev->flags);
1035         set_bit(MD_CHANGE_DEVS, &mddev->flags);
1036         printk(KERN_ALERT "raid10: Disk failure on %s, disabling device.\n"
1037                 "raid10: Operation continuing on %d devices.\n",
1038                 bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
1039 }
1040
1041 static void print_conf(conf_t *conf)
1042 {
1043         int i;
1044         mirror_info_t *tmp;
1045
1046         printk("RAID10 conf printout:\n");
1047         if (!conf) {
1048                 printk("(!conf)\n");
1049                 return;
1050         }
1051         printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1052                 conf->raid_disks);
1053
1054         for (i = 0; i < conf->raid_disks; i++) {
1055                 char b[BDEVNAME_SIZE];
1056                 tmp = conf->mirrors + i;
1057                 if (tmp->rdev)
1058                         printk(" disk %d, wo:%d, o:%d, dev:%s\n",
1059                                 i, !test_bit(In_sync, &tmp->rdev->flags),
1060                                 !test_bit(Faulty, &tmp->rdev->flags),
1061                                 bdevname(tmp->rdev->bdev,b));
1062         }
1063 }
1064
1065 static void close_sync(conf_t *conf)
1066 {
1067         wait_barrier(conf);
1068         allow_barrier(conf);
1069
1070         mempool_destroy(conf->r10buf_pool);
1071         conf->r10buf_pool = NULL;
1072 }
1073
1074 /* check if there are enough drives for
1075  * every block to appear on at least one device
1076  */
1077 static int enough(conf_t *conf)
1078 {
1079         int first = 0;
1080
1081         do {
1082                 int n = conf->copies;
1083                 int cnt = 0;
1084                 while (n--) {
1085                         if (conf->mirrors[first].rdev)
1086                                 cnt++;
1087                         first = (first+1) % conf->raid_disks;
1088                 }
1089                 if (cnt == 0)
1090                         return 0;
1091         } while (first != 0);
1092         return 1;
1093 }
1094
1095 static int raid10_spare_active(mddev_t *mddev)
1096 {
1097         int i;
1098         conf_t *conf = mddev->private;
1099         mirror_info_t *tmp;
1100
1101         /*
1102          * Find all non-in_sync disks within the RAID10 configuration
1103          * and mark them in_sync
1104          */
1105         for (i = 0; i < conf->raid_disks; i++) {
1106                 tmp = conf->mirrors + i;
1107                 if (tmp->rdev
1108                     && !test_bit(Faulty, &tmp->rdev->flags)
1109                     && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
1110                         unsigned long flags;
1111                         spin_lock_irqsave(&conf->device_lock, flags);
1112                         mddev->degraded--;
1113                         spin_unlock_irqrestore(&conf->device_lock, flags);
1114                 }
1115         }
1116
1117         print_conf(conf);
1118         return 0;
1119 }
1120
1121
1122 static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
1123 {
1124         conf_t *conf = mddev->private;
1125         int err = -EEXIST;
1126         int mirror;
1127         mirror_info_t *p;
1128         int first = 0;
1129         int last = mddev->raid_disks - 1;
1130
1131         if (mddev->recovery_cp < MaxSector)
1132                 /* only hot-add to in-sync arrays, as recovery is
1133                  * very different from resync
1134                  */
1135                 return -EBUSY;
1136         if (!enough(conf))
1137                 return -EINVAL;
1138
1139         if (rdev->raid_disk)
1140                 first = last = rdev->raid_disk;
1141
1142         if (rdev->saved_raid_disk >= 0 &&
1143             rdev->saved_raid_disk >= first &&
1144             conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1145                 mirror = rdev->saved_raid_disk;
1146         else
1147                 mirror = first;
1148         for ( ; mirror <= last ; mirror++)
1149                 if ( !(p=conf->mirrors+mirror)->rdev) {
1150
1151                         blk_queue_stack_limits(mddev->queue,
1152                                                rdev->bdev->bd_disk->queue);
1153                         /* as we don't honour merge_bvec_fn, we must never risk
1154          * violating it, so limit ->max_sectors to one PAGE, as
1155                          * a one page request is never in violation.
1156                          */
1157                         if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
1158                             mddev->queue->max_sectors > (PAGE_SIZE>>9))
1159                                 mddev->queue->max_sectors = (PAGE_SIZE>>9);
1160
1161                         p->head_position = 0;
1162                         rdev->raid_disk = mirror;
1163                         err = 0;
1164                         if (rdev->saved_raid_disk != mirror)
1165                                 conf->fullsync = 1;
1166                         rcu_assign_pointer(p->rdev, rdev);
1167                         break;
1168                 }
1169
1170         print_conf(conf);
1171         return err;
1172 }
1173
1174 static int raid10_remove_disk(mddev_t *mddev, int number)
1175 {
1176         conf_t *conf = mddev->private;
1177         int err = 0;
1178         mdk_rdev_t *rdev;
1179         mirror_info_t *p = conf->mirrors+ number;
1180
1181         print_conf(conf);
1182         rdev = p->rdev;
1183         if (rdev) {
1184                 if (test_bit(In_sync, &rdev->flags) ||
1185                     atomic_read(&rdev->nr_pending)) {
1186                         err = -EBUSY;
1187                         goto abort;
1188                 }
1189                 /* Only remove faulty devices if recovery
1190                  * is not possible.
1191                  */
1192                 if (!test_bit(Faulty, &rdev->flags) &&
1193                     enough(conf)) {
1194                         err = -EBUSY;
1195                         goto abort;
1196                 }
1197                 p->rdev = NULL;
1198                 synchronize_rcu();
1199                 if (atomic_read(&rdev->nr_pending)) {
1200                         /* lost the race, try later */
1201                         err = -EBUSY;
1202                         p->rdev = rdev;
1203                 }
1204         }
1205 abort:
1206
1207         print_conf(conf);
1208         return err;
1209 }
1210
1211
1212 static void end_sync_read(struct bio *bio, int error)
1213 {
1214         r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
1215         conf_t *conf = mddev_to_conf(r10_bio->mddev);
1216         int i,d;
1217
1218         for (i=0; i<conf->copies; i++)
1219                 if (r10_bio->devs[i].bio == bio)
1220                         break;
1221         BUG_ON(i == conf->copies);
1222         update_head_pos(i, r10_bio);
1223         d = r10_bio->devs[i].devnum;
1224
1225         if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1226                 set_bit(R10BIO_Uptodate, &r10_bio->state);
1227         else {
1228                 atomic_add(r10_bio->sectors,
1229                            &conf->mirrors[d].rdev->corrected_errors);
1230                 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
1231                         md_error(r10_bio->mddev,
1232                                  conf->mirrors[d].rdev);
1233         }
1234
1235         /* for reconstruct, we always reschedule after a read.
1236          * for resync, only after all reads
1237          */
1238         if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
1239             atomic_dec_and_test(&r10_bio->remaining)) {
1240                 /* we have read all the blocks,
1241                  * do the comparison in process context in raid10d
1242                  */
1243                 reschedule_retry(r10_bio);
1244         }
1245         rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1246 }
1247
1248 static void end_sync_write(struct bio *bio, int error)
1249 {
1250         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1251         r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
1252         mddev_t *mddev = r10_bio->mddev;
1253         conf_t *conf = mddev_to_conf(mddev);
1254         int i,d;
1255
1256         for (i = 0; i < conf->copies; i++)
1257                 if (r10_bio->devs[i].bio == bio)
1258                         break;
1259         d = r10_bio->devs[i].devnum;
1260
1261         if (!uptodate)
1262                 md_error(mddev, conf->mirrors[d].rdev);
1263
1264         update_head_pos(i, r10_bio);
1265
1266         while (atomic_dec_and_test(&r10_bio->remaining)) {
1267                 if (r10_bio->master_bio == NULL) {
1268                         /* the primary of several recovery bios */
1269                         md_done_sync(mddev, r10_bio->sectors, 1);
1270                         put_buf(r10_bio);
1271                         break;
1272                 } else {
1273                         r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
1274                         put_buf(r10_bio);
1275                         r10_bio = r10_bio2;
1276                 }
1277         }
1278         rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1279 }
1280
1281 /*
1282  * Note: sync and recovery are handled very differently for raid10
1283  * This code is for resync.
1284  * For resync, we read through virtual addresses and read all blocks.
1285  * If there is any error, we schedule a write.  The lowest numbered
1286  * drive is authoritative.
1287  * However requests come for physical addresses, so we need to map.
1288  * For every physical address there are raid_disks/copies virtual addresses,
1289  * which is always at least one, but is not necessarily an integer.
1290  * This means that a physical address can span multiple chunks, so we may
1291  * have to submit multiple io requests for a single sync request.
1292  */
1293 /*
1294  * We check if all blocks are in-sync and only write to blocks that
1295  * aren't in sync
1296  */
1297 static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1298 {
1299         conf_t *conf = mddev_to_conf(mddev);
1300         int i, first;
1301         struct bio *tbio, *fbio;
1302
1303         atomic_set(&r10_bio->remaining, 1);
1304
1305         /* find the first device with a block */
1306         for (i=0; i<conf->copies; i++)
1307                 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
1308                         break;
1309
1310         if (i == conf->copies)
1311                 goto done;
1312
1313         first = i;
1314         fbio = r10_bio->devs[i].bio;
1315
1316         /* now find blocks with errors */
1317         for (i=0 ; i < conf->copies ; i++) {
1318                 int  j, d;
1319                 int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
1320
1321                 tbio = r10_bio->devs[i].bio;
1322
1323                 if (tbio->bi_end_io != end_sync_read)
1324                         continue;
1325                 if (i == first)
1326                         continue;
1327                 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
1328                         /* We know that the bi_io_vec layout is the same for
1329                          * both 'first' and 'i', so we just compare them.
1330                          * All vec entries are PAGE_SIZE;
1331                          */
1332                         for (j = 0; j < vcnt; j++)
1333                                 if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
1334                                            page_address(tbio->bi_io_vec[j].bv_page),
1335                                            PAGE_SIZE))
1336                                         break;
1337                         if (j == vcnt)
1338                                 continue;
1339                         mddev->resync_mismatches += r10_bio->sectors;
1340                 }
1341                 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
1342                         /* Don't fix anything. */
1343                         continue;
1344                 /* Ok, we need to write this bio.
1345                  * First we need to fix up bv_offset, bv_len and
1346                  * bi_vecs, as the read request might have corrupted these.
1347                  */
1348                 tbio->bi_vcnt = vcnt;
1349                 tbio->bi_size = r10_bio->sectors << 9;
1350                 tbio->bi_idx = 0;
1351                 tbio->bi_phys_segments = 0;
1352                 tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
1353                 tbio->bi_flags |= 1 << BIO_UPTODATE;
1354                 tbio->bi_next = NULL;
1355                 tbio->bi_rw = WRITE;
1356                 tbio->bi_private = r10_bio;
1357                 tbio->bi_sector = r10_bio->devs[i].addr;
1358
1359                 for (j=0; j < vcnt ; j++) {
1360                         tbio->bi_io_vec[j].bv_offset = 0;
1361                         tbio->bi_io_vec[j].bv_len = PAGE_SIZE;
1362
1363                         memcpy(page_address(tbio->bi_io_vec[j].bv_page),
1364                                page_address(fbio->bi_io_vec[j].bv_page),
1365                                PAGE_SIZE);
1366                 }
1367                 tbio->bi_end_io = end_sync_write;
1368
1369                 d = r10_bio->devs[i].devnum;
1370                 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1371                 atomic_inc(&r10_bio->remaining);
1372                 md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
1373
1374                 tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
1375                 tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
1376                 generic_make_request(tbio);
1377         }
1378
1379 done:
1380         if (atomic_dec_and_test(&r10_bio->remaining)) {
1381                 md_done_sync(mddev, r10_bio->sectors, 1);
1382                 put_buf(r10_bio);
1383         }
1384 }
1385
1386 /*
1387  * Now for the recovery code.
1388  * Recovery happens across physical sectors.
1389  * We recover all non-in_sync drives by finding the virtual address of
1390  * each, and then choose a working drive that also has that virt address.
1391  * There is a separate r10_bio for each non-in_sync drive.
1392  * Only the first two slots are in use: the first for reading,
1393  * the second for writing.
1394  *
1395  */
1396
1397 static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
1398 {
1399         conf_t *conf = mddev_to_conf(mddev);
1400         int i, d;
1401         struct bio *bio, *wbio;
1402
1403
1404         /* move the pages across to the second bio
1405          * and submit the write request
1406          */
1407         bio = r10_bio->devs[0].bio;
1408         wbio = r10_bio->devs[1].bio;
1409         for (i=0; i < wbio->bi_vcnt; i++) {
1410                 struct page *p = bio->bi_io_vec[i].bv_page;
1411                 bio->bi_io_vec[i].bv_page = wbio->bi_io_vec[i].bv_page;
1412                 wbio->bi_io_vec[i].bv_page = p;
1413         }
1414         d = r10_bio->devs[1].devnum;
1415
1416         atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1417         md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
1418         if (test_bit(R10BIO_Uptodate, &r10_bio->state))
1419                 generic_make_request(wbio);
1420         else
1421                 bio_endio(wbio, -EIO);
1422 }
1423
1424
1425 /*
1426  * This is a kernel thread which:
1427  *
1428  *      1.      Retries failed read operations on working mirrors.
1429  *      2.      Updates the raid superblock when problems are encountered.
1430  *      3.      Performs writes following reads for array synchronising.
1431  */
1432
1433 static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
1434 {
1435         int sect = 0; /* Offset from r10_bio->sector */
1436         int sectors = r10_bio->sectors;
1437         mdk_rdev_t*rdev;
1438         while(sectors) {
1439                 int s = sectors;
1440                 int sl = r10_bio->read_slot;
1441                 int success = 0;
1442                 int start;
1443
1444                 if (s > (PAGE_SIZE>>9))
1445                         s = PAGE_SIZE >> 9;
1446
1447                 rcu_read_lock();
1448                 do {
1449                         int d = r10_bio->devs[sl].devnum;
1450                         rdev = rcu_dereference(conf->mirrors[d].rdev);
1451                         if (rdev &&
1452                             test_bit(In_sync, &rdev->flags)) {
1453                                 atomic_inc(&rdev->nr_pending);
1454                                 rcu_read_unlock();
1455                                 success = sync_page_io(rdev->bdev,
1456                                                        r10_bio->devs[sl].addr +
1457                                                        sect + rdev->data_offset,
1458                                                        s<<9,
1459                                                        conf->tmppage, READ);
1460                                 rdev_dec_pending(rdev, mddev);
1461                                 rcu_read_lock();
1462                                 if (success)
1463                                         break;
1464                         }
1465                         sl++;
1466                         if (sl == conf->copies)
1467                                 sl = 0;
1468                 } while (!success && sl != r10_bio->read_slot);
1469                 rcu_read_unlock();
1470
1471                 if (!success) {
1472                         /* Cannot read from anywhere -- bye bye array */
1473                         int dn = r10_bio->devs[r10_bio->read_slot].devnum;
1474                         md_error(mddev, conf->mirrors[dn].rdev);
1475                         break;
1476                 }
1477
1478                 start = sl;
1479                 /* write it back and re-read */
1480                 rcu_read_lock();
1481                 while (sl != r10_bio->read_slot) {
1482                         int d;
1483                         if (sl==0)
1484                                 sl = conf->copies;
1485                         sl--;
1486                         d = r10_bio->devs[sl].devnum;
1487                         rdev = rcu_dereference(conf->mirrors[d].rdev);
1488                         if (rdev &&
1489                             test_bit(In_sync, &rdev->flags)) {
1490                                 atomic_inc(&rdev->nr_pending);
1491                                 rcu_read_unlock();
1492                                 atomic_add(s, &rdev->corrected_errors);
1493                                 if (sync_page_io(rdev->bdev,
1494                                                  r10_bio->devs[sl].addr +
1495                                                  sect + rdev->data_offset,
1496                                                  s<<9, conf->tmppage, WRITE)
1497                                     == 0)
1498                                         /* Well, this device is dead */
1499                                         md_error(mddev, rdev);
1500                                 rdev_dec_pending(rdev, mddev);
1501                                 rcu_read_lock();
1502                         }
1503                 }
1504                 sl = start;
1505                 while (sl != r10_bio->read_slot) {
1506                         int d;
1507                         if (sl==0)
1508                                 sl = conf->copies;
1509                         sl--;
1510                         d = r10_bio->devs[sl].devnum;
1511                         rdev = rcu_dereference(conf->mirrors[d].rdev);
1512                         if (rdev &&
1513                             test_bit(In_sync, &rdev->flags)) {
1514                                 char b[BDEVNAME_SIZE];
1515                                 atomic_inc(&rdev->nr_pending);
1516                                 rcu_read_unlock();
1517                                 if (sync_page_io(rdev->bdev,
1518                                                  r10_bio->devs[sl].addr +
1519                                                  sect + rdev->data_offset,
1520                                                  s<<9, conf->tmppage, READ) == 0)
1521                                         /* Well, this device is dead */
1522                                         md_error(mddev, rdev);
1523                                 else
1524                                         printk(KERN_INFO
1525                                                "raid10:%s: read error corrected"
1526                                                " (%d sectors at %llu on %s)\n",
1527                                                mdname(mddev), s,
1528                                                (unsigned long long)(sect+
1529                                                     rdev->data_offset),
1530                                                bdevname(rdev->bdev, b));
1531
1532                                 rdev_dec_pending(rdev, mddev);
1533                                 rcu_read_lock();
1534                         }
1535                 }
1536                 rcu_read_unlock();
1537
1538                 sectors -= s;
1539                 sect += s;
1540         }
1541 }
1542
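/*
 * The raid10 management thread described above: flush any pending
 * writes, then service the retry list - resync writes, recovery
 * writes, and failed reads that need fixing and redirecting to
 * another mirror.
 */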
1543 static void raid10d(mddev_t *mddev)
1544 {
1545         r10bio_t *r10_bio;
1546         struct bio *bio;
1547         unsigned long flags;
1548         conf_t *conf = mddev_to_conf(mddev);
1549         struct list_head *head = &conf->retry_list;
1550         int unplug=0;
1551         mdk_rdev_t *rdev;
1552
1553         md_check_recovery(mddev);
1554
1555         for (;;) {
1556                 char b[BDEVNAME_SIZE];
1557
1558                 unplug += flush_pending_writes(conf);
1559
1560                 spin_lock_irqsave(&conf->device_lock, flags);
1561                 if (list_empty(head)) {
1562                         spin_unlock_irqrestore(&conf->device_lock, flags);
1563                         break;
1564                 }
1565                 r10_bio = list_entry(head->prev, r10bio_t, retry_list);
1566                 list_del(head->prev);
1567                 conf->nr_queued--;
1568                 spin_unlock_irqrestore(&conf->device_lock, flags);
1569
1570                 mddev = r10_bio->mddev;
1571                 conf = mddev_to_conf(mddev);
1572                 if (test_bit(R10BIO_IsSync, &r10_bio->state)) {
1573                         sync_request_write(mddev, r10_bio);
1574                         unplug = 1;
1575                 } else  if (test_bit(R10BIO_IsRecover, &r10_bio->state)) {
1576                         recovery_request_write(mddev, r10_bio);
1577                         unplug = 1;
1578                 } else {
1579                         int mirror;
1580                         /* we got a read error. Maybe the drive is bad.  Maybe just
1581                          * the block, and we can fix it.
1582                          * We freeze all other IO, and try reading the block from
1583                          * other devices.  When we find one, we re-write
1584                          * and check whether that fixes the read error.
1585                          * This is all done synchronously while the array is
1586                          * frozen.
1587                          */
1588                         if (mddev->ro == 0) {
1589                                 freeze_array(conf);
1590                                 fix_read_error(conf, mddev, r10_bio);
1591                                 unfreeze_array(conf);
1592                         }
1593
1594                         bio = r10_bio->devs[r10_bio->read_slot].bio;
1595                         r10_bio->devs[r10_bio->read_slot].bio =
1596                                 mddev->ro ? IO_BLOCKED : NULL;
1597                         mirror = read_balance(conf, r10_bio);
1598                         if (mirror == -1) {
1599                                 printk(KERN_ALERT "raid10: %s: unrecoverable I/O"
1600                                        " read error for block %llu\n",
1601                                        bdevname(bio->bi_bdev,b),
1602                                        (unsigned long long)r10_bio->sector);
1603                                 raid_end_bio_io(r10_bio);
1604                                 bio_put(bio);
1605                         } else {
1606                                 const int do_sync = bio_sync(r10_bio->master_bio);
1607                                 bio_put(bio);
1608                                 rdev = conf->mirrors[mirror].rdev;
1609                                 if (printk_ratelimit())
1610                                         printk(KERN_ERR "raid10: %s: redirecting sector %llu to"
1611                                                " another mirror\n",
1612                                                bdevname(rdev->bdev,b),
1613                                                (unsigned long long)r10_bio->sector);
1614                                 bio = bio_clone(r10_bio->master_bio, GFP_NOIO);
1615                                 r10_bio->devs[r10_bio->read_slot].bio = bio;
1616                                 bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
1617                                         + rdev->data_offset;
1618                                 bio->bi_bdev = rdev->bdev;
1619                                 bio->bi_rw = READ | do_sync;
1620                                 bio->bi_private = r10_bio;
1621                                 bio->bi_end_io = raid10_end_read_request;
1622                                 unplug = 1;
1623                                 generic_make_request(bio);
1624                         }
1625                 }
1626         }
1627         if (unplug)
1628                 unplug_slaves(mddev);
1629 }
1630
1631
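/* Allocate the pool of resync/recovery buffers used by sync_request(). */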
1632 static int init_resync(conf_t *conf)
1633 {
1634         int buffs;
1635
1636         buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
1637         BUG_ON(conf->r10buf_pool);
1638         conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
1639         if (!conf->r10buf_pool)
1640                 return -ENOMEM;
1641         conf->next_resync = 0;
1642         return 0;
1643 }
1644
1645 /*
1646  * perform a "sync" on one "block"
1647  *
1648  * We need to make sure that no normal I/O requests - particularly write
1649  * requests - conflict with active sync requests.
1650  *
1651  * This is achieved by tracking pending requests and a 'barrier' concept
1652  * that can be installed to exclude normal IO requests.
1653  *
1654  * Resync and recovery are handled very differently.
1655  * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
1656  *
1657  * For resync, we iterate over virtual addresses, read all copies,
1658  * and update if there are differences.  If only one copy is live,
1659  * skip it.
1660  * For recovery, we iterate over physical addresses, read a good
1661  * value for each non-in_sync drive, and over-write.
1662  *
1663  * So, for recovery we may have several outstanding complex requests for a
1664  * given address, one for each out-of-sync device.  We model this by allocating
1665  * a number of r10_bio structures, one for each out-of-sync device.
1666  * As we set up these structures, we collect all bios together into a list
1667  * which we then process collectively to add pages, and then process again
1668  * to pass to generic_make_request.
1669  *
1670  * The r10_bio structures are linked using a borrowed master_bio pointer.
1671  * This link is counted in ->remaining.  When the r10_bio that points to NULL
1672  * has its remaining count decremented to 0, the whole complex operation
1673  * is complete.
1674  *
1675  */
1676
1677 static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
1678 {
1679         conf_t *conf = mddev_to_conf(mddev);
1680         r10bio_t *r10_bio;
1681         struct bio *biolist = NULL, *bio;
1682         sector_t max_sector, nr_sectors;
1683         int disk;
1684         int i;
1685         int max_sync;
1686         int sync_blocks;
1687
1688         sector_t sectors_skipped = 0;
1689         int chunks_skipped = 0;
1690
1691         if (!conf->r10buf_pool)
1692                 if (init_resync(conf))
1693                         return 0;
1694
1695  skipped:
1696         max_sector = mddev->size << 1;
1697         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
1698                 max_sector = mddev->resync_max_sectors;
1699         if (sector_nr >= max_sector) {
1700                 /* If we aborted, we need to abort the
1701                  * sync on the 'current' bitmap chunks (there can
1702                  * be several when recovering multiple devices),
1703                  * as we may have started syncing them but not finished.
1704                  * We can find the current address in
1705                  * mddev->curr_resync, but for recovery,
1706                  * we need to convert that to several
1707                  * virtual addresses.
1708                  */
1709                 if (mddev->curr_resync < max_sector) { /* aborted */
1710                         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
1711                                 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
1712                                                 &sync_blocks, 1);
1713                         else for (i=0; i<conf->raid_disks; i++) {
1714                                 sector_t sect =
1715                                         raid10_find_virt(conf, mddev->curr_resync, i);
1716                                 bitmap_end_sync(mddev->bitmap, sect,
1717                                                 &sync_blocks, 1);
1718                         }
1719                 } else /* completed sync */
1720                         conf->fullsync = 0;
1721
1722                 bitmap_close_sync(mddev->bitmap);
1723                 close_sync(conf);
1724                 *skipped = 1;
1725                 return sectors_skipped;
1726         }
1727         if (chunks_skipped >= conf->raid_disks) {
1728                 /* if there has been nothing to do on any drive,
1729                  * then there is nothing to do at all..
1730                  */
1731                 *skipped = 1;
1732                 return (max_sector - sector_nr) + sectors_skipped;
1733         }
1734
1735         if (max_sector > mddev->resync_max)
1736                 max_sector = mddev->resync_max; /* Don't do IO beyond here */
1737
1738         /* make sure the whole request will fit in a chunk - if chunks
1739          * are meaningful
1740          */
1741         if (conf->near_copies < conf->raid_disks &&
1742             max_sector > (sector_nr | conf->chunk_mask))
1743                 max_sector = (sector_nr | conf->chunk_mask) + 1;
1744         /*
1745          * If there is non-resync activity waiting for us then
1746          * put in a delay to throttle resync.
1747          */
1748         if (!go_faster && conf->nr_waiting)
1749                 msleep_interruptible(1000);
1750
1751         bitmap_cond_end_sync(mddev->bitmap, sector_nr);
1752
1753         /* Again, very different code for resync and recovery.
1754          * Both must result in an r10bio with a list of bios that
1755          * have bi_end_io, bi_sector, bi_bdev set,
1756          * and bi_private set to the r10bio.
1757          * For recovery, we may actually create several r10bios
1758          * with 2 bios in each, that correspond to the bios in the main one.
1759          * In this case, the subordinate r10bios link back through a
1760          * borrowed master_bio pointer, and the counter in the master
1761          * includes a ref from each subordinate.
1762          */
1763         /* First, we decide what to do and set ->bi_end_io:
1764          * to end_sync_read if we want to read, and
1765          * to end_sync_write if we will want to write.
1766          */
1767
1768         max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
1769         if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
1770                 /* recovery... the complicated one */
1771                 int i, j, k;
1772                 r10_bio = NULL;
1773
1774                 for (i=0 ; i<conf->raid_disks; i++)
1775                         if (conf->mirrors[i].rdev &&
1776                             !test_bit(In_sync, &conf->mirrors[i].rdev->flags)) {
1777                                 int still_degraded = 0;
1778                                 /* want to reconstruct this device */
1779                                 r10bio_t *rb2 = r10_bio;
1780                                 sector_t sect = raid10_find_virt(conf, sector_nr, i);
1781                                 int must_sync;
1782                                 /* Unless we are doing a full sync, we only need
1783                                  * to recover the block if it is set in the bitmap
1784                                  */
1785                                 must_sync = bitmap_start_sync(mddev->bitmap, sect,
1786                                                               &sync_blocks, 1);
1787                                 if (sync_blocks < max_sync)
1788                                         max_sync = sync_blocks;
1789                                 if (!must_sync &&
1790                                     !conf->fullsync) {
1791                                         /* yep, skip the sync_blocks here, but don't assume
1792                                          * that there will never be anything to do here
1793                                          */
1794                                         chunks_skipped = -1;
1795                                         continue;
1796                                 }
1797
1798                                 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
1799                                 raise_barrier(conf, rb2 != NULL);
1800                                 atomic_set(&r10_bio->remaining, 0);
1801
1802                                 r10_bio->master_bio = (struct bio*)rb2;
1803                                 if (rb2)
1804                                         atomic_inc(&rb2->remaining);
1805                                 r10_bio->mddev = mddev;
1806                                 set_bit(R10BIO_IsRecover, &r10_bio->state);
1807                                 r10_bio->sector = sect;
1808
1809                                 raid10_find_phys(conf, r10_bio);
1810                                 /* Need to check if this section will still be
1811                                  * degraded
1812                                  */
1813                                 for (j=0; j<conf->copies;j++) {
1814                                         int d = r10_bio->devs[j].devnum;
1815                                         if (conf->mirrors[d].rdev == NULL ||
1816                                             test_bit(Faulty, &conf->mirrors[d].rdev->flags)) {
1817                                                 still_degraded = 1;
1818                                                 break;
1819                                         }
1820                                 }
1821                                 must_sync = bitmap_start_sync(mddev->bitmap, sect,
1822                                                               &sync_blocks, still_degraded);
1823
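                                /* pick any In_sync copy of this block as the
                                 * read source (slot 0), and set up slot 1 to
                                 * write that data to device 'i' being rebuilt.
                                 */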
1824                                 for (j=0; j<conf->copies;j++) {
1825                                         int d = r10_bio->devs[j].devnum;
1826                                         if (conf->mirrors[d].rdev &&
1827                                             test_bit(In_sync, &conf->mirrors[d].rdev->flags)) {
1828                                                 /* This is where we read from */
1829                                                 bio = r10_bio->devs[0].bio;
1830                                                 bio->bi_next = biolist;
1831                                                 biolist = bio;
1832                                                 bio->bi_private = r10_bio;
1833                                                 bio->bi_end_io = end_sync_read;
1834                                                 bio->bi_rw = READ;
1835                                                 bio->bi_sector = r10_bio->devs[j].addr +
1836                                                         conf->mirrors[d].rdev->data_offset;
1837                                                 bio->bi_bdev = conf->mirrors[d].rdev->bdev;
1838                                                 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1839                                                 atomic_inc(&r10_bio->remaining);
1840                                                 /* and we write to 'i' */
1841
1842                                                 for (k=0; k<conf->copies; k++)
1843                                                         if (r10_bio->devs[k].devnum == i)
1844                                                                 break;
1845                                                 BUG_ON(k == conf->copies);
1846                                                 bio = r10_bio->devs[1].bio;
1847                                                 bio->bi_next = biolist;
1848                                                 biolist = bio;
1849                                                 bio->bi_private = r10_bio;
1850                                                 bio->bi_end_io = end_sync_write;
1851                                                 bio->bi_rw = WRITE;
1852                                                 bio->bi_sector = r10_bio->devs[k].addr +
1853                                                         conf->mirrors[i].rdev->data_offset;
1854                                                 bio->bi_bdev = conf->mirrors[i].rdev->bdev;
1855
1856                                                 r10_bio->devs[0].devnum = d;
1857                                                 r10_bio->devs[1].devnum = i;
1858
1859                                                 break;
1860                                         }
1861                                 }
1862                                 if (j == conf->copies) {
1863                                         /* Cannot recover, so abort the recovery */
1864                                         put_buf(r10_bio);
1865                                         if (rb2)
1866                                                 atomic_dec(&rb2->remaining);
1867                                         r10_bio = rb2;
1868                                         if (!test_and_set_bit(MD_RECOVERY_INTR,
1869                                                               &mddev->recovery))
1870                                                 printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n",
1871                                                        mdname(mddev));
1872                                         break;
1873                                 }
1874                         }
1875                 if (biolist == NULL) {
1876                         while (r10_bio) {
1877                                 r10bio_t *rb2 = r10_bio;
1878                                 r10_bio = (r10bio_t*) rb2->master_bio;
1879                                 rb2->master_bio = NULL;
1880                                 put_buf(rb2);
1881                         }
1882                         goto giveup;
1883                 }
1884         } else {
1885                 /* resync. Schedule a read for every block at this virt offset */
1886                 int count = 0;
1887
1888                 if (!bitmap_start_sync(mddev->bitmap, sector_nr,
1889                                        &sync_blocks, mddev->degraded) &&
1890                     !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
1891                         /* We can skip this block */
1892                         *skipped = 1;
1893                         return sync_blocks + sectors_skipped;
1894                 }
1895                 if (sync_blocks < max_sync)
1896                         max_sync = sync_blocks;
1897                 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
1898
1899                 r10_bio->mddev = mddev;
1900                 atomic_set(&r10_bio->remaining, 0);
1901                 raise_barrier(conf, 0);
1902                 conf->next_resync = sector_nr;
1903
1904                 r10_bio->master_bio = NULL;
1905                 r10_bio->sector = sector_nr;
1906                 set_bit(R10BIO_IsSync, &r10_bio->state);
1907                 raid10_find_phys(conf, r10_bio);
1908                 r10_bio->sectors = (sector_nr | conf->chunk_mask) - sector_nr +1;
1909
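                /* queue a read from every usable copy of this block;
                 * the reads complete through end_sync_read so that
                 * sync_request_write() can compare the copies and
                 * rewrite any that differ.
                 */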
1910                 for (i=0; i<conf->copies; i++) {
1911                         int d = r10_bio->devs[i].devnum;
1912                         bio = r10_bio->devs[i].bio;
1913                         bio->bi_end_io = NULL;
1914                         clear_bit(BIO_UPTODATE, &bio->bi_flags);
1915                         if (conf->mirrors[d].rdev == NULL ||
1916                             test_bit(Faulty, &conf->mirrors[d].rdev->flags))
1917                                 continue;
1918                         atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1919                         atomic_inc(&r10_bio->remaining);
1920                         bio->bi_next = biolist;
1921                         biolist = bio;
1922                         bio->bi_private = r10_bio;
1923                         bio->bi_end_io = end_sync_read;
1924                         bio->bi_rw = READ;
1925                         bio->bi_sector = r10_bio->devs[i].addr +
1926                                 conf->mirrors[d].rdev->data_offset;
1927                         bio->bi_bdev = conf->mirrors[d].rdev->bdev;
1928                         count++;
1929                 }
1930
1931                 if (count < 2) {
1932                         for (i=0; i<conf->copies; i++) {
1933                                 int d = r10_bio->devs[i].devnum;
1934                                 if (r10_bio->devs[i].bio->bi_end_io)
1935                                         rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1936                         }
1937                         put_buf(r10_bio);
1938                         biolist = NULL;
1939                         goto giveup;
1940                 }
1941         }
1942
1943         for (bio = biolist; bio ; bio=bio->bi_next) {
1944
1945                 bio->bi_flags &= ~(BIO_POOL_MASK - 1);
1946                 if (bio->bi_end_io)
1947                         bio->bi_flags |= 1 << BIO_UPTODATE;
1948                 bio->bi_vcnt = 0;
1949                 bio->bi_idx = 0;
1950                 bio->bi_phys_segments = 0;
1951                 bio->bi_size = 0;
1952         }
1953
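        /* Attach the preallocated resync pages to every queued bio, one
         * page at a time.  If any bio cannot take another page, back that
         * page out of the bios already extended and submit what we have.
         */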
1954         nr_sectors = 0;
1955         if (sector_nr + max_sync < max_sector)
1956                 max_sector = sector_nr + max_sync;
1957         do {
1958                 struct page *page;
1959                 int len = PAGE_SIZE;
1960                 disk = 0;
1961                 if (sector_nr + (len>>9) > max_sector)
1962                         len = (max_sector - sector_nr) << 9;
1963                 if (len == 0)
1964                         break;
1965                 for (bio= biolist ; bio ; bio=bio->bi_next) {
1966                         page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
1967                         if (bio_add_page(bio, page, len, 0) == 0) {
1968                                 /* stop here */
1969                                 struct bio *bio2;
1970                                 bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
1971                                 for (bio2 = biolist; bio2 && bio2 != bio; bio2 = bio2->bi_next) {
1972                                         /* remove last page from this bio */
1973                                         bio2->bi_vcnt--;
1974                                         bio2->bi_size -= len;
1975                                         bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
1976                                 }
1977                                 goto bio_full;
1978                         }
1979                         disk = i;
1980                 }
1981                 nr_sectors += len>>9;
1982                 sector_nr += len>>9;
1983         } while (biolist->bi_vcnt < RESYNC_PAGES);
1984  bio_full:
1985         r10_bio->sectors = nr_sectors;
1986
1987         while (biolist) {
1988                 bio = biolist;
1989                 biolist = biolist->bi_next;
1990
1991                 bio->bi_next = NULL;
1992                 r10_bio = bio->bi_private;
1993                 r10_bio->sectors = nr_sectors;
1994
1995                 if (bio->bi_end_io == end_sync_read) {
1996                         md_sync_acct(bio->bi_bdev, nr_sectors);
1997                         generic_make_request(bio);
1998                 }
1999         }
2000
2001         if (sectors_skipped)
2002                 /* pretend they weren't skipped; it makes
2003                  * no important difference in this case
2004                  */
2005                 md_done_sync(mddev, sectors_skipped, 1);
2006
2007         return sectors_skipped + nr_sectors;
2008  giveup:
2009         /* There is nowhere to write, so all non-sync
2010          * drives must have failed; try the next chunk...
2011          */
2012         {
2013         sector_t sec = max_sector - sector_nr;
2014         sectors_skipped += sec;
2015         chunks_skipped ++;
2016         sector_nr = max_sector;
2017         goto skipped;
2018         }
2019 }
2020
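/*
 * Assemble the array: validate the chunk size and layout, allocate the
 * conf structure, mirror table and r10bio mempool, import the member
 * devices, and start the raid10d thread.
 */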
2021 static int run(mddev_t *mddev)
2022 {
2023         conf_t *conf;
2024         int i, disk_idx;
2025         mirror_info_t *disk;
2026         mdk_rdev_t *rdev;
2027         struct list_head *tmp;
2028         int nc, fc, fo;
2029         sector_t stride, size;
2030
2031         if (mddev->chunk_size == 0) {
2032                 printk(KERN_ERR "md/raid10: non-zero chunk size required.\n");
2033                 return -EINVAL;
2034         }
2035
2036         nc = mddev->layout & 255;
2037         fc = (mddev->layout >> 8) & 255;
2038         fo = mddev->layout & (1<<16);
2039         if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
2040             (mddev->layout >> 17)) {
2041                 printk(KERN_ERR "raid10: %s: unsupported raid10 layout: 0x%8x\n",
2042                        mdname(mddev), mddev->layout);
2043                 goto out;
2044         }
2045         /*
2046          * copy the already verified devices into our private RAID10
2047          * bookkeeping area. [whatever we allocate in run()
2048          * should be freed in stop()]
2049          */
2050         conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
2051         mddev->private = conf;
2052         if (!conf) {
2053                 printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
2054                         mdname(mddev));
2055                 goto out;
2056         }
2057         conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
2058                                  GFP_KERNEL);
2059         if (!conf->mirrors) {
2060                 printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
2061                        mdname(mddev));
2062                 goto out_free_conf;
2063         }
2064
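        /* single scratch page used by fix_read_error() for its
         * synchronous page-sized reads and writes */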
2065         conf->tmppage = alloc_page(GFP_KERNEL);
2066         if (!conf->tmppage)
2067                 goto out_free_conf;
2068
2069         conf->mddev = mddev;
2070         conf->raid_disks = mddev->raid_disks;
2071         conf->near_copies = nc;
2072         conf->far_copies = fc;
2073         conf->copies = nc*fc;
2074         conf->far_offset = fo;
2075         conf->chunk_mask = (sector_t)(mddev->chunk_size>>9)-1;
2076         conf->chunk_shift = ffz(~mddev->chunk_size) - 9;
2077         size = mddev->size >> (conf->chunk_shift-1);
2078         sector_div(size, fc);
2079         size = size * conf->raid_disks;
2080         sector_div(size, nc);
2081         /* 'size' is now the number of chunks in the array */
2082         /* calculate "used chunks per device" in 'stride' */
2083         stride = size * conf->copies;
2084
2085         /* We need to round up when dividing by raid_disks to
2086          * get the stride size.
2087          */
2088         stride += conf->raid_disks - 1;
2089         sector_div(stride, conf->raid_disks);
2090         mddev->size = stride  << (conf->chunk_shift-1);
2091
2092         if (fo)
2093                 stride = 1;
2094         else
2095                 sector_div(stride, fc);
2096         conf->stride = stride << conf->chunk_shift;
2097
2098         conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
2099                                                 r10bio_pool_free, conf);
2100         if (!conf->r10bio_pool) {
2101                 printk(KERN_ERR "raid10: couldn't allocate memory for %s\n",
2102                         mdname(mddev));
2103                 goto out_free_conf;
2104         }
2105
2106         spin_lock_init(&conf->device_lock);
2107         mddev->queue->queue_lock = &conf->device_lock;
2108
2109         rdev_for_each(rdev, tmp, mddev) {
2110                 disk_idx = rdev->raid_disk;
2111                 if (disk_idx >= mddev->raid_disks
2112                     || disk_idx < 0)
2113                         continue;
2114                 disk = conf->mirrors + disk_idx;
2115
2116                 disk->rdev = rdev;
2117
2118                 blk_queue_stack_limits(mddev->queue,
2119                                        rdev->bdev->bd_disk->queue);
2120                 /* as we don't honour merge_bvec_fn, we must never risk
2121                  * violating it, so limit ->max_sectors to one PAGE, as
2122                  * a one page request is never in violation.
2123                  */
2124                 if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
2125                     mddev->queue->max_sectors > (PAGE_SIZE>>9))
2126                         mddev->queue->max_sectors = (PAGE_SIZE>>9);
2127
2128                 disk->head_position = 0;
2129         }
2130         INIT_LIST_HEAD(&conf->retry_list);
2131
2132         spin_lock_init(&conf->resync_lock);
2133         init_waitqueue_head(&conf->wait_barrier);
2134
2135         /* need to check that every block has at least one working mirror */
2136         if (!enough(conf)) {
2137                 printk(KERN_ERR "raid10: not enough operational mirrors for %s\n",
2138                        mdname(mddev));
2139                 goto out_free_conf;
2140         }
2141
2142         mddev->degraded = 0;
2143         for (i = 0; i < conf->raid_disks; i++) {
2144
2145                 disk = conf->mirrors + i;
2146
2147                 if (!disk->rdev ||
2148                     !test_bit(In_sync, &disk->rdev->flags)) {
2149                         disk->head_position = 0;
2150                         mddev->degraded++;
2151                         if (disk->rdev)
2152                                 conf->fullsync = 1;
2153                 }
2154         }
2155
2156
2157         mddev->thread = md_register_thread(raid10d, mddev, "%s_raid10");
2158         if (!mddev->thread) {
2159                 printk(KERN_ERR
2160                        "raid10: couldn't allocate thread for %s\n",
2161                        mdname(mddev));
2162                 goto out_free_conf;
2163         }
2164
2165         printk(KERN_INFO
2166                 "raid10: raid set %s active with %d out of %d devices\n",
2167                 mdname(mddev), mddev->raid_disks - mddev->degraded,
2168                 mddev->raid_disks);
2169         /*
2170          * Ok, everything is just fine now
2171          */
2172         mddev->array_sectors = size << conf->chunk_shift;
2173         mddev->resync_max_sectors = size << conf->chunk_shift;
2174
2175         mddev->queue->unplug_fn = raid10_unplug;
2176         mddev->queue->backing_dev_info.congested_fn = raid10_congested;
2177         mddev->queue->backing_dev_info.congested_data = mddev;
2178
2179         /* Calculate max read-ahead size.
2180          * We need to readahead at least twice a whole stripe....
2181          * maybe...
2182          */
2183         {
2184                 int stripe = conf->raid_disks * (mddev->chunk_size / PAGE_SIZE);
2185                 stripe /= conf->near_copies;
2186                 if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
2187                         mddev->queue->backing_dev_info.ra_pages = 2* stripe;
2188         }
2189
2190         if (conf->near_copies < mddev->raid_disks)
2191                 blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
2192         return 0;
2193
2194 out_free_conf:
2195         if (conf->r10bio_pool)
2196                 mempool_destroy(conf->r10bio_pool);
2197         safe_put_page(conf->tmppage);
2198         kfree(conf->mirrors);
2199         kfree(conf);
2200         mddev->private = NULL;
2201 out:
2202         return -EIO;
2203 }
2204
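/* Stop the array: unregister the raid10d thread and free the per-array
 * conf structures. */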
2205 static int stop(mddev_t *mddev)
2206 {
2207         conf_t *conf = mddev_to_conf(mddev);
2208
2209         md_unregister_thread(mddev->thread);
2210         mddev->thread = NULL;
2211         blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
2212         if (conf->r10bio_pool)
2213                 mempool_destroy(conf->r10bio_pool);
2214         kfree(conf->mirrors);
2215         kfree(conf);
2216         mddev->private = NULL;
2217         return 0;
2218 }
2219
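/*
 * Quiesce the array: state 1 raises the resync barrier to block normal
 * IO, state 0 lowers it again.  The md thread's wakeup interval is also
 * reset according to whether a bitmap is present.
 */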
2220 static void raid10_quiesce(mddev_t *mddev, int state)
2221 {
2222         conf_t *conf = mddev_to_conf(mddev);
2223
2224         switch(state) {
2225         case 1:
2226                 raise_barrier(conf, 0);
2227                 break;
2228         case 0:
2229                 lower_barrier(conf);
2230                 break;
2231         }
2232         if (mddev->thread) {
2233                 if (mddev->bitmap)
2234                         mddev->thread->timeout = mddev->bitmap->daemon_sleep * HZ;
2235                 else
2236                         mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
2237                 md_wakeup_thread(mddev->thread);
2238         }
2239 }
2240
2241 static struct mdk_personality raid10_personality =
2242 {
2243         .name           = "raid10",
2244         .level          = 10,
2245         .owner          = THIS_MODULE,
2246         .make_request   = make_request,
2247         .run            = run,
2248         .stop           = stop,
2249         .status         = status,
2250         .error_handler  = error,
2251         .hot_add_disk   = raid10_add_disk,
2252         .hot_remove_disk= raid10_remove_disk,
2253         .spare_active   = raid10_spare_active,
2254         .sync_request   = sync_request,
2255         .quiesce        = raid10_quiesce,
2256 };
2257
2258 static int __init raid_init(void)
2259 {
2260         return register_md_personality(&raid10_personality);
2261 }
2262
2263 static void raid_exit(void)
2264 {
2265         unregister_md_personality(&raid10_personality);
2266 }
2267
2268 module_init(raid_init);
2269 module_exit(raid_exit);
2270 MODULE_LICENSE("GPL");
2271 MODULE_ALIAS("md-personality-9"); /* RAID10 */
2272 MODULE_ALIAS("md-raid10");
2273 MODULE_ALIAS("md-level-10");