[PATCH] md: convert md to use kzalloc throughout
[linux-2.6] / drivers / md / multipath.c
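This patch converts md's open-coded kmalloc()-then-memset() allocations to kzalloc(), which returns memory that is already zeroed. A minimal sketch of the conversion pattern, for illustration only (not a hunk from the actual diff; mpb is simply the multipath_bh pointer used in mp_pool_alloc() below):

    mpb = kmalloc(sizeof(*mpb), gfp_flags);      /* before: allocate ...          */
    if (mpb)
            memset(mpb, 0, sizeof(*mpb));        /* ... then zero it by hand      */

    mpb = kzalloc(sizeof(*mpb), gfp_flags);      /* after: one call, zeroed memory */

In this file the converted call sites are mp_pool_alloc() and the two allocations in multipath_run().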
1 /*
2  * multipath.c : Multiple Devices driver for Linux
3  *
4  * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
5  *
6  * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
7  *
8  * MULTIPATH management functions.
9  *
10  * derived from raid1.c.
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2, or (at your option)
15  * any later version.
16  *
17  * You should have received a copy of the GNU General Public License
18  * (for example /usr/src/linux/COPYING); if not, write to the Free
19  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20  */
21
22 #include <linux/module.h>
23 #include <linux/slab.h>
24 #include <linux/spinlock.h>
25 #include <linux/raid/multipath.h>
26 #include <linux/buffer_head.h>
27 #include <asm/atomic.h>
28
29 #define MAJOR_NR MD_MAJOR
30 #define MD_DRIVER
31 #define MD_PERSONALITY
32
33 #define MAX_WORK_PER_DISK 128
34
35 #define NR_RESERVED_BUFS        32
36
37
38 static mdk_personality_t multipath_personality;
39
40
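/*
 * mp_pool_alloc()/mp_pool_free() are the element allocation callbacks handed
 * to mempool_create() in multipath_run(); the pool keeps NR_RESERVED_BUFS
 * struct multipath_bh elements in reserve so that request mapping can still
 * make progress under memory pressure.
 */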
41 static void *mp_pool_alloc(gfp_t gfp_flags, void *data)
42 {
43         struct multipath_bh *mpb;
44         mpb = kzalloc(sizeof(*mpb), gfp_flags);
45         return mpb;
46 }
47
48 static void mp_pool_free(void *mpb, void *data)
49 {
50         kfree(mpb);
51 }
52
53 static int multipath_map (multipath_conf_t *conf)
54 {
55         int i, disks = conf->raid_disks;
56
57         /*
58          * Later we will do read balancing on the read side;
59          * for now we use the first available disk.
60          */
61
62         rcu_read_lock();
63         for (i = 0; i < disks; i++) {
64                 mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
65                 if (rdev && test_bit(In_sync, &rdev->flags)) {
66                         atomic_inc(&rdev->nr_pending);
67                         rcu_read_unlock();
68                         return i;
69                 }
70         }
71         rcu_read_unlock();
72
73         printk(KERN_ERR "multipath_map(): no more operational IO paths?\n");
74         return (-1);
75 }
76
77 static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
78 {
79         unsigned long flags;
80         mddev_t *mddev = mp_bh->mddev;
81         multipath_conf_t *conf = mddev_to_conf(mddev);
82
83         spin_lock_irqsave(&conf->device_lock, flags);
84         list_add(&mp_bh->retry_list, &conf->retry_list);
85         spin_unlock_irqrestore(&conf->device_lock, flags);
86         md_wakeup_thread(mddev->thread);
87 }
88
89
90 /*
91  * multipath_end_bh_io() is called when we have finished servicing a multipathed
92  * operation and are ready to return a success/failure code to the buffer
93  * cache layer.
94  */
95 static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
96 {
97         struct bio *bio = mp_bh->master_bio;
98         multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
99
100         bio_endio(bio, bio->bi_size, err);
101         mempool_free(mp_bh, conf->pool);
102 }
103
104 static int multipath_end_request(struct bio *bio, unsigned int bytes_done,
105                                  int error)
106 {
107         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
108         struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private);
109         multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev);
110         mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;
111
112         if (bio->bi_size)
113                 return 1;
114
115         if (uptodate)
116                 multipath_end_bh_io(mp_bh, 0);
117         else if (!bio_rw_ahead(bio)) {
118                 /*
119                  * oops, IO error:
120                  */
121                 char b[BDEVNAME_SIZE];
122                 md_error (mp_bh->mddev, rdev);
123                 printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n", 
124                        bdevname(rdev->bdev,b), 
125                        (unsigned long long)bio->bi_sector);
126                 multipath_reschedule_retry(mp_bh);
127         } else
128                 multipath_end_bh_io(mp_bh, error);
129         rdev_dec_pending(rdev, conf->mddev);
130         return 0;
131 }
132
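/*
 * Kick the queues of all working member devices so that any requests plugged
 * there get dispatched; wired up as the md queue's ->unplug_fn via
 * multipath_unplug() below.
 */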
133 static void unplug_slaves(mddev_t *mddev)
134 {
135         multipath_conf_t *conf = mddev_to_conf(mddev);
136         int i;
137
138         rcu_read_lock();
139         for (i=0; i<mddev->raid_disks; i++) {
140                 mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
141                 if (rdev && !test_bit(Faulty, &rdev->flags)
142                     && atomic_read(&rdev->nr_pending)) {
143                         request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
144
145                         atomic_inc(&rdev->nr_pending);
146                         rcu_read_unlock();
147
148                         if (r_queue->unplug_fn)
149                                 r_queue->unplug_fn(r_queue);
150
151                         rdev_dec_pending(rdev, mddev);
152                         rcu_read_lock();
153                 }
154         }
155         rcu_read_unlock();
156 }
157
158 static void multipath_unplug(request_queue_t *q)
159 {
160         unplug_slaves(q->queuedata);
161 }
162
163
164 static int multipath_make_request (request_queue_t *q, struct bio * bio)
165 {
166         mddev_t *mddev = q->queuedata;
167         multipath_conf_t *conf = mddev_to_conf(mddev);
168         struct multipath_bh * mp_bh;
169         struct multipath_info *multipath;
170         const int rw = bio_data_dir(bio);
171
172         if (unlikely(bio_barrier(bio))) {
173                 bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
174                 return 0;
175         }
176
177         mp_bh = mempool_alloc(conf->pool, GFP_NOIO);
178
179         mp_bh->master_bio = bio;
180         mp_bh->mddev = mddev;
181
182         disk_stat_inc(mddev->gendisk, ios[rw]);
183         disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));
184
185         mp_bh->path = multipath_map(conf);
186         if (mp_bh->path < 0) {
187                 bio_endio(bio, bio->bi_size, -EIO);
188                 mempool_free(mp_bh, conf->pool);
189                 return 0;
190         }
191         multipath = conf->multipaths + mp_bh->path;
192
193         mp_bh->bio = *bio;
194         mp_bh->bio.bi_sector += multipath->rdev->data_offset;
195         mp_bh->bio.bi_bdev = multipath->rdev->bdev;
196         mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST);
197         mp_bh->bio.bi_end_io = multipath_end_request;
198         mp_bh->bio.bi_private = mp_bh;
199         generic_make_request(&mp_bh->bio);
200         return 0;
201 }
202
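/*
 * /proc/mdstat status line, e.g. " [2/2] [UU]": total vs. working paths,
 * followed by one character per path ('U' = in sync, '_' = failed or missing).
 */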
203 static void multipath_status (struct seq_file *seq, mddev_t *mddev)
204 {
205         multipath_conf_t *conf = mddev_to_conf(mddev);
206         int i;
207         
208         seq_printf (seq, " [%d/%d] [", conf->raid_disks,
209                                                  conf->working_disks);
210         for (i = 0; i < conf->raid_disks; i++)
211                 seq_printf (seq, "%s",
212                                conf->multipaths[i].rdev && 
213                                test_bit(In_sync, &conf->multipaths[i].rdev->flags) ? "U" : "_");
214         seq_printf (seq, "]");
215 }
216
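/*
 * Forward a cache-flush request to every non-faulty path; if any underlying
 * queue has no issue_flush_fn the whole call fails with -EOPNOTSUPP.
 */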
217 static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk,
218                                  sector_t *error_sector)
219 {
220         mddev_t *mddev = q->queuedata;
221         multipath_conf_t *conf = mddev_to_conf(mddev);
222         int i, ret = 0;
223
224         rcu_read_lock();
225         for (i=0; i<mddev->raid_disks && ret == 0; i++) {
226                 mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
227                 if (rdev && !test_bit(Faulty, &rdev->flags)) {
228                         struct block_device *bdev = rdev->bdev;
229                         request_queue_t *r_queue = bdev_get_queue(bdev);
230
231                         if (!r_queue->issue_flush_fn)
232                                 ret = -EOPNOTSUPP;
233                         else {
234                                 atomic_inc(&rdev->nr_pending);
235                                 rcu_read_unlock();
236                                 ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
237                                                               error_sector);
238                                 rdev_dec_pending(rdev, mddev);
239                                 rcu_read_lock();
240                         }
241                 }
242         }
243         rcu_read_unlock();
244         return ret;
245 }
246
247 /*
248  * Careful, this can execute in IRQ contexts as well!
249  */
250 static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev)
251 {
252         multipath_conf_t *conf = mddev_to_conf(mddev);
253
254         if (conf->working_disks <= 1) {
255                 /*
256                  * Uh oh, we can do nothing if this is our last path, but
257                  * first check if this is a queued request for a device
258                  * which has just failed.
259                  */
260                 printk(KERN_ALERT 
261                         "multipath: only one IO path left and IO error.\n");
262                 /* leave it active... it's all we have */
263         } else {
264                 /*
265                  * Mark disk as unusable
266                  */
267                 if (!test_bit(Faulty, &rdev->flags)) {
268                         char b[BDEVNAME_SIZE];
269                         clear_bit(In_sync, &rdev->flags);
270                         set_bit(Faulty, &rdev->flags);
271                         mddev->sb_dirty = 1;
272                         conf->working_disks--;
273                         printk(KERN_ALERT "multipath: IO failure on %s,"
274                                 " disabling IO path.\n Operation continuing"
275                                 " on %d IO paths.\n",
276                                 bdevname (rdev->bdev,b),
277                                 conf->working_disks);
278                 }
279         }
280 }
281
282 static void print_multipath_conf (multipath_conf_t *conf)
283 {
284         int i;
285         struct multipath_info *tmp;
286
287         printk("MULTIPATH conf printout:\n");
288         if (!conf) {
289                 printk("(conf==NULL)\n");
290                 return;
291         }
292         printk(" --- wd:%d rd:%d\n", conf->working_disks,
293                          conf->raid_disks);
294
295         for (i = 0; i < conf->raid_disks; i++) {
296                 char b[BDEVNAME_SIZE];
297                 tmp = conf->multipaths + i;
298                 if (tmp->rdev)
299                         printk(" disk%d, o:%d, dev:%s\n",
300                                 i,!test_bit(Faulty, &tmp->rdev->flags),
301                                bdevname(tmp->rdev->bdev,b));
302         }
303 }
304
305
306 static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
307 {
308         multipath_conf_t *conf = mddev->private;
309         int found = 0;
310         int path;
311         struct multipath_info *p;
312
313         print_multipath_conf(conf);
314
315         for (path=0; path<mddev->raid_disks; path++) 
316                 if ((p=conf->multipaths+path)->rdev == NULL) {
317                         blk_queue_stack_limits(mddev->queue,
318                                                rdev->bdev->bd_disk->queue);
319
320                         /* as we don't honour merge_bvec_fn, we must never risk
321                          * violating it, so limit ->max_sectors to one PAGE, as
322                          * a one page request is never in violation.
323                          * (Note: it is very unlikely that a device with
324                          * merge_bvec_fn will be involved in multipath.)
325                          */
326                         if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
327                             mddev->queue->max_sectors > (PAGE_SIZE>>9))
328                                 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
329
330                         conf->working_disks++;
331                         rdev->raid_disk = path;
332                         set_bit(In_sync, &rdev->flags);
333                         rcu_assign_pointer(p->rdev, rdev);
334                         found = 1;
335                 }
336
337         print_multipath_conf(conf);
338         return found;
339 }
340
341 static int multipath_remove_disk(mddev_t *mddev, int number)
342 {
343         multipath_conf_t *conf = mddev->private;
344         int err = 0;
345         mdk_rdev_t *rdev;
346         struct multipath_info *p = conf->multipaths + number;
347
348         print_multipath_conf(conf);
349
350         rdev = p->rdev;
351         if (rdev) {
352                 if (test_bit(In_sync, &rdev->flags) ||
353                     atomic_read(&rdev->nr_pending)) {
354                         printk(KERN_ERR "hot-remove-disk, slot %d is identified" " but is still operational!\n", number);
355                         err = -EBUSY;
356                         goto abort;
357                 }
358                 p->rdev = NULL;
359                 synchronize_rcu();
360                 if (atomic_read(&rdev->nr_pending)) {
361                         /* lost the race, try later */
362                         err = -EBUSY;
363                         p->rdev = rdev;
364                 }
365         }
366 abort:
367
368         print_multipath_conf(conf);
369         return err;
370 }
371
372
373
374 /*
375  * This is a kernel thread which:
376  *
377  *      1.      Retries failed read operations on working multipaths.
378  *      2.      Updates the raid superblock when problems are encountered.
379  *      3.      Performs writes following reads for array synchronising.
380  */
381
382 static void multipathd (mddev_t *mddev)
383 {
384         struct multipath_bh *mp_bh;
385         struct bio *bio;
386         unsigned long flags;
387         multipath_conf_t *conf = mddev_to_conf(mddev);
388         struct list_head *head = &conf->retry_list;
389
390         md_check_recovery(mddev);
391         for (;;) {
392                 char b[BDEVNAME_SIZE];
393                 spin_lock_irqsave(&conf->device_lock, flags);
394                 if (list_empty(head))
395                         break;
396                 mp_bh = list_entry(head->prev, struct multipath_bh, retry_list);
397                 list_del(head->prev);
398                 spin_unlock_irqrestore(&conf->device_lock, flags);
399
400                 bio = &mp_bh->bio;
401                 bio->bi_sector = mp_bh->master_bio->bi_sector;
402                 
403                 if ((mp_bh->path = multipath_map (conf))<0) {
404                         printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
405                                 " error for block %llu\n",
406                                 bdevname(bio->bi_bdev,b),
407                                 (unsigned long long)bio->bi_sector);
408                         multipath_end_bh_io(mp_bh, -EIO);
409                 } else {
410                         printk(KERN_ERR "multipath: %s: redirecting sector %llu"
411                                 " to another IO path\n",
412                                 bdevname(bio->bi_bdev,b),
413                                 (unsigned long long)bio->bi_sector);
414                         *bio = *(mp_bh->master_bio);
415                         bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
416                         bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
417                         bio->bi_rw |= (1 << BIO_RW_FAILFAST);
418                         bio->bi_end_io = multipath_end_request;
419                         bio->bi_private = mp_bh;
420                         generic_make_request(bio);
421                 }
422         }
423         spin_unlock_irqrestore(&conf->device_lock, flags);
424 }
425
426 static int multipath_run (mddev_t *mddev)
427 {
428         multipath_conf_t *conf;
429         int disk_idx;
430         struct multipath_info *disk;
431         mdk_rdev_t *rdev;
432         struct list_head *tmp;
433
434         if (mddev->level != LEVEL_MULTIPATH) {
435                 printk("multipath: %s: raid level not set to multipath IO (%d)\n",
436                        mdname(mddev), mddev->level);
437                 goto out;
438         }
439         /*
440          * copy the already verified devices into our private MULTIPATH
441          * bookkeeping area. [whatever we allocate in multipath_run(),
442          * should be freed in multipath_stop()]
443          */
444
445         conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
446         mddev->private = conf;
447         if (!conf) {
448                 printk(KERN_ERR 
449                         "multipath: couldn't allocate memory for %s\n",
450                         mdname(mddev));
451                 goto out;
452         }
453
454         conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks,
455                                    GFP_KERNEL);
456         if (!conf->multipaths) {
457                 printk(KERN_ERR 
458                         "multipath: couldn't allocate memory for %s\n",
459                         mdname(mddev));
460                 goto out_free_conf;
461         }
462
463         conf->working_disks = 0;
464         ITERATE_RDEV(mddev,rdev,tmp) {
465                 disk_idx = rdev->raid_disk;
466                 if (disk_idx < 0 ||
467                     disk_idx >= mddev->raid_disks)
468                         continue;
469
470                 disk = conf->multipaths + disk_idx;
471                 disk->rdev = rdev;
472
473                 blk_queue_stack_limits(mddev->queue,
474                                        rdev->bdev->bd_disk->queue);
475                 /* as we don't honour merge_bvec_fn, we must never risk
476                  * violating it, not that we ever expect a device with
477                  * a merge_bvec_fn to be involved in multipath */
478                 if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
479                     mddev->queue->max_sectors > (PAGE_SIZE>>9))
480                         blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
481
482                 if (!test_bit(Faulty, &rdev->flags))
483                         conf->working_disks++;
484         }
485
486         conf->raid_disks = mddev->raid_disks;
487         mddev->sb_dirty = 1;
488         conf->mddev = mddev;
489         spin_lock_init(&conf->device_lock);
490         INIT_LIST_HEAD(&conf->retry_list);
491
492         if (!conf->working_disks) {
493                 printk(KERN_ERR "multipath: no operational IO paths for %s\n",
494                         mdname(mddev));
495                 goto out_free_conf;
496         }
497         mddev->degraded = conf->raid_disks - conf->working_disks;
498
499         conf->pool = mempool_create(NR_RESERVED_BUFS,
500                                     mp_pool_alloc, mp_pool_free,
501                                     NULL);
502         if (conf->pool == NULL) {
503                 printk(KERN_ERR 
504                         "multipath: couldn't allocate memory for %s\n",
505                         mdname(mddev));
506                 goto out_free_conf;
507         }
508
509         {
510                 mddev->thread = md_register_thread(multipathd, mddev, "%s_multipath");
511                 if (!mddev->thread) {
512                         printk(KERN_ERR "multipath: couldn't allocate thread"
513                                 " for %s\n", mdname(mddev));
514                         goto out_free_conf;
515                 }
516         }
517
518         printk(KERN_INFO 
519                 "multipath: array %s active with %d out of %d IO paths\n",
520                 mdname(mddev), conf->working_disks, mddev->raid_disks);
521         /*
522          * Ok, everything is just fine now
523          */
524         mddev->array_size = mddev->size;
525
526         mddev->queue->unplug_fn = multipath_unplug;
527         mddev->queue->issue_flush_fn = multipath_issue_flush;
528
529         return 0;
530
531 out_free_conf:
532         if (conf->pool)
533                 mempool_destroy(conf->pool);
534         kfree(conf->multipaths);
535         kfree(conf);
536         mddev->private = NULL;
537 out:
538         return -EIO;
539 }
540
541
542 static int multipath_stop (mddev_t *mddev)
543 {
544         multipath_conf_t *conf = mddev_to_conf(mddev);
545
546         md_unregister_thread(mddev->thread);
547         mddev->thread = NULL;
548         blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
549         mempool_destroy(conf->pool);
550         kfree(conf->multipaths);
551         kfree(conf);
552         mddev->private = NULL;
553         return 0;
554 }
555
556 static mdk_personality_t multipath_personality=
557 {
558         .name           = "multipath",
559         .owner          = THIS_MODULE,
560         .make_request   = multipath_make_request,
561         .run            = multipath_run,
562         .stop           = multipath_stop,
563         .status         = multipath_status,
564         .error_handler  = multipath_error,
565         .hot_add_disk   = multipath_add_disk,
566         .hot_remove_disk= multipath_remove_disk,
567 };
568
569 static int __init multipath_init (void)
570 {
571         return register_md_personality (MULTIPATH, &multipath_personality);
572 }
573
574 static void __exit multipath_exit (void)
575 {
576         unregister_md_personality (MULTIPATH);
577 }
578
579 module_init(multipath_init);
580 module_exit(multipath_exit);
581 MODULE_LICENSE("GPL");
582 MODULE_ALIAS("md-personality-7"); /* MULTIPATH */