/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include "dm.h"
#include "dm-bio-list.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>
#define DM_MSG_PREFIX "core"

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * One of these is allocated per bio.
 */
	struct mapped_device *md;
	unsigned long start_time;
/*
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 */

union map_info *dm_get_mapinfo(struct bio *bio)
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;

#define MINOR_ALLOCED ((void *)-1)
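/*
 * MINOR_ALLOCED is a placeholder stored in the minor IDR while a minor
 * number has been reserved but the mapped_device is still being set up:
 * specific_minor()/next_free_minor() insert it, and alloc_dev() later
 * swaps in the real md with idr_replace() (see the BUG_ON there).
 * dm_find_md() treats an entry that is still MINOR_ALLOCED as not yet
 * ready and refuses to return it.
 */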
/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
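/*
 * Roughly how these flags are used below: DMF_BLOCK_IO makes dm_request()
 * defer incoming bios instead of mapping them; DMF_SUSPENDED marks a
 * completed suspend; DMF_FROZEN records that lock_fs() froze the
 * filesystem on the device; DMF_FREEING and DMF_DELETING keep new opens
 * away from a device that is being torn down; and DMF_NOFLUSH_SUSPENDING
 * routes requeued ios onto the pushback list instead of completing them
 * with an error.
 */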
/*
 * Work processed by per-device workqueue.
 */
	struct work_struct work;
	struct mapped_device *md;
struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;
	spinlock_t pushback_lock;

	struct request_queue *queue;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	wait_queue_head_t wait;
	struct bio_list deferred;
	struct bio_list pushback;

	/*
	 * Processing queue (flush/barriers)
	 */
	struct workqueue_struct *wq;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */

	wait_queue_head_t eventq;

	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *suspended_bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;
};
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static int __init local_init(void)
	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
		goto out_free_io_cache;

	r = dm_uevent_init();
		goto out_free_tio_cache;

	r = register_blkdev(_major, _name);
		goto out_uevent_exit;

	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
static void local_exit(void)
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);

	DMINFO("cleaned up");
static int (*_inits[])(void) __initdata = {
static void (*_exits[])(void) = {
static int __init dm_init(void)
	const int count = ARRAY_SIZE(_inits);

	for (i = 0; i < count; i++) {
static void __exit dm_exit(void)
	int i = ARRAY_SIZE(_exits);
/*
 * Block device functions
 */
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags)) {

	atomic_inc(&md->open_count);

	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
static int dm_blk_close(struct gendisk *disk, fmode_t mode)
	struct mapped_device *md = disk->private_data;

	atomic_dec(&md->open_count);

int dm_open_count(struct mapped_device *md)
	return atomic_read(&md->open_count);
/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
	spin_lock(&_minor_lock);

	if (dm_open_count(md))

	set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *tgt;

	if (!map || !dm_table_get_size(map))

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended(md)) {

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);
static struct dm_io *alloc_io(struct mapped_device *md)
	return mempool_alloc(md->io_pool, GFP_NOIO);

static void free_io(struct mapped_device *md, struct dm_io *io)
	mempool_free(io, md->io_pool);

static struct dm_target_io *alloc_tio(struct mapped_device *md)
	return mempool_alloc(md->tio_pool, GFP_NOIO);

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
	mempool_free(tio, md->tio_pool);
static void start_io_acct(struct dm_io *io)
	struct mapped_device *md = io->md;

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);

static void end_io_acct(struct dm_io *io)
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);

	dm_disk(md)->part0.in_flight = pending =
		atomic_dec_return(&md->pending);

	/* nudge anyone waiting on suspend queue */
/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
	down_write(&md->io_lock);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_write(&md->io_lock);

	bio_list_add(&md->deferred, bio);

	up_write(&md->io_lock);
	return 0; /* deferred successfully */
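/*
 * queue_io() only succeeds while DMF_BLOCK_IO is set: dm_request() calls
 * it from its "suspended" loop below, and if the flag was cleared between
 * dropping the read lock there and taking the write lock here, the bio is
 * not queued and the caller falls back to mapping it normally.
 */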
/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
	read_lock(&md->map_lock);
	read_unlock(&md->map_lock);
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
/*-----------------------------------------------------------------
 * A more elegant solution that uses the queue merge function is in
 * the works, but it needs a couple of block-layer changes first. So,
 * in the interests of getting something for people to use, I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
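/*
 * During a noflush suspend, bios that a target fails with
 * DM_ENDIO_REQUEUE are not completed; dec_pending() below parks them on
 * md->pushback under pushback_lock, and __merge_pushback_list() later
 * splices them back onto md->deferred so they are reissued on resume.
 */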
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
	/* Push-back supersedes any I/O errors */
	if (error && !(io->error > 0 && __noflush_suspending(io->md)))

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 * This must be handled before the sleeper on
			 * suspend queue merges the pushback list.
			 */
			spin_lock_irqsave(&io->md->pushback_lock, flags);
			if (__noflush_suspending(io->md))
				bio_list_add(&io->md->pushback, io->bio);

				/* noflush suspend was interrupted. */
			spin_unlock_irqrestore(&io->md->pushback_lock, flags);

		if (io->error != DM_ENDIO_REQUEUE) {
			blk_add_trace_bio(io->md->queue, io->bio,

			bio_endio(io->bio, io->error);
static void clone_endio(struct bio *bio, int error)
	struct dm_target_io *tio = bio->bi_private;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)

		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled fine here
			 */
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */

			DMWARN("unimplemented target endio return value: %d", r);

	dec_pending(tio->io, error);

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;
static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further?
	 */
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
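		/*
		 * The boundary arithmetic rounds (offset + split_io) down to
		 * a multiple of split_io (the mask trick assumes split_io is
		 * a power of two) and measures the distance back to offset.
		 * For example, with split_io == 8 and offset == 5:
		 * (5 + 8) & ~7 == 8, so at most 8 - 5 == 3 sectors fit before
		 * the next chunk boundary, and len is clamped to that.
		 */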
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
	struct mapped_device *md;

	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
				    tio->io->bio->bi_bdev->bd_dev,
				    clone->bi_sector, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */

		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;

		DMWARN("unimplemented target map return value: %d", r);
	struct mapped_device *md;
	struct dm_table *map;
	sector_t sector_count;

static void dm_bio_destructor(struct bio *bio)
	struct bio_set *bs = bio->bi_private;
/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;

	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;
	clone->bi_flags |= 1 << BIO_CLONED;
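/*
 * split_bvec() builds a single-bvec clone covering just a slice of one
 * bvec, for the case where a bvec straddles a target boundary;
 * clone_bio() below reuses the original bio's bvec array and only
 * narrows the covered range, so no page data is copied in either case.
 */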
/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;

	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);
static int __clone_and_map(struct clone_info *ci)
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))

	max = max_io_len(ci->md, ci->sector, ti);

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count,
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		sector_t remaining = max;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
		__map_bio(ti, clone, tio);

		ci->sector_count -= len;

		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

			ti = dm_table_find_target(ci->map, ci->sector);
			if (!dm_target_is_valid(ti))

			max = max_io_len(ci->md, ci->sector, ti);

			tio = alloc_tio(ci->md);
			memset(&tio->info, 0, sizeof(tio->info));

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,

			__map_bio(ti, clone, tio);

			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);
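/*
 * So __clone_and_map() handles three cases per call: the whole remainder
 * fits in the current target (one clone, done); whole bvecs fit but not
 * the whole bio (clone as many complete bvecs as the target accepts); or
 * a single bvec itself spans targets and must be carved up with
 * split_bvec() in the do/while loop above.
 */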
/*
 * Split the bio into several clones.
 */
static int __split_bio(struct mapped_device *md, struct bio *bio)
	struct clone_info ci;

	ci.map = dm_get_table(md);
	if (unlikely(!ci.map))

	ci.io = alloc_io(md);
	atomic_set(&ci.io->io_count, 1);
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count && !error)
		error = __clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);
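/*
 * ci.io->io_count starts at 1 so the original io cannot complete while
 * clones are still being created; each __map_bio() takes an extra
 * reference for its clone, and the dec_pending() above drops the initial
 * one once the whole bio has been mapped.
 */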
/*-----------------------------------------------------------------
 *---------------------------------------------------------------*/

static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *ti;
	sector_t max_sectors;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;

	/*
	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);

	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;
/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(struct request_queue *q, struct bio *bio)
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;

	/*
	 * There is no use in forwarding any barrier request since we can't
	 * guarantee it is (or can be) handled by the targets correctly.
	 */
	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);

	down_read(&md->io_lock);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));

	/*
	 * If we're suspended we have to queue this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->io_lock);

		if (bio_rw(bio) != READA)
			r = queue_io(md, bio);

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->io_lock);

	r = __split_bio(md, bio);
	up_read(&md->io_lock);
static void dm_unplug_all(struct request_queue *q)
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

		dm_table_unplug_all(map);
static int dm_any_congested(void *congested_data, int bdi_bits)
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	atomic_inc(&md->pending);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		map = dm_get_table(md);
			r = dm_table_any_congested(map, bdi_bits);

	if (!atomic_dec_return(&md->pending))
		/* nudge anyone waiting on suspend queue */
/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
	if (minor >= (1 << MINORBITS))

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);

		idr_remove(&_minor_idr, m);

	spin_unlock(&_minor_lock);
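/*
 * idr_pre_get() preallocates IDR nodes with GFP_KERNEL before the
 * spinlock is taken, so idr_get_new_above() does not need to allocate
 * while _minor_lock is held. If the id actually handed back (m) is not
 * the minor that was asked for, the slot was not free after all, so it
 * is removed again and the request fails.
 */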
static int next_free_minor(int *minor)
	r = idr_pre_get(&_minor_idr, GFP_KERNEL);

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);

	spin_unlock(&_minor_lock);
static struct block_device_operations dm_blk_dops;

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);

		DMWARN("unable to allocate device, out of memory.");

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);

	init_rwsem(&md->io_lock);
	mutex_init(&md->suspend_lock);
	spin_lock_init(&md->pushback_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue(GFP_KERNEL);

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);

	md->bs = bioset_create(16, 16);

	md->disk = alloc_disk(1);

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);

	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = create_singlethread_workqueue("kdmflush");

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	bioset_free(md->bs);
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
	int minor = MINOR(disk_devt(md->disk));

	if (md->suspended_bdev) {
		bdput(md->suspended_bdev);

	destroy_workqueue(md->wq);
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	bioset_free(md->bs);
	del_gendisk(md->disk);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
	unsigned long flags;
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
static void __set_size(struct mapped_device *md, sector_t size)
	set_capacity(md->disk, size);

	mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
	i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
static int __bind(struct mapped_device *md, struct dm_table *t)
	struct request_queue *q = md->queue;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	if (md->suspended_bdev)
		__set_size(md, size);

	dm_table_event_callback(t, event_callback, md);

	write_lock(&md->map_lock);
	dm_table_set_restrictions(t, q);
	write_unlock(&md->map_lock);
static void __unbind(struct mapped_device *md)
	struct dm_table *map = md->map;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	write_unlock(&md->map_lock);
/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
	struct mapped_device *md;

	md = alloc_dev(minor);
static struct mapped_device *dm_find_md(dev_t dev)
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
		   test_bit(DMF_FREEING, &md->flags))) {

	spin_unlock(&_minor_lock);
struct mapped_device *dm_get_md(dev_t dev)
	struct mapped_device *md = dm_find_md(dev);

void *dm_get_mdptr(struct mapped_device *md)
	return md->interface_ptr;

void dm_set_mdptr(struct mapped_device *md, void *ptr)
	md->interface_ptr = ptr;

void dm_get(struct mapped_device *md)
	atomic_inc(&md->holders);

const char *dm_device_name(struct mapped_device *md)
EXPORT_SYMBOL_GPL(dm_device_name);
void dm_put(struct mapped_device *md)
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED,
			    MINOR(disk_devt(dm_disk(md))));
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
EXPORT_SYMBOL_GPL(dm_put);
static int dm_wait_for_completion(struct mapped_device *md)
		set_current_state(TASK_INTERRUPTIBLE);

		if (!atomic_read(&md->pending))

		if (signal_pending(current)) {

	set_current_state(TASK_RUNNING);
/*
 * Process the deferred bios
 */
static void __flush_deferred_io(struct mapped_device *md)
	while ((c = bio_list_pop(&md->deferred))) {
		if (__split_bio(md, c))

	clear_bit(DMF_BLOCK_IO, &md->flags);
static void __merge_pushback_list(struct mapped_device *md)
	unsigned long flags;

	spin_lock_irqsave(&md->pushback_lock, flags);
	clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	bio_list_merge_head(&md->deferred, &md->pushback);
	bio_list_init(&md->pushback);
	spin_unlock_irqrestore(&md->pushback_lock, flags);
static void dm_wq_work(struct work_struct *work)
	struct dm_wq_req *req = container_of(work, struct dm_wq_req, work);
	struct mapped_device *md = req->md;

	down_write(&md->io_lock);
	switch (req->type) {
	case DM_WQ_FLUSH_DEFERRED:
		__flush_deferred_io(md);

		DMERR("dm_wq_work: unrecognised work type %d", req->type);

	up_write(&md->io_lock);
static void dm_wq_queue(struct mapped_device *md, int type, void *context,
			struct dm_wq_req *req)
	req->context = context;
	INIT_WORK(&req->work, dm_wq_work);
	queue_work(md->wq, &req->work);

static void dm_queue_flush(struct mapped_device *md, int type, void *context)
	struct dm_wq_req req;

	dm_wq_queue(md, type, context, &req);
	flush_workqueue(md->wq);
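/*
 * dm_queue_flush() is effectively synchronous: it queues a dm_wq_req on
 * the per-device workqueue and flush_workqueue() then waits for
 * dm_wq_work() to run it, so by the time it returns __flush_deferred_io()
 * has already pushed the deferred bios back through __split_bio().
 */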
/*
 * Swap in a new table (destroying the old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended(md))

	/* without bdev, the device size cannot be changed */
	if (!md->suspended_bdev)
		if (get_capacity(md->disk) != dm_table_get_size(table))

	r = __bind(md, table);

	mutex_unlock(&md->suspend_lock);
/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->suspended_bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;

	set_bit(DMF_FROZEN, &md->flags);

	/* don't bdput right now, we don't want the bdev
	 * to go away while it is locked.
	 */

static void unlock_fs(struct mapped_device *md)
	if (!test_bit(DMF_FROZEN, &md->flags))

	thaw_bdev(md->suspended_bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
1485 * filesystem. For example we might want to move some data in
1486 * the background. Before the table can be swapped with
1487 * dm_bind_table, dm_suspend must be called to flush any in
1488 * flight bios and ensure that any further io gets deferred.
1490 int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
1492 struct dm_table *map = NULL;
1493 DECLARE_WAITQUEUE(wait, current);
1495 int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
1496 int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;
1498 mutex_lock(&md->suspend_lock);
1500 if (dm_suspended(md)) {
1505 map = dm_get_table(md);
1508 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
1509 * This flag is cleared before dm_suspend returns.
1512 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
1514 /* This does not get reverted if there's an error later. */
1515 dm_table_presuspend_targets(map);
1517 /* bdget() can stall if the pending I/Os are not flushed */
1519 md->suspended_bdev = bdget_disk(md->disk, 0);
1520 if (!md->suspended_bdev) {
1521 DMWARN("bdget failed in dm_suspend");
1527 * Flush I/O to the device. noflush supersedes do_lockfs,
1528 * because lock_fs() needs to flush I/Os.
1538 * First we set the BLOCK_IO flag so no more ios will be mapped.
1540 down_write(&md->io_lock);
1541 set_bit(DMF_BLOCK_IO, &md->flags);
1543 add_wait_queue(&md->wait, &wait);
1544 up_write(&md->io_lock);
1548 dm_table_unplug_all(map);
1551 * Wait for the already-mapped ios to complete.
1553 r = dm_wait_for_completion(md);
1555 down_write(&md->io_lock);
1556 remove_wait_queue(&md->wait, &wait);
1559 __merge_pushback_list(md);
1560 up_write(&md->io_lock);
1562 /* were we interrupted ? */
1564 dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL);
1567 goto out; /* pushback list is already flushed, so skip flush */
1570 dm_table_postsuspend_targets(map);
1572 set_bit(DMF_SUSPENDED, &md->flags);
1575 if (r && md->suspended_bdev) {
1576 bdput(md->suspended_bdev);
1577 md->suspended_bdev = NULL;
1583 mutex_unlock(&md->suspend_lock);
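/*
 * To summarise the sequence above: presuspend the targets, optionally
 * freeze the filesystem, set DMF_BLOCK_IO so new ios are deferred, wait
 * for every in-flight io to drain (the io accounting code nudges the
 * waiter on md->wait as md->pending reaches zero), merge any pushback
 * ios for a noflush suspend, and only then postsuspend the targets and
 * set DMF_SUSPENDED. An interrupted wait re-queues the deferred ios and
 * backs out instead.
 */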
int dm_resume(struct mapped_device *md)
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended(md))

	map = dm_get_table(md);
	if (!map || !dm_table_get_size(map))

	r = dm_table_resume_targets(map);

	dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL);

	if (md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);

	dm_kobject_uevent(md);

	mutex_unlock(&md->suspend_lock);
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
void dm_kobject_uevent(struct mapped_device *md)
	kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);

uint32_t dm_next_uevent_seq(struct mapped_device *md)
	return atomic_add_return(1, &md->uevent_seq);

uint32_t dm_get_event_nr(struct mapped_device *md)
	return atomic_read(&md->event_nr);

int dm_wait_event(struct mapped_device *md, int event_nr)
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)

int dm_suspended(struct mapped_device *md)
	return test_bit(DMF_SUSPENDED, &md->flags);

int dm_noflush_suspending(struct dm_target *ti)
	struct mapped_device *md = dm_table_get_md(ti->table);
	int r = __noflush_suspending(md);

EXPORT_SYMBOL_GPL(dm_noflush_suspending);
static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");