/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <asm/atomic.h>

#define DM_MSG_PREFIX "table"

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

struct dm_table {
	struct mapped_device *md;
	atomic_t holders;

	/* btree table */
	unsigned int depth;
	unsigned int counts[MAX_DEPTH];	/* in nodes */
	sector_t *index[MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	/*
	 * Indicates the rw permissions for the new logical
	 * device. This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	int mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/*
	 * These are optimistic limits taken from all the
	 * targets; some targets will need smaller limits.
	 */
	struct io_restrictions limits;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;
};

/*
 * Similar to ceiling(log_base(n)).
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
	int result = 0;

	while (n > 1) {
		n = dm_div_up(n, base);
		result++;
	}

	return result;
}

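/*
 * Worked example: int_log(1000, 4) rounds the division up each
 * time, 1000 -> 250 -> 63 -> 16 -> 4 -> 1, so it returns 5
 * (4^5 = 1024 >= 1000). dm_table_complete() below uses this to
 * derive the btree depth from the leaf node count.
 */
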
/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

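/*
 * For example: min_not_zero(0, 8) == 8, min_not_zero(8, 0) == 8,
 * min_not_zero(4, 8) == 4, and min_not_zero(0, 0) == 0. A zero
 * here means "no limit set", so an unset limit never masks a
 * real one.
 */
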
/*
 * Combine two io_restrictions, always taking the lower value.
 */
static void combine_restrictions_low(struct io_restrictions *lhs,
				     struct io_restrictions *rhs)
{
	lhs->max_sectors =
		min_not_zero(lhs->max_sectors, rhs->max_sectors);

	lhs->max_phys_segments =
		min_not_zero(lhs->max_phys_segments, rhs->max_phys_segments);

	lhs->max_hw_segments =
		min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);

	lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);

	lhs->max_segment_size =
		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);

	lhs->seg_boundary_mask =
		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);

	lhs->no_cluster |= rhs->no_cluster;
}

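/*
 * Example: an unset lhs->max_sectors (0) combined with an rhs
 * value of 128 yields 128. Note that hardsect_size takes the
 * maximum instead: the stacked device must advertise the largest
 * hardware sector size of any device underneath it.
 */
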
/*
 * Calculate the index of the child node for the n'th node's k'th key.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
	return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
				 unsigned int l, unsigned int n)
{
	return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could look up from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
	for (; l < t->depth - 1; l++)
		n = get_child(n, CHILDREN_PER_NODE - 1);

	if (n >= t->counts[l])
		return (sector_t) -1;

	return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

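/*
 * Illustrative geometry, assuming a 64-byte cache line and an
 * 8-byte sector_t: NODE_SIZE = 64, KEYS_PER_NODE = 8 and
 * CHILDREN_PER_NODE = 9, so get_child(2, 0)..get_child(2, 8)
 * give children 18..26 on the level below.
 */
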
/*
 * Fills in a level of the btree based on the highs of the
 * level below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
	unsigned int n, k;
	sector_t *node;

	for (n = 0U; n < t->counts[l]; n++) {
		node = get_node(t, l, n);

		for (k = 0U; k < KEYS_PER_NODE; k++)
			node[k] = high(t, l + 1, get_child(n, k));
	}

	return 0;
}

void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
	unsigned long size;
	void *addr;

	/*
	 * Check that we're not going to overflow.
	 */
	if (nmemb > (ULONG_MAX / elem_size))
		return NULL;

	size = nmemb * elem_size;
	addr = vmalloc(size);
	if (addr)
		memset(addr, 0, size);

	return addr;
}

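/*
 * Example of the overflow check: with a 32-bit unsigned long,
 * dm_vcalloc(0x200000, 0x1000) is rejected because the product
 * (2^33) would wrap to zero. The check divides by elem_size, so
 * callers must never pass a zero element size.
 */
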
/*
 * highs, and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
	sector_t *n_highs;
	struct dm_target *n_targets;
	int n = t->num_targets;

	/*
	 * Allocate both the target array and offset array at once.
	 */
	n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) +
					  sizeof(sector_t));
	if (!n_highs)
		return -ENOMEM;

	n_targets = (struct dm_target *) (n_highs + num);

	/* copy in the existing targets, then mark the unused highs */
	memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
	memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
	memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));

	vfree(t->highs);

	t->num_allocated = num;
	t->highs = n_highs;
	t->targets = n_targets;

	return 0;
}

int dm_table_create(struct dm_table **result, int mode,
		    unsigned num_targets, struct mapped_device *md)
{
	struct dm_table *t = kmalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	memset(t, 0, sizeof(*t));
	INIT_LIST_HEAD(&t->devices);
	atomic_set(&t->holders, 1);

	if (!num_targets)
		num_targets = KEYS_PER_NODE;
	num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

	if (alloc_targets(t, num_targets)) {
		kfree(t);
		return -ENOMEM;
	}

	t->mode = mode;
	t->md = md;
	*result = t;
	return 0;
}

int dm_create_error_table(struct dm_table **result, struct mapped_device *md)
{
	struct dm_table *t;
	sector_t dev_size = 1;
	int r;

	/*
	 * Find current size of device.
	 * Default to 1 sector if inactive.
	 */
	t = dm_get_table(md);
	if (t) {
		dev_size = dm_table_get_size(t);
		dm_table_put(t);
	}

	r = dm_table_create(&t, FMODE_READ, 1, md);
	if (r)
		return r;

	r = dm_table_add_target(t, "error", 0, dev_size, NULL);
	if (r)
		goto out;

	r = dm_table_complete(t);
	if (r)
		goto out;

	*result = t;

out:
	if (r)
		dm_table_put(t);

	return r;
}
EXPORT_SYMBOL_GPL(dm_create_error_table);

static void free_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	for (tmp = devices->next; tmp != devices; tmp = next) {
		struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
		next = tmp->next;
		kfree(dd);
	}
}

static void table_destroy(struct dm_table *t)
{
	unsigned int i;

	/* free the indexes (see dm_table_complete) */
	if (t->depth >= 2)
		vfree(t->index[t->depth - 2]);

	/* free the targets */
	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *tgt = t->targets + i;

		if (tgt->type->dtr)
			tgt->type->dtr(tgt);

		dm_put_target_type(tgt->type);
	}

	vfree(t->highs);

	/* free the device list */
	if (t->devices.next != &t->devices) {
		DMWARN("devices still present during destroy: "
		       "dm_table_remove_device calls missing");

		free_devices(&t->devices);
	}

	kfree(t);
}

void dm_table_get(struct dm_table *t)
{
	atomic_inc(&t->holders);
}

void dm_table_put(struct dm_table *t)
{
	if (!t)
		return;

	if (atomic_dec_and_test(&t->holders))
		table_destroy(t);
}

/*
 * Checks to see if we need to extend highs or targets.
 */
static inline int check_space(struct dm_table *t)
{
	if (t->num_targets >= t->num_allocated)
		return alloc_targets(t, t->num_allocated * 2);

	return 0;
}

/*
 * Convert a device path to a dev_t.
 */
static int lookup_device(const char *path, dev_t *dev)
{
	int r;
	struct nameidata nd;
	struct inode *inode;

	if ((r = path_lookup(path, LOOKUP_FOLLOW, &nd)))
		return r;

	inode = nd.dentry->d_inode;
	if (!inode) {
		r = -ENOENT;
		goto out;
	}

	if (!S_ISBLK(inode->i_mode)) {
		r = -ENOTBLK;
		goto out;
	}

	*dev = inode->i_rdev;

 out:
	path_release(&nd);
	return r;
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev *find_device(struct list_head *l, dev_t dev)
{
	struct dm_dev *dd;

	list_for_each_entry (dd, l, list)
		if (dd->bdev->bd_dev == dev)
			return dd;

	return NULL;
}

/*
 * Open a device so we can use it as a map destination.
 */
static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;
	int r;

	BUG_ON(d->bdev);

	bdev = open_by_devnum(dev, d->mode);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
	if (r)
		blkdev_put(bdev);
	else
		d->bdev = bdev;

	return r;
}

/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev *d, struct mapped_device *md)
{
	if (!d->bdev)
		return;

	bd_release_from_disk(d->bdev, dm_disk(md));
	blkdev_put(d->bdev);
	d->bdev = NULL;
}

/*
 * If possible (i.e. blk_size[major] is set), this checks that an
 * area of the destination device is valid.
 */
static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
{
	sector_t dev_size;

	dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
	return ((start < dev_size) && (len <= (dev_size - start)));
}

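/*
 * Example: i_size is in bytes, so a 1 MiB device spans 2048
 * sectors after the SECTOR_SHIFT (9) conversion; start = 0,
 * len = 2048 is accepted, while start = 2000, len = 100 is
 * rejected for running off the end of the device.
 */
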
/*
 * This upgrades the mode on an already-open dm_dev, being careful
 * to leave things as they were if we fail to reopen the device.
 */
static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md)
{
	int r;
	struct dm_dev dd_copy;
	dev_t dev = dd->bdev->bd_dev;

	dd_copy = *dd;

	dd->mode |= new_mode;
	dd->bdev = NULL;
	r = open_dev(dd, dev, md);
	if (!r)
		close_dev(&dd_copy, md);
	else
		*dd = dd_copy;

	return r;
}

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
			      const char *path, sector_t start, sector_t len,
			      int mode, struct dm_dev **result)
{
	int r;
	dev_t dev;
	struct dm_dev *dd;
	unsigned int major, minor;

	BUG_ON(!t);

	if (sscanf(path, "%u:%u", &major, &minor) == 2) {
		/* Extract the major/minor numbers */
		dev = MKDEV(major, minor);
		if (MAJOR(dev) != major || MINOR(dev) != minor)
			return -EOVERFLOW;
	} else {
		/* convert the path to a device */
		if ((r = lookup_device(path, &dev)))
			return r;
	}

	dd = find_device(&t->devices, dev);
	if (!dd) {
		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
		if (!dd)
			return -ENOMEM;

		dd->mode = mode;
		dd->bdev = NULL;

		if ((r = open_dev(dd, dev, t->md))) {
			kfree(dd);
			return r;
		}

		format_dev_t(dd->name, dev);

		atomic_set(&dd->count, 0);
		list_add(&dd->list, &t->devices);

	} else if (dd->mode != (mode | dd->mode)) {
		r = upgrade_mode(dd, mode, t->md);
		if (r)
			return r;
	}
	atomic_inc(&dd->count);

	if (!check_device_area(dd, start, len)) {
		DMWARN("device %s too small for target", path);
		dm_put_device(ti, dd);
		return -EINVAL;
	}

	*result = dd;

	return 0;
}

int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
		  sector_t len, int mode, struct dm_dev **result)
{
	int r = __table_get_device(ti->table, ti, path,
				   start, len, mode, result);
	if (!r) {
		request_queue_t *q = bdev_get_queue((*result)->bdev);
		struct io_restrictions *rs = &ti->limits;

		/*
		 * Combine the device limits low.
		 *
		 * FIXME: if we move an io_restriction struct
		 *        into q this would just be a call to
		 *        combine_restrictions_low()
		 */
		rs->max_sectors =
			min_not_zero(rs->max_sectors, q->max_sectors);

		/* FIXME: Device-Mapper on top of RAID-0 breaks because DM
		 *        currently doesn't honor MD's merge_bvec_fn routine.
		 *        In this case, we'll force DM to use PAGE_SIZE or
		 *        smaller I/O, just to be safe. A better fix is in the
		 *        works, but add this for the time being so it will at
		 *        least operate correctly.
		 */
		if (q->merge_bvec_fn)
			rs->max_sectors =
				min_not_zero(rs->max_sectors,
					     (unsigned int) (PAGE_SIZE >> 9));

		rs->max_phys_segments =
			min_not_zero(rs->max_phys_segments,
				     q->max_phys_segments);

		rs->max_hw_segments =
			min_not_zero(rs->max_hw_segments, q->max_hw_segments);

		rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);

		rs->max_segment_size =
			min_not_zero(rs->max_segment_size, q->max_segment_size);

		rs->seg_boundary_mask =
			min_not_zero(rs->seg_boundary_mask,
				     q->seg_boundary_mask);

		rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	}

	return r;
}

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
{
	if (atomic_dec_and_test(&dd->count)) {
		close_dev(dd, ti->table->md);
		list_del(&dd->list);
		kfree(dd);
	}
}

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
	struct dm_target *prev;

	if (!table->num_targets)
		return !ti->begin;

	prev = &table->targets[table->num_targets - 1];
	return (ti->begin == (prev->begin + prev->len));
}

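/*
 * Example: if the previous target begins at sector 0 with length
 * 100, the next target must begin at sector 100 exactly; a begin
 * of 101 would leave a one-sector hole, and the load fails below
 * with "Gap in table".
 */
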
/*
 * Used to dynamically allocate the arg array.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
	char **argv;
	unsigned new_size;

	new_size = *array_size ? *array_size * 2 : 64;
	argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
	if (argv) {
		memcpy(argv, old_argv, *array_size * sizeof(*argv));
		*array_size = new_size;
	}

	kfree(old_argv);
	return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
	char *start, *end = input, *out, **argv = NULL;
	unsigned array_size = 0;

	*argc = 0;
	argv = realloc_argv(&array_size, argv);
	if (!argv)
		return -ENOMEM;

	while (1) {
		start = end;

		/* Skip whitespace */
		while (*start && isspace(*start))
			start++;

		if (!*start)
			break;	/* success, we hit the end */

		/* 'out' is used to strip the backslash quoting */
		end = out = start;
		while (*end) {
			/* Everything apart from '\0' can be quoted */
			if (*end == '\\' && *(end + 1)) {
				*out++ = *(end + 1);
				end += 2;
				continue;
			}

			if (isspace(*end))
				break;	/* end of token */

			*out++ = *end++;
		}

		/* have we already filled the array? */
		if ((*argc + 1) > array_size) {
			argv = realloc_argv(&array_size, argv);
			if (!argv)
				return -ENOMEM;
		}

		/* we know this is whitespace */
		if (*end)
			end++;

		/* terminate the string and put it in the array */
		*out = '\0';
		argv[*argc] = start;
		(*argc)++;
	}

	*argvp = argv;
	return 0;
}

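/*
 * Example: the parameter string "/dev/hda 512" splits into
 * argc == 2, argv == { "/dev/hda", "512" }; a backslash quotes
 * the following character, so "a\ b" parses as the single
 * argument "a b".
 */
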
static void check_for_valid_limits(struct io_restrictions *rs)
{
	if (!rs->max_sectors)
		rs->max_sectors = SAFE_MAX_SECTORS;
	if (!rs->max_phys_segments)
		rs->max_phys_segments = MAX_PHYS_SEGMENTS;
	if (!rs->max_hw_segments)
		rs->max_hw_segments = MAX_HW_SEGMENTS;
	if (!rs->hardsect_size)
		rs->hardsect_size = 1 << SECTOR_SHIFT;
	if (!rs->max_segment_size)
		rs->max_segment_size = MAX_SEGMENT_SIZE;
	if (!rs->seg_boundary_mask)
		rs->seg_boundary_mask = -1;
}

int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params)
{
	int r = -EINVAL, argc;
	char **argv;
	struct dm_target *tgt;

	if ((r = check_space(t)))
		return r;

	tgt = t->targets + t->num_targets;
	memset(tgt, 0, sizeof(*tgt));

	if (!len) {
		DMERR("%s: zero-length target", dm_device_name(t->md));
		return -EINVAL;
	}

	tgt->type = dm_get_target_type(type);
	if (!tgt->type) {
		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
		      type);
		return -EINVAL;
	}

	tgt->table = t;
	tgt->begin = start;
	tgt->len = len;
	tgt->error = "Unknown error";

	/*
	 * Does this target adjoin the previous one?
	 */
	if (!adjoin(t, tgt)) {
		tgt->error = "Gap in table";
		r = -EINVAL;
		goto bad;
	}

	r = dm_split_args(&argc, &argv, params);
	if (r) {
		tgt->error = "couldn't split parameters (insufficient memory)";
		goto bad;
	}

	r = tgt->type->ctr(tgt, argc, argv);
	kfree(argv);
	if (r)
		goto bad;

	t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

	/* FIXME: the plan is to combine high here and then have
	 * the merge fn apply the target level restrictions. */
	combine_restrictions_low(&t->limits, &tgt->limits);
	return 0;

 bad:
	DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
	dm_put_target_type(tgt->type);
	return r;
}

static int setup_indexes(struct dm_table *t)
{
	int i;
	unsigned int total = 0;
	sector_t *indexes;

	/* allocate the space for *all* the indexes */
	for (i = t->depth - 2; i >= 0; i--) {
		t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
		total += t->counts[i];
	}

	indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
	if (!indexes)
		return -ENOMEM;

	/* set up internal nodes, bottom-up */
	for (i = t->depth - 2, total = 0; i >= 0; i--) {
		t->index[i] = indexes;
		indexes += (KEYS_PER_NODE * t->counts[i]);
		setup_btree_index(i, t);
	}

	return 0;
}

/*
 * Builds the btree to index the map.
 */
int dm_table_complete(struct dm_table *t)
{
	int r = 0;
	unsigned int leaf_nodes;

	check_for_valid_limits(&t->limits);

	/* how many indexes will the btree have? */
	leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
	t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

	/* leaf layer has already been set up */
	t->counts[t->depth - 1] = leaf_nodes;
	t->index[t->depth - 1] = t->highs;

	if (t->depth >= 2)
		r = setup_indexes(t);

	return r;
}

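/*
 * Example sizing, assuming KEYS_PER_NODE == 8 (64-byte nodes):
 * 100 targets need dm_div_up(100, 8) == 13 leaf nodes, giving
 * depth == 1 + int_log(13, 9) == 3, and setup_indexes() fills
 * counts[] = { 1, 2, 13 } bottom-up.
 */
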
static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context)
{
	mutex_lock(&_event_lock);
	t->event_fn = fn;
	t->event_context = context;
	mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
	/*
	 * You can no longer call dm_table_event() from interrupt
	 * context, use a bottom half instead.
	 */
	BUG_ON(in_interrupt());

	mutex_lock(&_event_lock);
	if (t->event_fn)
		t->event_fn(t->event_context);
	mutex_unlock(&_event_lock);
}

sector_t dm_table_get_size(struct dm_table *t)
{
	return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
	if (index >= t->num_targets)
		return NULL;

	return t->targets + index;
}

/*
 * Search the btree for the correct target.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
	unsigned int l, n = 0, k = 0;
	sector_t *node;

	for (l = 0; l < t->depth; l++) {
		n = get_child(n, k);
		node = get_node(t, l, n);

		for (k = 0; k < KEYS_PER_NODE; k++)
			if (node[k] >= sector)
				break;
	}

	return &t->targets[(KEYS_PER_NODE * n) + k];
}

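/*
 * Example lookup with a single leaf node holding highs
 * { 99, 199, 299 }: searching for sector 150 stops at the first
 * key >= 150, so k == 1 and targets[1] (sectors 100-199) is
 * returned.
 */
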
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
{
	/*
	 * Make sure we obey the optimistic sub devices
	 * restrictions.
	 */
	blk_queue_max_sectors(q, t->limits.max_sectors);
	q->max_phys_segments = t->limits.max_phys_segments;
	q->max_hw_segments = t->limits.max_hw_segments;
	q->hardsect_size = t->limits.hardsect_size;
	q->max_segment_size = t->limits.max_segment_size;
	q->seg_boundary_mask = t->limits.seg_boundary_mask;
	if (t->limits.no_cluster)
		q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);
	else
		q->queue_flags |= (1 << QUEUE_FLAG_CLUSTER);
}

unsigned int dm_table_get_num_targets(struct dm_table *t)
{
	return t->num_targets;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
	return &t->devices;
}

int dm_table_get_mode(struct dm_table *t)
{
	return t->mode;
}

static void suspend_targets(struct dm_table *t, unsigned postsuspend)
{
	int i = t->num_targets;
	struct dm_target *ti = t->targets;

	while (i--) {
		if (postsuspend) {
			if (ti->type->postsuspend)
				ti->type->postsuspend(ti);
		} else if (ti->type->presuspend)
			ti->type->presuspend(ti);

		ti++;
	}
}

void dm_table_presuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 0);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
	if (!t)
		return;

	suspend_targets(t, 1);
}

void dm_table_resume_targets(struct dm_table *t)
{
	int i;

	for (i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = t->targets + i;

		if (ti->type->resume)
			ti->type->resume(ti);
	}
}

int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
	struct list_head *d, *devices;
	int r = 0;

	devices = dm_table_get_devices(t);
	for (d = devices->next; d != devices; d = d->next) {
		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
		request_queue_t *q = bdev_get_queue(dd->bdev);

		r |= bdi_congested(&q->backing_dev_info, bdi_bits);
	}

	return r;
}

void dm_table_unplug_all(struct dm_table *t)
{
	struct list_head *d, *devices = dm_table_get_devices(t);

	for (d = devices->next; d != devices; d = d->next) {
		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
		request_queue_t *q = bdev_get_queue(dd->bdev);

		if (q->unplug_fn)
			q->unplug_fn(q);
	}
}

int dm_table_flush_all(struct dm_table *t)
{
	struct list_head *d, *devices = dm_table_get_devices(t);
	int ret = 0;

	for (d = devices->next; d != devices; d = d->next) {
		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
		request_queue_t *q = bdev_get_queue(dd->bdev);
		int err;

		if (!q->issue_flush_fn)
			err = -EOPNOTSUPP;
		else
			err = q->issue_flush_fn(q, dd->bdev->bd_disk, NULL);

		if (!ret)
			ret = err;
	}

	return ret;
}

struct mapped_device *dm_table_get_md(struct dm_table *t)
{
	return t->md;
}

EXPORT_SYMBOL(dm_vcalloc);
EXPORT_SYMBOL(dm_get_device);
EXPORT_SYMBOL(dm_put_device);
EXPORT_SYMBOL(dm_table_event);
EXPORT_SYMBOL(dm_table_get_size);
EXPORT_SYMBOL(dm_table_get_mode);
EXPORT_SYMBOL(dm_table_get_md);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);
EXPORT_SYMBOL(dm_table_unplug_all);
EXPORT_SYMBOL(dm_table_flush_all);