/*
 * MTD device concatenation layer
 *
 * (C) 2002 Robert Kaiser <rkaiser@sysgo.de>
 *
 * NAND support by Christian Gan <cgan@iders.ca>
 *
 * This code is GPL
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/backing-dev.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

#include <asm/div64.h>
/*
 * Our storage structure:
 * subdev points to an array of pointers to struct mtd_info objects
 * which is allocated along with this structure
 */
struct mtd_concat {
	struct mtd_info mtd;
	int num_subdev;
	struct mtd_info **subdev;
};
/*
 * how to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
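/*
 * A single allocation of SIZEOF_STRUCT_MTD_CONCAT(n) therefore holds
 * the struct mtd_concat immediately followed by the n-entry pointer
 * array; mtd_concat_create() below sets subdev = (concat + 1) to
 * point at that trailing array.
 */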
/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 */
#define CONCAT(x) ((struct mtd_concat *)(x))
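/*
 * The cast is valid because the mtd member is the first field of
 * struct mtd_concat, so a pointer to the embedded struct mtd_info
 * also points at the containing structure (the same layout trick
 * that container_of() generalizes).
 */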
/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */
static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
	    size_t * retlen, u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int ret = 0, err;
	int i;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = subdev->read(subdev, from, size, &retsize, buf);

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (err == -EBADMSG) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (err == -EUCLEAN) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		*retlen += retsize;
		len -= size;
		if (!len)
			return ret;

		buf += size;
		from = 0;
	}
	return -EINVAL;
}
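/*
 * Note the precedence of the two soft errors above: -EUCLEAN only
 * reports corrected bitflips, so it must never overwrite a sticky
 * -EBADMSG (uncorrectable error) already recorded in `ret' for an
 * earlier chunk of the same request.
 */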
static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
	     size_t * retlen, const u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			/* Not destined for this subdev */
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - to;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		if (!(subdev->flags & MTD_WRITEABLE))
			err = -EROFS;
		else
			err = subdev->write(subdev, to, size, &retsize, buf);

		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		to = 0;
	}
	return err;
}
static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
	      unsigned long count, loff_t to, size_t * retlen)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct kvec *vecs_copy;
	unsigned long entry_low, entry_high;
	size_t total_len = 0;
	int i;
	int err = -EINVAL;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	*retlen = 0;

	/* Calculate total length of data */
	for (i = 0; i < count; i++)
		total_len += vecs[i].iov_len;

	/* Do not allow write past end of device */
	if ((to + total_len) > mtd->size)
		return -EINVAL;

	/* Check alignment */
	if (mtd->writesize > 1) {
		uint64_t __to = to;
		if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
			return -EINVAL;
	}

	/* make a copy of vecs */
	vecs_copy = kmalloc(sizeof(struct kvec) * count, GFP_KERNEL);
	if (!vecs_copy)
		return -ENOMEM;
	memcpy(vecs_copy, vecs, sizeof(struct kvec) * count);

	entry_low = 0;
	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, wsize, retsize, old_iov_len;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		size = min_t(uint64_t, total_len, subdev->size - to);
		wsize = size;	/* store for future use */

		entry_high = entry_low;
		while (entry_high < count) {
			if (size <= vecs_copy[entry_high].iov_len)
				break;
			size -= vecs_copy[entry_high++].iov_len;
		}

		old_iov_len = vecs_copy[entry_high].iov_len;
		vecs_copy[entry_high].iov_len = size;

		if (!(subdev->flags & MTD_WRITEABLE))
			err = -EROFS;
		else
			err = subdev->writev(subdev, &vecs_copy[entry_low],
				entry_high - entry_low + 1, to, &retsize);

		vecs_copy[entry_high].iov_len = old_iov_len - size;
		vecs_copy[entry_high].iov_base += size;

		entry_low = entry_high;

		if (err)
			break;

		*retlen += retsize;
		total_len -= wsize;

		if (total_len == 0)
			break;

		err = -EINVAL;
		to = 0;
	}

	kfree(vecs_copy);
	return err;
}
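/*
 * Worked example of the kvec splitting above (hypothetical numbers):
 * two 512-byte kvecs written across a subdevice boundary that lies
 * 768 bytes past `to'. The first subdev->writev() call covers vec 0
 * plus the first 256 bytes of vec 1 (whose iov_len is temporarily
 * truncated to 256); afterwards vec 1's base is advanced by 256 and
 * its length restored to the remaining 256 bytes, so it becomes the
 * first entry of the next subdevice's call.
 */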
static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err, ret = 0;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (from >= subdev->size) {
			from -= subdev->size;
			continue;
		}

		/* partial read ? */
		if (from + devops.len > subdev->size)
			devops.len = subdev->size - from;

		err = subdev->read_oob(subdev, from, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (err == -EBADMSG) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (err == -EUCLEAN) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return ret;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return ret;
			devops.oobbuf += ops->oobretlen;
		}

		from = 0;
	}
	return -EINVAL;
}
static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		/* partial write ? */
		if (to + devops.len > subdev->size)
			devops.len = subdev->size - to;

		err = subdev->write_oob(subdev, to, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;
		if (err)
			return err;

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return 0;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return 0;
			devops.oobbuf += devops.oobretlen;
		}

		to = 0;
	}
	return -EINVAL;
}
static void concat_erase_callback(struct erase_info *instr)
{
	wake_up((wait_queue_head_t *) instr->priv);
}
static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	int err;
	wait_queue_head_t waitq;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * This code was stol^H^H^H^Hinspired by mtdchar.c
	 */
	init_waitqueue_head(&waitq);

	erase->mtd = mtd;
	erase->callback = concat_erase_callback;
	erase->priv = (unsigned long) &waitq;

	/*
	 * FIXME: Allow INTERRUPTIBLE. Which means
	 * not having the wait_queue head on the stack.
	 */
	err = mtd->erase(mtd, erase);
	if (!err) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&waitq, &wait);
		if (erase->state != MTD_ERASE_DONE
		    && erase->state != MTD_ERASE_FAILED)
			schedule();
		remove_wait_queue(&waitq, &wait);
		set_current_state(TASK_RUNNING);

		err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
	}
	return err;
}
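/*
 * The effect is to turn the subdevice's asynchronous erase (whose
 * completion is signalled through the callback) into a synchronous
 * call, so concat_erase() below can walk the affected subdevices
 * sequentially while reusing a single struct erase_info.
 */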
static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_info *subdev;
	int i, err;
	uint64_t length, offset = 0;
	struct erase_info *erase;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (instr->addr > concat->mtd.size)
		return -EINVAL;

	if (instr->len + instr->addr > concat->mtd.size)
		return -EINVAL;
	/*
	 * Check for proper erase block alignment of the to-be-erased area.
	 * It is easier to do this based on the super device's erase
	 * region info rather than looking at each particular sub-device
	 * independently.
	 */
	if (!concat->mtd.numeraseregions) {
		/* the easy case: device has uniform erase block size */
		if (instr->addr & (concat->mtd.erasesize - 1))
			return -EINVAL;
		if (instr->len & (concat->mtd.erasesize - 1))
			return -EINVAL;
	} else {
		/* device has variable erase size */
		struct mtd_erase_region_info *erase_regions =
		    concat->mtd.eraseregions;

		/*
		 * Find the erase region where the to-be-erased area begins:
		 */
		for (i = 0; i < concat->mtd.numeraseregions &&
		     instr->addr >= erase_regions[i].offset; i++) ;
		--i;

		/*
		 * Now erase_regions[i] is the region in which the
		 * to-be-erased area begins. Verify that the starting
		 * offset is aligned to this region's erase size:
		 */
		if (instr->addr & (erase_regions[i].erasesize - 1))
			return -EINVAL;

		/*
		 * now find the erase region where the to-be-erased area ends:
		 */
		for (; i < concat->mtd.numeraseregions &&
		     (instr->addr + instr->len) >= erase_regions[i].offset;
		     ++i) ;
		--i;

		/*
		 * check if the ending offset is aligned to this region's erase size
		 */
		if ((instr->addr + instr->len) & (erase_regions[i].erasesize - 1))
			return -EINVAL;
	}
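	/*
	 * Worked example (hypothetical layout): region 0 holds 64KiB
	 * blocks from offset 0, region 1 holds 8KiB blocks from offset
	 * 1MiB. An erase of addr=0xF0000, len=0x14000 starts in region 0
	 * and ends in region 1, so addr must be 64KiB-aligned and
	 * addr+len (0x104000) must be 8KiB-aligned -- both hold, so the
	 * request passes the check above.
	 */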
	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;

	/* make a local copy of instr to avoid modifying the caller's struct */
	erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
	if (!erase)
		return -ENOMEM;

	*erase = *instr;
	length = instr->len;
	/*
	 * find the subdevice where the to-be-erased area begins, adjust
	 * starting offset to be relative to the subdevice start
	 */
	for (i = 0; i < concat->num_subdev; i++) {
		subdev = concat->subdev[i];
		if (subdev->size <= erase->addr) {
			erase->addr -= subdev->size;
			offset += subdev->size;
		} else {
			break;
		}
	}

	/* must never happen since size limit has been verified above */
	BUG_ON(i >= concat->num_subdev);
	/* now do the erase: */
	err = 0;
	for (; length > 0; i++) {
		/* loop for all subdevices affected by this request */
		subdev = concat->subdev[i];	/* get current subdevice */

		/* limit length to subdevice's size: */
		if (erase->addr + length > subdev->size)
			erase->len = subdev->size - erase->addr;
		else
			erase->len = length;

		if (!(subdev->flags & MTD_WRITEABLE)) {
			err = -EROFS;
			break;
		}
		length -= erase->len;
		if ((err = concat_dev_erase(subdev, erase))) {
			/* sanity check: should never happen since
			 * block alignment has been checked above */
			BUG_ON(err == -EINVAL);
			if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
				instr->fail_addr = erase->fail_addr + offset;
			break;
		}

		/*
		 * erase->addr specifies the offset of the area to be
		 * erased *within the current subdevice*. It can be
		 * non-zero only the first time through this loop, i.e.
		 * for the first subdevice where blocks need to be erased.
		 * All the following erases must begin at the start of the
		 * current subdevice, i.e. at offset zero.
		 */
		erase->addr = 0;
		offset += subdev->size;
	}
	instr->state = erase->state;
	kfree(erase);
	if (err)
		return err;

	if (instr->callback)
		instr->callback(instr);
	return 0;
}
static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		if (subdev->lock) {
			err = subdev->lock(subdev, ofs, size);
			if (err)
				break;
		} else
			err = -EOPNOTSUPP;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}
	return err;
}
static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = 0;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		if (subdev->unlock) {
			err = subdev->unlock(subdev, ofs, size);
			if (err)
				break;
		} else
			err = -EOPNOTSUPP;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}
	return err;
}
static void concat_sync(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		subdev->sync(subdev);
	}
}
static int concat_suspend(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, rc = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		if ((rc = subdev->suspend(subdev)) < 0)
			return rc;
	}
	return rc;
}
static void concat_resume(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		subdev->resume(subdev);
	}
}
static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, res = 0;

	if (!concat->subdev[0]->block_isbad)
		return res;

	if (ofs > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		res = subdev->block_isbad(subdev, ofs);
		break;
	}
	return res;
}
static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	if (!concat->subdev[0]->block_markbad)
		return 0;

	if (ofs > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		err = subdev->block_markbad(subdev, ofs);
		if (!err)
			mtd->ecc_stats.badblocks++;
		break;
	}
	return err;
}
/*
 * try to support NOMMU mmaps on concatenated devices
 * - we don't support subdev spanning as we can't guarantee it'll work
 */
static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
					      unsigned long len,
					      unsigned long offset,
					      unsigned long flags)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (offset >= subdev->size) {
			offset -= subdev->size;
			continue;
		}

		/* we've found the subdev over which the mapping will reside */
		if (offset + len > subdev->size)
			return (unsigned long) -EINVAL;

		if (subdev->get_unmapped_area)
			return subdev->get_unmapped_area(subdev, len, offset,
							 flags);

		break;
	}

	return (unsigned long) -ENOSYS;
}
/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned on success, NULL on failure. This function does _not_
 * register any devices: this is the caller's responsibility.
 */
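/*
 * Example usage (a sketch, not taken from this file; the names
 * flash0/flash1 are hypothetical and error handling is elided):
 *
 *	struct mtd_info *parts[2] = { flash0, flash1 };
 *	struct mtd_info *merged;
 *
 *	merged = mtd_concat_create(parts, 2, "merged-flash");
 *	if (merged)
 *		add_mtd_device(merged);
 *
 * add_mtd_device() (or add_mtd_partitions()) performs the
 * registration step that mtd_concat_create() deliberately leaves
 * to the caller.
 */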
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
				   int num_devs,	/* number of subdevices */
				   const char *name)	/* name for the new device */
{
	int i;
	size_t size;
	struct mtd_concat *concat;
	uint32_t max_erasesize, curr_erasesize;
	int num_erase_region;
	printk(KERN_NOTICE "Concatenating MTD devices:\n");
	for (i = 0; i < num_devs; i++)
		printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
	printk(KERN_NOTICE "into device \"%s\"\n", name);
	/* allocate the device structure */
	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
	concat = kzalloc(size, GFP_KERNEL);
	if (!concat) {
		printk
		    ("memory allocation error while creating concatenated device \"%s\"\n",
		     name);
		return NULL;
	}
	concat->subdev = (struct mtd_info **) (concat + 1);
	/*
	 * Set up the new "super" device's MTD object structure, check for
	 * incompatibilities between the subdevices.
	 */
	concat->mtd.type = subdev[0]->type;
	concat->mtd.flags = subdev[0]->flags;
	concat->mtd.size = subdev[0]->size;
	concat->mtd.erasesize = subdev[0]->erasesize;
	concat->mtd.writesize = subdev[0]->writesize;
	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
	concat->mtd.oobsize = subdev[0]->oobsize;
	concat->mtd.oobavail = subdev[0]->oobavail;
	if (subdev[0]->writev)
		concat->mtd.writev = concat_writev;
	if (subdev[0]->read_oob)
		concat->mtd.read_oob = concat_read_oob;
	if (subdev[0]->write_oob)
		concat->mtd.write_oob = concat_write_oob;
	if (subdev[0]->block_isbad)
		concat->mtd.block_isbad = concat_block_isbad;
	if (subdev[0]->block_markbad)
		concat->mtd.block_markbad = concat_block_markbad;

	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

	concat->mtd.backing_dev_info = subdev[0]->backing_dev_info;

	concat->subdev[0] = subdev[0];
	for (i = 1; i < num_devs; i++) {
		if (concat->mtd.type != subdev[i]->type) {
			kfree(concat);
			printk("Incompatible device type on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		if (concat->mtd.flags != subdev[i]->flags) {
			/*
			 * Expect all flags except MTD_WRITEABLE to be
			 * equal on all subdevices.
			 */
			if ((concat->mtd.flags ^ subdev[i]->
			     flags) & ~MTD_WRITEABLE) {
				kfree(concat);
				printk("Incompatible device flags on \"%s\"\n",
				       subdev[i]->name);
				return NULL;
			} else
				/* if writeable attribute differs,
				   make super device writeable */
				concat->mtd.flags |=
				    subdev[i]->flags & MTD_WRITEABLE;
		}

		/* only permit direct mapping if the BDIs are all the same
		 * - copy-mapping is still permitted
		 */
		if (concat->mtd.backing_dev_info !=
		    subdev[i]->backing_dev_info)
			concat->mtd.backing_dev_info =
			    &default_backing_dev_info;

		concat->mtd.size += subdev[i]->size;
		concat->mtd.ecc_stats.badblocks +=
		    subdev[i]->ecc_stats.badblocks;
		if (concat->mtd.writesize != subdev[i]->writesize ||
		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
		    concat->mtd.oobsize != subdev[i]->oobsize ||
		    !concat->mtd.read_oob != !subdev[i]->read_oob ||
		    !concat->mtd.write_oob != !subdev[i]->write_oob) {
			kfree(concat);
			printk("Incompatible OOB or ECC data on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		concat->subdev[i] = subdev[i];
	}
	concat->mtd.ecclayout = subdev[0]->ecclayout;

	concat->num_subdev = num_devs;
	concat->mtd.name = name;

	concat->mtd.erase = concat_erase;
	concat->mtd.read = concat_read;
	concat->mtd.write = concat_write;
	concat->mtd.sync = concat_sync;
	concat->mtd.lock = concat_lock;
	concat->mtd.unlock = concat_unlock;
	concat->mtd.suspend = concat_suspend;
	concat->mtd.resume = concat_resume;
	concat->mtd.get_unmapped_area = concat_get_unmapped_area;
	/*
	 * Combine the erase block size info of the subdevices:
	 *
	 * first, walk the map of the new device and see how
	 * many changes in erase size we have
	 */
	max_erasesize = curr_erasesize = subdev[0]->erasesize;
	num_erase_region = 1;
	for (i = 0; i < num_devs; i++) {
		if (subdev[i]->numeraseregions == 0) {
			/* current subdevice has uniform erase size */
			if (subdev[i]->erasesize != curr_erasesize) {
				/* if it differs from the last subdevice's erase size, count it */
				++num_erase_region;
				curr_erasesize = subdev[i]->erasesize;
				if (curr_erasesize > max_erasesize)
					max_erasesize = curr_erasesize;
			}
		} else {
			/* current subdevice has variable erase size */
			int j;
			for (j = 0; j < subdev[i]->numeraseregions; j++) {
				/* walk the list of erase regions, count any changes */
				if (subdev[i]->eraseregions[j].erasesize !=
				    curr_erasesize) {
					++num_erase_region;
					curr_erasesize =
					    subdev[i]->eraseregions[j].
					    erasesize;
					if (curr_erasesize > max_erasesize)
						max_erasesize = curr_erasesize;
				}
			}
		}
	}
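	/*
	 * Example (hypothetical): concatenating a uniform 64KiB-block
	 * chip with a boot-block chip whose regions are 64KiB then 8KiB
	 * yields num_erase_region = 2 -- the adjacent 64KiB runs of both
	 * chips merge into one region, because only *changes* in erase
	 * size are counted by the loop above.
	 */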
	if (num_erase_region == 1) {
		/*
		 * All subdevices have the same uniform erase size.
		 * This is easy:
		 */
		concat->mtd.erasesize = curr_erasesize;
		concat->mtd.numeraseregions = 0;
	} else {
		uint64_t tmp64;

		/*
		 * erase block size varies across the subdevices: allocate
		 * space to store the data describing the variable erase regions
		 */
		struct mtd_erase_region_info *erase_region_p;
		uint64_t begin, position;
		concat->mtd.erasesize = max_erasesize;
		concat->mtd.numeraseregions = num_erase_region;
		concat->mtd.eraseregions = erase_region_p =
		    kmalloc(num_erase_region *
			    sizeof (struct mtd_erase_region_info), GFP_KERNEL);
		if (!erase_region_p) {
			kfree(concat);
			printk
			    ("memory allocation error while creating erase region list"
			     " for device \"%s\"\n", name);
			return NULL;
		}
		/*
		 * walk the map of the new device once more and fill in
		 * erase region info:
		 */
		curr_erasesize = subdev[0]->erasesize;
		begin = position = 0;
		for (i = 0; i < num_devs; i++) {
			if (subdev[i]->numeraseregions == 0) {
				/* current subdevice has uniform erase size */
				if (subdev[i]->erasesize != curr_erasesize) {
					/*
					 * fill in an mtd_erase_region_info structure for the area
					 * we have walked so far:
					 */
					erase_region_p->offset = begin;
					erase_region_p->erasesize =
					    curr_erasesize;
					tmp64 = position - begin;
					do_div(tmp64, curr_erasesize);
					erase_region_p->numblocks = tmp64;
					begin = position;

					curr_erasesize = subdev[i]->erasesize;
					++erase_region_p;
				}
				position += subdev[i]->size;
			} else {
				/* current subdevice has variable erase size */
				int j;
				for (j = 0; j < subdev[i]->numeraseregions; j++) {
					/* walk the list of erase regions, count any changes */
					if (subdev[i]->eraseregions[j].
					    erasesize != curr_erasesize) {
						erase_region_p->offset = begin;
						erase_region_p->erasesize =
						    curr_erasesize;
						tmp64 = position - begin;
						do_div(tmp64, curr_erasesize);
						erase_region_p->numblocks = tmp64;
						begin = position;

						curr_erasesize =
						    subdev[i]->eraseregions[j].
						    erasesize;
						++erase_region_p;
					}
					position +=
					    subdev[i]->eraseregions[j].
					    numblocks * (uint64_t)curr_erasesize;
				}
			}
		}
		/* Now write the final entry */
		erase_region_p->offset = begin;
		erase_region_p->erasesize = curr_erasesize;
		tmp64 = position - begin;
		do_div(tmp64, curr_erasesize);
		erase_region_p->numblocks = tmp64;
	}

	return &concat->mtd;
}
/*
 * This function destroys an MTD object obtained from mtd_concat_create()
 * and frees the associated memory.
 */
void mtd_concat_destroy(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	if (concat->mtd.numeraseregions)
		kfree(concat->mtd.eraseregions);
	kfree(concat);
}
EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenating MTD devices");