/*
 * drivers/mtd/mtdconcat.c: MTD device concatenation layer
 *
 * (C) 2002 Robert Kaiser <rkaiser@sysgo.de>
 *
 * NAND support by Christian Gan <cgan@iders.ca>
 *
 * This code is GPL
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/backing-dev.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

#include <asm/div64.h>

/*
 * Our storage structure:
 * subdev points to an array of pointers to struct mtd_info objects
 * which is allocated along with this structure
 */
struct mtd_concat {
        struct mtd_info mtd;
        int num_subdev;
        struct mtd_info **subdev;
};

/*
 * how to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)    \
        ((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
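/*
 * (The pointer array is carved out of the same allocation: see the
 * "concat->subdev = (struct mtd_info **) (concat + 1)" assignment in
 * mtd_concat_create() below.)
 */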

/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 */
#define CONCAT(x)  ((struct mtd_concat *)(x))
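/*
 * This works because the mtd member is the first field of struct
 * mtd_concat: the cast is effectively a container_of() with offset
 * zero, so both structures share the same pointer value.
 */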

/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */
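/*
 * They all follow the same pattern: subtract each subdevice's size
 * from the offset until it falls inside a subdevice, clamp the request
 * to that subdevice's end, pass it down, and continue at offset 0 of
 * the next subdevice.
 */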

static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
            size_t * retlen, u_char * buf)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int ret = 0, err;
        int i;

        *retlen = 0;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                size_t size, retsize;

                if (from >= subdev->size) {
                        /* Not destined for this subdev */
                        size = 0;
                        from -= subdev->size;
                        continue;
                }
                if (from + len > subdev->size)
                        /* First part goes into this subdev */
                        size = subdev->size - from;
                else
                        /* Entire transaction goes into this subdev */
                        size = len;

                err = subdev->read(subdev, from, size, &retsize, buf);

                /* Save information about bitflips! */
                if (unlikely(err)) {
                        if (err == -EBADMSG) {
                                mtd->ecc_stats.failed++;
                                ret = err;
                        } else if (err == -EUCLEAN) {
                                mtd->ecc_stats.corrected++;
                                /* Do not overwrite -EBADMSG !! */
                                if (!ret)
                                        ret = err;
                        } else
                                return err;
                }

                *retlen += retsize;
                len -= size;
                if (len == 0)
                        return ret;

                buf += size;
                from = 0;
        }
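        /*
         * Falling out of the loop means the request extended past the
         * end of the last subdevice.
         */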
        return -EINVAL;
}

static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
             size_t * retlen, const u_char * buf)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int err = -EINVAL;
        int i;

        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;

        *retlen = 0;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                size_t size, retsize;

                if (to >= subdev->size) {
                        size = 0;
                        to -= subdev->size;
                        continue;
                }
                if (to + len > subdev->size)
                        size = subdev->size - to;
                else
                        size = len;

                if (!(subdev->flags & MTD_WRITEABLE))
                        err = -EROFS;
                else
                        err = subdev->write(subdev, to, size, &retsize, buf);

                if (err)
                        break;

                *retlen += retsize;
                len -= size;
                if (len == 0)
                        break;

                err = -EINVAL;
                buf += size;
                to = 0;
        }
        return err;
}

static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
                unsigned long count, loff_t to, size_t * retlen)
{
        struct mtd_concat *concat = CONCAT(mtd);
        struct kvec *vecs_copy;
        unsigned long entry_low, entry_high;
        size_t total_len = 0;
        int i;
        int err = -EINVAL;

        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;

        *retlen = 0;

        /* Calculate total length of data */
        for (i = 0; i < count; i++)
                total_len += vecs[i].iov_len;

        /* Do not allow write past end of device */
        if ((to + total_len) > mtd->size)
                return -EINVAL;

        /* Check alignment */
        if (mtd->writesize > 1) {
                uint64_t __to = to;
                if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
                        return -EINVAL;
        }
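        /*
         * Note: do_div() (from asm/div64.h) divides its first argument
         * in place and returns the remainder; it is used above because
         * 'to' is 64-bit and a plain '%' would need libgcc helpers on
         * 32-bit platforms.
         */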

        /* make a copy of vecs */
        vecs_copy = kmalloc(sizeof(struct kvec) * count, GFP_KERNEL);
        if (!vecs_copy)
                return -ENOMEM;
        memcpy(vecs_copy, vecs, sizeof(struct kvec) * count);

        entry_low = 0;
        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                size_t size, wsize, retsize, old_iov_len;

                if (to >= subdev->size) {
                        to -= subdev->size;
                        continue;
                }

                size = min_t(uint64_t, total_len, subdev->size - to);
                wsize = size; /* store for future use */

                entry_high = entry_low;
                while (entry_high < count) {
                        if (size <= vecs_copy[entry_high].iov_len)
                                break;
                        size -= vecs_copy[entry_high++].iov_len;
                }

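                /*
                 * vecs_copy[entry_low..entry_high] now holds this
                 * subdevice's share of the data. The boundary vector
                 * may straddle the subdevice, so it is temporarily
                 * trimmed to the part that fits ('size' bytes); the
                 * remainder is handed to the next iteration below.
                 */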
                old_iov_len = vecs_copy[entry_high].iov_len;
                vecs_copy[entry_high].iov_len = size;

                if (!(subdev->flags & MTD_WRITEABLE))
                        err = -EROFS;
                else
                        err = subdev->writev(subdev, &vecs_copy[entry_low],
                                entry_high - entry_low + 1, to, &retsize);

                vecs_copy[entry_high].iov_len = old_iov_len - size;
                vecs_copy[entry_high].iov_base += size;

                entry_low = entry_high;

                if (err)
                        break;

                *retlen += retsize;
                total_len -= wsize;

                if (total_len == 0)
                        break;

                err = -EINVAL;
                to = 0;
        }

        kfree(vecs_copy);
        return err;
}

static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
        struct mtd_concat *concat = CONCAT(mtd);
        struct mtd_oob_ops devops = *ops;
        int i, err, ret = 0;

        ops->retlen = ops->oobretlen = 0;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];

                if (from >= subdev->size) {
                        from -= subdev->size;
                        continue;
                }

                /* partial read ? */
                if (from + devops.len > subdev->size)
                        devops.len = subdev->size - from;

                err = subdev->read_oob(subdev, from, &devops);
                ops->retlen += devops.retlen;
                ops->oobretlen += devops.oobretlen;

                /* Save information about bitflips! */
                if (unlikely(err)) {
                        if (err == -EBADMSG) {
                                mtd->ecc_stats.failed++;
                                ret = err;
                        } else if (err == -EUCLEAN) {
                                mtd->ecc_stats.corrected++;
                                /* Do not overwrite -EBADMSG !! */
                                if (!ret)
                                        ret = err;
                        } else
                                return err;
                }

                if (devops.datbuf) {
                        devops.len = ops->len - ops->retlen;
                        if (!devops.len)
                                return ret;
                        devops.datbuf += devops.retlen;
                }
                if (devops.oobbuf) {
                        devops.ooblen = ops->ooblen - ops->oobretlen;
                        if (!devops.ooblen)
                                return ret;
                        /* advance by this pass's OOB count, not the running total */
                        devops.oobbuf += devops.oobretlen;
                }

                from = 0;
        }
        return -EINVAL;
}

static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
        struct mtd_concat *concat = CONCAT(mtd);
        struct mtd_oob_ops devops = *ops;
        int i, err;

        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;

        ops->retlen = 0;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];

                if (to >= subdev->size) {
                        to -= subdev->size;
                        continue;
                }

                /* partial write ? */
                if (to + devops.len > subdev->size)
                        devops.len = subdev->size - to;

                err = subdev->write_oob(subdev, to, &devops);
                ops->retlen += devops.retlen;
                /* keep the OOB tally too; devops.ooblen below depends on it */
                ops->oobretlen += devops.oobretlen;
                if (err)
                        return err;

                if (devops.datbuf) {
                        devops.len = ops->len - ops->retlen;
                        if (!devops.len)
                                return 0;
                        devops.datbuf += devops.retlen;
                }
                if (devops.oobbuf) {
                        devops.ooblen = ops->ooblen - ops->oobretlen;
                        if (!devops.ooblen)
                                return 0;
                        devops.oobbuf += devops.oobretlen;
                }
                to = 0;
        }
        return -EINVAL;
}

static void concat_erase_callback(struct erase_info *instr)
{
        wake_up((wait_queue_head_t *) instr->priv);
}

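/*
 * mtd->erase() is asynchronous: it may return as soon as the request
 * has been queued, and completion is signalled through erase->callback.
 * This helper makes the call synchronous by sleeping on a wait queue
 * that concat_erase_callback() wakes.
 */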
static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
        int err;
        wait_queue_head_t waitq;
        DECLARE_WAITQUEUE(wait, current);

        /*
         * This code was stol^H^H^H^Hinspired by mtdchar.c
         */
        init_waitqueue_head(&waitq);

        erase->mtd = mtd;
        erase->callback = concat_erase_callback;
        erase->priv = (unsigned long) &waitq;

        /*
         * FIXME: Allow INTERRUPTIBLE. Which means
         * not having the wait_queue head on the stack.
         */
        err = mtd->erase(mtd, erase);
        if (!err) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&waitq, &wait);
                if (erase->state != MTD_ERASE_DONE
                    && erase->state != MTD_ERASE_FAILED)
                        schedule();
                remove_wait_queue(&waitq, &wait);
                set_current_state(TASK_RUNNING);

                err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
        }
        return err;
}

static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
        struct mtd_concat *concat = CONCAT(mtd);
        struct mtd_info *subdev;
        int i, err;
        uint64_t length, offset = 0;
        struct erase_info *erase;

        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;

        if (instr->addr > concat->mtd.size)
                return -EINVAL;

        if (instr->len + instr->addr > concat->mtd.size)
                return -EINVAL;

        /*
         * Check for proper erase block alignment of the to-be-erased area.
         * It is easier to do this based on the super device's erase
         * region info rather than looking at each particular sub-device
         * in turn.
         */
        if (!concat->mtd.numeraseregions) {
                /* the easy case: device has uniform erase block size */
                if (instr->addr & (concat->mtd.erasesize - 1))
                        return -EINVAL;
                if (instr->len & (concat->mtd.erasesize - 1))
                        return -EINVAL;
        } else {
                /* device has variable erase size */
                struct mtd_erase_region_info *erase_regions =
                    concat->mtd.eraseregions;

                /*
                 * Find the erase region where the to-be-erased area begins:
                 */
                for (i = 0; i < concat->mtd.numeraseregions &&
                     instr->addr >= erase_regions[i].offset; i++) ;
                --i;

                /*
                 * Now erase_regions[i] is the region in which the
                 * to-be-erased area begins. Verify that the starting
                 * offset is aligned to this region's erase size:
                 */
                if (instr->addr & (erase_regions[i].erasesize - 1))
                        return -EINVAL;

                /*
                 * now find the erase region where the to-be-erased area ends:
                 */
                for (; i < concat->mtd.numeraseregions &&
                     (instr->addr + instr->len) >= erase_regions[i].offset;
                     ++i) ;
                --i;
                /*
                 * check if the ending offset is aligned to this region's erase size
                 */
                if ((instr->addr + instr->len) & (erase_regions[i].erasesize - 1))
                        return -EINVAL;
        }

        instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;

        /* make a local copy of instr to avoid modifying the caller's struct */
        erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
        if (!erase)
                return -ENOMEM;

        *erase = *instr;
        length = instr->len;

        /*
         * find the subdevice where the to-be-erased area begins, adjust
         * starting offset to be relative to the subdevice start
         */
        for (i = 0; i < concat->num_subdev; i++) {
                subdev = concat->subdev[i];
                if (subdev->size <= erase->addr) {
                        erase->addr -= subdev->size;
                        offset += subdev->size;
                } else {
                        break;
                }
        }

        /* must never happen since size limit has been verified above */
        BUG_ON(i >= concat->num_subdev);

        /* now do the erase: */
        err = 0;
        for (; length > 0; i++) {
                /* loop for all subdevices affected by this request */
                subdev = concat->subdev[i];     /* get current subdevice */

                /* limit length to subdevice's size: */
                if (erase->addr + length > subdev->size)
                        erase->len = subdev->size - erase->addr;
                else
                        erase->len = length;

                if (!(subdev->flags & MTD_WRITEABLE)) {
                        err = -EROFS;
                        break;
                }
                length -= erase->len;
                if ((err = concat_dev_erase(subdev, erase))) {
                        /* sanity check: should never happen since
                         * block alignment has been checked above */
                        BUG_ON(err == -EINVAL);
                        if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
                                instr->fail_addr = erase->fail_addr + offset;
                        break;
                }
                /*
                 * erase->addr specifies the offset of the area to be
                 * erased *within the current subdevice*. It can be
                 * non-zero only the first time through this loop, i.e.
                 * for the first subdevice where blocks need to be erased.
                 * All the following erases must begin at the start of the
                 * current subdevice, i.e. at offset zero.
                 */
                erase->addr = 0;
                offset += subdev->size;
        }
        instr->state = erase->state;
        kfree(erase);
        if (err)
                return err;

        if (instr->callback)
                instr->callback(instr);
        return 0;
}

static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i, err = -EINVAL;

        if ((len + ofs) > mtd->size)
                return -EINVAL;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                uint64_t size;

                if (ofs >= subdev->size) {
                        size = 0;
                        ofs -= subdev->size;
                        continue;
                }
                if (ofs + len > subdev->size)
                        size = subdev->size - ofs;
                else
                        size = len;

                err = subdev->lock(subdev, ofs, size);

                if (err)
                        break;

                len -= size;
                if (len == 0)
                        break;

                err = -EINVAL;
                ofs = 0;
        }

        return err;
}

static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i, err = 0;

        if ((len + ofs) > mtd->size)
                return -EINVAL;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                uint64_t size;

                if (ofs >= subdev->size) {
                        size = 0;
                        ofs -= subdev->size;
                        continue;
                }
                if (ofs + len > subdev->size)
                        size = subdev->size - ofs;
                else
                        size = len;

                err = subdev->unlock(subdev, ofs, size);

                if (err)
                        break;

                len -= size;
                if (len == 0)
                        break;

                err = -EINVAL;
                ofs = 0;
        }

        return err;
}

static void concat_sync(struct mtd_info *mtd)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                subdev->sync(subdev);
        }
}

static int concat_suspend(struct mtd_info *mtd)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i, rc = 0;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                if ((rc = subdev->suspend(subdev)) < 0)
                        return rc;
        }
        return rc;
}

static void concat_resume(struct mtd_info *mtd)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                subdev->resume(subdev);
        }
}

static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i, res = 0;

        if (!concat->subdev[0]->block_isbad)
                return res;

        if (ofs > mtd->size)
                return -EINVAL;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];

                if (ofs >= subdev->size) {
                        ofs -= subdev->size;
                        continue;
                }

                res = subdev->block_isbad(subdev, ofs);
                break;
        }

        return res;
}

static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i, err = -EINVAL;

        if (!concat->subdev[0]->block_markbad)
                return 0;

        if (ofs > mtd->size)
                return -EINVAL;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];

                if (ofs >= subdev->size) {
                        ofs -= subdev->size;
                        continue;
                }

                err = subdev->block_markbad(subdev, ofs);
                if (!err)
                        mtd->ecc_stats.badblocks++;
                break;
        }

        return err;
}

/*
 * try to support NOMMU mmaps on concatenated devices
 * - we don't support subdev spanning as we can't guarantee it'll work
 */
static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
                                              unsigned long len,
                                              unsigned long offset,
                                              unsigned long flags)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];

                if (offset >= subdev->size) {
                        offset -= subdev->size;
                        continue;
                }

                /* we've found the subdev over which the mapping will reside */
                if (offset + len > subdev->size)
                        return (unsigned long) -EINVAL;

                if (subdev->get_unmapped_area)
                        return subdev->get_unmapped_area(subdev, len, offset,
                                                         flags);

                break;
        }

        return (unsigned long) -ENOSYS;
}

/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned on success, NULL otherwise. This function does _not_
 * register any devices: this is the caller's responsibility.
 */
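/*
 * Illustrative sketch of the calling convention (not from an in-tree
 * driver; the subdevice pointers and the name below are made up).
 * Registration is the caller's job, e.g. via add_mtd_device():
 *
 *	struct mtd_info *parts[2] = { flash0_mtd, flash1_mtd };
 *	struct mtd_info *merged;
 *
 *	merged = mtd_concat_create(parts, 2, "flash-concat");
 *	if (merged)
 *		add_mtd_device(merged);
 *
 * and on teardown:
 *
 *	del_mtd_device(merged);
 *	mtd_concat_destroy(merged);
 */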
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],   /* subdevices to concatenate */
                                   int num_devs,        /* number of subdevices      */
                                   const char *name)    /* name for the new device   */
{
        int i;
        size_t size;
        struct mtd_concat *concat;
        uint32_t max_erasesize, curr_erasesize;
        int num_erase_region;

        printk(KERN_NOTICE "Concatenating MTD devices:\n");
        for (i = 0; i < num_devs; i++)
                printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
        printk(KERN_NOTICE "into device \"%s\"\n", name);

        /* allocate the device structure */
        size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
        concat = kzalloc(size, GFP_KERNEL);
        if (!concat) {
                printk
                    ("memory allocation error while creating concatenated device \"%s\"\n",
                     name);
                return NULL;
        }
        concat->subdev = (struct mtd_info **) (concat + 1);

        /*
         * Set up the new "super" device's MTD object structure, check for
         * incompatibilities between the subdevices.
         */
        concat->mtd.type = subdev[0]->type;
        concat->mtd.flags = subdev[0]->flags;
        concat->mtd.size = subdev[0]->size;
        concat->mtd.erasesize = subdev[0]->erasesize;
        concat->mtd.writesize = subdev[0]->writesize;
        concat->mtd.subpage_sft = subdev[0]->subpage_sft;
        concat->mtd.oobsize = subdev[0]->oobsize;
        concat->mtd.oobavail = subdev[0]->oobavail;
        if (subdev[0]->writev)
                concat->mtd.writev = concat_writev;
        if (subdev[0]->read_oob)
                concat->mtd.read_oob = concat_read_oob;
        if (subdev[0]->write_oob)
                concat->mtd.write_oob = concat_write_oob;
        if (subdev[0]->block_isbad)
                concat->mtd.block_isbad = concat_block_isbad;
        if (subdev[0]->block_markbad)
                concat->mtd.block_markbad = concat_block_markbad;

        concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

        concat->mtd.backing_dev_info = subdev[0]->backing_dev_info;

        concat->subdev[0] = subdev[0];

        for (i = 1; i < num_devs; i++) {
                if (concat->mtd.type != subdev[i]->type) {
                        kfree(concat);
                        printk("Incompatible device type on \"%s\"\n",
                               subdev[i]->name);
                        return NULL;
                }
                if (concat->mtd.flags != subdev[i]->flags) {
                        /*
                         * Expect all flags except MTD_WRITEABLE to be
                         * equal on all subdevices.
                         */
                        if ((concat->mtd.flags ^ subdev[i]->flags) &
                            ~MTD_WRITEABLE) {
                                kfree(concat);
                                printk("Incompatible device flags on \"%s\"\n",
                                       subdev[i]->name);
                                return NULL;
                        } else
                                /* if writeable attribute differs,
                                   make super device writeable */
                                concat->mtd.flags |=
                                    subdev[i]->flags & MTD_WRITEABLE;
                }

                /* only permit direct mapping if the BDIs are all the same
                 * - copy-mapping is still permitted
                 */
                if (concat->mtd.backing_dev_info !=
                    subdev[i]->backing_dev_info)
                        concat->mtd.backing_dev_info =
                                &default_backing_dev_info;

                concat->mtd.size += subdev[i]->size;
                concat->mtd.ecc_stats.badblocks +=
                        subdev[i]->ecc_stats.badblocks;
                if (concat->mtd.writesize != subdev[i]->writesize ||
                    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
                    concat->mtd.oobsize != subdev[i]->oobsize ||
                    !concat->mtd.read_oob != !subdev[i]->read_oob ||
                    !concat->mtd.write_oob != !subdev[i]->write_oob) {
                        kfree(concat);
                        printk("Incompatible OOB or ECC data on \"%s\"\n",
                               subdev[i]->name);
                        return NULL;
                }
                concat->subdev[i] = subdev[i];
        }

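        /*
         * NOTE: the ECC layout of the first subdevice is used for the
         * whole concatenation. The loop above only checks oobsize and
         * the presence of the OOB methods; the layouts themselves are
         * not compared.
         */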
        concat->mtd.ecclayout = subdev[0]->ecclayout;

        concat->num_subdev = num_devs;
        concat->mtd.name = name;

        concat->mtd.erase = concat_erase;
        concat->mtd.read = concat_read;
        concat->mtd.write = concat_write;
        concat->mtd.sync = concat_sync;
        concat->mtd.lock = concat_lock;
        concat->mtd.unlock = concat_unlock;
        concat->mtd.suspend = concat_suspend;
        concat->mtd.resume = concat_resume;
        concat->mtd.get_unmapped_area = concat_get_unmapped_area;

        /*
         * Combine the erase block size info of the subdevices:
         *
         * first, walk the map of the new device and see how
         * many changes in erase size we have
         */
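        /*
         * For example, concatenating one chip with uniform 64KiB blocks
         * and one with uniform 128KiB blocks leaves num_erase_region at
         * 2, so the second pass below describes each span with its own
         * mtd_erase_region_info.
         */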
        max_erasesize = curr_erasesize = subdev[0]->erasesize;
        num_erase_region = 1;
        for (i = 0; i < num_devs; i++) {
                if (subdev[i]->numeraseregions == 0) {
                        /* current subdevice has uniform erase size */
                        if (subdev[i]->erasesize != curr_erasesize) {
                                /* if it differs from the last subdevice's erase size, count it */
                                ++num_erase_region;
                                curr_erasesize = subdev[i]->erasesize;
                                if (curr_erasesize > max_erasesize)
                                        max_erasesize = curr_erasesize;
                        }
                } else {
                        /* current subdevice has variable erase size */
                        int j;
                        for (j = 0; j < subdev[i]->numeraseregions; j++) {
                                /* walk the list of erase regions, count any changes */
                                if (subdev[i]->eraseregions[j].erasesize !=
                                    curr_erasesize) {
                                        ++num_erase_region;
                                        curr_erasesize =
                                            subdev[i]->eraseregions[j].
                                            erasesize;
                                        if (curr_erasesize > max_erasesize)
                                                max_erasesize = curr_erasesize;
                                }
                        }
                }
        }

        if (num_erase_region == 1) {
                /*
                 * All subdevices have the same uniform erase size.
                 * This is easy:
                 */
                concat->mtd.erasesize = curr_erasesize;
                concat->mtd.numeraseregions = 0;
        } else {
                uint64_t tmp64;

                /*
                 * erase block size varies across the subdevices: allocate
                 * space to store the data describing the variable erase regions
                 */
                struct mtd_erase_region_info *erase_region_p;
                uint64_t begin, position;

                concat->mtd.erasesize = max_erasesize;
                concat->mtd.numeraseregions = num_erase_region;
                concat->mtd.eraseregions = erase_region_p =
                    kmalloc(num_erase_region *
                            sizeof (struct mtd_erase_region_info), GFP_KERNEL);
                if (!erase_region_p) {
                        kfree(concat);
                        printk
                            ("memory allocation error while creating erase region list"
                             " for device \"%s\"\n", name);
                        return NULL;
                }

                /*
                 * walk the map of the new device once more and fill
                 * in erase region info:
                 */
                curr_erasesize = subdev[0]->erasesize;
                begin = position = 0;
                for (i = 0; i < num_devs; i++) {
                        if (subdev[i]->numeraseregions == 0) {
                                /* current subdevice has uniform erase size */
                                if (subdev[i]->erasesize != curr_erasesize) {
                                        /*
                                         *  fill in an mtd_erase_region_info structure for the area
                                         *  we have walked so far:
                                         */
                                        erase_region_p->offset = begin;
                                        erase_region_p->erasesize =
                                            curr_erasesize;
                                        tmp64 = position - begin;
                                        do_div(tmp64, curr_erasesize);
                                        erase_region_p->numblocks = tmp64;
                                        begin = position;

                                        curr_erasesize = subdev[i]->erasesize;
                                        ++erase_region_p;
                                }
                                position += subdev[i]->size;
                        } else {
                                /* current subdevice has variable erase size */
                                int j;
                                for (j = 0; j < subdev[i]->numeraseregions; j++) {
                                        /* walk the list of erase regions, count any changes */
                                        if (subdev[i]->eraseregions[j].
                                            erasesize != curr_erasesize) {
                                                erase_region_p->offset = begin;
                                                erase_region_p->erasesize =
                                                    curr_erasesize;
                                                tmp64 = position - begin;
                                                do_div(tmp64, curr_erasesize);
                                                erase_region_p->numblocks = tmp64;
                                                begin = position;

                                                curr_erasesize =
                                                    subdev[i]->eraseregions[j].
                                                    erasesize;
                                                ++erase_region_p;
                                        }
                                        position +=
                                            subdev[i]->eraseregions[j].
                                            numblocks * (uint64_t)curr_erasesize;
                                }
                        }
                }
                /* Now write the final entry */
                erase_region_p->offset = begin;
                erase_region_p->erasesize = curr_erasesize;
                tmp64 = position - begin;
                do_div(tmp64, curr_erasesize);
                erase_region_p->numblocks = tmp64;
        }

        return &concat->mtd;
}

/*
 * This function destroys an MTD object obtained from mtd_concat_create()
 */

void mtd_concat_destroy(struct mtd_info *mtd)
{
        struct mtd_concat *concat = CONCAT(mtd);
        if (concat->mtd.numeraseregions)
                kfree(concat->mtd.eraseregions);
        kfree(concat);
}

EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenating MTD devices");