/*
 * rfd_ftl.c -- resident flash disk (flash translation layer)
 *
 * Copyright (C) 2005 Sean Young <sean@mess.org>
 *
 * $Id: rfd_ftl.c,v 1.5 2005/11/07 11:14:21 gleixner Exp $
 *
 * This type of flash translation layer (FTL) is used by the Embedded BIOS
 * by General Software. It is known as the Resident Flash Disk (RFD), see:
 *
 *	http://www.gensw.com/pages/prod/bios/rfd.htm
 */
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/jiffies.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>

#include <asm/types.h>
25 #define const_cpu_to_le16 __constant_cpu_to_le16
27 static int block_size = 0;
28 module_param(block_size, int, 0);
29 MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");
31 #define PREFIX "rfd_ftl: "
33 /* Major device # for FTL device */
35 /* A request for this major has been sent to device@lanana.org */
37 #define RFD_FTL_MAJOR 95
40 /* Maximum number of partitions in an FTL region */
43 /* An erase unit should start with this value */
44 #define RFD_MAGIC 0x9193
46 /* the second value is 0xffff or 0xffc8; function unknown */
48 /* the third value is always 0xffff, ignored */
50 /* next is an array of mapping for each corresponding sector */
51 #define HEADER_MAP_OFFSET 3
52 #define SECTOR_DELETED 0x0000
53 #define SECTOR_ZERO 0xfffe
54 #define SECTOR_FREE 0xffff
56 #define SECTOR_SIZE 512
58 #define SECTORS_PER_TRACK 63
74 struct mtd_blktrans_dev mbd;
76 u_int block_size; /* size of erase unit */
77 u_int total_blocks; /* number of erase units */
78 u_int header_sectors_per_block; /* header sectors in erase unit */
79 u_int data_sectors_per_block; /* data sectors in erase unit */
80 u_int sector_count; /* sectors in translated disk */
81 u_int header_size; /* bytes in header sector */
82 int reserved_block; /* block next up for reclaim */
83 int current_block; /* block to write to */
84 u16 *header_cache; /* cached header */
93 static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);
95 static int build_block_map(struct partition *part, int block_no)
97 struct block *block = &part->blocks[block_no];
100 block->offset = part->block_size * block_no;
102 if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
103 block->state = BLOCK_ERASED; /* assumption */
104 block->free_sectors = part->data_sectors_per_block;
105 part->reserved_block = block_no;
109 block->state = BLOCK_OK;
111 for (i=0; i<part->data_sectors_per_block; i++) {
114 entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);
116 if (entry == SECTOR_DELETED)
119 if (entry == SECTOR_FREE) {
120 block->free_sectors++;
124 if (entry == SECTOR_ZERO)
127 if (entry >= part->sector_count) {
128 printk(KERN_NOTICE PREFIX
129 "'%s': unit #%d: entry %d corrupt, "
130 "sector %d out of range\n",
131 part->mbd.mtd->name, block_no, i, entry);
135 if (part->sector_map[entry] != -1) {
136 printk(KERN_NOTICE PREFIX
137 "'%s': more than one entry for sector %d\n",
138 part->mbd.mtd->name, entry);
143 part->sector_map[entry] = block->offset +
144 (i + part->header_sectors_per_block) * SECTOR_SIZE;
146 block->used_sectors++;
149 if (block->free_sectors == part->data_sectors_per_block)
150 part->reserved_block = block_no;
155 static int scan_header(struct partition *part)
157 int sectors_per_block;
162 sectors_per_block = part->block_size / SECTOR_SIZE;
163 part->total_blocks = part->mbd.mtd->size / part->block_size;
165 if (part->total_blocks < 2)
168 /* each erase block has three bytes header, followed by the map */
169 part->header_sectors_per_block =
170 ((HEADER_MAP_OFFSET + sectors_per_block) *
171 sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;
173 part->data_sectors_per_block = sectors_per_block -
174 part->header_sectors_per_block;
176 part->header_size = (HEADER_MAP_OFFSET +
177 part->data_sectors_per_block) * sizeof(u16);
179 part->cylinders = (part->data_sectors_per_block *
180 (part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;
182 part->sector_count = part->cylinders * SECTORS_PER_TRACK;
184 part->current_block = -1;
185 part->reserved_block = -1;
186 part->is_reclaiming = 0;
188 part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
189 if (!part->header_cache)
192 part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
197 part->sector_map = vmalloc(part->sector_count * sizeof(u_long));
198 if (!part->sector_map) {
199 printk(KERN_ERR PREFIX "'%s': unable to allocate memory for "
200 "sector map", part->mbd.mtd->name);
204 for (i=0; i<part->sector_count; i++)
205 part->sector_map[i] = -1;
207 for (i=0, blocks_found=0; i<part->total_blocks; i++) {
208 rc = part->mbd.mtd->read(part->mbd.mtd,
209 i * part->block_size, part->header_size,
210 &retlen, (u_char*)part->header_cache);
212 if (!rc && retlen != part->header_size)
218 if (!build_block_map(part, i))
222 if (blocks_found == 0) {
223 printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
224 part->mbd.mtd->name);
229 if (part->reserved_block == -1) {
230 printk(KERN_NOTICE PREFIX "'%s': no empty erase unit found\n",
231 part->mbd.mtd->name);
239 vfree(part->sector_map);
240 kfree(part->header_cache);
246 static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
248 struct partition *part = (struct partition*)dev;
253 if (sector >= part->sector_count)
256 addr = part->sector_map[sector];
258 rc = part->mbd.mtd->read(part->mbd.mtd, addr, SECTOR_SIZE,
259 &retlen, (u_char*)buf);
260 if (!rc && retlen != SECTOR_SIZE)
264 printk(KERN_WARNING PREFIX "error reading '%s' at "
265 "0x%lx\n", part->mbd.mtd->name, addr);
269 memset(buf, 0, SECTOR_SIZE);
274 static void erase_callback(struct erase_info *erase)
276 struct partition *part;
281 part = (struct partition*)erase->priv;
283 i = erase->addr / part->block_size;
284 if (i >= part->total_blocks || part->blocks[i].offset != erase->addr) {
285 printk(KERN_ERR PREFIX "erase callback for unknown offset %x "
286 "on '%s'\n", erase->addr, part->mbd.mtd->name);
290 if (erase->state != MTD_ERASE_DONE) {
291 printk(KERN_WARNING PREFIX "erase failed at 0x%x on '%s', "
292 "state %d\n", erase->addr,
293 part->mbd.mtd->name, erase->state);
295 part->blocks[i].state = BLOCK_FAILED;
296 part->blocks[i].free_sectors = 0;
297 part->blocks[i].used_sectors = 0;
304 magic = const_cpu_to_le16(RFD_MAGIC);
306 part->blocks[i].state = BLOCK_ERASED;
307 part->blocks[i].free_sectors = part->data_sectors_per_block;
308 part->blocks[i].used_sectors = 0;
309 part->blocks[i].erases++;
311 rc = part->mbd.mtd->write(part->mbd.mtd,
312 part->blocks[i].offset, sizeof(magic), &retlen,
315 if (!rc && retlen != sizeof(magic))
319 printk(KERN_NOTICE PREFIX "'%s': unable to write RFD "
322 part->blocks[i].offset);
323 part->blocks[i].state = BLOCK_FAILED;
326 part->blocks[i].state = BLOCK_OK;
331 static int erase_block(struct partition *part, int block)
333 struct erase_info *erase;
336 erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
340 erase->mtd = part->mbd.mtd;
341 erase->callback = erase_callback;
342 erase->addr = part->blocks[block].offset;
343 erase->len = part->block_size;
344 erase->priv = (u_long)part;
346 part->blocks[block].state = BLOCK_ERASING;
347 part->blocks[block].free_sectors = 0;
349 rc = part->mbd.mtd->erase(part->mbd.mtd, erase);
352 printk(KERN_WARNING PREFIX "erase of region %x,%x on '%s' "
353 "failed\n", erase->addr, erase->len,
354 part->mbd.mtd->name);
362 static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
369 part->is_reclaiming = 1;
371 sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
375 map = kmalloc(part->header_size, GFP_KERNEL);
379 rc = part->mbd.mtd->read(part->mbd.mtd,
380 part->blocks[block_no].offset, part->header_size,
381 &retlen, (u_char*)map);
383 if (!rc && retlen != part->header_size)
387 printk(KERN_NOTICE PREFIX "error reading '%s' at "
388 "0x%lx\n", part->mbd.mtd->name,
389 part->blocks[block_no].offset);
394 for (i=0; i<part->data_sectors_per_block; i++) {
395 u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
399 if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
402 if (entry == SECTOR_ZERO)
405 /* already warned about and ignored in build_block_map() */
406 if (entry >= part->sector_count)
409 addr = part->blocks[block_no].offset +
410 (i + part->header_sectors_per_block) * SECTOR_SIZE;
412 if (*old_sector == addr) {
414 if (!part->blocks[block_no].used_sectors--) {
415 rc = erase_block(part, block_no);
420 rc = part->mbd.mtd->read(part->mbd.mtd, addr,
421 SECTOR_SIZE, &retlen, sector_data);
423 if (!rc && retlen != SECTOR_SIZE)
427 printk(KERN_NOTICE PREFIX "'%s': Unable to "
428 "read sector for relocation\n",
429 part->mbd.mtd->name);
434 rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
446 part->is_reclaiming = 0;
451 static int reclaim_block(struct partition *part, u_long *old_sector)
453 int block, best_block, score, old_sector_block;
456 /* we have a race if sync doesn't exist */
457 if (part->mbd.mtd->sync)
458 part->mbd.mtd->sync(part->mbd.mtd);
460 score = 0x7fffffff; /* MAX_INT */
462 if (*old_sector != -1)
463 old_sector_block = *old_sector / part->block_size;
465 old_sector_block = -1;
467 for (block=0; block<part->total_blocks; block++) {
470 if (block == part->reserved_block)
474 * Postpone reclaiming if there is a free sector as
475 * more removed sectors is more efficient (have to move
478 if (part->blocks[block].free_sectors)
481 this_score = part->blocks[block].used_sectors;
483 if (block == old_sector_block)
486 /* no point in moving a full block */
487 if (part->blocks[block].used_sectors ==
488 part->data_sectors_per_block)
492 this_score += part->blocks[block].erases;
494 if (this_score < score) {
500 if (best_block == -1)
503 part->current_block = -1;
504 part->reserved_block = best_block;
506 pr_debug("reclaim_block: reclaiming block #%d with %d used "
507 "%d free sectors\n", best_block,
508 part->blocks[best_block].used_sectors,
509 part->blocks[best_block].free_sectors);
511 if (part->blocks[best_block].used_sectors)
512 rc = move_block_contents(part, best_block, old_sector);
514 rc = erase_block(part, best_block);
520 * IMPROVE: It would be best to choose the block with the most deleted sectors,
521 * because if we fill that one up first it'll have the most chance of having
522 * the least live sectors at reclaim.
524 static int find_free_block(const struct partition *part)
528 block = part->current_block == -1 ?
529 jiffies % part->total_blocks : part->current_block;
533 if (part->blocks[block].free_sectors &&
534 block != part->reserved_block)
537 if (++block >= part->total_blocks)
540 } while (block != stop);
545 static int find_writeable_block(struct partition *part, u_long *old_sector)
550 block = find_free_block(part);
553 if (!part->is_reclaiming) {
554 rc = reclaim_block(part, old_sector);
558 block = find_free_block(part);
567 rc = part->mbd.mtd->read(part->mbd.mtd, part->blocks[block].offset,
568 part->header_size, &retlen, (u_char*)part->header_cache);
570 if (!rc && retlen != part->header_size)
574 printk(KERN_NOTICE PREFIX "'%s': unable to read header at "
575 "0x%lx\n", part->mbd.mtd->name,
576 part->blocks[block].offset);
580 part->current_block = block;
586 static int mark_sector_deleted(struct partition *part, u_long old_addr)
588 int block, offset, rc;
591 u16 del = const_cpu_to_le16(SECTOR_DELETED);
593 block = old_addr / part->block_size;
594 offset = (old_addr % part->block_size) / SECTOR_SIZE -
595 part->header_sectors_per_block;
597 addr = part->blocks[block].offset +
598 (HEADER_MAP_OFFSET + offset) * sizeof(u16);
599 rc = part->mbd.mtd->write(part->mbd.mtd, addr,
600 sizeof(del), &retlen, (u_char*)&del);
602 if (!rc && retlen != sizeof(del))
606 printk(KERN_WARNING PREFIX "error writing '%s' at "
607 "0x%lx\n", part->mbd.mtd->name, addr);
611 if (block == part->current_block)
612 part->header_cache[offset + HEADER_MAP_OFFSET] = del;
614 part->blocks[block].used_sectors--;
616 if (!part->blocks[block].used_sectors &&
617 !part->blocks[block].free_sectors)
618 rc = erase_block(part, block);
624 static int find_free_sector(const struct partition *part, const struct block *block)
628 i = stop = part->data_sectors_per_block - block->free_sectors;
631 if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
635 if (++i == part->data_sectors_per_block)
643 static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
645 struct partition *part = (struct partition*)dev;
653 if (part->current_block == -1 ||
654 !part->blocks[part->current_block].free_sectors) {
656 rc = find_writeable_block(part, old_addr);
661 block = &part->blocks[part->current_block];
663 i = find_free_sector(part, block);
670 addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
672 rc = part->mbd.mtd->write(part->mbd.mtd,
673 addr, SECTOR_SIZE, &retlen, (u_char*)buf);
675 if (!rc && retlen != SECTOR_SIZE)
679 printk(KERN_WARNING PREFIX "error writing '%s' at 0x%lx\n",
680 part->mbd.mtd->name, addr);
685 part->sector_map[sector] = addr;
687 entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);
689 part->header_cache[i + HEADER_MAP_OFFSET] = entry;
691 addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
692 rc = part->mbd.mtd->write(part->mbd.mtd, addr,
693 sizeof(entry), &retlen, (u_char*)&entry);
695 if (!rc && retlen != sizeof(entry))
699 printk(KERN_WARNING PREFIX "error writing '%s' at 0x%lx\n",
700 part->mbd.mtd->name, addr);
704 block->used_sectors++;
705 block->free_sectors--;
711 static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
713 struct partition *part = (struct partition*)dev;
718 pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);
720 if (part->reserved_block == -1) {
725 if (sector >= part->sector_count) {
730 old_addr = part->sector_map[sector];
732 for (i=0; i<SECTOR_SIZE; i++) {
736 rc = do_writesect(dev, sector, buf, &old_addr);
742 if (i == SECTOR_SIZE)
743 part->sector_map[sector] = -1;
746 rc = mark_sector_deleted(part, old_addr);
752 static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
754 struct partition *part = (struct partition*)dev;
757 geo->sectors = SECTORS_PER_TRACK;
758 geo->cylinders = part->cylinders;
763 static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
765 struct partition *part;
767 if (mtd->type != MTD_NORFLASH)
770 part = kcalloc(1, sizeof(struct partition), GFP_KERNEL);
777 part->block_size = block_size;
779 if (!mtd->erasesize) {
780 printk(KERN_NOTICE PREFIX "please provide block_size");
784 part->block_size = mtd->erasesize;
787 if (scan_header(part) == 0) {
788 part->mbd.size = part->sector_count;
789 part->mbd.blksize = SECTOR_SIZE;
791 part->mbd.devnum = -1;
792 if (!(mtd->flags & MTD_WRITEABLE))
793 part->mbd.readonly = 1;
794 else if (part->errors) {
795 printk(KERN_NOTICE PREFIX "'%s': errors found, "
796 "setting read-only", mtd->name);
797 part->mbd.readonly = 1;
800 printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
801 mtd->name, mtd->type, mtd->flags);
803 if (!add_mtd_blktrans_dev((void*)part))
810 static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
812 struct partition *part = (struct partition*)dev;
815 for (i=0; i<part->total_blocks; i++) {
816 pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
817 part->mbd.mtd->name, i, part->blocks[i].erases);
820 del_mtd_blktrans_dev(dev);
821 vfree(part->sector_map);
822 kfree(part->header_cache);
827 struct mtd_blktrans_ops rfd_ftl_tr = {
829 .major = RFD_FTL_MAJOR,
830 .part_bits = PART_BITS,
831 .readsect = rfd_ftl_readsect,
832 .writesect = rfd_ftl_writesect,
833 .getgeo = rfd_ftl_getgeo,
834 .add_mtd = rfd_ftl_add_mtd,
835 .remove_dev = rfd_ftl_remove_dev,
836 .owner = THIS_MODULE,
839 static int __init init_rfd_ftl(void)
841 return register_mtd_blktrans(&rfd_ftl_tr);
844 static void __exit cleanup_rfd_ftl(void)
846 deregister_mtd_blktrans(&rfd_ftl_tr);
849 module_init(init_rfd_ftl);
850 module_exit(cleanup_rfd_ftl);
852 MODULE_LICENSE("GPL");
853 MODULE_AUTHOR("Sean Young <sean@mess.org>");
854 MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
855 "used by General Software's Embedded BIOS");