/*
 * MTD map driver for AMD compatible flash chips (non-CFI)
 *
 * Author: Jonas Holmberg <jonas.holmberg@axis.com>
 *
 * $Id: amd_flash.c,v 1.28 2005/11/07 11:14:22 gleixner Exp $
 *
 * Copyright (c) 2001 Axis Communications AB
 *
 * This file is under GPL.
 */
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/kernel.h>
17 #include <linux/sched.h>
18 #include <linux/errno.h>
19 #include <linux/slab.h>
20 #include <linux/delay.h>
21 #include <linux/interrupt.h>
22 #include <linux/init.h>
23 #include <linux/mtd/map.h>
24 #include <linux/mtd/mtd.h>
25 #include <linux/mtd/flashchip.h>
/* There's no limit. It exists only to avoid realloc. */
#define MAX_AMD_CHIPS 8

/* Device data-bus width, expressed in bytes (x8/x16/x32 parts). */
#define DEVICE_TYPE_X8 (8 / 8)
#define DEVICE_TYPE_X16 (16 / 8)
#define DEVICE_TYPE_X32 (32 / 8)

/* Addresses, in device words; callers scale by map->buswidth. */
#define ADDR_MANUFACTURER 0x0000
#define ADDR_DEVICE_ID 0x0001
#define ADDR_SECTOR_LOCK 0x0002
#define ADDR_HANDSHAKE 0x0003
#define ADDR_UNLOCK_1 0x0555
#define ADDR_UNLOCK_2 0x02AA

/* Commands written during the JEDEC unlock/command cycles. */
#define CMD_UNLOCK_DATA_1 0x00AA
#define CMD_UNLOCK_DATA_2 0x0055
#define CMD_MANUFACTURER_UNLOCK_DATA 0x0090
#define CMD_UNLOCK_BYPASS_MODE 0x0020
#define CMD_PROGRAM_UNLOCK_DATA 0x00A0
#define CMD_RESET_DATA 0x00F0
#define CMD_SECTOR_ERASE_UNLOCK_DATA 0x0080
#define CMD_SECTOR_ERASE_UNLOCK_DATA_2 0x0030

#define CMD_UNLOCK_SECTOR 0x0060

/* JEDEC manufacturer IDs. */
#define MANUFACTURER_AMD 0x0001
#define MANUFACTURER_ATMEL 0x001F
#define MANUFACTURER_FUJITSU 0x0004
#define MANUFACTURER_ST 0x0020
#define MANUFACTURER_SST 0x00BF
#define MANUFACTURER_TOSHIBA 0x0098

/* AMD device IDs. */
#define AM29F800BB 0x2258
#define AM29F800BT 0x22D6
#define AM29LV800BB 0x225B
#define AM29LV800BT 0x22DA
#define AM29LV160DT 0x22C4
#define AM29LV160DB 0x2249
#define AM29BDS323D 0x22D1

/* Atmel device IDs. */
#define AT49xV16x 0x00C0
#define AT49xV16xT 0x00C2

/* Fujitsu device IDs. */
#define MBM29LV160TE 0x22C4
#define MBM29LV160BE 0x2249
#define MBM29LV800BB 0x225B

/* ST device IDs. */
#define M29W800T 0x00D7
#define M29W160DT 0x22C4
#define M29W160DB 0x2249

/* SST device IDs. */
#define SST39LF800 0x2781
#define SST39LF160 0x2782

/* Toshiba device IDs. */
#define TC58FVT160 0x00C2
#define TC58FVB160 0x0043
95 struct amd_flash_private {
99 unsigned long chipshift;
100 struct flchip chips[0];
103 struct amd_flash_info {
108 const int numeraseregions;
109 const struct mtd_erase_region_info regions[4];
114 static int amd_flash_read(struct mtd_info *, loff_t, size_t, size_t *,
116 static int amd_flash_write(struct mtd_info *, loff_t, size_t, size_t *,
118 static int amd_flash_erase(struct mtd_info *, struct erase_info *);
119 static void amd_flash_sync(struct mtd_info *);
120 static int amd_flash_suspend(struct mtd_info *);
121 static void amd_flash_resume(struct mtd_info *);
122 static void amd_flash_destroy(struct mtd_info *);
123 static struct mtd_info *amd_flash_probe(struct map_info *map);
126 static struct mtd_chip_driver amd_flash_chipdrv = {
127 .probe = amd_flash_probe,
128 .destroy = amd_flash_destroy,
130 .module = THIS_MODULE
133 static inline __u32 wide_read(struct map_info *map, __u32 addr)
135 if (map->buswidth == 1) {
136 return map_read8(map, addr);
137 } else if (map->buswidth == 2) {
138 return map_read16(map, addr);
139 } else if (map->buswidth == 4) {
140 return map_read32(map, addr);
146 static inline void wide_write(struct map_info *map, __u32 val, __u32 addr)
148 if (map->buswidth == 1) {
149 map_write8(map, val, addr);
150 } else if (map->buswidth == 2) {
151 map_write16(map, val, addr);
152 } else if (map->buswidth == 4) {
153 map_write32(map, val, addr);
157 static inline __u32 make_cmd(struct map_info *map, __u32 cmd)
159 const struct amd_flash_private *private = map->fldrv_priv;
160 if ((private->interleave == 2) &&
161 (private->device_type == DEVICE_TYPE_X16)) {
168 static inline void send_unlock(struct map_info *map, unsigned long base)
170 wide_write(map, (CMD_UNLOCK_DATA_1 << 16) | CMD_UNLOCK_DATA_1,
171 base + (map->buswidth * ADDR_UNLOCK_1));
172 wide_write(map, (CMD_UNLOCK_DATA_2 << 16) | CMD_UNLOCK_DATA_2,
173 base + (map->buswidth * ADDR_UNLOCK_2));
176 static inline void send_cmd(struct map_info *map, unsigned long base, __u32 cmd)
178 send_unlock(map, base);
179 wide_write(map, make_cmd(map, cmd),
180 base + (map->buswidth * ADDR_UNLOCK_1));
183 static inline void send_cmd_to_addr(struct map_info *map, unsigned long base,
184 __u32 cmd, unsigned long addr)
186 send_unlock(map, base);
187 wide_write(map, make_cmd(map, cmd), addr);
190 static inline int flash_is_busy(struct map_info *map, unsigned long addr,
194 if ((interleave == 2) && (map->buswidth == 4)) {
197 read1 = wide_read(map, addr);
198 read2 = wide_read(map, addr);
200 return (((read1 >> 16) & D6_MASK) !=
201 ((read2 >> 16) & D6_MASK)) ||
202 (((read1 & 0xffff) & D6_MASK) !=
203 ((read2 & 0xffff) & D6_MASK));
206 return ((wide_read(map, addr) & D6_MASK) !=
207 (wide_read(map, addr) & D6_MASK));
210 static inline void unlock_sector(struct map_info *map, unsigned long sect_addr,
213 /* Sector lock address. A6 = 1 for unlock, A6 = 0 for lock */
215 (sect_addr | (0x40 * map->buswidth)) :
216 (sect_addr & ~(0x40 * map->buswidth)) ;
218 __u32 cmd = make_cmd(map, CMD_UNLOCK_SECTOR);
220 wide_write(map, make_cmd(map, CMD_RESET_DATA), 0);
221 wide_write(map, cmd, SLA); /* 1st cycle: write cmd to any address */
222 wide_write(map, cmd, SLA); /* 2nd cycle: write cmd to any address */
223 wide_write(map, cmd, SLA); /* 3rd cycle: write cmd to SLA */
226 static inline int is_sector_locked(struct map_info *map,
227 unsigned long sect_addr)
231 wide_write(map, CMD_RESET_DATA, 0);
232 send_cmd(map, sect_addr, CMD_MANUFACTURER_UNLOCK_DATA);
234 /* status is 0x0000 for unlocked and 0x0001 for locked */
235 status = wide_read(map, sect_addr + (map->buswidth * ADDR_SECTOR_LOCK));
236 wide_write(map, CMD_RESET_DATA, 0);
240 static int amd_flash_do_unlock(struct mtd_info *mtd, loff_t ofs, size_t len,
243 struct map_info *map;
244 struct mtd_erase_region_info *merip;
245 int eraseoffset, erasesize, eraseblocks;
252 /* Pass the whole chip through sector by sector and check for each
253 sector if the sector and the given interval overlap */
254 for(i = 0; i < mtd->numeraseregions; i++) {
255 merip = &mtd->eraseregions[i];
257 eraseoffset = merip->offset;
258 erasesize = merip->erasesize;
259 eraseblocks = merip->numblocks;
261 if (ofs > eraseoffset + erasesize)
264 while (eraseblocks > 0) {
265 if (ofs < eraseoffset + erasesize && ofs + len > eraseoffset) {
266 unlock_sector(map, eraseoffset, is_unlock);
268 lock_status = is_sector_locked(map, eraseoffset);
270 if (is_unlock && lock_status) {
271 printk("Cannot unlock sector at address %x length %xx\n",
272 eraseoffset, merip->erasesize);
274 } else if (!is_unlock && !lock_status) {
275 printk("Cannot lock sector at address %x length %x\n",
276 eraseoffset, merip->erasesize);
280 eraseoffset += erasesize;
287 static int amd_flash_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
289 return amd_flash_do_unlock(mtd, ofs, len, 1);
292 static int amd_flash_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
294 return amd_flash_do_unlock(mtd, ofs, len, 0);
/*
 * Reads JEDEC manufacturer ID and device ID and returns the index of the first
 * matching table entry (-1 if not found or alias for already found chip).
 */
/*
 * NOTE(review): this function is incomplete in this copy of the file —
 * declarations (mfr_id, dev_id, i, j, mfr_id_other, dev_id_other), the
 * interleave-detection branch bodies and many closing braces are missing.
 * Code is kept exactly as found; only comments were added.
 */
static int probe_new_chip(struct mtd_info *mtd, __u32 base,
			  struct flchip *chips,
			  struct amd_flash_private *private,
			  const struct amd_flash_info *table, int table_size)
	struct map_info *map = mtd->priv;
	struct amd_flash_private temp;

	temp.device_type = DEVICE_TYPE_X16;	// Assume X16 (FIXME)
	map->fldrv_priv = &temp;

	/* Enter autoselect mode. */
	send_cmd(map, base, CMD_RESET_DATA);
	send_cmd(map, base, CMD_MANUFACTURER_UNLOCK_DATA);

	mfr_id = wide_read(map, base + (map->buswidth * ADDR_MANUFACTURER));
	dev_id = wide_read(map, base + (map->buswidth * ADDR_DEVICE_ID));

	/* On a 32-bit bus, identical IDs in both halfwords indicate two
	 * interleaved x16 chips. */
	if ((map->buswidth == 4) && ((mfr_id >> 16) == (mfr_id & 0xffff)) &&
	    ((dev_id >> 16) == (dev_id & 0xffff))) {

	for (i = 0; i < table_size; i++) {
		if ((mfr_id == table[i].mfr_id) &&
		    (dev_id == table[i].dev_id)) {

			/* Is this an alias for an already found chip?
			 * In that case that chip should be in
			 * autoselect mode now.
			 */
			for (j = 0; j < private->numchips; j++) {
				wide_read(map, chips[j].start +
				wide_read(map, chips[j].start +
				if (temp.interleave == 2) {
					mfr_id_other &= 0xffff;
					dev_id_other &= 0xffff;
				if ((mfr_id_other == mfr_id) &&
				    (dev_id_other == dev_id)) {

					/* Exit autoselect mode. */

			if (private->numchips == MAX_AMD_CHIPS) {
				"%s: Too many flash chips "
				"detected. Increase "
				"MAX_AMD_CHIPS from %d.\n",
				map->name, MAX_AMD_CHIPS);

			/* Record the new chip; its lock points at its own
			 * embedded spinlock. */
			chips[private->numchips].start = base;
			chips[private->numchips].state = FL_READY;
			chips[private->numchips].mutex =
				&chips[private->numchips]._spinlock;

			printk("%s: Found %d x %ldMiB %s at 0x%x\n", map->name,
			       temp.interleave, (table[i].size)/(1024*1024),
			       table[i].name, base);

			mtd->size += table[i].size * temp.interleave;
			mtd->numeraseregions += table[i].numeraseregions;

	/* Exit autoselect mode. */
	send_cmd(map, base, CMD_RESET_DATA);

	if (i == table_size) {
		printk(KERN_DEBUG "%s: unknown flash device at 0x%x, "
		       "mfr id 0x%x, dev id 0x%x\n", map->name,
		       base, mfr_id, dev_id);
		map->fldrv_priv = NULL;

	private->device_type = temp.device_type;
	private->interleave = temp.interleave;
/*
 * Probe the map for AMD-compatible (JEDEC, non-CFI) flash chips and build
 * the mtd_info describing all of them.
 * NOTE(review): this function is incomplete in this copy of the file — the
 * opening braces, every table entry's ".size" / ".regions = {" lines, many
 * locals (size, base, i, j, offset, reg_idx, dev_size) and most error paths
 * are missing.  Code is kept exactly as found; only comments were added.
 */
static struct mtd_info *amd_flash_probe(struct map_info *map)
	/* One entry per supported part: JEDEC IDs, name, erase layout. */
	static const struct amd_flash_info table[] = {
		.mfr_id = MANUFACTURER_AMD,
		.dev_id = AM29LV160DT,
		.name = "AMD AM29LV160DT",
		.numeraseregions = 4,
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
			{ .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
		.mfr_id = MANUFACTURER_AMD,
		.dev_id = AM29LV160DB,
		.name = "AMD AM29LV160DB",
		.numeraseregions = 4,
			{ .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
			{ .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
		.mfr_id = MANUFACTURER_TOSHIBA,
		.dev_id = TC58FVT160,
		.name = "Toshiba TC58FVT160",
		.numeraseregions = 4,
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
			{ .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
		.mfr_id = MANUFACTURER_FUJITSU,
		.dev_id = MBM29LV160TE,
		.name = "Fujitsu MBM29LV160TE",
		.numeraseregions = 4,
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
			{ .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
		.mfr_id = MANUFACTURER_TOSHIBA,
		.dev_id = TC58FVB160,
		.name = "Toshiba TC58FVB160",
		.numeraseregions = 4,
			{ .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
			{ .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
		.mfr_id = MANUFACTURER_FUJITSU,
		.dev_id = MBM29LV160BE,
		.name = "Fujitsu MBM29LV160BE",
		.numeraseregions = 4,
			{ .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
			{ .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
		.mfr_id = MANUFACTURER_AMD,
		.dev_id = AM29LV800BB,
		.name = "AMD AM29LV800BB",
		.numeraseregions = 4,
			{ .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
			{ .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
		.mfr_id = MANUFACTURER_AMD,
		.dev_id = AM29F800BB,
		.name = "AMD AM29F800BB",
		.numeraseregions = 4,
			{ .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
			{ .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
		.mfr_id = MANUFACTURER_AMD,
		.dev_id = AM29LV800BT,
		.name = "AMD AM29LV800BT",
		.numeraseregions = 4,
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
			{ .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
		.mfr_id = MANUFACTURER_AMD,
		.dev_id = AM29F800BT,
		.name = "AMD AM29F800BT",
		.numeraseregions = 4,
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
			{ .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
		.mfr_id = MANUFACTURER_AMD,
		.dev_id = AM29LV800BB,
		.name = "AMD AM29LV800BB",
		.numeraseregions = 4,
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
			{ .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
		.mfr_id = MANUFACTURER_FUJITSU,
		.dev_id = MBM29LV800BB,
		.name = "Fujitsu MBM29LV800BB",
		.numeraseregions = 4,
			{ .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
			{ .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
		.mfr_id = MANUFACTURER_ST,
		.name = "ST M29W800T",
		.numeraseregions = 4,
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
			{ .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
		.mfr_id = MANUFACTURER_ST,
		.name = "ST M29W160DT",
		.numeraseregions = 4,
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
			{ .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
		.mfr_id = MANUFACTURER_ST,
		.name = "ST M29W160DB",
		.numeraseregions = 4,
			{ .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
			{ .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
			{ .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
			{ .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
		.mfr_id = MANUFACTURER_AMD,
		.dev_id = AM29BDS323D,
		.name = "AMD AM29BDS323D",
		.numeraseregions = 3,
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 48 },
			{ .offset = 0x300000, .erasesize = 0x10000, .numblocks = 15 },
			{ .offset = 0x3f0000, .erasesize = 0x02000, .numblocks = 8 },
		.mfr_id = MANUFACTURER_ATMEL,
		.name = "Atmel AT49xV16x",
		.numeraseregions = 2,
			{ .offset = 0x000000, .erasesize = 0x02000, .numblocks = 8 },
			{ .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
		.mfr_id = MANUFACTURER_ATMEL,
		.dev_id = AT49xV16xT,
		.name = "Atmel AT49xV16xT",
		.numeraseregions = 2,
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
			{ .offset = 0x1F0000, .erasesize = 0x02000, .numblocks = 8 }

	struct mtd_info *mtd;
	struct flchip chips[MAX_AMD_CHIPS];
	int table_pos[MAX_AMD_CHIPS];
	struct amd_flash_private temp;
	struct amd_flash_private *private;

	mtd = (struct mtd_info*)kmalloc(sizeof(*mtd), GFP_KERNEL);
		"%s: kmalloc failed for info structure\n", map->name);
	memset(mtd, 0, sizeof(*mtd));

	memset(&temp, 0, sizeof(temp));

	printk("%s: Probing for AMD compatible flash...\n", map->name);

	/* The chip at offset zero must exist; it fixes the geometry. */
	if ((table_pos[0] = probe_new_chip(mtd, 0, NULL, &temp, table,
		       "%s: Found no AMD compatible device at location zero\n",

	chips[0].state = FL_READY;
	chips[0].mutex = &chips[0]._spinlock;
	/* Derive chipshift = log2 of the per-chip size. */
	for (size = mtd->size; size > 1; size >>= 1) {
	switch (temp.interleave) {
	/* Find out if there are any more chips in the map. */
	for (base = (1 << temp.chipshift);
	     base += (1 << temp.chipshift)) {
		int numchips = temp.numchips;
		table_pos[numchips] = probe_new_chip(mtd, base, chips,
			&temp, table, ARRAY_SIZE(table));

	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) *
				    mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "%s: Failed to allocate "
		       "memory for MTD erase region info\n", map->name);
		map->fldrv_priv = NULL;

	/* Flatten every chip's region table into one global list, offsetting
	 * each chip by its base and scaling by the interleave. */
	for (i = 0; i < temp.numchips; i++) {
		for (j = 0; j < table[table_pos[i]].numeraseregions; j++) {
			mtd->eraseregions[reg_idx].offset = offset +
				(table[table_pos[i]].regions[j].offset *
			mtd->eraseregions[reg_idx].erasesize =
				table[table_pos[i]].regions[j].erasesize *
			mtd->eraseregions[reg_idx].numblocks =
				table[table_pos[i]].regions[j].numblocks;
				mtd->eraseregions[reg_idx].erasesize) {
				mtd->eraseregions[reg_idx].erasesize;
			dev_size += mtd->eraseregions[reg_idx].erasesize *
				    mtd->eraseregions[reg_idx].numblocks;

	/* Fill in the MTD operations vector. */
	mtd->type = MTD_NORFLASH;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->name = map->name;
	mtd->erase = amd_flash_erase;
	mtd->read = amd_flash_read;
	mtd->write = amd_flash_write;
	mtd->sync = amd_flash_sync;
	mtd->suspend = amd_flash_suspend;
	mtd->resume = amd_flash_resume;
	mtd->lock = amd_flash_lock;
	mtd->unlock = amd_flash_unlock;

	/* Allocate the real private structure (header + per-chip array)
	 * and copy the temporary state into it. */
	private = kmalloc(sizeof(*private) + (sizeof(struct flchip) *
					      temp.numchips), GFP_KERNEL);
		"%s: kmalloc failed for private structure\n", map->name);
		map->fldrv_priv = NULL;
	memcpy(private, &temp, sizeof(temp));
	memcpy(private->chips, chips,
	       sizeof(struct flchip) * private->numchips);
	for (i = 0; i < private->numchips; i++) {
		init_waitqueue_head(&private->chips[i].wq);
		spin_lock_init(&private->chips[i]._spinlock);

	map->fldrv_priv = private;

	map->fldrv = &amd_flash_chipdrv;

	__module_get(THIS_MODULE);
/*
 * Read `len` bytes at chip-relative offset `adr` from one chip into `buf`,
 * first waiting (under chip->mutex) for the chip to reach FL_READY.
 * NOTE(review): incomplete in this copy — the schedule()/retry loop,
 * wake_up path, return statements and several braces are missing.
 * Code kept exactly as found; only comments added.
 */
static inline int read_one_chip(struct map_info *map, struct flchip *chip,
				loff_t adr, size_t len, u_char *buf)
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;

	spin_lock_bh(chip->mutex);

	if (chip->state != FL_READY){
		printk(KERN_INFO "%s: waiting for chip to read, state = %d\n",
		       map->name, chip->state);
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock_bh(chip->mutex);

		remove_wait_queue(&chip->wq, &wait);

		if(signal_pending(current)) {

		timeo = jiffies + HZ;

	chip->state = FL_READY;

	map_copy_from(map, buf, adr, len);

	spin_unlock_bh(chip->mutex);
/*
 * MTD read hook: split the request along chip boundaries (chipshift) and
 * read each piece via read_one_chip().
 * NOTE(review): incomplete in this copy — locals (chipnum, ofs, ret), the
 * loop tail that advances buf/retlen and the return path are missing.
 * Code kept exactly as found; only comments added.
 */
static int amd_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
			  size_t *retlen, u_char *buf)
	struct map_info *map = mtd->priv;
	struct amd_flash_private *private = map->fldrv_priv;

	if ((from + len) > mtd->size) {
		printk(KERN_WARNING "%s: read request past end of device "
		       "(0x%lx)\n", map->name, (unsigned long)from + len);

	/* Offset within the first chip that the first read should start. */
	chipnum = (from >> private->chipshift);
	ofs = from - (chipnum << private->chipshift);

		unsigned long this_len;

		if (chipnum >= private->numchips) {

		/* Clamp the piece to the end of the current chip. */
		if ((len + ofs - 1) >> private->chipshift) {
			this_len = (1 << private->chipshift) - ofs;

		ret = read_one_chip(map, &private->chips[chipnum], ofs,
/*
 * Program a single bus-wide word: wait for FL_READY, issue the program
 * command, poll the DQ6 toggle bit until done, then verify by read-back.
 * NOTE(review): incomplete in this copy — the wait/retry loop, times_left
 * declaration, error returns and several braces are missing.  Code kept
 * exactly as found; only comments added.
 */
static int write_one_word(struct map_info *map, struct flchip *chip,
			  unsigned long adr, __u32 datum)
	unsigned long timeo = jiffies + HZ;
	struct amd_flash_private *private = map->fldrv_priv;
	DECLARE_WAITQUEUE(wait, current);

	spin_lock_bh(chip->mutex);

	if (chip->state != FL_READY){
		printk("%s: waiting for chip to write, state = %d\n",
		       map->name, chip->state);
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock_bh(chip->mutex);

		remove_wait_queue(&chip->wq, &wait);
		printk(KERN_INFO "%s: woke up to write\n", map->name);
		if(signal_pending(current))

		timeo = jiffies + HZ;

	chip->state = FL_WRITING;

	/* Program sequence: unlock + A0 command, then the data word. */
	send_cmd(map, chip->start, CMD_PROGRAM_UNLOCK_DATA);
	wide_write(map, datum, adr);

	/* Poll the DQ6 toggle bit; yield the lock if we should resched. */
	while (times_left-- && flash_is_busy(map, adr, private->interleave)) {
		if (need_resched()) {
			spin_unlock_bh(chip->mutex);
			spin_lock_bh(chip->mutex);

		printk(KERN_WARNING "%s: write to 0x%lx timed out!\n",

	/* Verify by reading the word back. */
	if ((verify = wide_read(map, adr)) != datum) {
		printk(KERN_WARNING "%s: write to 0x%lx failed. "
		       "datum = %x, verify = %x\n",
		       map->name, adr, datum, verify);

	chip->state = FL_READY;

	spin_unlock_bh(chip->mutex);
/*
 * MTD write hook: handle an unaligned head via read-modify-write, stream
 * aligned bus-wide words, then handle an unaligned tail the same way,
 * crossing chip boundaries as needed.
 * NOTE(review): incomplete in this copy — locals (ret, chipnum, ofs, n, i,
 * datum, tmp_buf), the retlen initialisation, several braces and error
 * returns are missing.  Code kept exactly as found; only comments added.
 */
static int amd_flash_write(struct mtd_info *mtd, loff_t to , size_t len,
			   size_t *retlen, const u_char *buf)
	struct map_info *map = mtd->priv;
	struct amd_flash_private *private = map->fldrv_priv;
	unsigned long chipstart;

	chipnum = to >> private->chipshift;
	ofs = to - (chipnum << private->chipshift);
	chipstart = private->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write. */
	if (ofs & (map->buswidth - 1)) {
		unsigned long bus_ofs = ofs & ~(map->buswidth - 1);
		int i = ofs - bus_ofs;

		/* Read-modify-write: merge the new bytes into the
		 * existing bus word. */
		map_copy_from(map, tmp_buf,
			      bus_ofs + private->chips[chipnum].start,
		while (len && i < map->buswidth)
			tmp_buf[i++] = buf[n++], len--;

		if (map->buswidth == 2) {
			datum = *(__u16*)tmp_buf;
		} else if (map->buswidth == 4) {
			datum = *(__u32*)tmp_buf;
			return -EINVAL;  /* should never happen, but be safe */

		ret = write_one_word(map, &private->chips[chipnum], bus_ofs,

		/* Advance to the next chip if we crossed a boundary. */
		if (ofs >> private->chipshift) {
			if (chipnum == private->numchips) {

	/* We are now aligned, write as much as possible. */
	while(len >= map->buswidth) {

		if (map->buswidth == 1) {
			datum = *(__u8*)buf;
		} else if (map->buswidth == 2) {
			datum = *(__u16*)buf;
		} else if (map->buswidth == 4) {
			datum = *(__u32*)buf;

		ret = write_one_word(map, &private->chips[chipnum], ofs, datum);

		ofs += map->buswidth;
		buf += map->buswidth;
		(*retlen) += map->buswidth;
		len -= map->buswidth;

		if (ofs >> private->chipshift) {
			if (chipnum == private->numchips) {
			chipstart = private->chips[chipnum].start;

	/* Unaligned tail: another read-modify-write. */
	if (len & (map->buswidth - 1)) {

		map_copy_from(map, tmp_buf,
			      ofs + private->chips[chipnum].start,
			tmp_buf[i++] = buf[n++];

		if (map->buswidth == 2) {
			datum = *(__u16*)tmp_buf;
		} else if (map->buswidth == 4) {
			datum = *(__u32*)tmp_buf;
			return -EINVAL;  /* should never happen, but be safe */

		ret = write_one_word(map, &private->chips[chipnum], ofs, datum);
/*
 * Erase one block: wait for FL_READY, issue the two-stage sector-erase
 * command, poll DQ6 (honouring erase-suspend), then verify the block
 * reads back as all 0xFF.
 * NOTE(review): incomplete in this copy — locals (address, verify), the
 * schedule()/timeout bodies, returns and many braces are missing.  Code
 * kept exactly as found; only comments added.
 */
static inline int erase_one_block(struct map_info *map, struct flchip *chip,
				  unsigned long adr, u_long size)
	unsigned long timeo = jiffies + HZ;
	struct amd_flash_private *private = map->fldrv_priv;
	DECLARE_WAITQUEUE(wait, current);

	spin_lock_bh(chip->mutex);

	if (chip->state != FL_READY){
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock_bh(chip->mutex);

		remove_wait_queue(&chip->wq, &wait);

		if (signal_pending(current)) {

		timeo = jiffies + HZ;

	chip->state = FL_ERASING;

	/* Two-stage JEDEC sector erase: 80h, then 30h at the sector. */
	send_cmd(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA);
	send_cmd_to_addr(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA_2, adr);

	timeo = jiffies + (HZ * 20);

	spin_unlock_bh(chip->mutex);
	spin_lock_bh(chip->mutex);

	while (flash_is_busy(map, adr, private->interleave)) {

		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			spin_unlock_bh(chip->mutex);
			printk(KERN_INFO "%s: erase suspended. Sleeping\n",

			remove_wait_queue(&chip->wq, &wait);

			if (signal_pending(current)) {

			timeo = jiffies + (HZ*2); /* FIXME */
			spin_lock_bh(chip->mutex);

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_READY;
			spin_unlock_bh(chip->mutex);
			printk(KERN_WARNING "%s: waiting for erase to complete "
			       "timed out.\n", map->name);

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);

		spin_lock_bh(chip->mutex);

	/* Verify every single word */
		for (address = adr; address < (adr + size); address++) {
			if ((verify = map_read8(map, address)) != 0xFF) {
				chip->state = FL_READY;
				spin_unlock_bh(chip->mutex);
				       "%s: verify error at 0x%x, size %ld.\n",
				       map->name, address, size);

	chip->state = FL_READY;

	spin_unlock_bh(chip->mutex);
/*
 * MTD erase hook: validate that the request is region-aligned at both
 * ends, then erase block by block across chips, and signal completion via
 * the MTD callback.
 * NOTE(review): incomplete in this copy — locals (i, first, chipnum, ret),
 * the main erase loop header, error returns and many braces are missing.
 * Code kept exactly as found; only comments added.
 */
static int amd_flash_erase(struct mtd_info *mtd, struct erase_info *instr)
	struct map_info *map = mtd->priv;
	struct amd_flash_private *private = map->fldrv_priv;
	unsigned long adr, len;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (instr->addr > mtd->size) {

	if ((instr->len + instr->addr) > mtd->size) {

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	 */
	while ((i < mtd->numeraseregions) &&
	       (instr->addr >= regions[i].offset)) {

	/* OK, now i is pointing at the erase region in which this
	 * erase request starts. Check the start of the requested
	 * erase range is aligned with the erase size which is in
	 */
	if (instr->addr & (regions[i].erasesize-1)) {

	/* Remember the erase region we start on. */

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */
	while ((i < mtd->numeraseregions) &&
	       ((instr->addr + instr->len) >= regions[i].offset)) {

	/* As before, drop back one to point at the region in which
	 * the address actually falls.
	 */

	if ((instr->addr + instr->len) & (regions[i].erasesize-1)) {

	chipnum = instr->addr >> private->chipshift;
	adr = instr->addr - (chipnum << private->chipshift);

		ret = erase_one_block(map, &private->chips[chipnum], adr,
				      regions[i].erasesize);

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		/* Step into the next region when this chip's share of the
		 * current region is exhausted. */
		if ((adr % (1 << private->chipshift)) ==
		    ((regions[i].offset + (regions[i].erasesize *
					   regions[i].numblocks))
		     % (1 << private->chipshift))) {

		if (adr >> private->chipshift) {
			if (chipnum >= private->numchips) {

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);
/*
 * MTD sync hook: put every idle chip into FL_SYNCING (waiting for busy
 * ones), then restore each chip's previous state.
 * NOTE(review): incomplete in this copy — locals (i, ret), the other
 * switch cases, the schedule() call and several braces are missing.
 * Code kept exactly as found; only comments added.
 */
static void amd_flash_sync(struct mtd_info *mtd)
	struct map_info *map = mtd->priv;
	struct amd_flash_private *private = map->fldrv_priv;
	struct flchip *chip;
	DECLARE_WAITQUEUE(wait, current);

	for (i = 0; !ret && (i < private->numchips); i++) {
		chip = &private->chips[i];

		spin_lock_bh(chip->mutex);

		switch(chip->state) {
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			spin_unlock_bh(chip->mutex);

			/* Not an idle state */
			add_wait_queue(&chip->wq, &wait);

			spin_unlock_bh(chip->mutex);

			remove_wait_queue(&chip->wq, &wait);

	/* Unlock the chips again */
	for (i--; i >= 0; i--) {
		chip = &private->chips[i];

		spin_lock_bh(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;

		spin_unlock_bh(chip->mutex);
1359 static int amd_flash_suspend(struct mtd_info *mtd)
1361 printk("amd_flash_suspend(): not implemented!\n");
/* Resume is not implemented for this legacy driver; no-op with a warning. */
static void amd_flash_resume(struct mtd_info *mtd)
{
	printk("amd_flash_resume(): not implemented!\n");
}
1374 static void amd_flash_destroy(struct mtd_info *mtd)
1376 struct map_info *map = mtd->priv;
1377 struct amd_flash_private *private = map->fldrv_priv;
1381 int __init amd_flash_init(void)
1383 register_mtd_chip_driver(&amd_flash_chipdrv);
1387 void __exit amd_flash_exit(void)
1389 unregister_mtd_chip_driver(&amd_flash_chipdrv);
/* Hook init/exit into the module load/unload sequence. */
module_init(amd_flash_init);
module_exit(amd_flash_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jonas Holmberg <jonas.holmberg@axis.com>");
MODULE_DESCRIPTION("Old MTD chip driver for AMD flash chips");