/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>
#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define MANUFACTURER_AMD	0x0001
#define MANUFACTURER_ATMEL	0x001F
#define MANUFACTURER_MACRONIX	0x00C2
#define MANUFACTURER_SST	0x00BF
#define SST49LF004B		0x0060
#define SST49LF040B		0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6
static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);

#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};
/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);

	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif
#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		DEBUG(MTD_DEBUG_LEVEL1,
			"%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 *
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
			(cfi->mfr == MANUFACTURER_MACRONIX)) {
			DEBUG(MTD_DEBUG_LEVEL1,
				"%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		DEBUG(MTD_DEBUG_LEVEL1,
			"%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
#endif
static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n");
		mtd->write = cfi_amdstd_write_buffers;
	}
}
/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	/* Some chips got it backwards... */
	if (cfi->id == AT49BV6416) {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 3;
		else
			extp->TopBottom = 2;
	} else {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 2;
		else
			extp->TopBottom = 3;
	}

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}
static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
	/* Setup for chips with a secsi area */
	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}
static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->erase = cfi_amdstd_erase_chip;
	}
}
/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
{
	mtd->lock = cfi_atmel_lock;
	mtd->unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}
static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
	}
}
static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
	{ MANUFACTURER_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common. It looks like the device IDs are as
	 * well. This table picks the cases where we know
	 * that to be true.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
	{ 0, 0, NULL, NULL }
};
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_amdstd_erase_varsize;
	mtd->write   = cfi_amdstd_write_words;
	mtd->read    = cfi_amdstd_read;
	mtd->sync    = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		unsigned char bootloc;
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
			printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
			       "version %c.%c.\n", extp->MajorVersion,
			       extp->MinorVersion);
			kfree(extp);
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		/* Apply cfi device specific fixups */
		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		bootloc = extp->TopBottom;
		if ((bootloc != 2) && (bootloc != 3)) {
			printk(KERN_WARNING "%s: CFI does not contain boot "
			       "bank location. Assuming top.\n", map->name);
			bootloc = 2;
		}

		if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
			printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);

			for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
				int j = (cfi->cfiq->NumEraseRegions-1)-i;
				__u32 swap;

				swap = cfi->cfiq->EraseRegionInfo[i];
				cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
				cfi->cfiq->EraseRegionInfo[j] = swap;
			}
		}
		/* Set the default CFI lock/unlock addresses */
		cfi->addr_unlock1 = 0x555;
		cfi->addr_unlock2 = 0x2aa;
		/* Modify the unlock address if we are in compatibility mode */
		if (	/* x16 in x8 mode */
			((cfi->device_type == CFI_DEVICETYPE_X8) &&
				(cfi->cfiq->InterfaceDesc ==
					CFI_INTERFACE_X8_BY_X16_ASYNC)) ||
			/* x32 in x16 mode */
			((cfi->device_type == CFI_DEVICETYPE_X16) &&
				(cfi->cfiq->InterfaceDesc ==
					CFI_INTERFACE_X16_BY_X32_ASYNC)))
		{
			cfi->addr_unlock1 = 0xaaa;
			cfi->addr_unlock2 = 0x555;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i < cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI) ? "CFI" : "JEDEC", cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}
#if 0
	/* debug */
	for (i=0; i<mtd->numeraseregions; i++) {
		printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i, mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}
#endif

	/* FIXME: erase-suspend-program is broken. See
	   http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
	printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if (mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}
/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}
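
/*
 * Implementation note: while an embedded program or erase algorithm is
 * running, these chips toggle DQ6 (and DQ2 during erase) on every read
 * cycle, per the AMD/JEDEC standard command set. Once the operation
 * completes the device returns to array mode and consecutive reads return
 * identical data, which is exactly what chip_ready() above tests for.
 */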

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				spin_unlock(chip->mutex);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
			goto sleep;

		if (!(mode == FL_READY
		      || mode == FL_POINT
		      || !cfip
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1)
			  && chip->in_progress_block_addr == adr)))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
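		/* (0xB0 is the Erase-Suspend command; the matching
		 * Erase-Resume is 0x30, written to an address within the
		 * suspended sector -- see the timeout path below and
		 * put_chip(), per the AMD standard command set.) */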
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we are
				 * trying to use the erase-in-progress sector. */
				map_write(map, CMD(0x30), chip->in_progress_block_addr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend & 2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
 sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch (chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode. This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts. Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked. Task scheduling might also happen at that
 * point. The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once). Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			spin_unlock(chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back. However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state. If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0x30), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway. We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Activating this XIP support changes the way the code works a bit. For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with. This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * of xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);
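	/* For example, with 8MiB non-interleaved chips cfi->chipshift is 23,
	 * so from = 0x900000 yields chipnum = 1, ofs = 0x100000. */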

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1 << cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	spin_lock(chip->mutex);

	if (chip->state != FL_READY) {
#if 0
		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if (signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;
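
	/* The 0xAA/0x55/0x88 cycle below enters the SecSi (Secured Silicon)
	 * sector region; the closing 0xAA/0x55/0x90 cycle followed by 0x00
	 * exits it again, per the AMD command definitions for these parts. */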
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	spin_unlock(chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	/* 8 secsi bytes per chip */
	chipnum = from >> 3;
	ofs = from & 7;

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> 3)
			thislen = (1 << 3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundreds usec). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead. Unfortunately, struct flchip does not have a field for
	 * the maximum timeout, only for the typical, which can be far too
	 * short depending on the conditions. The ' + 1' is to avoid having
	 * a timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
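	/* Worked example: HZ=1000 gives (1000/1000)+1 = 2 jiffies (~2ms);
	 * HZ=100 gives (100/1000)+1 = 1 jiffy (~10ms) instead of 0. */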
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n", __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
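	/* Standard AMD three-cycle program sequence: 0xAA -> unlock1,
	 * 0x55 -> unlock2, 0xA0 -> unlock1, then the datum itself is
	 * written to its target address. */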
 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if (signal_pending(current))
				return -EINTR;
#endif
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs + chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map) - i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}
	/* We are now aligned, write as much as possible */
	while (len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if (signal_pending(current))
				return -EINTR;
#endif
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}

/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
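
	/* Buffered write sequence, per the AMD/Spansion write-buffer command
	 * set: after the two unlock cycles above, 0x25 (Write to Buffer) is
	 * written to the sector address, then the word count minus one, the
	 * data words themselves, and finally 0x29 (Write Buffer Program
	 * Confirm). */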
	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while (z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* reset on all failures. */
	map_write( map, CMD(0xF0), chip->start );
	xip_enable(map, chip, adr);
	/* FIXME - should have reset delay before continuing */

	printk(KERN_WARNING "MTD %s(): software timeout\n", __func__ );

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs) & (map_bankwidth(map)-1);
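		/* (-ofs) & (bankwidth-1) is the byte distance to the next
		 * bus-aligned boundary, e.g. bankwidth 4, ofs 6 gives
		 * (-6) & 3 = 2 bytes. */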
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum << cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum << cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}

/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, chip->start );

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
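	/* Six-cycle chip erase sequence from the AMD command set:
	 * 0xAA, 0x55, 0x80 (erase setup), then 0xAA, 0x55, 0x10. */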
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}

static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, adr );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
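	/* Sector erase uses the same 0xAA/0x55/0x80/0xAA/0x55 prefix as
	 * chip erase, but the final cycle writes 0x30 to an address within
	 * the sector to be erased. */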
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, CMD(0x30), adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	spin_unlock(chip->mutex);
	return ret;
}

static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}

static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			spin_unlock(chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			spin_unlock(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}

static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}

static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i < cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		spin_unlock(chip->mutex);
	}
}

static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");