2 * Common Flash Interface support:
3 * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
5 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
6 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
7 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
9 * 2_by_8 routines added by Simon Munton
11 * 4_by_16 work by Carolyn J. Smith
13 * XIP support hooks by Vitaly Wool (based on code for Intel flash)
16 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
21 #include <linux/module.h>
22 #include <linux/types.h>
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <linux/init.h>
27 #include <asm/byteorder.h>
29 #include <linux/errno.h>
30 #include <linux/slab.h>
31 #include <linux/delay.h>
32 #include <linux/interrupt.h>
33 #include <linux/mtd/compatmac.h>
34 #include <linux/mtd/map.h>
35 #include <linux/mtd/mtd.h>
36 #include <linux/mtd/cfi.h>
37 #include <linux/mtd/xip.h>
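/*
 * Build-time tunables (a descriptive note, derived from how these macros are
 * used further down in this file):
 *  AMD_BOOTLOC_BUG  - enables fixup_amd_bootblock(), which second-guesses the
 *                     boot-block location reported by old (pre-1.1) CFI tables.
 *  FORCE_WORD_WRITE - set to 1 to drop the write-buffer fixup and always
 *                     program one word at a time.
 *  MAX_WORD_RETRIES - how many times a failed word program is retried before
 *                     giving up.
 */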
39 #define AMD_BOOTLOC_BUG
40 #define FORCE_WORD_WRITE 0
42 #define MAX_WORD_RETRIES 3
44 #define MANUFACTURER_AMD 0x0001
45 #define MANUFACTURER_ATMEL 0x001F
46 #define MANUFACTURER_SST 0x00BF
47 #define SST49LF004B 0x0060
48 #define SST49LF040B 0x0050
49 #define SST49LF008A 0x005a
50 #define AT49BV6416 0x00d6
52 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
53 static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
54 static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
55 static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
56 static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
57 static void cfi_amdstd_sync (struct mtd_info *);
58 static int cfi_amdstd_suspend (struct mtd_info *);
59 static void cfi_amdstd_resume (struct mtd_info *);
60 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
62 static void cfi_amdstd_destroy(struct mtd_info *);
64 struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
65 static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);
67 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
68 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
71 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
72 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
74 static struct mtd_chip_driver cfi_amdstd_chipdrv = {
75 .probe = NULL, /* Not usable directly */
76 .destroy = cfi_amdstd_destroy,
77 .name = "cfi_cmdset_0002",
82 /* #define DEBUG_CFI_FEATURES */
85 #ifdef DEBUG_CFI_FEATURES
86 static void cfi_tell_features(struct cfi_pri_amdstd *extp)
88 const char* erase_suspend[3] = {
89 "Not supported", "Read only", "Read/write"
91 const char* top_bottom[6] = {
92 "No WP", "8x8KiB sectors at top & bottom, no WP",
93 "Bottom boot", "Top boot",
94 "Uniform, Bottom WP", "Uniform, Top WP"
97 printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
98 printk(" Address sensitive unlock: %s\n",
99 (extp->SiliconRevision & 1) ? "Not required" : "Required");
101 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
102 printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
104 printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend);
106 if (extp->BlkProt == 0)
107 printk(" Block protection: Not supported\n");
109 printk(" Block protection: %d sectors per group\n", extp->BlkProt);
112 printk(" Temporary block unprotect: %s\n",
113 extp->TmpBlkUnprotect ? "Supported" : "Not supported");
114 printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
115 printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps);
116 printk(" Burst mode: %s\n",
117 extp->BurstMode ? "Supported" : "Not supported");
118 if (extp->PageMode == 0)
119 printk(" Page mode: Not supported\n");
121 printk(" Page mode: %d word page\n", extp->PageMode << 2);
123 printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
124 extp->VppMin >> 4, extp->VppMin & 0xf);
125 printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
126 extp->VppMax >> 4, extp->VppMax & 0xf);
128 if (extp->TopBottom < ARRAY_SIZE(top_bottom))
129 printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
131 printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
135 #ifdef AMD_BOOTLOC_BUG
136 /* Wheee. Bring me the head of someone at AMD. */
137 static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
139 struct map_info *map = mtd->priv;
140 struct cfi_private *cfi = map->fldrv_priv;
141 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
142 __u8 major = extp->MajorVersion;
143 __u8 minor = extp->MinorVersion;
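
	/* MajorVersion and MinorVersion are ASCII characters, so 0x3131 below
	 * is the string "11", i.e. extended query version 1.1. */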
145 if (((major << 8) | minor) < 0x3131) {
146 /* CFI version 1.0 => don't trust bootloc */
147 if (cfi->id & 0x80) {
148 printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
149 extp->TopBottom = 3; /* top boot */
151 extp->TopBottom = 2; /* bottom boot */
157 static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
159 struct map_info *map = mtd->priv;
160 struct cfi_private *cfi = map->fldrv_priv;
161 if (cfi->cfiq->BufWriteTimeoutTyp) {
162 DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
163 mtd->write = cfi_amdstd_write_buffers;
167 /* Atmel chips don't use the same PRI format as AMD chips */
168 static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
170 struct map_info *map = mtd->priv;
171 struct cfi_private *cfi = map->fldrv_priv;
172 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
173 struct cfi_pri_atmel atmel_pri;
175 memcpy(&atmel_pri, extp, sizeof(atmel_pri));
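	/* The first 5 bytes (the "PRI" signature plus major/minor version)
	 * are common to both layouts; everything past them is cleared here
	 * and then re-derived from the Atmel-specific fields. */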
176 memset((char *)extp + 5, 0, sizeof(*extp) - 5);
178 if (atmel_pri.Features & 0x02)
179 extp->EraseSuspend = 2;
181 if (atmel_pri.BottomBoot)
186 /* burst write mode not supported */
187 cfi->cfiq->BufWriteTimeoutTyp = 0;
188 cfi->cfiq->BufWriteTimeoutMax = 0;
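	/* With the buffer-write timeouts cleared, fixup_use_write_buffers()
	 * will leave the plain word-write path installed for these chips. */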
191 static void fixup_use_secsi(struct mtd_info *mtd, void *param)
193 /* Setup for chips with a secsi area */
194 mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
195 mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
198 static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
200 struct map_info *map = mtd->priv;
201 struct cfi_private *cfi = map->fldrv_priv;
202 if ((cfi->cfiq->NumEraseRegions == 1) &&
203 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
204 mtd->erase = cfi_amdstd_erase_chip;
210 * Some Atmel chips (e.g. the AT49BV6416) power up with all sectors locked by default.
213 static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
215 mtd->lock = cfi_atmel_lock;
216 mtd->unlock = cfi_atmel_unlock;
217 mtd->flags |= MTD_POWERUP_LOCK;
220 static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
222 struct map_info *map = mtd->priv;
223 struct cfi_private *cfi = map->fldrv_priv;
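
	/* The low 16 bits of EraseRegionInfo hold the sector count minus one,
	 * so 0x003f describes 64 sectors; setting bit 6 turns it into 0x007f,
	 * i.e. the 128 sectors the chip really has. */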
225 if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
226 cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
227 pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
231 static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
233 struct map_info *map = mtd->priv;
234 struct cfi_private *cfi = map->fldrv_priv;
236 if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
237 cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
238 pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
242 static struct cfi_fixup cfi_fixup_table[] = {
243 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
244 #ifdef AMD_BOOTLOC_BUG
245 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
247 { CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
248 { CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
249 { CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
250 { CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
251 { CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
252 { CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
253 { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
254 { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
255 { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
256 { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
257 #if !FORCE_WORD_WRITE
258 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
262 static struct cfi_fixup jedec_fixup_table[] = {
263 { MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
264 { MANUFACTURER_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
265 { MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
269 static struct cfi_fixup fixup_table[] = {
270 /* The CFI vendor IDs and the JEDEC vendor IDs appear
271 * to be common.  It is likely that the device IDs are as
272 * well.  This table picks up all the cases where
273 * we know that to be true.
275 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
276 { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
281 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
283 struct cfi_private *cfi = map->fldrv_priv;
284 struct mtd_info *mtd;
287 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
289 printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
293 mtd->type = MTD_NORFLASH;
295 /* Fill in the default mtd operations */
296 mtd->erase = cfi_amdstd_erase_varsize;
297 mtd->write = cfi_amdstd_write_words;
298 mtd->read = cfi_amdstd_read;
299 mtd->sync = cfi_amdstd_sync;
300 mtd->suspend = cfi_amdstd_suspend;
301 mtd->resume = cfi_amdstd_resume;
302 mtd->flags = MTD_CAP_NORFLASH;
303 mtd->name = map->name;
306 if (cfi->cfi_mode==CFI_MODE_CFI){
307 unsigned char bootloc;
309 * It's a real CFI chip, not one for which the probe
310 * routine faked a CFI structure. So we read the feature table from it.
313 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
314 struct cfi_pri_amdstd *extp;
316 extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
322 if (extp->MajorVersion != '1' ||
323 (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
324 printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
325 "version %c.%c.\n", extp->MajorVersion,
332 /* Install our own private info structure */
333 cfi->cmdset_priv = extp;
335 /* Apply cfi device specific fixups */
336 cfi_fixup(mtd, cfi_fixup_table);
338 #ifdef DEBUG_CFI_FEATURES
339 /* Tell the user about it in lots of lovely detail */
340 cfi_tell_features(extp);
343 bootloc = extp->TopBottom;
344 if ((bootloc != 2) && (bootloc != 3)) {
345 printk(KERN_WARNING "%s: CFI does not contain boot "
346 "bank location. Assuming top.\n", map->name);
350 if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
351 printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);
353 for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
354 int j = (cfi->cfiq->NumEraseRegions-1)-i;
357 swap = cfi->cfiq->EraseRegionInfo[i];
358 cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
359 cfi->cfiq->EraseRegionInfo[j] = swap;
362 /* Set the default CFI lock/unlock addresses */
363 cfi->addr_unlock1 = 0x555;
364 cfi->addr_unlock2 = 0x2aa;
365 /* Modify the unlock address if we are in compatibility mode */
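		/* A wider device run in a narrower compatibility mode decodes
		 * narrower addresses, so the word-mode unlock addresses
		 * 0x555/0x2AA become 0xAAA/0x555. */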
366 if ( /* x16 in x8 mode */
367 ((cfi->device_type == CFI_DEVICETYPE_X8) &&
368 (cfi->cfiq->InterfaceDesc ==
369 CFI_INTERFACE_X8_BY_X16_ASYNC)) ||
370 /* x32 in x16 mode */
371 ((cfi->device_type == CFI_DEVICETYPE_X16) &&
372 (cfi->cfiq->InterfaceDesc ==
373 CFI_INTERFACE_X16_BY_X32_ASYNC)))
375 cfi->addr_unlock1 = 0xaaa;
376 cfi->addr_unlock2 = 0x555;
380 else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
381 /* Apply jedec specific fixups */
382 cfi_fixup(mtd, jedec_fixup_table);
384 /* Apply generic fixups */
385 cfi_fixup(mtd, fixup_table);
387 for (i=0; i< cfi->numchips; i++) {
388 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
389 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
390 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
391 cfi->chips[i].ref_point_counter = 0;
392 init_waitqueue_head(&(cfi->chips[i].wq));
395 map->fldrv = &cfi_amdstd_chipdrv;
397 return cfi_amdstd_setup(mtd);
399 EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
401 static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
403 struct map_info *map = mtd->priv;
404 struct cfi_private *cfi = map->fldrv_priv;
405 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
406 unsigned long offset = 0;
409 printk(KERN_NOTICE "number of %s chips: %d\n",
410 (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
411 /* Select the correct geometry setup */
412 mtd->size = devsize * cfi->numchips;
414 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
415 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
416 * mtd->numeraseregions, GFP_KERNEL);
417 if (!mtd->eraseregions) {
418 printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
422 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
423 unsigned long ernum, ersize;
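		/* CFI packs each erase region descriptor as: bits 31..16 =
		 * erase block size in units of 256 bytes, bits 15..0 = number
		 * of blocks minus one (hence the "+ 1" below). */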
424 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
425 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
427 if (mtd->erasesize < ersize) {
428 mtd->erasesize = ersize;
430 for (j=0; j<cfi->numchips; j++) {
431 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
432 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
433 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
435 offset += (ersize * ernum);
437 if (offset != devsize) {
439 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
444 for (i=0; i<mtd->numeraseregions;i++){
445 printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
446 i,mtd->eraseregions[i].offset,
447 mtd->eraseregions[i].erasesize,
448 mtd->eraseregions[i].numblocks);
452 /* FIXME: erase-suspend-program is broken. See
453 http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
454 printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");
456 __module_get(THIS_MODULE);
461 kfree(mtd->eraseregions);
464 kfree(cfi->cmdset_priv);
470 * Return true if the chip is ready.
472 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
473 * non-suspended sector) and is indicated by no toggle bits toggling.
475 * Note that anything more complicated than checking if no bits are toggling
476 * (including checking DQ5 for an error status) is tricky to get working
477 * correctly and is therefore not done (particularly with interleaved chips
478 * as each chip must be checked independently of the others).
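 *
 * (DQ6 toggles on successive reads while an embedded program or erase
 * operation is running, which is why two identical back-to-back reads
 * indicate the chip is ready.)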
480 static int __xipram chip_ready(struct map_info *map, unsigned long addr)
484 d = map_read(map, addr);
485 t = map_read(map, addr);
487 return map_word_equal(map, d, t);
491 * Return true if the chip is ready and has the correct value.
493 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
494 * non-suspended sector) and it is indicated by no bits toggling.
496 * Errors are indicated by toggling bits, or by bits held at the wrong
497 * value.
499 * Note that anything more complicated than checking if no bits are toggling
500 * (including checking DQ5 for an error status) is tricky to get working
501 * correctly and is therefore not done (particularly with interleaved chips
502 * as each chip must be checked independently of the others).
505 static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
509 oldd = map_read(map, addr);
510 curd = map_read(map, addr);
512 return map_word_equal(map, oldd, curd) &&
513 map_word_equal(map, curd, expected);
516 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
518 DECLARE_WAITQUEUE(wait, current);
519 struct cfi_private *cfi = map->fldrv_priv;
521 struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;
524 timeo = jiffies + HZ;
526 switch (chip->state) {
530 if (chip_ready(map, adr))
533 if (time_after(jiffies, timeo)) {
534 printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
535 spin_unlock(chip->mutex);
538 spin_unlock(chip->mutex);
540 spin_lock(chip->mutex);
541 /* Someone else might have been playing with it. */
551 if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
554 if (!( mode == FL_READY
557 || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
558 || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1)
562 /* We could check to see if we're trying to access the sector
563 * that is currently being erased. However, no user will try
564 * anything like that so we just wait for the timeout. */
567 /* It's harmless to issue the Erase-Suspend and Erase-Resume
568 * commands when the erase algorithm isn't in progress. */
569 map_write(map, CMD(0xB0), chip->in_progress_block_addr);
570 chip->oldstate = FL_ERASING;
571 chip->state = FL_ERASE_SUSPENDING;
572 chip->erase_suspended = 1;
574 if (chip_ready(map, adr))
577 if (time_after(jiffies, timeo)) {
578 /* Should have suspended the erase by now.
579 * Send an Erase-Resume command as either
580 * there was an error (so leave the erase
581 * routine to recover from it) or we are trying to
582 * use the erase-in-progress sector. */
583 map_write(map, CMD(0x30), chip->in_progress_block_addr);
584 chip->state = FL_ERASING;
585 chip->oldstate = FL_READY;
586 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
590 spin_unlock(chip->mutex);
592 spin_lock(chip->mutex);
593 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
594 So we can just loop here. */
596 chip->state = FL_READY;
599 case FL_XIP_WHILE_ERASING:
600 if (mode != FL_READY && mode != FL_POINT &&
601 (!cfip || !(cfip->EraseSuspend&2)))
603 chip->oldstate = chip->state;
604 chip->state = FL_READY;
608 /* Only if there's no operation suspended... */
609 if (mode == FL_READY && chip->oldstate == FL_READY)
614 set_current_state(TASK_UNINTERRUPTIBLE);
615 add_wait_queue(&chip->wq, &wait);
616 spin_unlock(chip->mutex);
618 remove_wait_queue(&chip->wq, &wait);
619 spin_lock(chip->mutex);
625 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
627 struct cfi_private *cfi = map->fldrv_priv;
629 switch(chip->oldstate) {
631 chip->state = chip->oldstate;
632 map_write(map, CMD(0x30), chip->in_progress_block_addr);
633 chip->oldstate = FL_READY;
634 chip->state = FL_ERASING;
637 case FL_XIP_WHILE_ERASING:
638 chip->state = chip->oldstate;
639 chip->oldstate = FL_READY;
644 /* We should really make set_vpp() count, rather than doing this */
648 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
653 #ifdef CONFIG_MTD_XIP
656 * No interrupt whatsoever can be serviced while the flash isn't in array
657 * mode. This is ensured by the xip_disable() and xip_enable() functions
658 * enclosing any code path where the flash is known not to be in array mode.
659 * And within a XIP disabled code path, only functions marked with __xipram
660 * may be called and nothing else (it's a good thing to inspect generated
661 * assembly to make sure inline functions were actually inlined and that gcc
662 * didn't emit calls to its own support functions). Configuring MTD CFI
663 * support for a single buswidth and a single interleave is also recommended.
666 static void xip_disable(struct map_info *map, struct flchip *chip,
669 /* TODO: chips with no XIP use should ignore and return */
670 (void) map_read(map, adr); /* ensure mmu mapping is up to date */
674 static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
677 struct cfi_private *cfi = map->fldrv_priv;
679 if (chip->state != FL_POINT && chip->state != FL_READY) {
680 map_write(map, CMD(0xf0), adr);
681 chip->state = FL_READY;
683 (void) map_read(map, adr);
689 * When a delay is required for the flash operation to complete, the
690 * xip_udelay() function polls for both the given timeout and pending
691 * (but still masked) hardware interrupts.  Whenever an interrupt is
692 * pending, the flash erase operation is suspended, array mode restored
693 * and interrupts unmasked. Task scheduling might also happen at that
694 * point. The CPU eventually returns from the interrupt or the call to
695 * schedule() and the suspended flash operation is resumed for the remainder
696 * of the delay period.
698 * Warning: this function _will_ fool interrupt latency tracing tools.
701 static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
702 unsigned long adr, int usec)
704 struct cfi_private *cfi = map->fldrv_priv;
705 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
706 map_word status, OK = CMD(0x80);
707 unsigned long suspended, start = xip_currtime();
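
	/*
	 * DQ7 (0x80) data polling: while the erase algorithm is running DQ7
	 * reads 0, and it returns 1 once the operation has completed or has
	 * been successfully suspended, which is what the loops below poll for.
	 */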
712 if (xip_irqpending() && extp &&
713 ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
714 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
716 * Let's suspend the erase operation when supported.
717 * Note that we currently don't try to suspend
718 * interleaved chips if there is already another
719 * operation suspended (imagine what happens
720 * when one chip was already done with the current
721 * operation while another chip suspended it, then
722 * we resume the whole thing at once).  Yes, it can happen.
725 map_write(map, CMD(0xb0), adr);
726 usec -= xip_elapsed_since(start);
727 suspended = xip_currtime();
729 if (xip_elapsed_since(suspended) > 100000) {
731 * The chip doesn't want to suspend
732 * after waiting for 100 msecs.
733 * This is a critical error but there
734 * is not much we can do here.
738 status = map_read(map, adr);
739 } while (!map_word_andequal(map, status, OK, OK));
741 /* Suspend succeeded */
742 oldstate = chip->state;
743 if (!map_word_bitsset(map, status, CMD(0x40)))
745 chip->state = FL_XIP_WHILE_ERASING;
746 chip->erase_suspended = 1;
747 map_write(map, CMD(0xf0), adr);
748 (void) map_read(map, adr);
751 spin_unlock(chip->mutex);
756 * We're back. However someone else might have
757 * decided to go write to the chip if we are in
758 * a suspended erase state. If so let's wait until it's done.
761 spin_lock(chip->mutex);
762 while (chip->state != FL_XIP_WHILE_ERASING) {
763 DECLARE_WAITQUEUE(wait, current);
764 set_current_state(TASK_UNINTERRUPTIBLE);
765 add_wait_queue(&chip->wq, &wait);
766 spin_unlock(chip->mutex);
768 remove_wait_queue(&chip->wq, &wait);
769 spin_lock(chip->mutex);
771 /* Disallow XIP again */
774 /* Resume the write or erase operation */
775 map_write(map, CMD(0x30), adr);
776 chip->state = oldstate;
777 start = xip_currtime();
778 } else if (usec >= 1000000/HZ) {
780 * Try to save on CPU power when the waiting delay
781 * is at least a system timer tick period.
782 * No need to be extremely accurate here.
786 status = map_read(map, adr);
787 } while (!map_word_andequal(map, status, OK, OK)
788 && xip_elapsed_since(start) < usec);
791 #define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec)
794 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
795 * the flash is actively programming or erasing since we have to poll for
796 * the operation to complete anyway. We can't do that in a generic way with
797 * a XIP setup so do it before the actual flash operation in this case
798 * and stub it out from INVALIDATE_CACHE_UDELAY.
800 #define XIP_INVAL_CACHED_RANGE(map, from, size) \
801 INVALIDATE_CACHED_RANGE(map, from, size)
803 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
804 UDELAY(map, chip, adr, usec)
809 * Activating this XIP support changes the way the code works a bit. For
810 * example the code to suspend the current process when concurrent access
811 * happens is never executed because xip_udelay() will always return with the
812 * same chip state as it was entered with.  This is why there is no special
813 * handling of the add_wait_queue() or schedule() calls made from within the
814 * few xip_disable()'d areas of code, like in do_erase_oneblock() for example.
815 * The queueing and scheduling are always happening within xip_udelay().
817 * Similarly, get_chip() and put_chip() just happen to always be executed
818 * with chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. while the flash
819 * is in array mode, so many of the cases therein are never executed and
820 * cause no problem with XIP.
825 #define xip_disable(map, chip, adr)
826 #define xip_enable(map, chip, adr)
827 #define XIP_INVAL_CACHED_RANGE(x...)
829 #define UDELAY(map, chip, adr, usec) \
831 spin_unlock(chip->mutex); \
833 spin_lock(chip->mutex); \
836 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
838 spin_unlock(chip->mutex); \
839 INVALIDATE_CACHED_RANGE(map, adr, len); \
841 spin_lock(chip->mutex); \
846 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
848 unsigned long cmd_addr;
849 struct cfi_private *cfi = map->fldrv_priv;
854 /* Ensure cmd read/writes are aligned. */
855 cmd_addr = adr & ~(map_bankwidth(map)-1);
857 spin_lock(chip->mutex);
858 ret = get_chip(map, chip, cmd_addr, FL_READY);
860 spin_unlock(chip->mutex);
864 if (chip->state != FL_POINT && chip->state != FL_READY) {
865 map_write(map, CMD(0xf0), cmd_addr);
866 chip->state = FL_READY;
869 map_copy_from(map, buf, adr, len);
871 put_chip(map, chip, cmd_addr);
873 spin_unlock(chip->mutex);
878 static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
880 struct map_info *map = mtd->priv;
881 struct cfi_private *cfi = map->fldrv_priv;
886 /* ofs: offset within the first chip at which the first read should start */
888 chipnum = (from >> cfi->chipshift);
889 ofs = from - (chipnum << cfi->chipshift);
895 unsigned long thislen;
897 if (chipnum >= cfi->numchips)
900 if ((len + ofs -1) >> cfi->chipshift)
901 thislen = (1<<cfi->chipshift) - ofs;
905 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
920 static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
922 DECLARE_WAITQUEUE(wait, current);
923 unsigned long timeo = jiffies + HZ;
924 struct cfi_private *cfi = map->fldrv_priv;
927 spin_lock(chip->mutex);
929 if (chip->state != FL_READY){
931 printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
933 set_current_state(TASK_UNINTERRUPTIBLE);
934 add_wait_queue(&chip->wq, &wait);
936 spin_unlock(chip->mutex);
939 remove_wait_queue(&chip->wq, &wait);
941 if(signal_pending(current))
944 timeo = jiffies + HZ;
951 chip->state = FL_READY;
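
	/*
	 * Enter the Secured Silicon (SecSi) sector region: two unlock cycles
	 * followed by the 0x88 entry command.  The 0x90/0x00 sequence after
	 * the copy exits SecSi mode and returns the device to the normal
	 * array.
	 */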
953 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
954 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
955 cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
957 map_copy_from(map, buf, adr, len);
959 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
960 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
961 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
962 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
965 spin_unlock(chip->mutex);
970 static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
972 struct map_info *map = mtd->priv;
973 struct cfi_private *cfi = map->fldrv_priv;
979 /* ofs: offset within the first chip at which the first read should start */
981 /* 8 secsi bytes per chip */
989 unsigned long thislen;
991 if (chipnum >= cfi->numchips)
994 if ((len + ofs -1) >> 3)
995 thislen = (1<<3) - ofs;
999 ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1014 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
1016 struct cfi_private *cfi = map->fldrv_priv;
1017 unsigned long timeo = jiffies + HZ;
1019 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
1020 * have a max write time of a few hundred usecs).  However, we should
1021 * use the maximum timeout value given by the chip at probe time
1022 * instead.  Unfortunately, struct flchip doesn't have a field for the
1023 * maximum timeout, only for the typical one, which can be far too short
1024 * depending on the conditions.  The ' + 1' is to avoid having a
1025 * timeout of 0 jiffies if HZ is smaller than 1000.
1027 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
1034 spin_lock(chip->mutex);
1035 ret = get_chip(map, chip, adr, FL_WRITING);
1037 spin_unlock(chip->mutex);
1041 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1042 __func__, adr, datum.x[0] );
1045 * Check for a NOP for the case when the datum to write is already
1046 * present - it saves time and works around buggy chips that corrupt
1047 * data at other locations when 0xff is written to a location that
1048 * already contains 0xff.
1050 oldd = map_read(map, adr);
1051 if (map_word_equal(map, oldd, datum)) {
1052 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
1057 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1059 xip_disable(map, chip, adr);
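
	/*
	 * Standard AMD/Fujitsu word program sequence (the unlock addresses
	 * were set up at probe time, typically 0x555/0x2AA):
	 *   cycle 1: 0xAA -> addr_unlock1
	 *   cycle 2: 0x55 -> addr_unlock2
	 *   cycle 3: 0xA0 -> addr_unlock1   (program setup)
	 *   cycle 4: data -> destination address
	 */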
1061 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1062 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1063 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1064 map_write(map, datum, adr);
1065 chip->state = FL_WRITING;
1067 INVALIDATE_CACHE_UDELAY(map, chip,
1068 adr, map_bankwidth(map),
1069 chip->word_write_time);
1071 /* See comment above for timeout value. */
1072 timeo = jiffies + uWriteTimeout;
1074 if (chip->state != FL_WRITING) {
1075 /* Someone's suspended the write. Sleep */
1076 DECLARE_WAITQUEUE(wait, current);
1078 set_current_state(TASK_UNINTERRUPTIBLE);
1079 add_wait_queue(&chip->wq, &wait);
1080 spin_unlock(chip->mutex);
1082 remove_wait_queue(&chip->wq, &wait);
1083 timeo = jiffies + (HZ / 2); /* FIXME */
1084 spin_lock(chip->mutex);
1088 if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
1089 xip_enable(map, chip, adr);
1090 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
1091 xip_disable(map, chip, adr);
1095 if (chip_ready(map, adr))
1098 /* Latency issues. Drop the lock, wait a while and retry */
1099 UDELAY(map, chip, adr, 1);
1101 /* Did we succeed? */
1102 if (!chip_good(map, adr, datum)) {
1103 /* reset on all failures. */
1104 map_write( map, CMD(0xF0), chip->start );
1105 /* FIXME - should have reset delay before continuing */
1107 if (++retry_cnt <= MAX_WORD_RETRIES)
1112 xip_enable(map, chip, adr);
1114 chip->state = FL_READY;
1115 put_chip(map, chip, adr);
1116 spin_unlock(chip->mutex);
1122 static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1123 size_t *retlen, const u_char *buf)
1125 struct map_info *map = mtd->priv;
1126 struct cfi_private *cfi = map->fldrv_priv;
1129 unsigned long ofs, chipstart;
1130 DECLARE_WAITQUEUE(wait, current);
1136 chipnum = to >> cfi->chipshift;
1137 ofs = to - (chipnum << cfi->chipshift);
1138 chipstart = cfi->chips[chipnum].start;
1140 /* If it's not bus-aligned, do the first byte write */
1141 if (ofs & (map_bankwidth(map)-1)) {
1142 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1143 int i = ofs - bus_ofs;
1148 spin_lock(cfi->chips[chipnum].mutex);
1150 if (cfi->chips[chipnum].state != FL_READY) {
1152 printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
1154 set_current_state(TASK_UNINTERRUPTIBLE);
1155 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1157 spin_unlock(cfi->chips[chipnum].mutex);
1160 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1162 if(signal_pending(current))
1168 /* Load 'tmp_buf' with old contents of flash */
1169 tmp_buf = map_read(map, bus_ofs+chipstart);
1171 spin_unlock(cfi->chips[chipnum].mutex);
1173 /* Number of bytes to copy from buffer */
1174 n = min_t(int, len, map_bankwidth(map)-i);
1176 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1178 ret = do_write_oneword(map, &cfi->chips[chipnum],
1188 if (ofs >> cfi->chipshift) {
1191 if (chipnum == cfi->numchips)
1196 /* We are now aligned, write as much as possible */
1197 while(len >= map_bankwidth(map)) {
1200 datum = map_word_load(map, buf);
1202 ret = do_write_oneword(map, &cfi->chips[chipnum],
1207 ofs += map_bankwidth(map);
1208 buf += map_bankwidth(map);
1209 (*retlen) += map_bankwidth(map);
1210 len -= map_bankwidth(map);
1212 if (ofs >> cfi->chipshift) {
1215 if (chipnum == cfi->numchips)
1217 chipstart = cfi->chips[chipnum].start;
1221 /* Write the trailing bytes if any */
1222 if (len & (map_bankwidth(map)-1)) {
1226 spin_lock(cfi->chips[chipnum].mutex);
1228 if (cfi->chips[chipnum].state != FL_READY) {
1230 printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
1232 set_current_state(TASK_UNINTERRUPTIBLE);
1233 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1235 spin_unlock(cfi->chips[chipnum].mutex);
1238 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1240 if(signal_pending(current))
1246 tmp_buf = map_read(map, ofs + chipstart);
1248 spin_unlock(cfi->chips[chipnum].mutex);
1250 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1252 ret = do_write_oneword(map, &cfi->chips[chipnum],
1265 * FIXME: interleaved mode not tested, and probably not supported!
1267 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1268 unsigned long adr, const u_char *buf,
1271 struct cfi_private *cfi = map->fldrv_priv;
1272 unsigned long timeo = jiffies + HZ;
1273 /* see comments in do_write_oneword() regarding uWriteTimeout. */
1274 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
1276 unsigned long cmd_adr;
1283 spin_lock(chip->mutex);
1284 ret = get_chip(map, chip, adr, FL_WRITING);
1286 spin_unlock(chip->mutex);
1290 datum = map_word_load(map, buf);
1292 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1293 __func__, adr, datum.x[0] );
1295 XIP_INVAL_CACHED_RANGE(map, adr, len);
1297 xip_disable(map, chip, cmd_adr);
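
	/*
	 * Write-to-buffer sequence: two unlock cycles, then 0x25 (Write to
	 * Buffer) at an address within the target sector, the word count
	 * minus one, the data words themselves, and finally 0x29 (Program
	 * Buffer to Flash) to start the actual programming.
	 */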
1299 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1300 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1301 //cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1303 /* Write Buffer Load */
1304 map_write(map, CMD(0x25), cmd_adr);
1306 chip->state = FL_WRITING_TO_BUFFER;
1308 /* Write length of data to come: the device expects the word count minus one */
1309 words = len / map_bankwidth(map);
1310 map_write(map, CMD(words - 1), cmd_adr);
1313 while(z < words * map_bankwidth(map)) {
1314 datum = map_word_load(map, buf);
1315 map_write(map, datum, adr + z);
1317 z += map_bankwidth(map);
1318 buf += map_bankwidth(map);
1320 z -= map_bankwidth(map);
1324 /* Write Buffer Program Confirm: GO GO GO */
1325 map_write(map, CMD(0x29), cmd_adr);
1326 chip->state = FL_WRITING;
1328 INVALIDATE_CACHE_UDELAY(map, chip,
1329 adr, map_bankwidth(map),
1330 chip->word_write_time);
1332 timeo = jiffies + uWriteTimeout;
1335 if (chip->state != FL_WRITING) {
1336 /* Someone's suspended the write. Sleep */
1337 DECLARE_WAITQUEUE(wait, current);
1339 set_current_state(TASK_UNINTERRUPTIBLE);
1340 add_wait_queue(&chip->wq, &wait);
1341 spin_unlock(chip->mutex);
1343 remove_wait_queue(&chip->wq, &wait);
1344 timeo = jiffies + (HZ / 2); /* FIXME */
1345 spin_lock(chip->mutex);
1349 if (time_after(jiffies, timeo) && !chip_ready(map, adr))
1352 if (chip_ready(map, adr)) {
1353 xip_enable(map, chip, adr);
1357 /* Latency issues. Drop the lock, wait a while and retry */
1358 UDELAY(map, chip, adr, 1);
1361 /* reset on all failures. */
1362 map_write( map, CMD(0xF0), chip->start );
1363 xip_enable(map, chip, adr);
1364 /* FIXME - should have reset delay before continuing */
1366 printk(KERN_WARNING "MTD %s(): software timeout\n",
1371 chip->state = FL_READY;
1372 put_chip(map, chip, adr);
1373 spin_unlock(chip->mutex);
1379 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1380 size_t *retlen, const u_char *buf)
1382 struct map_info *map = mtd->priv;
1383 struct cfi_private *cfi = map->fldrv_priv;
1384 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1393 chipnum = to >> cfi->chipshift;
1394 ofs = to - (chipnum << cfi->chipshift);
1396 /* If it's not bus-aligned, do the first word write */
1397 if (ofs & (map_bankwidth(map)-1)) {
1398 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1399 if (local_len > len)
1401 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1402 local_len, retlen, buf);
1409 if (ofs >> cfi->chipshift) {
1412 if (chipnum == cfi->numchips)
1417 /* Write buffer is worth it only if more than one word to write... */
1418 while (len >= map_bankwidth(map) * 2) {
1419 /* We must not cross write block boundaries */
1420 int size = wbufsize - (ofs & (wbufsize-1));
1424 if (size % map_bankwidth(map))
1425 size -= size % map_bankwidth(map);
1427 ret = do_write_buffer(map, &cfi->chips[chipnum],
1437 if (ofs >> cfi->chipshift) {
1440 if (chipnum == cfi->numchips)
1446 size_t retlen_dregs = 0;
1448 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1449 len, &retlen_dregs, buf);
1451 *retlen += retlen_dregs;
1460 * Handle devices with one erase region, that only implement
1461 * the chip erase command.
1463 static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
1465 struct cfi_private *cfi = map->fldrv_priv;
1466 unsigned long timeo = jiffies + HZ;
1467 unsigned long int adr;
1468 DECLARE_WAITQUEUE(wait, current);
1471 adr = cfi->addr_unlock1;
1473 spin_lock(chip->mutex);
1474 ret = get_chip(map, chip, adr, FL_WRITING);
1476 spin_unlock(chip->mutex);
1480 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
1481 __func__, chip->start );
1483 XIP_INVAL_CACHED_RANGE(map, adr, map->size);
1485 xip_disable(map, chip, adr);
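
	/*
	 * Chip erase is a six-cycle command: two unlock cycles, 0x80 (erase
	 * setup), two more unlock cycles, then 0x10 (chip erase).
	 */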
1487 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1488 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1489 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1490 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1491 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1492 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1494 chip->state = FL_ERASING;
1495 chip->erase_suspended = 0;
1496 chip->in_progress_block_addr = adr;
1498 INVALIDATE_CACHE_UDELAY(map, chip,
1500 chip->erase_time*500);
1502 timeo = jiffies + (HZ*20);
1505 if (chip->state != FL_ERASING) {
1506 /* Someone's suspended the erase. Sleep */
1507 set_current_state(TASK_UNINTERRUPTIBLE);
1508 add_wait_queue(&chip->wq, &wait);
1509 spin_unlock(chip->mutex);
1511 remove_wait_queue(&chip->wq, &wait);
1512 spin_lock(chip->mutex);
1515 if (chip->erase_suspended) {
1516 /* This erase was suspended and resumed.
1517 Adjust the timeout */
1518 timeo = jiffies + (HZ*20); /* FIXME */
1519 chip->erase_suspended = 0;
1522 if (chip_ready(map, adr))
1525 if (time_after(jiffies, timeo)) {
1526 printk(KERN_WARNING "MTD %s(): software timeout\n",
1531 /* Latency issues. Drop the lock, wait a while and retry */
1532 UDELAY(map, chip, adr, 1000000/HZ);
1534 /* Did we succeed? */
1535 if (!chip_good(map, adr, map_word_ff(map))) {
1536 /* reset on all failures. */
1537 map_write( map, CMD(0xF0), chip->start );
1538 /* FIXME - should have reset delay before continuing */
1543 chip->state = FL_READY;
1544 xip_enable(map, chip, adr);
1545 put_chip(map, chip, adr);
1546 spin_unlock(chip->mutex);
1552 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
1554 struct cfi_private *cfi = map->fldrv_priv;
1555 unsigned long timeo = jiffies + HZ;
1556 DECLARE_WAITQUEUE(wait, current);
1561 spin_lock(chip->mutex);
1562 ret = get_chip(map, chip, adr, FL_ERASING);
1564 spin_unlock(chip->mutex);
1568 DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
1571 XIP_INVAL_CACHED_RANGE(map, adr, len);
1573 xip_disable(map, chip, adr);
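
	/*
	 * Sector erase uses the same six-cycle pattern as chip erase, except
	 * that the final cycle writes 0x30 to an address inside the sector
	 * to be erased.
	 */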
1575 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1576 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1577 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1578 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1579 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1580 map_write(map, CMD(0x30), adr);
1582 chip->state = FL_ERASING;
1583 chip->erase_suspended = 0;
1584 chip->in_progress_block_addr = adr;
1586 INVALIDATE_CACHE_UDELAY(map, chip,
1588 chip->erase_time*500);
1590 timeo = jiffies + (HZ*20);
1593 if (chip->state != FL_ERASING) {
1594 /* Someone's suspended the erase. Sleep */
1595 set_current_state(TASK_UNINTERRUPTIBLE);
1596 add_wait_queue(&chip->wq, &wait);
1597 spin_unlock(chip->mutex);
1599 remove_wait_queue(&chip->wq, &wait);
1600 spin_lock(chip->mutex);
1603 if (chip->erase_suspended) {
1604 /* This erase was suspended and resumed.
1605 Adjust the timeout */
1606 timeo = jiffies + (HZ*20); /* FIXME */
1607 chip->erase_suspended = 0;
1610 if (chip_ready(map, adr)) {
1611 xip_enable(map, chip, adr);
1615 if (time_after(jiffies, timeo)) {
1616 xip_enable(map, chip, adr);
1617 printk(KERN_WARNING "MTD %s(): software timeout\n",
1622 /* Latency issues. Drop the lock, wait a while and retry */
1623 UDELAY(map, chip, adr, 1000000/HZ);
1625 /* Did we succeed? */
1626 if (!chip_good(map, adr, map_word_ff(map))) {
1627 /* reset on all failures. */
1628 map_write( map, CMD(0xF0), chip->start );
1629 /* FIXME - should have reset delay before continuing */
1634 chip->state = FL_READY;
1635 put_chip(map, chip, adr);
1636 spin_unlock(chip->mutex);
1641 static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1643 unsigned long ofs, len;
1649 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1653 instr->state = MTD_ERASE_DONE;
1654 mtd_erase_callback(instr);
1660 static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
1662 struct map_info *map = mtd->priv;
1663 struct cfi_private *cfi = map->fldrv_priv;
1666 if (instr->addr != 0)
1669 if (instr->len != mtd->size)
1672 ret = do_erase_chip(map, &cfi->chips[0]);
1676 instr->state = MTD_ERASE_DONE;
1677 mtd_erase_callback(instr);
1682 static int do_atmel_lock(struct map_info *map, struct flchip *chip,
1683 unsigned long adr, int len, void *thunk)
1685 struct cfi_private *cfi = map->fldrv_priv;
1688 spin_lock(chip->mutex);
1689 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
1692 chip->state = FL_LOCKING;
1694 DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
1695 __func__, adr, len);
1697 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1698 cfi->device_type, NULL);
1699 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1700 cfi->device_type, NULL);
1701 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
1702 cfi->device_type, NULL);
1703 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1704 cfi->device_type, NULL);
1705 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1706 cfi->device_type, NULL);
1707 map_write(map, CMD(0x40), chip->start + adr);
1709 chip->state = FL_READY;
1710 put_chip(map, chip, adr + chip->start);
1714 spin_unlock(chip->mutex);
1718 static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
1719 unsigned long adr, int len, void *thunk)
1721 struct cfi_private *cfi = map->fldrv_priv;
1724 spin_lock(chip->mutex);
1725 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
1728 chip->state = FL_UNLOCKING;
1730 DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
1731 __func__, adr, len);
1733 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1734 cfi->device_type, NULL);
1735 map_write(map, CMD(0x70), adr);
1737 chip->state = FL_READY;
1738 put_chip(map, chip, adr + chip->start);
1742 spin_unlock(chip->mutex);
1746 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1748 return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
1751 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1753 return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
1757 static void cfi_amdstd_sync (struct mtd_info *mtd)
1759 struct map_info *map = mtd->priv;
1760 struct cfi_private *cfi = map->fldrv_priv;
1762 struct flchip *chip;
1764 DECLARE_WAITQUEUE(wait, current);
1766 for (i=0; !ret && i<cfi->numchips; i++) {
1767 chip = &cfi->chips[i];
1770 spin_lock(chip->mutex);
1772 switch(chip->state) {
1776 case FL_JEDEC_QUERY:
1777 chip->oldstate = chip->state;
1778 chip->state = FL_SYNCING;
1779 /* No need to wake_up() on this state change -
1780 * as the whole point is that nobody can do anything
1781 * with the chip now anyway.
1784 spin_unlock(chip->mutex);
1788 /* Not an idle state */
1789 set_current_state(TASK_UNINTERRUPTIBLE);
1790 add_wait_queue(&chip->wq, &wait);
1792 spin_unlock(chip->mutex);
1796 remove_wait_queue(&chip->wq, &wait);
1802 /* Unlock the chips again */
1804 for (i--; i >=0; i--) {
1805 chip = &cfi->chips[i];
1807 spin_lock(chip->mutex);
1809 if (chip->state == FL_SYNCING) {
1810 chip->state = chip->oldstate;
1813 spin_unlock(chip->mutex);
1818 static int cfi_amdstd_suspend(struct mtd_info *mtd)
1820 struct map_info *map = mtd->priv;
1821 struct cfi_private *cfi = map->fldrv_priv;
1823 struct flchip *chip;
1826 for (i=0; !ret && i<cfi->numchips; i++) {
1827 chip = &cfi->chips[i];
1829 spin_lock(chip->mutex);
1831 switch(chip->state) {
1835 case FL_JEDEC_QUERY:
1836 chip->oldstate = chip->state;
1837 chip->state = FL_PM_SUSPENDED;
1838 /* No need to wake_up() on this state change -
1839 * as the whole point is that nobody can do anything
1840 * with the chip now anyway.
1842 case FL_PM_SUSPENDED:
1849 spin_unlock(chip->mutex);
1852 /* Unlock the chips again */
1855 for (i--; i >=0; i--) {
1856 chip = &cfi->chips[i];
1858 spin_lock(chip->mutex);
1860 if (chip->state == FL_PM_SUSPENDED) {
1861 chip->state = chip->oldstate;
1864 spin_unlock(chip->mutex);
1872 static void cfi_amdstd_resume(struct mtd_info *mtd)
1874 struct map_info *map = mtd->priv;
1875 struct cfi_private *cfi = map->fldrv_priv;
1877 struct flchip *chip;
1879 for (i=0; i<cfi->numchips; i++) {
1881 chip = &cfi->chips[i];
1883 spin_lock(chip->mutex);
1885 if (chip->state == FL_PM_SUSPENDED) {
1886 chip->state = FL_READY;
1887 map_write(map, CMD(0xF0), chip->start);
1891 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
1893 spin_unlock(chip->mutex);
1897 static void cfi_amdstd_destroy(struct mtd_info *mtd)
1899 struct map_info *map = mtd->priv;
1900 struct cfi_private *cfi = map->fldrv_priv;
1902 kfree(cfi->cmdset_priv);
1905 kfree(mtd->eraseregions);
1908 MODULE_LICENSE("GPL");
1909 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
1910 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");