2 * Common Flash Interface support:
3 * Intel Extended Vendor Command Set (ID 0x0001)
5 * (C) 2000 Red Hat. GPL'd
7 * $Id: cfi_cmdset_0001.c,v 1.178 2005/05/19 17:05:43 nico Exp $
10 * 10/10/2000 Nicolas Pitre <nico@cam.org>
11 * - completely revamped method functions so they are aware and
12 * independent of the flash geometry (buswidth, interleave, etc.)
13 * - scalability vs code size is completely set at compile-time
14 * (see include/linux/mtd/cfi.h for selection)
15 * - optimized write buffer method
16 * 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
17 * - reworked lock/unlock/erase support for var size flash
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
24 #include <linux/init.h>
26 #include <asm/byteorder.h>
28 #include <linux/errno.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/interrupt.h>
32 #include <linux/reboot.h>
33 #include <linux/mtd/xip.h>
34 #include <linux/mtd/map.h>
35 #include <linux/mtd/mtd.h>
36 #include <linux/mtd/compatmac.h>
37 #include <linux/mtd/cfi.h>
39 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
40 /* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
42 // debugging, turns off buffer write mode if set to 1
43 #define FORCE_WORD_WRITE 0
45 #define MANUFACTURER_INTEL 0x0089
46 #define I82802AB 0x00ad
47 #define I82802AC 0x00ac
48 #define MANUFACTURER_ST 0x0020
49 #define M50LPW080 0x002F
51 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
52 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
53 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
54 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
55 static void cfi_intelext_sync (struct mtd_info *);
56 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
57 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
59 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
60 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
61 static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
62 static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
63 static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
64 struct otp_info *, size_t);
65 static int cfi_intelext_get_user_prot_info (struct mtd_info *,
66 struct otp_info *, size_t);
68 static int cfi_intelext_suspend (struct mtd_info *);
69 static void cfi_intelext_resume (struct mtd_info *);
70 static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
72 static void cfi_intelext_destroy(struct mtd_info *);
74 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
76 static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
77 static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
79 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
80 size_t *retlen, u_char **mtdbuf);
81 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
84 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
85 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
91 * *********** SETUP AND PROBE BITS ***********
94 static struct mtd_chip_driver cfi_intelext_chipdrv = {
95 .probe = NULL, /* Not usable directly */
96 .destroy = cfi_intelext_destroy,
97 .name = "cfi_cmdset_0001",
101 /* #define DEBUG_LOCK_BITS */
102 /* #define DEBUG_CFI_FEATURES */
104 #ifdef DEBUG_CFI_FEATURES
/*
 * Debug helper (compiled only when DEBUG_CFI_FEATURES is defined):
 * print every documented bit of the Intel extended query table in
 * human-readable form, plus any unknown bits that happen to be set.
 * NOTE(review): the opening brace and the declaration of the loop
 * index 'i' fall on lines not visible in this chunk.
 */
105 static void cfi_tell_features(struct cfi_pri_intelext *extp)
/* FeatureSupport: one capability per bit, per the 0x0001 command-set spec */
108 printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
109 printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
110 printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
111 printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
112 printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
113 printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
114 printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
115 printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
116 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
117 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
118 printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
/* bits 10..31 are reserved/undocumented here — report them if set */
119 for (i=10; i<32; i++) {
120 if (extp->FeatureSupport & (1<<i))
121 printk(" - Unknown Bit %X: supported\n", i);
124 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
125 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
126 for (i=1; i<8; i++) {
127 if (extp->SuspendCmdSupport & (1<<i))
128 printk(" - Unknown Bit %X: supported\n", i);
131 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
132 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
133 printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
134 for (i=2; i<16; i++) {
135 if (extp->BlkStatusRegMask & (1<<i))
136 printk(" - Unknown Bit %X Active: yes\n",i);
/* Voltages are BCD-ish: high nibble = volts, low nibble = tenths */
139 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
140 extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
141 if (extp->VppOptimal)
142 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
143 extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
147 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
148 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
149 static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
151 struct map_info *map = mtd->priv;
152 struct cfi_private *cfi = map->fldrv_priv;
153 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
155 printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
156 "erase on write disabled.\n");
157 extp->SuspendCmdSupport &= ~1;
161 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
162 static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
164 struct map_info *map = mtd->priv;
165 struct cfi_private *cfi = map->fldrv_priv;
166 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
168 if (cfip && (cfip->FeatureSupport&4)) {
169 cfip->FeatureSupport &= ~4;
170 printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
175 static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
177 struct map_info *map = mtd->priv;
178 struct cfi_private *cfi = map->fldrv_priv;
180 cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */
181 cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
184 static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
186 struct map_info *map = mtd->priv;
187 struct cfi_private *cfi = map->fldrv_priv;
189 /* Note this is done after the region info is endian swapped */
190 cfi->cfiq->EraseRegionInfo[1] =
191 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
194 static void fixup_use_point(struct mtd_info *mtd, void *param)
196 struct map_info *map = mtd->priv;
197 if (!mtd->point && map_is_linear(map)) {
198 mtd->point = cfi_intelext_point;
199 mtd->unpoint = cfi_intelext_unpoint;
203 static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
205 struct map_info *map = mtd->priv;
206 struct cfi_private *cfi = map->fldrv_priv;
207 if (cfi->cfiq->BufWriteTimeoutTyp) {
208 printk(KERN_INFO "Using buffer write method\n" );
209 mtd->write = cfi_intelext_write_buffers;
/*
 * Fixups applied to chips probed in CFI mode, matched by manufacturer
 * and device id (CFI_MFR_ANY/CFI_ID_ANY entries match everything).
 * NOTE(review): the closing #endif lines and the NULL terminator entry
 * of this table fall on lines not visible in this chunk.
 */
213 static struct cfi_fixup cfi_fixup_table[] = {
214 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
215 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
217 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
218 { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
220 #if !FORCE_WORD_WRITE
221 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
223 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
224 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
/*
 * Fixups applied only to chips probed in JEDEC mode.  These firmware-hub
 * parts all take the FWH lock scheme (fixup_use_fwh_lock is defined
 * elsewhere, not visible in this chunk).
 * NOTE(review): the table terminator entry is not visible here.
 */
228 static struct cfi_fixup jedec_fixup_table[] = {
229 { MANUFACTURER_INTEL, I82802AB, fixup_use_fwh_lock, NULL, },
230 { MANUFACTURER_INTEL, I82802AC, fixup_use_fwh_lock, NULL, },
231 { MANUFACTURER_ST, M50LPW080, fixup_use_fwh_lock, NULL, },
/*
 * Generic fixups applied regardless of probe mode, after the mode-specific
 * tables above.
 * NOTE(review): the table terminator entry is not visible here.
 */
234 static struct cfi_fixup fixup_table[] = {
235 /* The CFI vendor ids and the JEDEC vendor IDs appear
236 * to be common. It is likely the device ids are as
237 * well. This table picks out all cases where
238 * we know that is the case.
240 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
/*
 * Read the Intel/Sharp extended query (PRI) table at 'adr' and convert
 * its multi-byte fields from little-endian flash order to CPU order.
 * For version 1.3 tables, walk the variable-length 'extra' area
 * (protection register info, burst read info, per-partition region
 * info) to compute the true table size; if the initially-read buffer is
 * too small the table is re-read at the larger size (re-read loop and
 * early-exit branches fall on lines not visible in this chunk).
 * Returns the kmalloc'd table, presumably NULL on failure — the error
 * paths are not visible here; TODO confirm against full source.
 */
244 static inline struct cfi_pri_intelext *
245 read_pri_intelext(struct map_info *map, __u16 adr)
247 struct cfi_pri_intelext *extp;
248 unsigned int extp_size = sizeof(*extp);
251 extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
255 /* Do some byteswapping if necessary */
256 extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
257 extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
258 extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
260 if (extp->MajorVersion == '1' && extp->MinorVersion == '3') {
261 unsigned int extra_size = 0;
264 /* Protection Register info */
265 extra_size += (extp->NumProtectionFields - 1) *
266 sizeof(struct cfi_intelext_otpinfo);
268 /* Burst Read info */
271 /* Number of hardware-partitions */
273 if (extp_size < sizeof(*extp) + extra_size)
275 nb_parts = extp->extra[extra_size - 1];
/* one region-info record per partition region, each followed by its
 * (NumBlockTypes - 1) extra block-info records */
277 for (i = 0; i < nb_parts; i++) {
278 struct cfi_intelext_regioninfo *rinfo;
279 rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
280 extra_size += sizeof(*rinfo);
281 if (extp_size < sizeof(*extp) + extra_size)
283 rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
284 extra_size += (rinfo->NumBlockTypes - 1)
285 * sizeof(struct cfi_intelext_blockinfo);
288 if (extp_size < sizeof(*extp) + extra_size) {
290 extp_size = sizeof(*extp) + extra_size;
/* sanity cap so a corrupt table can't make us loop/allocate forever */
292 if (extp_size > 4096) {
294 "%s: cfi_pri_intelext is too fat\n",
305 /* This routine is made available to other mtd code via
306 * inter_module_register. It must only be accessed through
307 * inter_module_get which will bump the use count of this module. The
308 * addresses passed back in cfi are valid as long as the use count of
309 * this module is non-zero, i.e. between inter_module_get and
310 * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
/*
 * Entry point for the Intel/Sharp (0x0001) command set: allocate and
 * populate the mtd_info, read and install the extended query table
 * (CFI mode), apply fixups, and initialise per-chip timing fields
 * before handing off to cfi_intelext_setup().
 * NOTE(review): several error-return branches and closing braces fall
 * on lines not visible in this chunk.
 */
312 struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
314 struct cfi_private *cfi = map->fldrv_priv;
315 struct mtd_info *mtd;
318 mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
320 printk(KERN_ERR "Failed to allocate memory for MTD device\n");
323 memset(mtd, 0, sizeof(*mtd));
325 mtd->type = MTD_NORFLASH;
327 /* Fill in the default mtd operations */
328 mtd->erase = cfi_intelext_erase_varsize;
329 mtd->read = cfi_intelext_read;
330 mtd->write = cfi_intelext_write_words;
331 mtd->sync = cfi_intelext_sync;
332 mtd->lock = cfi_intelext_lock;
333 mtd->unlock = cfi_intelext_unlock;
334 mtd->suspend = cfi_intelext_suspend;
335 mtd->resume = cfi_intelext_resume;
336 mtd->flags = MTD_CAP_NORFLASH;
337 mtd->name = map->name;
339 mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
341 if (cfi->cfi_mode == CFI_MODE_CFI) {
343 * It's a real CFI chip, not one for which the probe
344 * routine faked a CFI structure. So we read the feature
/* primary selects primary vs alternate extended-table address */
347 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
348 struct cfi_pri_intelext *extp;
350 extp = read_pri_intelext(map, adr);
356 /* Install our own private info structure */
357 cfi->cmdset_priv = extp;
359 cfi_fixup(mtd, cfi_fixup_table);
361 #ifdef DEBUG_CFI_FEATURES
362 /* Tell the user about it in lots of lovely detail */
363 cfi_tell_features(extp);
366 if(extp->SuspendCmdSupport & 1) {
367 printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
370 else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
371 /* Apply jedec specific fixups */
372 cfi_fixup(mtd, jedec_fixup_table);
374 /* Apply generic fixups */
375 cfi_fixup(mtd, fixup_table);
/* convert CFI typical timeouts (log2 values) into per-chip timings */
377 for (i=0; i< cfi->numchips; i++) {
378 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
379 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
380 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
381 cfi->chips[i].ref_point_counter = 0;
384 map->fldrv = &cfi_intelext_chipdrv;
386 return cfi_intelext_setup(mtd);
/*
 * Second-stage setup: compute total device size, build the erase-region
 * table (replicated per chip in the interleaved set), install the OTP
 * protection-register methods when configured, then run the hardware
 * partition fixup and register the reboot notifier.  Returns the mtd on
 * success; the failure path frees the erase regions and cmdset_priv
 * (labels/returns fall on lines not visible in this chunk).
 */
389 static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
391 struct map_info *map = mtd->priv;
392 struct cfi_private *cfi = map->fldrv_priv;
393 unsigned long offset = 0;
/* size of one interleaved set of chips */
395 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
397 //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
399 mtd->size = devsize * cfi->numchips;
401 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
402 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
403 * mtd->numeraseregions, GFP_KERNEL);
404 if (!mtd->eraseregions) {
405 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
409 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
410 unsigned long ernum, ersize;
/* EraseRegionInfo: low 16 bits = block count - 1, high 16 = size/256 */
411 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
412 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
/* mtd->erasesize reports the largest region's block size */
414 if (mtd->erasesize < ersize) {
415 mtd->erasesize = ersize;
417 for (j=0; j<cfi->numchips; j++) {
418 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
419 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
420 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
422 offset += (ersize * ernum);
/* sanity check: the regions must exactly tile one chip set */
425 if (offset != devsize) {
427 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
431 for (i=0; i<mtd->numeraseregions;i++){
432 printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
433 i,mtd->eraseregions[i].offset,
434 mtd->eraseregions[i].erasesize,
435 mtd->eraseregions[i].numblocks);
438 #ifdef CONFIG_MTD_OTP
439 mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
440 mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
441 mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
442 mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
443 mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
444 mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
447 /* This function has the potential to distort the reality
448 a bit and therefore should be called last. */
449 if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
452 __module_get(THIS_MODULE);
453 register_reboot_notifier(&mtd->reboot_notifier);
/* error unwind path (label lines not visible in this chunk) */
458 kfree(mtd->eraseregions);
461 kfree(cfi->cmdset_priv);
/*
 * For chips supporting simultaneous operations (v1.3 extended table,
 * FeatureSupport bit 9): replace the cfi_private with a new one that
 * has a virtual flchip per hardware partition, all partitions of a real
 * chip sharing one flchip_shared arbitration record.  Returns 0 on
 * success; error returns and the kfree-on-failure paths fall on lines
 * not visible in this chunk.
 */
465 static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
466 struct cfi_private **pcfi)
468 struct map_info *map = mtd->priv;
469 struct cfi_private *cfi = *pcfi;
470 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
473 * Probing of multi-partition flash chips.
475 * To support multiple partitions when available, we simply arrange
476 * for each of them to have their own flchip structure even if they
477 * are on the same physical chip. This means completely recreating
478 * a new cfi_private structure right here which is a blatant code
479 * layering violation, but this is still the least intrusive
480 * arrangement at this point. This can be rearranged in the future
481 * if someone feels motivated enough. --nico
483 if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3'
484 && extp->FeatureSupport & (1 << 9)) {
485 struct cfi_private *newcfi;
487 struct flchip_shared *shared;
488 int offs, numregions, numparts, partshift, numvirtchips, i, j;
/* walk the variable-length extra area, mirroring read_pri_intelext() */
490 /* Protection Register info */
491 offs = (extp->NumProtectionFields - 1) *
492 sizeof(struct cfi_intelext_otpinfo);
494 /* Burst Read info */
497 /* Number of partition regions */
498 numregions = extp->extra[offs];
501 /* Number of hardware partitions */
503 for (i = 0; i < numregions; i++) {
504 struct cfi_intelext_regioninfo *rinfo;
505 rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
506 numparts += rinfo->NumIdentPartitions;
507 offs += sizeof(*rinfo)
508 + (rinfo->NumBlockTypes - 1) *
509 sizeof(struct cfi_intelext_blockinfo);
513 * All functions below currently rely on all chips having
514 * the same geometry so we'll just assume that all hardware
515 * partitions are of the same size too.
/* each virtual chip covers 1 << partshift bytes */
517 partshift = cfi->chipshift - __ffs(numparts);
519 if ((1 << partshift) < mtd->erasesize) {
521 "%s: bad number of hw partitions (%d)\n",
522 __FUNCTION__, numparts);
526 numvirtchips = cfi->numchips * numparts;
527 newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
530 shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
535 memcpy(newcfi, cfi, sizeof(struct cfi_private));
536 newcfi->numchips = numvirtchips;
537 newcfi->chipshift = partshift;
539 chip = &newcfi->chips[0];
/* one shared record per physical chip, one flchip per partition */
540 for (i = 0; i < cfi->numchips; i++) {
541 shared[i].writing = shared[i].erasing = NULL;
542 spin_lock_init(&shared[i].lock);
543 for (j = 0; j < numparts; j++) {
544 *chip = cfi->chips[i];
545 chip->start += j << partshift;
546 chip->priv = &shared[i];
547 /* those should be reset too since
548 they create memory references. */
549 init_waitqueue_head(&chip->wq);
550 spin_lock_init(&chip->_spinlock);
551 chip->mutex = &chip->_spinlock;
556 printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
557 "--> %d partitions of %d KiB\n",
558 map->name, cfi->numchips, cfi->interleave,
559 newcfi->numchips, 1<<(newcfi->chipshift-10));
561 map->fldrv_priv = newcfi;
570 * *********** CHIP ACCESS FUNCTIONS ***********
/*
 * Acquire the (virtual) chip for an operation of type 'mode', called
 * with chip->mutex held.  Handles arbitration between hardware
 * partitions of the same physical chip (via chip->priv ->
 * flchip_shared), waiting for a busy chip to become ready, and
 * suspending an in-progress erase when the extended table says the
 * chip supports it.  Returns 0 on success, negative errno otherwise
 * (several return statements and case labels fall on lines not visible
 * in this chunk).
 */
573 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
575 DECLARE_WAITQUEUE(wait, current);
576 struct cfi_private *cfi = map->fldrv_priv;
/* 0x80 = ready bit of the status register; 0x01 = partition-write status */
577 map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
579 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
582 timeo = jiffies + HZ;
584 if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
586 * OK. We have possibility for contention on the write/erase
587 * operations which are global to the real chip and not per
588 * partition. So let's fight it over in the partition which
589 * currently has authority on the operation.
591 * The rules are as follows:
593 * - any write operation must own shared->writing.
595 * - any erase operation must own _both_ shared->writing and
598 * - contention arbitration is handled in the owner's context.
600 * The 'shared' struct can be read when its lock is taken.
601 * However any writes to it can only be made when the current
602 * owner's lock is also held.
604 struct flchip_shared *shared = chip->priv;
605 struct flchip *contender;
606 spin_lock(&shared->lock);
607 contender = shared->writing;
608 if (contender && contender != chip) {
610 * The engine to perform desired operation on this
611 * partition is already in use by someone else.
612 * Let's fight over it in the context of the chip
613 * currently using it. If it is possible to suspend,
614 * that other partition will do just that, otherwise
615 * it'll happily send us to sleep. In any case, when
616 * get_chip returns success we're clear to go ahead.
618 int ret = spin_trylock(contender->mutex);
619 spin_unlock(&shared->lock);
/* recurse into the owner's partition to suspend/finish its op */
622 spin_unlock(chip->mutex);
623 ret = get_chip(map, contender, contender->start, mode);
624 spin_lock(chip->mutex);
626 spin_unlock(contender->mutex);
629 timeo = jiffies + HZ;
630 spin_lock(&shared->lock);
/* we now own the write engine (and the erase engine if erasing) */
634 shared->writing = chip;
635 if (mode == FL_ERASING)
636 shared->erasing = chip;
637 if (contender && contender != chip)
638 spin_unlock(contender->mutex);
639 spin_unlock(&shared->lock);
642 switch (chip->state) {
646 status = map_read(map, adr);
647 if (map_word_andequal(map, status, status_OK, status_OK))
650 /* At this point we're fine with write operations
651 in other partitions as they don't conflict. */
652 if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
655 if (time_after(jiffies, timeo)) {
656 printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
660 spin_unlock(chip->mutex);
662 spin_lock(chip->mutex);
663 /* Someone else might have been playing with it. */
/* erase in progress: only suspend it if the chip supports erase
 * suspend and the requested mode is allowed during suspend */
674 !(cfip->FeatureSupport & 2) ||
675 !(mode == FL_READY || mode == FL_POINT ||
676 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
681 map_write(map, CMD(0xB0), adr);
683 /* If the flash has finished erasing, then 'erase suspend'
684 * appears to make some (28F320) flash devices switch to
685 * 'read' mode. Make sure that we switch to 'read status'
686 * mode so we get the right data. --rmk
688 map_write(map, CMD(0x70), adr);
689 chip->oldstate = FL_ERASING;
690 chip->state = FL_ERASE_SUSPENDING;
691 chip->erase_suspended = 1;
693 status = map_read(map, adr);
694 if (map_word_andequal(map, status, status_OK, status_OK))
697 if (time_after(jiffies, timeo)) {
698 /* Urgh. Resume and pretend we weren't here. */
699 map_write(map, CMD(0xd0), adr);
700 /* Make sure we're in 'read status' mode if it had finished */
701 map_write(map, CMD(0x70), adr);
702 chip->state = FL_ERASING;
703 chip->oldstate = FL_READY;
704 printk(KERN_ERR "Chip not ready after erase "
705 "suspended: status = 0x%lx\n", status.x[0]);
709 spin_unlock(chip->mutex);
711 spin_lock(chip->mutex);
712 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
713 So we can just loop here. */
715 chip->state = FL_STATUS;
718 case FL_XIP_WHILE_ERASING:
719 if (mode != FL_READY && mode != FL_POINT &&
720 (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
722 chip->oldstate = chip->state;
723 chip->state = FL_READY;
727 /* Only if there's no operation suspended... */
728 if (mode == FL_READY && chip->oldstate == FL_READY)
/* chip busy and not suspendable: sleep until woken, then retry */
733 set_current_state(TASK_UNINTERRUPTIBLE);
734 add_wait_queue(&chip->wq, &wait);
735 spin_unlock(chip->mutex);
737 remove_wait_queue(&chip->wq, &wait);
738 spin_lock(chip->mutex);
/*
 * Release the (virtual) chip acquired by get_chip(), called with
 * chip->mutex held.  Hands the shared write/erase engine back to a
 * partition we borrowed it from, and resumes a suspended erase
 * (oldstate FL_ERASING case) by issuing 0xd0/0x70.  Wakes sleepers via
 * chip->wq on lines not visible in this chunk.
 */
743 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
745 struct cfi_private *cfi = map->fldrv_priv;
748 struct flchip_shared *shared = chip->priv;
749 spin_lock(&shared->lock);
750 if (shared->writing == chip && chip->oldstate == FL_READY) {
751 /* We own the ability to write, but we're done */
752 shared->writing = shared->erasing;
753 if (shared->writing && shared->writing != chip) {
754 /* give back ownership to who we loaned it from */
755 struct flchip *loaner = shared->writing;
756 spin_lock(loaner->mutex);
757 spin_unlock(&shared->lock);
758 spin_unlock(chip->mutex);
/* recurse so the loaner resumes its own suspended operation */
759 put_chip(map, loaner, loaner->start);
760 spin_lock(chip->mutex);
761 spin_unlock(loaner->mutex);
765 shared->erasing = NULL;
766 shared->writing = NULL;
767 } else if (shared->erasing == chip && shared->writing != chip) {
769 * We own the ability to erase without the ability
770 * to write, which means the erase was suspended
771 * and some other partition is currently writing.
772 * Don't let the switch below mess things up since
773 * we don't have ownership to resume anything.
775 spin_unlock(&shared->lock);
779 spin_unlock(&shared->lock);
782 switch(chip->oldstate) {
784 chip->state = chip->oldstate;
785 /* What if one interleaved chip has finished and the
786 other hasn't? The old code would leave the finished
787 one in READY mode. That's bad, and caused -EROFS
788 errors to be returned from do_erase_oneblock because
789 that's the only bit it checked for at the time.
790 As the state machine appears to explicitly allow
791 sending the 0x70 (Read Status) command to an erasing
792 chip and expecting it to be ignored, that's what we
/* 0xd0 = erase resume, 0x70 = back to read-status mode */
794 map_write(map, CMD(0xd0), adr);
795 map_write(map, CMD(0x70), adr);
796 chip->oldstate = FL_READY;
797 chip->state = FL_ERASING;
800 case FL_XIP_WHILE_ERASING:
801 chip->state = chip->oldstate;
802 chip->oldstate = FL_READY;
808 /* We should really make set_vpp() count, rather than doing this */
812 printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
817 #ifdef CONFIG_MTD_XIP
820 * No interrupt what so ever can be serviced while the flash isn't in array
821 * mode. This is ensured by the xip_disable() and xip_enable() functions
822 * enclosing any code path where the flash is known not to be in array mode.
823 * And within a XIP disabled code path, only functions marked with __xipram
824 * may be called and nothing else (it's a good thing to inspect generated
825 * assembly to make sure inline functions were actually inlined and that gcc
826 * didn't emit calls to its own support functions). Also configuring MTD CFI
827 * support to a single buswidth and a single interleave is also recommended.
/*
 * Leave array mode safely for XIP: after this, no code may execute from
 * the flash until xip_enable() is called.  Interrupt disabling and the
 * remainder of the parameter list fall on lines not visible here.
 */
830 static void xip_disable(struct map_info *map, struct flchip *chip,
833 /* TODO: chips with no XIP use should ignore and return */
834 (void) map_read(map, adr); /* ensure mmu mapping is up to date */
/*
 * Put the flash back into array (read) mode with command 0xff and
 * re-enable interrupts, so XIP execution from the flash may resume.
 * Runs from RAM (__xipram) since the flash may not be readable yet.
 */
838 static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
841 struct cfi_private *cfi = map->fldrv_priv;
842 if (chip->state != FL_POINT && chip->state != FL_READY) {
843 map_write(map, CMD(0xff), adr);
844 chip->state = FL_READY;
/* dummy read flushes any buffered status data */
846 (void) map_read(map, adr);
852 * When a delay is required for the flash operation to complete, the
853 * xip_udelay() function is polling for both the given timeout and pending
854 * (but still masked) hardware interrupts. Whenever there is an interrupt
855 * pending then the flash erase or write operation is suspended, array mode
856 * restored and interrupts unmasked. Task scheduling might also happen at that
857 * point. The CPU eventually returns from the interrupt or the call to
858 * schedule() and the suspended flash operation is resumed for the remaining
859 * of the delay period.
861 * Warning: this function _will_ fool interrupt latency tracing tools.
/*
 * Delay 'usec' microseconds while polling for pending interrupts.  If
 * an interrupt is pending and the chip supports suspending the current
 * erase (FeatureSupport bit 1) or write (bit 2), suspend it, return to
 * array mode, service the interrupt (and possibly schedule), then
 * resume the operation for the remaining delay.  Runs from RAM
 * (__xipram).  Several branch bodies and the interrupt-enable calls
 * fall on lines not visible in this chunk.
 */
864 static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
865 unsigned long adr, int usec)
867 struct cfi_private *cfi = map->fldrv_priv;
868 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
869 map_word status, OK = CMD(0x80);
870 unsigned long suspended, start = xip_currtime();
871 flstate_t oldstate, newstate;
875 if (xip_irqpending() && cfip &&
876 ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
877 (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
878 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
880 * Let's suspend the erase or write operation when
881 * supported. Note that we currently don't try to
882 * suspend interleaved chips if there is already
883 * another operation suspended (imagine what happens
884 * when one chip was already done with the current
885 * operation while another chip suspended it, then
886 * we resume the whole thing at once). Yes, it
/* 0xb0 = program/erase suspend, 0x70 = read status */
889 map_write(map, CMD(0xb0), adr);
890 map_write(map, CMD(0x70), adr);
891 usec -= xip_elapsed_since(start);
892 suspended = xip_currtime();
894 if (xip_elapsed_since(suspended) > 100000) {
896 * The chip doesn't want to suspend
897 * after waiting for 100 msecs.
898 * This is a critical error but there
899 * is not much we can do here.
903 status = map_read(map, adr);
904 } while (!map_word_andequal(map, status, OK, OK));
906 /* Suspend succeeded */
907 oldstate = chip->state;
908 if (oldstate == FL_ERASING) {
/* status bit 6 set means the erase really was suspended */
909 if (!map_word_bitsset(map, status, CMD(0x40)))
911 newstate = FL_XIP_WHILE_ERASING;
912 chip->erase_suspended = 1;
914 if (!map_word_bitsset(map, status, CMD(0x04)))
916 newstate = FL_XIP_WHILE_WRITING;
917 chip->write_suspended = 1;
919 chip->state = newstate;
920 map_write(map, CMD(0xff), adr);
921 (void) map_read(map, adr);
/* pipeline flush before executing from flash again */
922 asm volatile (".rep 8; nop; .endr");
924 spin_unlock(chip->mutex);
925 asm volatile (".rep 8; nop; .endr");
929 * We're back. However someone else might have
930 * decided to go write to the chip if we are in
931 * a suspended erase state. If so let's wait
934 spin_lock(chip->mutex);
935 while (chip->state != newstate) {
936 DECLARE_WAITQUEUE(wait, current);
937 set_current_state(TASK_UNINTERRUPTIBLE);
938 add_wait_queue(&chip->wq, &wait);
939 spin_unlock(chip->mutex);
941 remove_wait_queue(&chip->wq, &wait);
942 spin_lock(chip->mutex);
944 /* Disallow XIP again */
947 /* Resume the write or erase operation */
948 map_write(map, CMD(0xd0), adr);
949 map_write(map, CMD(0x70), adr);
950 chip->state = oldstate;
951 start = xip_currtime();
952 } else if (usec >= 1000000/HZ) {
954 * Try to save on CPU power when waiting delay
955 * is at least a system timer tick period.
956 * No need to be extremely accurate here.
960 status = map_read(map, adr);
961 } while (!map_word_andequal(map, status, OK, OK)
962 && xip_elapsed_since(start) < usec);
965 #define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec)
968 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
969 * the flash is actively programming or erasing since we have to poll for
970 * the operation to complete anyway. We can't do that in a generic way with
971 * a XIP setup so do it before the actual flash operation in this case
972 * and stub it out from INVALIDATE_CACHE_UDELAY.
974 #define XIP_INVAL_CACHED_RANGE(map, from, size) \
975 INVALIDATE_CACHED_RANGE(map, from, size)
977 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
978 UDELAY(map, chip, adr, usec)
983 * Activating this XIP support changes the way the code works a bit. For
984 * example the code to suspend the current process when concurrent access
985 * happens is never executed because xip_udelay() will always return with the
986 * same chip state as it was entered with. This is why there is no care for
987 * the presence of add_wait_queue() or schedule() calls from within a couple
988 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
989 * The queueing and scheduling are always happening within xip_udelay().
991 * Similarly, get_chip() and put_chip() just happen to always be executed
992 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
993 * is in array mode, therefore never executing many cases therein and not
994 * causing any problem with XIP.
999 #define xip_disable(map, chip, adr)
1000 #define xip_enable(map, chip, adr)
1001 #define XIP_INVAL_CACHED_RANGE(x...)
1003 #define UDELAY(map, chip, adr, usec) \
1005 spin_unlock(chip->mutex); \
1007 spin_lock(chip->mutex); \
1010 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
1012 spin_unlock(chip->mutex); \
1013 INVALIDATE_CACHED_RANGE(map, adr, len); \
1015 spin_lock(chip->mutex); \
/*
 * Put one chip into FL_POINT state so its mapped memory can be read
 * directly: acquire the chip, switch it to array mode (0xff) if needed,
 * and bump the reference count so nested points are balanced by
 * cfi_intelext_unpoint().  Returns get_chip()'s result (return lines
 * not visible in this chunk).
 */
1020 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1022 unsigned long cmd_addr;
1023 struct cfi_private *cfi = map->fldrv_priv;
1028 /* Ensure cmd read/writes are aligned. */
1029 cmd_addr = adr & ~(map_bankwidth(map)-1);
1031 spin_lock(chip->mutex);
1033 ret = get_chip(map, chip, cmd_addr, FL_POINT);
1036 if (chip->state != FL_POINT && chip->state != FL_READY)
1037 map_write(map, CMD(0xff), cmd_addr);
1039 chip->state = FL_POINT;
1040 chip->ref_point_counter++;
1042 spin_unlock(chip->mutex);
/*
 * mtd->point implementation: hand back a direct pointer into the
 * linearly-mapped flash for [from, from+len) and pin every chip the
 * range touches in FL_POINT state via do_point_onechip().  Only valid
 * when map->virt is set (see fixup_use_point).  Loop advance and
 * *retlen updates fall on lines not visible in this chunk.
 */
1047 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
1049 struct map_info *map = mtd->priv;
1050 struct cfi_private *cfi = map->fldrv_priv;
1055 if (!map->virt || (from + len > mtd->size))
1058 *mtdbuf = (void *)map->virt + from;
1061 /* Now lock the chip(s) to POINT state */
1063 /* ofs: offset within the first chip that the first read should start */
1064 chipnum = (from >> cfi->chipshift);
1065 ofs = from - (chipnum << cfi->chipshift);
1068 unsigned long thislen;
1070 if (chipnum >= cfi->numchips)
/* clamp this segment to the end of the current chip */
1073 if ((len + ofs -1) >> cfi->chipshift)
1074 thislen = (1<<cfi->chipshift) - ofs;
1078 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1091 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
1093 struct map_info *map = mtd->priv;
1094 struct cfi_private *cfi = map->fldrv_priv;
1098 /* Now unlock the chip(s) POINT state */
1100 /* ofs: offset within the first chip that the first read should start */
1101 chipnum = (from >> cfi->chipshift);
1102 ofs = from - (chipnum << cfi->chipshift);
1105 unsigned long thislen;
1106 struct flchip *chip;
1108 chip = &cfi->chips[chipnum];
1109 if (chipnum >= cfi->numchips)
1112 if ((len + ofs -1) >> cfi->chipshift)
1113 thislen = (1<<cfi->chipshift) - ofs;
1117 spin_lock(chip->mutex);
1118 if (chip->state == FL_POINT) {
1119 chip->ref_point_counter--;
1120 if(chip->ref_point_counter == 0)
1121 chip->state = FL_READY;
1123 printk(KERN_ERR "Warning: unpoint called on non pointed region\n"); /* Should this give an error? */
1125 put_chip(map, chip, chip->start);
1126 spin_unlock(chip->mutex);
1134 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1136 unsigned long cmd_addr;
1137 struct cfi_private *cfi = map->fldrv_priv;
1142 /* Ensure cmd read/writes are aligned. */
1143 cmd_addr = adr & ~(map_bankwidth(map)-1);
1145 spin_lock(chip->mutex);
1146 ret = get_chip(map, chip, cmd_addr, FL_READY);
1148 spin_unlock(chip->mutex);
1152 if (chip->state != FL_POINT && chip->state != FL_READY) {
1153 map_write(map, CMD(0xff), cmd_addr);
1155 chip->state = FL_READY;
1158 map_copy_from(map, buf, adr, len);
1160 put_chip(map, chip, cmd_addr);
1162 spin_unlock(chip->mutex);
1166 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1168 struct map_info *map = mtd->priv;
1169 struct cfi_private *cfi = map->fldrv_priv;
1174 /* ofs: offset within the first chip that the first read should start */
1175 chipnum = (from >> cfi->chipshift);
1176 ofs = from - (chipnum << cfi->chipshift);
1181 unsigned long thislen;
1183 if (chipnum >= cfi->numchips)
1186 if ((len + ofs -1) >> cfi->chipshift)
1187 thislen = (1<<cfi->chipshift) - ofs;
1191 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1205 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1206 unsigned long adr, map_word datum, int mode)
1208 struct cfi_private *cfi = map->fldrv_priv;
1209 map_word status, status_OK, write_cmd;
1210 unsigned long timeo;
1215 /* Let's determine this according to the interleave only once */
1216 status_OK = CMD(0x80);
1218 case FL_WRITING: write_cmd = CMD(0x40); break;
1219 case FL_OTP_WRITE: write_cmd = CMD(0xc0); break;
1220 default: return -EINVAL;
1223 spin_lock(chip->mutex);
1224 ret = get_chip(map, chip, adr, mode);
1226 spin_unlock(chip->mutex);
1230 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1232 xip_disable(map, chip, adr);
1233 map_write(map, write_cmd, adr);
1234 map_write(map, datum, adr);
1237 INVALIDATE_CACHE_UDELAY(map, chip,
1238 adr, map_bankwidth(map),
1239 chip->word_write_time);
1241 timeo = jiffies + (HZ/2);
1244 if (chip->state != mode) {
1245 /* Someone's suspended the write. Sleep */
1246 DECLARE_WAITQUEUE(wait, current);
1248 set_current_state(TASK_UNINTERRUPTIBLE);
1249 add_wait_queue(&chip->wq, &wait);
1250 spin_unlock(chip->mutex);
1252 remove_wait_queue(&chip->wq, &wait);
1253 timeo = jiffies + (HZ / 2); /* FIXME */
1254 spin_lock(chip->mutex);
1258 status = map_read(map, adr);
1259 if (map_word_andequal(map, status, status_OK, status_OK))
1262 /* OK Still waiting */
1263 if (time_after(jiffies, timeo)) {
1264 chip->state = FL_STATUS;
1265 xip_enable(map, chip, adr);
1266 printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
1271 /* Latency issues. Drop the lock, wait a while and retry */
1273 UDELAY(map, chip, adr, 1);
1276 chip->word_write_time--;
1277 if (!chip->word_write_time)
1278 chip->word_write_time++;
1281 chip->word_write_time++;
1283 /* Done and happy. */
1284 chip->state = FL_STATUS;
1286 /* check for lock bit */
1287 if (map_word_bitsset(map, status, CMD(0x02))) {
1289 map_write(map, CMD(0x50), adr);
1290 /* put back into read status register mode */
1291 map_write(map, CMD(0x70), adr);
1295 xip_enable(map, chip, adr);
1296 out: put_chip(map, chip, adr);
1297 spin_unlock(chip->mutex);
1303 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1305 struct map_info *map = mtd->priv;
1306 struct cfi_private *cfi = map->fldrv_priv;
1315 chipnum = to >> cfi->chipshift;
1316 ofs = to - (chipnum << cfi->chipshift);
1318 /* If it's not bus-aligned, do the first byte write */
1319 if (ofs & (map_bankwidth(map)-1)) {
1320 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1321 int gap = ofs - bus_ofs;
1325 n = min_t(int, len, map_bankwidth(map)-gap);
1326 datum = map_word_ff(map);
1327 datum = map_word_load_partial(map, datum, buf, gap, n);
1329 ret = do_write_oneword(map, &cfi->chips[chipnum],
1330 bus_ofs, datum, FL_WRITING);
1339 if (ofs >> cfi->chipshift) {
1342 if (chipnum == cfi->numchips)
1347 while(len >= map_bankwidth(map)) {
1348 map_word datum = map_word_load(map, buf);
1350 ret = do_write_oneword(map, &cfi->chips[chipnum],
1351 ofs, datum, FL_WRITING);
1355 ofs += map_bankwidth(map);
1356 buf += map_bankwidth(map);
1357 (*retlen) += map_bankwidth(map);
1358 len -= map_bankwidth(map);
1360 if (ofs >> cfi->chipshift) {
1363 if (chipnum == cfi->numchips)
1368 if (len & (map_bankwidth(map)-1)) {
1371 datum = map_word_ff(map);
1372 datum = map_word_load_partial(map, datum, buf, 0, len);
1374 ret = do_write_oneword(map, &cfi->chips[chipnum],
1375 ofs, datum, FL_WRITING);
1386 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1387 unsigned long adr, const u_char *buf, int len)
1389 struct cfi_private *cfi = map->fldrv_priv;
1390 map_word status, status_OK;
1391 unsigned long cmd_adr, timeo;
1392 int wbufsize, z, ret=0, bytes, words;
1394 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1396 cmd_adr = adr & ~(wbufsize-1);
1398 /* Let's determine this according to the interleave only once */
1399 status_OK = CMD(0x80);
1401 spin_lock(chip->mutex);
1402 ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1404 spin_unlock(chip->mutex);
1408 XIP_INVAL_CACHED_RANGE(map, adr, len);
1410 xip_disable(map, chip, cmd_adr);
1412 /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1413 [...], the device will not accept any more Write to Buffer commands".
1414 So we must check here and reset those bits if they're set. Otherwise
1415 we're just pissing in the wind */
1416 if (chip->state != FL_STATUS)
1417 map_write(map, CMD(0x70), cmd_adr);
1418 status = map_read(map, cmd_adr);
1419 if (map_word_bitsset(map, status, CMD(0x30))) {
1420 xip_enable(map, chip, cmd_adr);
1421 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1422 xip_disable(map, chip, cmd_adr);
1423 map_write(map, CMD(0x50), cmd_adr);
1424 map_write(map, CMD(0x70), cmd_adr);
1427 chip->state = FL_WRITING_TO_BUFFER;
1431 map_write(map, CMD(0xe8), cmd_adr);
1433 status = map_read(map, cmd_adr);
1434 if (map_word_andequal(map, status, status_OK, status_OK))
1437 UDELAY(map, chip, cmd_adr, 1);
1440 /* Argh. Not ready for write to buffer */
1442 map_write(map, CMD(0x70), cmd_adr);
1443 chip->state = FL_STATUS;
1444 Xstatus = map_read(map, cmd_adr);
1445 /* Odd. Clear status bits */
1446 map_write(map, CMD(0x50), cmd_adr);
1447 map_write(map, CMD(0x70), cmd_adr);
1448 xip_enable(map, chip, cmd_adr);
1449 printk(KERN_ERR "Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
1450 status.x[0], Xstatus.x[0]);
1456 /* Write length of data to come */
1457 bytes = len & (map_bankwidth(map)-1);
1458 words = len / map_bankwidth(map);
1459 map_write(map, CMD(words - !bytes), cmd_adr );
1463 while(z < words * map_bankwidth(map)) {
1464 map_word datum = map_word_load(map, buf);
1465 map_write(map, datum, adr+z);
1467 z += map_bankwidth(map);
1468 buf += map_bankwidth(map);
1474 datum = map_word_ff(map);
1475 datum = map_word_load_partial(map, datum, buf, 0, bytes);
1476 map_write(map, datum, adr+z);
1480 map_write(map, CMD(0xd0), cmd_adr);
1481 chip->state = FL_WRITING;
1483 INVALIDATE_CACHE_UDELAY(map, chip,
1485 chip->buffer_write_time);
1487 timeo = jiffies + (HZ/2);
1490 if (chip->state != FL_WRITING) {
1491 /* Someone's suspended the write. Sleep */
1492 DECLARE_WAITQUEUE(wait, current);
1493 set_current_state(TASK_UNINTERRUPTIBLE);
1494 add_wait_queue(&chip->wq, &wait);
1495 spin_unlock(chip->mutex);
1497 remove_wait_queue(&chip->wq, &wait);
1498 timeo = jiffies + (HZ / 2); /* FIXME */
1499 spin_lock(chip->mutex);
1503 status = map_read(map, cmd_adr);
1504 if (map_word_andequal(map, status, status_OK, status_OK))
1507 /* OK Still waiting */
1508 if (time_after(jiffies, timeo)) {
1509 chip->state = FL_STATUS;
1510 xip_enable(map, chip, cmd_adr);
1511 printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
1516 /* Latency issues. Drop the lock, wait a while and retry */
1518 UDELAY(map, chip, cmd_adr, 1);
1521 chip->buffer_write_time--;
1522 if (!chip->buffer_write_time)
1523 chip->buffer_write_time++;
1526 chip->buffer_write_time++;
1528 /* Done and happy. */
1529 chip->state = FL_STATUS;
1531 /* check for lock bit */
1532 if (map_word_bitsset(map, status, CMD(0x02))) {
1534 map_write(map, CMD(0x50), cmd_adr);
1535 /* put back into read status register mode */
1536 map_write(map, CMD(0x70), adr);
1540 xip_enable(map, chip, cmd_adr);
1541 out: put_chip(map, chip, cmd_adr);
1542 spin_unlock(chip->mutex);
1546 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1547 size_t len, size_t *retlen, const u_char *buf)
1549 struct map_info *map = mtd->priv;
1550 struct cfi_private *cfi = map->fldrv_priv;
1551 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1560 chipnum = to >> cfi->chipshift;
1561 ofs = to - (chipnum << cfi->chipshift);
1563 /* If it's not bus-aligned, do the first word write */
1564 if (ofs & (map_bankwidth(map)-1)) {
1565 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1566 if (local_len > len)
1568 ret = cfi_intelext_write_words(mtd, to, local_len,
1576 if (ofs >> cfi->chipshift) {
1579 if (chipnum == cfi->numchips)
1585 /* We must not cross write block boundaries */
1586 int size = wbufsize - (ofs & (wbufsize-1));
1590 ret = do_write_buffer(map, &cfi->chips[chipnum],
1600 if (ofs >> cfi->chipshift) {
1603 if (chipnum == cfi->numchips)
1610 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1611 unsigned long adr, int len, void *thunk)
1613 struct cfi_private *cfi = map->fldrv_priv;
1614 map_word status, status_OK;
1615 unsigned long timeo;
1617 DECLARE_WAITQUEUE(wait, current);
1622 /* Let's determine this according to the interleave only once */
1623 status_OK = CMD(0x80);
1626 spin_lock(chip->mutex);
1627 ret = get_chip(map, chip, adr, FL_ERASING);
1629 spin_unlock(chip->mutex);
1633 XIP_INVAL_CACHED_RANGE(map, adr, len);
1635 xip_disable(map, chip, adr);
1637 /* Clear the status register first */
1638 map_write(map, CMD(0x50), adr);
1641 map_write(map, CMD(0x20), adr);
1642 map_write(map, CMD(0xD0), adr);
1643 chip->state = FL_ERASING;
1644 chip->erase_suspended = 0;
1646 INVALIDATE_CACHE_UDELAY(map, chip,
1648 chip->erase_time*1000/2);
1650 /* FIXME. Use a timer to check this, and return immediately. */
1651 /* Once the state machine's known to be working I'll do that */
1653 timeo = jiffies + (HZ*20);
1655 if (chip->state != FL_ERASING) {
1656 /* Someone's suspended the erase. Sleep */
1657 set_current_state(TASK_UNINTERRUPTIBLE);
1658 add_wait_queue(&chip->wq, &wait);
1659 spin_unlock(chip->mutex);
1661 remove_wait_queue(&chip->wq, &wait);
1662 spin_lock(chip->mutex);
1665 if (chip->erase_suspended) {
1666 /* This erase was suspended and resumed.
1667 Adjust the timeout */
1668 timeo = jiffies + (HZ*20); /* FIXME */
1669 chip->erase_suspended = 0;
1672 status = map_read(map, adr);
1673 if (map_word_andequal(map, status, status_OK, status_OK))
1676 /* OK Still waiting */
1677 if (time_after(jiffies, timeo)) {
1679 map_write(map, CMD(0x70), adr);
1680 chip->state = FL_STATUS;
1681 Xstatus = map_read(map, adr);
1682 /* Clear status bits */
1683 map_write(map, CMD(0x50), adr);
1684 map_write(map, CMD(0x70), adr);
1685 xip_enable(map, chip, adr);
1686 printk(KERN_ERR "waiting for erase at %08lx to complete timed out. status = %lx, Xstatus = %lx.\n",
1687 adr, status.x[0], Xstatus.x[0]);
1692 /* Latency issues. Drop the lock, wait a while and retry */
1693 UDELAY(map, chip, adr, 1000000/HZ);
1696 /* We've broken this before. It doesn't hurt to be safe */
1697 map_write(map, CMD(0x70), adr);
1698 chip->state = FL_STATUS;
1699 status = map_read(map, adr);
1701 /* check for lock bit */
1702 if (map_word_bitsset(map, status, CMD(0x3a))) {
1703 unsigned long chipstatus;
1705 /* Reset the error bits */
1706 map_write(map, CMD(0x50), adr);
1707 map_write(map, CMD(0x70), adr);
1708 xip_enable(map, chip, adr);
1710 chipstatus = MERGESTATUS(status);
1712 if ((chipstatus & 0x30) == 0x30) {
1713 printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%lx\n", chipstatus);
1715 } else if (chipstatus & 0x02) {
1716 /* Protection bit set */
1718 } else if (chipstatus & 0x8) {
1720 printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%lx\n", chipstatus);
1722 } else if (chipstatus & 0x20) {
1724 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1725 timeo = jiffies + HZ;
1726 put_chip(map, chip, adr);
1727 spin_unlock(chip->mutex);
1730 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx\n", adr, chipstatus);
1734 xip_enable(map, chip, adr);
1738 out: put_chip(map, chip, adr);
1739 spin_unlock(chip->mutex);
1743 int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1745 unsigned long ofs, len;
1751 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1755 instr->state = MTD_ERASE_DONE;
1756 mtd_erase_callback(instr);
1761 static void cfi_intelext_sync (struct mtd_info *mtd)
1763 struct map_info *map = mtd->priv;
1764 struct cfi_private *cfi = map->fldrv_priv;
1766 struct flchip *chip;
1769 for (i=0; !ret && i<cfi->numchips; i++) {
1770 chip = &cfi->chips[i];
1772 spin_lock(chip->mutex);
1773 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1776 chip->oldstate = chip->state;
1777 chip->state = FL_SYNCING;
1778 /* No need to wake_up() on this state change -
1779 * as the whole point is that nobody can do anything
1780 * with the chip now anyway.
1783 spin_unlock(chip->mutex);
1786 /* Unlock the chips again */
1788 for (i--; i >=0; i--) {
1789 chip = &cfi->chips[i];
1791 spin_lock(chip->mutex);
1793 if (chip->state == FL_SYNCING) {
1794 chip->state = chip->oldstate;
1795 chip->oldstate = FL_READY;
1798 spin_unlock(chip->mutex);
1802 #ifdef DEBUG_LOCK_BITS
1803 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1804 struct flchip *chip,
1806 int len, void *thunk)
1808 struct cfi_private *cfi = map->fldrv_priv;
1809 int status, ofs_factor = cfi->interleave * cfi->device_type;
1812 xip_disable(map, chip, adr+(2*ofs_factor));
1813 map_write(map, CMD(0x90), adr+(2*ofs_factor));
1814 chip->state = FL_JEDEC_QUERY;
1815 status = cfi_read_query(map, adr+(2*ofs_factor));
1816 xip_enable(map, chip, 0);
1817 printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1823 #define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
1824 #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
1826 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1827 unsigned long adr, int len, void *thunk)
1829 struct cfi_private *cfi = map->fldrv_priv;
1830 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1831 map_word status, status_OK;
1832 unsigned long timeo = jiffies + HZ;
1837 /* Let's determine this according to the interleave only once */
1838 status_OK = CMD(0x80);
1840 spin_lock(chip->mutex);
1841 ret = get_chip(map, chip, adr, FL_LOCKING);
1843 spin_unlock(chip->mutex);
1848 xip_disable(map, chip, adr);
1850 map_write(map, CMD(0x60), adr);
1851 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1852 map_write(map, CMD(0x01), adr);
1853 chip->state = FL_LOCKING;
1854 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1855 map_write(map, CMD(0xD0), adr);
1856 chip->state = FL_UNLOCKING;
1861 * If Instant Individual Block Locking supported then no need
1865 if (!extp || !(extp->FeatureSupport & (1 << 5)))
1866 UDELAY(map, chip, adr, 1000000/HZ);
1868 /* FIXME. Use a timer to check this, and return immediately. */
1869 /* Once the state machine's known to be working I'll do that */
1871 timeo = jiffies + (HZ*20);
1874 status = map_read(map, adr);
1875 if (map_word_andequal(map, status, status_OK, status_OK))
1878 /* OK Still waiting */
1879 if (time_after(jiffies, timeo)) {
1881 map_write(map, CMD(0x70), adr);
1882 chip->state = FL_STATUS;
1883 Xstatus = map_read(map, adr);
1884 xip_enable(map, chip, adr);
1885 printk(KERN_ERR "waiting for unlock to complete timed out. status = %lx, Xstatus = %lx.\n",
1886 status.x[0], Xstatus.x[0]);
1887 put_chip(map, chip, adr);
1888 spin_unlock(chip->mutex);
1892 /* Latency issues. Drop the lock, wait a while and retry */
1893 UDELAY(map, chip, adr, 1);
1896 /* Done and happy. */
1897 chip->state = FL_STATUS;
1898 xip_enable(map, chip, adr);
1899 put_chip(map, chip, adr);
1900 spin_unlock(chip->mutex);
1904 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1908 #ifdef DEBUG_LOCK_BITS
1909 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1910 __FUNCTION__, ofs, len);
1911 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1915 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1916 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1918 #ifdef DEBUG_LOCK_BITS
1919 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1921 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1928 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1932 #ifdef DEBUG_LOCK_BITS
1933 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1934 __FUNCTION__, ofs, len);
1935 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1939 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1940 ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1942 #ifdef DEBUG_LOCK_BITS
1943 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1945 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1952 #ifdef CONFIG_MTD_OTP
1954 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
1955 u_long data_offset, u_char *buf, u_int size,
1956 u_long prot_offset, u_int groupno, u_int groupsize);
1959 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
1960 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
1962 struct cfi_private *cfi = map->fldrv_priv;
1965 spin_lock(chip->mutex);
1966 ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
1968 spin_unlock(chip->mutex);
1972 /* let's ensure we're not reading back cached data from array mode */
1973 INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
1975 xip_disable(map, chip, chip->start);
1976 if (chip->state != FL_JEDEC_QUERY) {
1977 map_write(map, CMD(0x90), chip->start);
1978 chip->state = FL_JEDEC_QUERY;
1980 map_copy_from(map, buf, chip->start + offset, size);
1981 xip_enable(map, chip, chip->start);
1983 /* then ensure we don't keep OTP data in the cache */
1984 INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
1986 put_chip(map, chip, chip->start);
1987 spin_unlock(chip->mutex);
1992 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
1993 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
1998 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
1999 int gap = offset - bus_ofs;
2000 int n = min_t(int, size, map_bankwidth(map)-gap);
2001 map_word datum = map_word_ff(map);
2003 datum = map_word_load_partial(map, datum, buf, gap, n);
2004 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2017 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2018 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2020 struct cfi_private *cfi = map->fldrv_priv;
2023 /* make sure area matches group boundaries */
2027 datum = map_word_ff(map);
2028 datum = map_word_clr(map, datum, CMD(1 << grpno));
2029 return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2032 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2033 size_t *retlen, u_char *buf,
2034 otp_op_t action, int user_regs)
2036 struct map_info *map = mtd->priv;
2037 struct cfi_private *cfi = map->fldrv_priv;
2038 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2039 struct flchip *chip;
2040 struct cfi_intelext_otpinfo *otp;
2041 u_long devsize, reg_prot_offset, data_offset;
2042 u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2043 u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2048 /* Check that we actually have some OTP registers */
2049 if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2052 /* we need real chips here not virtual ones */
2053 devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2054 chip_step = devsize >> cfi->chipshift;
2057 /* Some chips have OTP located in the _top_ partition only.
2058 For example: Intel 28F256L18T (T means top-parameter device) */
2059 if (cfi->mfr == MANUFACTURER_INTEL) {
2064 chip_num = chip_step - 1;
2068 for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2069 chip = &cfi->chips[chip_num];
2070 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2072 /* first OTP region */
2074 reg_prot_offset = extp->ProtRegAddr;
2075 reg_fact_groups = 1;
2076 reg_fact_size = 1 << extp->FactProtRegSize;
2077 reg_user_groups = 1;
2078 reg_user_size = 1 << extp->UserProtRegSize;
2081 /* flash geometry fixup */
2082 data_offset = reg_prot_offset + 1;
2083 data_offset *= cfi->interleave * cfi->device_type;
2084 reg_prot_offset *= cfi->interleave * cfi->device_type;
2085 reg_fact_size *= cfi->interleave;
2086 reg_user_size *= cfi->interleave;
2089 groups = reg_user_groups;
2090 groupsize = reg_user_size;
2091 /* skip over factory reg area */
2092 groupno = reg_fact_groups;
2093 data_offset += reg_fact_groups * reg_fact_size;
2095 groups = reg_fact_groups;
2096 groupsize = reg_fact_size;
2100 while (len > 0 && groups > 0) {
2103 * Special case: if action is NULL
2104 * we fill buf with otp_info records.
2106 struct otp_info *otpinfo;
2108 len -= sizeof(struct otp_info);
2111 ret = do_otp_read(map, chip,
2113 (u_char *)&lockword,
2118 otpinfo = (struct otp_info *)buf;
2119 otpinfo->start = from;
2120 otpinfo->length = groupsize;
2122 !map_word_bitsset(map, lockword,
2125 buf += sizeof(*otpinfo);
2126 *retlen += sizeof(*otpinfo);
2127 } else if (from >= groupsize) {
2129 data_offset += groupsize;
2131 int size = groupsize;
2132 data_offset += from;
2137 ret = action(map, chip, data_offset,
2138 buf, size, reg_prot_offset,
2139 groupno, groupsize);
2145 data_offset += size;
2151 /* next OTP region */
2152 if (++field == extp->NumProtectionFields)
2154 reg_prot_offset = otp->ProtRegAddr;
2155 reg_fact_groups = otp->FactGroups;
2156 reg_fact_size = 1 << otp->FactProtRegSize;
2157 reg_user_groups = otp->UserGroups;
2158 reg_user_size = 1 << otp->UserProtRegSize;
2166 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2167 size_t len, size_t *retlen,
2170 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2171 buf, do_otp_read, 0);
2174 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2175 size_t len, size_t *retlen,
2178 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2179 buf, do_otp_read, 1);
2182 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2183 size_t len, size_t *retlen,
2186 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2187 buf, do_otp_write, 1);
2190 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2191 loff_t from, size_t len)
2194 return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2195 NULL, do_otp_lock, 1);
2198 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2199 struct otp_info *buf, size_t len)
2204 ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2205 return ret ? : retlen;
2208 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2209 struct otp_info *buf, size_t len)
2214 ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2215 return ret ? : retlen;
2220 static int cfi_intelext_suspend(struct mtd_info *mtd)
2222 struct map_info *map = mtd->priv;
2223 struct cfi_private *cfi = map->fldrv_priv;
2225 struct flchip *chip;
2228 for (i=0; !ret && i<cfi->numchips; i++) {
2229 chip = &cfi->chips[i];
2231 spin_lock(chip->mutex);
2233 switch (chip->state) {
2237 case FL_JEDEC_QUERY:
2238 if (chip->oldstate == FL_READY) {
2239 chip->oldstate = chip->state;
2240 chip->state = FL_PM_SUSPENDED;
2241 /* No need to wake_up() on this state change -
2242 * as the whole point is that nobody can do anything
2243 * with the chip now anyway.
2246 /* There seems to be an operation pending. We must wait for it. */
2247 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2252 /* Should we actually wait? Once upon a time these routines weren't
2253 allowed to. Or should we return -EAGAIN, because the upper layers
2254 ought to have already shut down anything which was using the device
2255 anyway? The latter for now. */
2256 printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
2258 case FL_PM_SUSPENDED:
2261 spin_unlock(chip->mutex);
2264 /* Unlock the chips again */
2267 for (i--; i >=0; i--) {
2268 chip = &cfi->chips[i];
2270 spin_lock(chip->mutex);
2272 if (chip->state == FL_PM_SUSPENDED) {
2273 /* No need to force it into a known state here,
2274 because we're returning failure, and it didn't
2276 chip->state = chip->oldstate;
2277 chip->oldstate = FL_READY;
2280 spin_unlock(chip->mutex);
2287 static void cfi_intelext_resume(struct mtd_info *mtd)
2289 struct map_info *map = mtd->priv;
2290 struct cfi_private *cfi = map->fldrv_priv;
2292 struct flchip *chip;
2294 for (i=0; i<cfi->numchips; i++) {
2296 chip = &cfi->chips[i];
2298 spin_lock(chip->mutex);
2300 /* Go to known state. Chip may have been power cycled */
2301 if (chip->state == FL_PM_SUSPENDED) {
2302 map_write(map, CMD(0xFF), cfi->chips[i].start);
2303 chip->oldstate = chip->state = FL_READY;
2307 spin_unlock(chip->mutex);
2311 static int cfi_intelext_reset(struct mtd_info *mtd)
2313 struct map_info *map = mtd->priv;
2314 struct cfi_private *cfi = map->fldrv_priv;
2317 for (i=0; i < cfi->numchips; i++) {
2318 struct flchip *chip = &cfi->chips[i];
2320 /* force the completion of any ongoing operation
2321 and switch to array mode so any bootloader in
2322 flash is accessible for soft reboot. */
2323 spin_lock(chip->mutex);
2324 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2326 map_write(map, CMD(0xff), chip->start);
2327 chip->state = FL_READY;
2329 spin_unlock(chip->mutex);
2335 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2338 struct mtd_info *mtd;
2340 mtd = container_of(nb, struct mtd_info, reboot_notifier);
2341 cfi_intelext_reset(mtd);
2345 static void cfi_intelext_destroy(struct mtd_info *mtd)
2347 struct map_info *map = mtd->priv;
2348 struct cfi_private *cfi = map->fldrv_priv;
2349 cfi_intelext_reset(mtd);
2350 unregister_reboot_notifier(&mtd->reboot_notifier);
2351 kfree(cfi->cmdset_priv);
2353 kfree(cfi->chips[0].priv);
2355 kfree(mtd->eraseregions);
2358 static char im_name_1[]="cfi_cmdset_0001";
2359 static char im_name_3[]="cfi_cmdset_0003";
2361 static int __init cfi_intelext_init(void)
2363 inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
2364 inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
2368 static void __exit cfi_intelext_exit(void)
2370 inter_module_unregister(im_name_1);
2371 inter_module_unregister(im_name_3);
2374 module_init(cfi_intelext_init);
2375 module_exit(cfi_intelext_exit);
2377 MODULE_LICENSE("GPL");
2378 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2379 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");