2 * Common Flash Interface support:
3 * Intel Extended Vendor Command Set (ID 0x0001)
5 * (C) 2000 Red Hat. GPL'd
7 * $Id: cfi_cmdset_0001.c,v 1.182 2005/08/06 04:40:41 nico Exp $
10 * 10/10/2000 Nicolas Pitre <nico@cam.org>
11 * - completely revamped method functions so they are aware and
12 * independent of the flash geometry (buswidth, interleave, etc.)
13 * - scalability vs code size is completely set at compile-time
14 * (see include/linux/mtd/cfi.h for selection)
15 * - optimized write buffer method
16 * 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
17 * - reworked lock/unlock/erase support for var size flash
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
24 #include <linux/init.h>
26 #include <asm/byteorder.h>
28 #include <linux/errno.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/interrupt.h>
32 #include <linux/reboot.h>
33 #include <linux/mtd/xip.h>
34 #include <linux/mtd/map.h>
35 #include <linux/mtd/mtd.h>
36 #include <linux/mtd/compatmac.h>
37 #include <linux/mtd/cfi.h>
39 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
40 /* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
42 // debugging, turns off buffer write mode if set to 1
43 #define FORCE_WORD_WRITE 0
45 #define MANUFACTURER_INTEL 0x0089
46 #define I82802AB 0x00ad
47 #define I82802AC 0x00ac
48 #define MANUFACTURER_ST 0x0020
49 #define M50LPW080 0x002F
51 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
52 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
53 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
54 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
55 static void cfi_intelext_sync (struct mtd_info *);
56 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
57 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
59 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
60 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
61 static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
62 static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
63 static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
64 struct otp_info *, size_t);
65 static int cfi_intelext_get_user_prot_info (struct mtd_info *,
66 struct otp_info *, size_t);
68 static int cfi_intelext_suspend (struct mtd_info *);
69 static void cfi_intelext_resume (struct mtd_info *);
70 static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
72 static void cfi_intelext_destroy(struct mtd_info *);
74 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
76 static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
77 static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
79 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
80 size_t *retlen, u_char **mtdbuf);
81 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
84 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
85 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
91 * *********** SETUP AND PROBE BITS ***********
/* Chip driver ops registered with the MTD map layer.  .probe is NULL
 * because this command set is only entered via cfi_cmdset_0001(), never
 * by direct probing.  (Closing brace of initializer not visible here.) */
static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
101 /* #define DEBUG_LOCK_BITS */
102 /* #define DEBUG_CFI_FEATURES */
104 #ifdef DEBUG_CFI_FEATURES
/*
 * Dump the Intel/Sharp extended query table to the kernel log: feature
 * flags, post-suspend command support, block status register mask and
 * optimum program/erase voltages.  Compiled only under DEBUG_CFI_FEATURES.
 *
 * NOTE(review): this chunk appears truncated -- the opening brace, the
 * declaration of 'i' and several closing braces are not visible here.
 */
static void cfi_tell_features(struct cfi_pri_intelext *extp)
	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	/* Bits 11..31 have no assigned name; report any that are set. */
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
155 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
156 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
157 static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
159 struct map_info *map = mtd->priv;
160 struct cfi_private *cfi = map->fldrv_priv;
161 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
163 printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
164 "erase on write disabled.\n");
165 extp->SuspendCmdSupport &= ~1;
169 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
170 static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
172 struct map_info *map = mtd->priv;
173 struct cfi_private *cfi = map->fldrv_priv;
174 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
176 if (cfip && (cfip->FeatureSupport&4)) {
177 cfip->FeatureSupport &= ~4;
178 printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
/*
 * ST M28W320CT: zero the CFI buffered-write timeouts so the buffer
 * write path is never selected for this part (fixup_use_write_buffers
 * keys off a non-zero BufWriteTimeoutTyp).
 * NOTE(review): braces around the body are not visible in this chunk.
 */
static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
/*
 * ST M28W320CB: patch the block count (low 16 bits) of the second
 * erase region to 0x3e, i.e. 0x3e + 1 = 63 erase blocks.
 * NOTE(review): braces around the body are not visible in this chunk.
 */
static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
202 static void fixup_use_point(struct mtd_info *mtd, void *param)
204 struct map_info *map = mtd->priv;
205 if (!mtd->point && map_is_linear(map)) {
206 mtd->point = cfi_intelext_point;
207 mtd->unpoint = cfi_intelext_unpoint;
211 static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
213 struct map_info *map = mtd->priv;
214 struct cfi_private *cfi = map->fldrv_priv;
215 if (cfi->cfiq->BufWriteTimeoutTyp) {
216 printk(KERN_INFO "Using buffer write method\n" );
217 mtd->write = cfi_intelext_write_buffers;
221 static struct cfi_fixup cfi_fixup_table[] = {
222 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
223 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
225 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
226 { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
228 #if !FORCE_WORD_WRITE
229 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
231 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
232 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
/* Fixups for JEDEC-probed parts: these firmware-hub devices use the
 * FWH lock scheme instead of the standard CFI lock commands.
 * NOTE(review): the array terminator and closing brace are not visible
 * in this chunk. */
static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
/* Fixups applied to every chip, regardless of probe method. */
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It is like the devices id's are as
	 * well.  This table is to pick all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
/*
 * Read and validate the Intel/Sharp extended query ("PRI") table at
 * offset 'adr'.  Accepts only versions 1.0 through 1.4, byteswaps the
 * multi-byte fields, and for 1.3+ walks the variable-length tail
 * (protection register info, burst read info, partition region info,
 * programming region info) to compute the table's true size, growing
 * the buffer when the initial sizeof(*extp) read was too small.
 *
 * NOTE(review): several lines (NULL checks, early-exit gotos, the
 * re-read logic and closing braces) are not visible in this chunk.
 */
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");

	/* Only extended query versions '1'.'0' .. '1'.'4' are understood. */
	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n", extp->MajorVersion,

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
		unsigned int extra_size = 0;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info: 6 bytes before CFI 1.4, 5 afterwards */
		extra_size += (extp->MinorVersion < '4') ? 6 : 5;

		/* Number of hardware-partitions */
		if (extp_size < sizeof(*extp) + extra_size)
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')

		/* Accumulate the size of each partition region record,
		 * re-checking the buffer bound as we go. */
		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		/* Initial read was too small: grow and (presumably) re-read.
		 * 4096 is a sanity cap on the table size. */
		if (extp_size < sizeof(*extp) + extra_size) {
			extp_size = sizeof(*extp) + extra_size;
			if (extp_size > 4096) {
				"%s: cfi_pri_intelext is too fat\n",
329 /* This routine is made available to other mtd code via
330 * inter_module_register. It must only be accessed through
331 * inter_module_get which will bump the use count of this module. The
332 * addresses passed back in cfi are valid as long as the use count of
333 * this module is non-zero, i.e. between inter_module_get and
334 * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
/*
 * Entry point for the Intel/Sharp (0x0001) command set.  Allocates the
 * mtd_info, installs the default method pointers, reads the extended
 * query table when CFI-probed (JEDEC-probed parts get jedec fixups
 * instead), applies the fixup tables, seeds per-chip timing values
 * from the CFI timeout exponents, and finishes via cfi_intelext_setup().
 *
 * NOTE(review): the kmalloc-failure branch and several closing braces
 * are not visible in this chunk.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	printk(KERN_ERR "Failed to allocate memory for MTD device\n");
	memset(mtd, 0, sizeof(*mtd));
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from the primary or alternate query address.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	/* CFI stores timeouts as power-of-two exponents. */
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
/*
 * Finish mtd_info construction: compute total device size, build the
 * per-chip erase region table from the CFI erase region records, hook
 * up the OTP methods (CONFIG_MTD_OTP), run the partition fixup and
 * register the reboot notifier.  The trailing kfree lines are the
 * shared error-unwind path.
 *
 * NOTE(review): return statements, goto labels and closing braces are
 * not visible in this chunk.
 */
static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	/* DevSize is a power-of-two exponent; scale by the interleave. */
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		/* EraseRegionInfo: high 16 bits = block size / 256,
		 * low 16 bits = block count - 1. */
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		/* mtd->erasesize ends up as the largest region's block size. */
		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		/* Replicate this region entry for each physical chip. */
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		offset += (ersize * ernum);

	/* Sanity: the erase regions must exactly tile the device. */
	if (offset != devsize) {
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);

	/* Error unwind.  NOTE(review): the NULL guard is redundant --
	 * kfree(NULL) is a no-op. */
	if(mtd->eraseregions)
		kfree(mtd->eraseregions);
	kfree(cfi->cmdset_priv);
/*
 * Multi-partition ("simultaneous operations", FeatureSupport bit 9)
 * support: when a CFI 1.3+ chip advertises hardware partitions, build
 * a new cfi_private with one virtual flchip per partition, all
 * partitions of one physical chip sharing a flchip_shared for
 * write/erase arbitration (see get_chip/put_chip).  *pcfi is replaced
 * with the new structure on success.
 *
 * NOTE(review): allocation-failure paths, the return statements and
 * several closing braces are not visible in this chunk.
 */
static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash ships.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatent code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += (extp->MinorVersion < '4') ? 6 : 5;

		/* Number of partition regions */
		numregions = extp->extra[offs];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')

		/* Number of hardware partitions */
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			MTD_PROGREGION_SIZE(mtd) = cfi->interleave << prinfo->ProgRegShift;
			MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
			MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
			mtd->flags |= MTD_PROGRAM_REGIONS;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, MTD_PROGREGION_SIZE(mtd),
			       MTD_PROGREGION_CTRLMODE_VALID(mtd),
			       MTD_PROGREGION_CTRLMODE_INVALID(mtd));

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		/* A partition must hold at least one erase block. */
		if ((1 << partshift) < mtd->erasesize) {
			       "%s: bad number of hw partitions (%d)\n",
			       __FUNCTION__, numparts);

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		/* Clone each physical chip into one flchip per partition,
		 * all pointing at that chip's shared arbitration state. */
		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
		       "--> %d partitions of %d KiB\n",
		       map->name, cfi->numchips, cfi->interleave,
		       newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
613 * *********** CHIP ACCESS FUNCTIONS ***********
/*
 * Acquire the (virtual) chip for an operation of type 'mode', sleeping
 * or suspending a conflicting erase as needed.  Called and returns with
 * chip->mutex held.  On partitioned chips (chip->priv set) write/erase
 * ownership is arbitrated through the flchip_shared structure since
 * those operations are global to the physical chip.
 *
 * NOTE(review): many lines (case labels, returns, goto/retry labels and
 * closing braces) are not visible in this chunk -- the visible lines
 * preserve the original statement order only.
 */
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	/* 0x80 = status register "ready"; 0x01 = partition write status. */
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	timeo = jiffies + HZ;

	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
		 * OK. We have possibility for contension on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *
		 * - contension arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read when its lock is taken.
		 * However any writes to it can only be made when the current
		 * owner's lock is also held.
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			/* Recurse into the contender's partition; lock
			 * ordering requires dropping our own mutex first. */
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			spin_unlock(contender->mutex);
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);

		/* We now own the write engine (and erase, if erasing). */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		if (contender && contender != chip)
			spin_unlock(contender->mutex);
		spin_unlock(&shared->lock);

	switch (chip->state) {
		/* Poll the status register until the chip reports ready. */
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))

		/* At this point we're fine with write operations
		   in other partitions as they don't conflict. */
		if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))

		if (time_after(jiffies, timeo)) {
			printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
			       map->name, status.x[0]);
		spin_unlock(chip->mutex);
		spin_lock(chip->mutex);
		/* Someone else might have been playing with it. */

		/* Erase in progress: only suspend it when the chip can
		   (FeatureSupport bit 1) and the requested mode allows it. */
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		      (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))

		/* 0xB0 = erase suspend command. */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here.  */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);

			spin_unlock(chip->mutex);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		chip->state = FL_STATUS;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
		chip->oldstate = chip->state;
		chip->state = FL_READY;

		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)

		/* Chip busy in an incompatible state: sleep on the chip's
		   wait queue and retry once woken. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
/*
 * Release the chip after an operation, resuming any suspended erase
 * and handing shared write/erase ownership back on partitioned chips.
 * Called with chip->mutex held.
 *
 * NOTE(review): case labels, break statements, wake_up calls and
 * closing braces are not visible in this chunk.
 */
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
	struct cfi_private *cfi = map->fldrv_priv;

		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);

			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
		spin_unlock(&shared->lock);

	switch(chip->oldstate) {
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		/* 0xd0 = erase resume, then back to read-status mode. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;

	/* We should really make set_vpp() count, rather than doing this */
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
860 #ifdef CONFIG_MTD_XIP
863 * No interrupt what so ever can be serviced while the flash isn't in array
864 * mode. This is ensured by the xip_disable() and xip_enable() functions
865 * enclosing any code path where the flash is known not to be in array mode.
866 * And within a XIP disabled code path, only functions marked with __xipram
867 * may be called and nothing else (it's a good thing to inspect generated
868 * assembly to make sure inline functions were actually inlined and that gcc
869 * didn't emit calls to its own support functions). Also configuring MTD CFI
870 * support to a single buswidth and a single interleave is also recommended.
/* XIP build: called before taking the flash out of array mode.
 * NOTE(review): the rest of the parameter list ('unsigned long adr)')
 * and the function body boundaries are not visible in this chunk. */
static void xip_disable(struct map_info *map, struct flchip *chip,
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
/* XIP build: put the flash back into array (read) mode with 0xff so
 * code may execute from it again, then do a dummy read.
 * NOTE(review): the rest of the parameter list and closing braces are
 * not visible in this chunk. */
static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	(void) map_read(map, adr);
895 * When a delay is required for the flash operation to complete, the
896 * xip_udelay() function is polling for both the given timeout and pending
897 * (but still masked) hardware interrupts. Whenever there is an interrupt
898 * pending then the flash erase or write operation is suspended, array mode
899 * restored and interrupts unmasked. Task scheduling might also happen at that
900 * point. The CPU eventually returns from the interrupt or the call to
901 * schedule() and the suspended flash operation is resumed for the remaining
902 * of the delay period.
904 * Warning: this function _will_ fool interrupt latency tracing tools.
/*
 * XIP-aware delay: poll for 'usec' microseconds, but if a (masked)
 * interrupt becomes pending while an erase or write is in progress and
 * the chip supports suspending that operation, suspend it (0xb0),
 * return the flash to array mode (0xff) so interrupts/scheduling can
 * run from it, then resume (0xd0) and continue the delay.
 *
 * NOTE(review): loop headers ('do {'), 'else' branches and closing
 * braces are not visible in this chunk; the visible lines preserve the
 * original statement order only.
 */
static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate, newstate;

		/* Suspend only when the chip advertises erase suspend
		   (bit 1) or write suspend (bit 2) for the current state. */
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
				if (xip_elapsed_since(suspended) > 100000) {
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				/* 0x40 = erase-suspend status bit. */
				if (!map_word_bitsset(map, status, CMD(0x40)))
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
				/* 0x04 = program-suspend status bit. */
				if (!map_word_bitsset(map, status, CMD(0x04)))
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");

			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			/* Disallow XIP again */

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
1008 #define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec)
/*
 * NOTE(review): this excerpt is missing interior lines (original line
 * numbers jump); the comment block and the non-XIP macro bodies below
 * are visibly incomplete.  Do not edit these macros without the full file.
 */
1011 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1012 * the flash is actively programming or erasing since we have to poll for
1013 * the operation to complete anyway. We can't do that in a generic way with
1014 * a XIP setup so do it before the actual flash operation in this case
1015 * and stub it out from INVALIDATE_CACHE_UDELAY.
1017 #define XIP_INVAL_CACHED_RANGE(map, from, size) \
1018 INVALIDATE_CACHED_RANGE(map, from, size)
1020 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
1021 UDELAY(map, chip, adr, usec)
1026 * Activating this XIP support changes the way the code works a bit. For
1027 * example the code to suspend the current process when concurrent access
1028 * happens is never executed because xip_udelay() will always return with the
1029 * same chip state as it was entered with. This is why there is no care for
1030 * the presence of add_wait_queue() or schedule() calls from within a couple
1031 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
1032 * The queueing and scheduling are always happening within xip_udelay().
1034 * Similarly, get_chip() and put_chip() just happen to always be executed
1035 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
1036 * is in array mode, therefore never executing many cases therein and not
1037 * causing any problem with XIP.
1042 #define xip_disable(map, chip, adr)
1043 #define xip_enable(map, chip, adr)
1044 #define XIP_INVAL_CACHED_RANGE(x...)
1046 #define UDELAY(map, chip, adr, usec) \
1048 spin_unlock(chip->mutex); \
1050 spin_lock(chip->mutex); \
1053 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
1055 spin_unlock(chip->mutex); \
1056 INVALIDATE_CACHED_RANGE(map, adr, len); \
1058 spin_lock(chip->mutex); \
1063 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
/*
 * Put one chip into FL_POINT state so the caller may read the mapped
 * flash directly.  Issues a read-array command (0xff) first if the chip
 * is not already readable, and bumps ref_point_counter so nested points
 * are balanced by unpoint.  NOTE(review): error-path and return lines
 * appear elided from this excerpt.
 */
1065 unsigned long cmd_addr;
1066 struct cfi_private *cfi = map->fldrv_priv;
1071 /* Ensure cmd read/writes are aligned. */
1072 cmd_addr = adr & ~(map_bankwidth(map)-1);
1074 spin_lock(chip->mutex);
1076 ret = get_chip(map, chip, cmd_addr, FL_POINT);
1079 if (chip->state != FL_POINT && chip->state != FL_READY)
1080 map_write(map, CMD(0xff), cmd_addr);
1082 chip->state = FL_POINT;
1083 chip->ref_point_counter++;
1085 spin_unlock(chip->mutex);
1090 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
/*
 * MTD point() entry: return a direct pointer into the memory-mapped
 * flash for [from, from+len) and lock each affected chip into FL_POINT
 * state via do_point_onechip().  Walks chips using cfi->chipshift,
 * clamping each request to the current chip's remaining size.
 */
1092 struct map_info *map = mtd->priv;
1093 struct cfi_private *cfi = map->fldrv_priv;
1098 if (!map->virt || (from + len > mtd->size))
1101 *mtdbuf = (void *)map->virt + from;
1104 /* Now lock the chip(s) to POINT state */
1106 /* ofs: offset within the first chip that the first read should start */
1107 chipnum = (from >> cfi->chipshift);
1108 ofs = from - (chipnum << cfi->chipshift);
1111 unsigned long thislen;
1113 if (chipnum >= cfi->numchips)
1116 if ((len + ofs -1) >> cfi->chipshift)
1117 thislen = (1<<cfi->chipshift) - ofs;
1121 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1134 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
/*
 * MTD unpoint() entry: undo a previous point().  For each chip in the
 * range, decrement ref_point_counter and drop back to FL_READY when the
 * last reference goes away; warn if the region was never pointed.
 */
1136 struct map_info *map = mtd->priv;
1137 struct cfi_private *cfi = map->fldrv_priv;
1141 /* Now unlock the chip(s) POINT state */
1143 /* ofs: offset within the first chip that the first read should start */
1144 chipnum = (from >> cfi->chipshift);
1145 ofs = from - (chipnum << cfi->chipshift);
1148 unsigned long thislen;
1149 struct flchip *chip;
1151 chip = &cfi->chips[chipnum];
1152 if (chipnum >= cfi->numchips)
1155 if ((len + ofs -1) >> cfi->chipshift)
1156 thislen = (1<<cfi->chipshift) - ofs;
1160 spin_lock(chip->mutex);
1161 if (chip->state == FL_POINT) {
1162 chip->ref_point_counter--;
1163 if(chip->ref_point_counter == 0)
1164 chip->state = FL_READY;
1166 printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */
1168 put_chip(map, chip, chip->start);
1169 spin_unlock(chip->mutex);
1177 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
/*
 * Read `len` bytes at `adr` from a single chip into `buf`.  Takes the
 * chip mutex, waits for the chip via get_chip(), switches the chip to
 * read-array mode (0xff) if needed, then does a straight map_copy_from().
 */
1179 unsigned long cmd_addr;
1180 struct cfi_private *cfi = map->fldrv_priv;
1185 /* Ensure cmd read/writes are aligned. */
1186 cmd_addr = adr & ~(map_bankwidth(map)-1);
1188 spin_lock(chip->mutex);
1189 ret = get_chip(map, chip, cmd_addr, FL_READY);
1191 spin_unlock(chip->mutex);
1195 if (chip->state != FL_POINT && chip->state != FL_READY) {
1196 map_write(map, CMD(0xff), cmd_addr);
1198 chip->state = FL_READY;
1201 map_copy_from(map, buf, adr, len);
1203 put_chip(map, chip, cmd_addr);
1205 spin_unlock(chip->mutex);
1209 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
/*
 * MTD read() entry: split the request along chip boundaries and hand
 * each piece to do_read_onechip().  Same chip-walking pattern as
 * cfi_intelext_point().
 */
1211 struct map_info *map = mtd->priv;
1212 struct cfi_private *cfi = map->fldrv_priv;
1217 /* ofs: offset within the first chip that the first read should start */
1218 chipnum = (from >> cfi->chipshift);
1219 ofs = from - (chipnum << cfi->chipshift);
1224 unsigned long thislen;
1226 if (chipnum >= cfi->numchips)
1229 if ((len + ofs -1) >> cfi->chipshift)
1230 thislen = (1<<cfi->chipshift) - ofs;
1234 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1248 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1249 unsigned long adr, map_word datum, int mode)
/*
 * Program a single bus word at `adr`.  `mode` selects the chip state
 * (FL_WRITING for array data, FL_OTP_WRITE for protection registers —
 * see the 0xc0 command below).  Polls the status register for SR.7
 * (ready), supports write suspension by sleeping on chip->wq, adapts
 * chip->word_write_time, and decodes SR error bits on completion.
 */
1251 struct cfi_private *cfi = map->fldrv_priv;
1252 map_word status, status_OK, write_cmd;
1253 unsigned long timeo;
1258 /* Let's determine those according to the interleave only once */
1259 status_OK = CMD(0x80);
1262 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1265 write_cmd = CMD(0xc0);
1271 spin_lock(chip->mutex);
1272 ret = get_chip(map, chip, adr, mode);
1274 spin_unlock(chip->mutex);
1278 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1280 xip_disable(map, chip, adr);
1281 map_write(map, write_cmd, adr);
1282 map_write(map, datum, adr);
1285 INVALIDATE_CACHE_UDELAY(map, chip,
1286 adr, map_bankwidth(map),
1287 chip->word_write_time);
1289 timeo = jiffies + (HZ/2);
1292 if (chip->state != mode) {
1293 /* Someone's suspended the write. Sleep */
1294 DECLARE_WAITQUEUE(wait, current);
1296 set_current_state(TASK_UNINTERRUPTIBLE);
1297 add_wait_queue(&chip->wq, &wait);
1298 spin_unlock(chip->mutex);
1300 remove_wait_queue(&chip->wq, &wait);
1301 timeo = jiffies + (HZ / 2); /* FIXME */
1302 spin_lock(chip->mutex);
1306 status = map_read(map, adr);
1307 if (map_word_andequal(map, status, status_OK, status_OK))
1310 /* OK Still waiting */
1311 if (time_after(jiffies, timeo)) {
1312 map_write(map, CMD(0x70), adr);
1313 chip->state = FL_STATUS;
1314 xip_enable(map, chip, adr);
1315 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1320 /* Latency issues. Drop the lock, wait a while and retry */
1322 UDELAY(map, chip, adr, 1);
1325 chip->word_write_time--;
1326 if (!chip->word_write_time)
1327 chip->word_write_time = 1;
1330 chip->word_write_time++;
1332 /* Done and happy. */
1333 chip->state = FL_STATUS;
1335 /* check for errors */
1336 if (map_word_bitsset(map, status, CMD(0x1a))) {
1337 unsigned long chipstatus = MERGESTATUS(status);
1340 map_write(map, CMD(0x50), adr);
1341 map_write(map, CMD(0x70), adr);
1342 xip_enable(map, chip, adr);
1344 if (chipstatus & 0x02) {
1346 } else if (chipstatus & 0x08) {
1347 printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1350 printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1357 xip_enable(map, chip, adr);
1358 out: put_chip(map, chip, adr);
1359 spin_unlock(chip->mutex);
1364 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
/*
 * MTD write() entry (word-at-a-time path): handles an unaligned head
 * (read-modify-write via map_word_load_partial with 0xff padding), the
 * aligned middle in bus-width chunks, and an unaligned tail, crossing
 * chip boundaries as needed.
 */
1366 struct map_info *map = mtd->priv;
1367 struct cfi_private *cfi = map->fldrv_priv;
1376 chipnum = to >> cfi->chipshift;
1377 ofs = to - (chipnum << cfi->chipshift);
1379 /* If it's not bus-aligned, do the first byte write */
1380 if (ofs & (map_bankwidth(map)-1)) {
1381 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1382 int gap = ofs - bus_ofs;
1386 n = min_t(int, len, map_bankwidth(map)-gap);
1387 datum = map_word_ff(map);
1388 datum = map_word_load_partial(map, datum, buf, gap, n);
1390 ret = do_write_oneword(map, &cfi->chips[chipnum],
1391 bus_ofs, datum, FL_WRITING);
1400 if (ofs >> cfi->chipshift) {
1403 if (chipnum == cfi->numchips)
1408 while(len >= map_bankwidth(map)) {
1409 map_word datum = map_word_load(map, buf);
1411 ret = do_write_oneword(map, &cfi->chips[chipnum],
1412 ofs, datum, FL_WRITING);
1416 ofs += map_bankwidth(map);
1417 buf += map_bankwidth(map);
1418 (*retlen) += map_bankwidth(map);
1419 len -= map_bankwidth(map);
1421 if (ofs >> cfi->chipshift) {
1424 if (chipnum == cfi->numchips)
1429 if (len & (map_bankwidth(map)-1)) {
1432 datum = map_word_ff(map);
1433 datum = map_word_load_partial(map, datum, buf, 0, len);
1435 ret = do_write_oneword(map, &cfi->chips[chipnum],
1436 ofs, datum, FL_WRITING);
1447 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1448 unsigned long adr, const u_char *buf, int len)
/*
 * Program up to one write-buffer's worth of data (wbufsize, derived from
 * CFI MaxBufWriteSize) starting at `adr`.  Sequence: clear stale SR.4/SR.5
 * bits, issue Write-to-Buffer (0xe8/0xe9), wait for buffer availability,
 * stream the word count and data, confirm with 0xd0, then poll SR.7 with
 * the same suspend/timeout/error handling as do_write_oneword().
 */
1450 struct cfi_private *cfi = map->fldrv_priv;
1451 map_word status, status_OK, write_cmd;
1452 unsigned long cmd_adr, timeo;
1453 int wbufsize, z, ret=0, bytes, words;
1455 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1457 cmd_adr = adr & ~(wbufsize-1);
1459 /* Let's determine this according to the interleave only once */
1460 status_OK = CMD(0x80);
1461 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1463 spin_lock(chip->mutex);
1464 ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1466 spin_unlock(chip->mutex);
1470 XIP_INVAL_CACHED_RANGE(map, adr, len);
1472 xip_disable(map, chip, cmd_adr);
1474 /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1475 [...], the device will not accept any more Write to Buffer commands".
1476 So we must check here and reset those bits if they're set. Otherwise
1477 we're just pissing in the wind */
1478 if (chip->state != FL_STATUS)
1479 map_write(map, CMD(0x70), cmd_adr);
1480 status = map_read(map, cmd_adr);
1481 if (map_word_bitsset(map, status, CMD(0x30))) {
1482 xip_enable(map, chip, cmd_adr);
1483 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1484 xip_disable(map, chip, cmd_adr);
1485 map_write(map, CMD(0x50), cmd_adr);
1486 map_write(map, CMD(0x70), cmd_adr);
1489 chip->state = FL_WRITING_TO_BUFFER;
1493 map_write(map, write_cmd, cmd_adr);
1495 status = map_read(map, cmd_adr);
1496 if (map_word_andequal(map, status, status_OK, status_OK))
1499 UDELAY(map, chip, cmd_adr, 1);
1502 /* Argh. Not ready for write to buffer */
1504 map_write(map, CMD(0x70), cmd_adr);
1505 chip->state = FL_STATUS;
1506 Xstatus = map_read(map, cmd_adr);
1507 /* Odd. Clear status bits */
1508 map_write(map, CMD(0x50), cmd_adr);
1509 map_write(map, CMD(0x70), cmd_adr);
1510 xip_enable(map, chip, cmd_adr);
1511 printk(KERN_ERR "%s: Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
1512 map->name, status.x[0], Xstatus.x[0]);
1518 /* Write length of data to come */
1519 bytes = len & (map_bankwidth(map)-1);
1520 words = len / map_bankwidth(map);
1521 map_write(map, CMD(words - !bytes), cmd_adr );
1525 while(z < words * map_bankwidth(map)) {
1526 map_word datum = map_word_load(map, buf);
1527 map_write(map, datum, adr+z);
1529 z += map_bankwidth(map);
1530 buf += map_bankwidth(map);
1536 datum = map_word_ff(map);
1537 datum = map_word_load_partial(map, datum, buf, 0, bytes);
1538 map_write(map, datum, adr+z);
1542 map_write(map, CMD(0xd0), cmd_adr);
1543 chip->state = FL_WRITING;
1545 INVALIDATE_CACHE_UDELAY(map, chip,
1547 chip->buffer_write_time);
1549 timeo = jiffies + (HZ/2);
1552 if (chip->state != FL_WRITING) {
1553 /* Someone's suspended the write. Sleep */
1554 DECLARE_WAITQUEUE(wait, current);
1555 set_current_state(TASK_UNINTERRUPTIBLE);
1556 add_wait_queue(&chip->wq, &wait);
1557 spin_unlock(chip->mutex);
1559 remove_wait_queue(&chip->wq, &wait);
1560 timeo = jiffies + (HZ / 2); /* FIXME */
1561 spin_lock(chip->mutex);
1565 status = map_read(map, cmd_adr);
1566 if (map_word_andequal(map, status, status_OK, status_OK))
1569 /* OK Still waiting */
1570 if (time_after(jiffies, timeo)) {
1571 map_write(map, CMD(0x70), cmd_adr);
1572 chip->state = FL_STATUS;
1573 xip_enable(map, chip, cmd_adr);
1574 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1579 /* Latency issues. Drop the lock, wait a while and retry */
1581 UDELAY(map, chip, cmd_adr, 1);
1584 chip->buffer_write_time--;
1585 if (!chip->buffer_write_time)
1586 chip->buffer_write_time = 1;
1589 chip->buffer_write_time++;
1591 /* Done and happy. */
1592 chip->state = FL_STATUS;
1594 /* check for errors */
1595 if (map_word_bitsset(map, status, CMD(0x1a))) {
1596 unsigned long chipstatus = MERGESTATUS(status);
1599 map_write(map, CMD(0x50), cmd_adr);
1600 map_write(map, CMD(0x70), cmd_adr);
1601 xip_enable(map, chip, cmd_adr);
1603 if (chipstatus & 0x02) {
1605 } else if (chipstatus & 0x08) {
1606 printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1609 printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1616 xip_enable(map, chip, cmd_adr);
1617 out: put_chip(map, chip, cmd_adr);
1618 spin_unlock(chip->mutex);
1622 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1623 size_t len, size_t *retlen, const u_char *buf)
/*
 * MTD write() entry (buffered path): align the head with a word write,
 * then feed wbufsize-bounded chunks to do_write_buffer(), never crossing
 * a write-buffer or chip boundary within one call.
 */
1625 struct map_info *map = mtd->priv;
1626 struct cfi_private *cfi = map->fldrv_priv;
1627 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1636 chipnum = to >> cfi->chipshift;
1637 ofs = to - (chipnum << cfi->chipshift);
1639 /* If it's not bus-aligned, do the first word write */
1640 if (ofs & (map_bankwidth(map)-1)) {
1641 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1642 if (local_len > len)
1644 ret = cfi_intelext_write_words(mtd, to, local_len,
1652 if (ofs >> cfi->chipshift) {
1655 if (chipnum == cfi->numchips)
1661 /* We must not cross write block boundaries */
1662 int size = wbufsize - (ofs & (wbufsize-1));
1666 ret = do_write_buffer(map, &cfi->chips[chipnum],
1676 if (ofs >> cfi->chipshift) {
1679 if (chipnum == cfi->numchips)
1686 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1687 unsigned long adr, int len, void *thunk)
/*
 * Erase one block at `adr`.  Clears the status register, issues the
 * block-erase sequence (0x20/0xd0), then polls SR.7 with a 20 s timeout,
 * honoring erase suspension (chip->erase_suspended resets the timeout).
 * On completion, SR error bits are decoded; a command-sequence error
 * (SR.4|SR.5 == 0x30) and VPP/protection failures are reported, and an
 * erase failure with SR.5 set is retried a bounded number of times.
 */
1689 struct cfi_private *cfi = map->fldrv_priv;
1690 map_word status, status_OK;
1691 unsigned long timeo;
1693 DECLARE_WAITQUEUE(wait, current);
1698 /* Let's determine this according to the interleave only once */
1699 status_OK = CMD(0x80);
1702 spin_lock(chip->mutex);
1703 ret = get_chip(map, chip, adr, FL_ERASING);
1705 spin_unlock(chip->mutex);
1709 XIP_INVAL_CACHED_RANGE(map, adr, len);
1711 xip_disable(map, chip, adr);
1713 /* Clear the status register first */
1714 map_write(map, CMD(0x50), adr);
1717 map_write(map, CMD(0x20), adr);
1718 map_write(map, CMD(0xD0), adr);
1719 chip->state = FL_ERASING;
1720 chip->erase_suspended = 0;
1722 INVALIDATE_CACHE_UDELAY(map, chip,
1724 chip->erase_time*1000/2);
1726 /* FIXME. Use a timer to check this, and return immediately. */
1727 /* Once the state machine's known to be working I'll do that */
1729 timeo = jiffies + (HZ*20);
1731 if (chip->state != FL_ERASING) {
1732 /* Someone's suspended the erase. Sleep */
1733 set_current_state(TASK_UNINTERRUPTIBLE);
1734 add_wait_queue(&chip->wq, &wait);
1735 spin_unlock(chip->mutex);
1737 remove_wait_queue(&chip->wq, &wait);
1738 spin_lock(chip->mutex);
1741 if (chip->erase_suspended) {
1742 /* This erase was suspended and resumed.
1743 Adjust the timeout */
1744 timeo = jiffies + (HZ*20); /* FIXME */
1745 chip->erase_suspended = 0;
1748 status = map_read(map, adr);
1749 if (map_word_andequal(map, status, status_OK, status_OK))
1752 /* OK Still waiting */
1753 if (time_after(jiffies, timeo)) {
1754 map_write(map, CMD(0x70), adr);
1755 chip->state = FL_STATUS;
1756 xip_enable(map, chip, adr);
1757 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1762 /* Latency issues. Drop the lock, wait a while and retry */
1763 UDELAY(map, chip, adr, 1000000/HZ);
1766 /* We've broken this before. It doesn't hurt to be safe */
1767 map_write(map, CMD(0x70), adr);
1768 chip->state = FL_STATUS;
1769 status = map_read(map, adr);
1771 /* check for errors */
1772 if (map_word_bitsset(map, status, CMD(0x3a))) {
1773 unsigned long chipstatus = MERGESTATUS(status);
1775 /* Reset the error bits */
1776 map_write(map, CMD(0x50), adr);
1777 map_write(map, CMD(0x70), adr);
1778 xip_enable(map, chip, adr);
1780 if ((chipstatus & 0x30) == 0x30) {
1781 printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1783 } else if (chipstatus & 0x02) {
1784 /* Protection bit set */
1786 } else if (chipstatus & 0x8) {
1788 printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1790 } else if (chipstatus & 0x20 && retries--) {
1791 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1792 timeo = jiffies + HZ;
1793 put_chip(map, chip, adr);
1794 spin_unlock(chip->mutex);
1797 printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1804 xip_enable(map, chip, adr);
1805 out: put_chip(map, chip, adr);
1806 spin_unlock(chip->mutex);
1810 int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
/*
 * MTD erase() entry: delegate the region walk to cfi_varsize_frob()
 * with do_erase_oneblock as the per-block worker, then signal
 * completion through the standard erase callback.
 */
1812 unsigned long ofs, len;
1818 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1822 instr->state = MTD_ERASE_DONE;
1823 mtd_erase_callback(instr);
1828 static void cfi_intelext_sync (struct mtd_info *mtd)
/*
 * MTD sync() entry: drive every chip into FL_SYNCING (waiting for any
 * in-flight operation via get_chip), then walk the chips backwards and
 * restore their previous state.
 */
1830 struct map_info *map = mtd->priv;
1831 struct cfi_private *cfi = map->fldrv_priv;
1833 struct flchip *chip;
1836 for (i=0; !ret && i<cfi->numchips; i++) {
1837 chip = &cfi->chips[i];
1839 spin_lock(chip->mutex);
1840 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1843 chip->oldstate = chip->state;
1844 chip->state = FL_SYNCING;
1845 /* No need to wake_up() on this state change -
1846 * as the whole point is that nobody can do anything
1847 * with the chip now anyway.
1850 spin_unlock(chip->mutex);
1853 /* Unlock the chips again */
1855 for (i--; i >=0; i--) {
1856 chip = &cfi->chips[i];
1858 spin_lock(chip->mutex);
1860 if (chip->state == FL_SYNCING) {
1861 chip->state = chip->oldstate;
1862 chip->oldstate = FL_READY;
1865 spin_unlock(chip->mutex);
1869 #ifdef DEBUG_LOCK_BITS
/*
 * Debug-only helper: query and print the lock status byte of one block
 * (offset 2*ofs_factor into the block's identifier region, per the
 * Intel read-identifier command 0x90).
 */
1870 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1871 struct flchip *chip,
1873 int len, void *thunk)
1875 struct cfi_private *cfi = map->fldrv_priv;
1876 int status, ofs_factor = cfi->interleave * cfi->device_type;
1879 xip_disable(map, chip, adr+(2*ofs_factor));
1880 map_write(map, CMD(0x90), adr+(2*ofs_factor));
1881 chip->state = FL_JEDEC_QUERY;
1882 status = cfi_read_query(map, adr+(2*ofs_factor));
1883 xip_enable(map, chip, 0);
1884 printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1890 #define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
1891 #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
/*
 * Lock or unlock one block, selected by the `thunk` cookie above.
 * Sequence is 0x60 followed by 0x01 (lock) or 0xd0 (unlock).  Unless
 * the chip advertises Instant Individual Block Locking (extended-table
 * FeatureSupport bit 5), the operation needs a delay before polling
 * SR.7 for completion, with a 20 s timeout.
 */
1893 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1894 unsigned long adr, int len, void *thunk)
1896 struct cfi_private *cfi = map->fldrv_priv;
1897 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1898 map_word status, status_OK;
1899 unsigned long timeo = jiffies + HZ;
1904 /* Let's determine this according to the interleave only once */
1905 status_OK = CMD(0x80);
1907 spin_lock(chip->mutex);
1908 ret = get_chip(map, chip, adr, FL_LOCKING);
1910 spin_unlock(chip->mutex);
1915 xip_disable(map, chip, adr);
1917 map_write(map, CMD(0x60), adr);
1918 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1919 map_write(map, CMD(0x01), adr);
1920 chip->state = FL_LOCKING;
1921 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1922 map_write(map, CMD(0xD0), adr);
1923 chip->state = FL_UNLOCKING;
1928 * If Instant Individual Block Locking supported then no need
1932 if (!extp || !(extp->FeatureSupport & (1 << 5)))
1933 UDELAY(map, chip, adr, 1000000/HZ);
1935 /* FIXME. Use a timer to check this, and return immediately. */
1936 /* Once the state machine's known to be working I'll do that */
1938 timeo = jiffies + (HZ*20);
1941 status = map_read(map, adr);
1942 if (map_word_andequal(map, status, status_OK, status_OK))
1945 /* OK Still waiting */
1946 if (time_after(jiffies, timeo)) {
1947 map_write(map, CMD(0x70), adr);
1948 chip->state = FL_STATUS;
1949 xip_enable(map, chip, adr);
1950 printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
1951 put_chip(map, chip, adr);
1952 spin_unlock(chip->mutex);
1956 /* Latency issues. Drop the lock, wait a while and retry */
1957 UDELAY(map, chip, adr, 1);
1960 /* Done and happy. */
1961 chip->state = FL_STATUS;
1962 xip_enable(map, chip, adr);
1963 put_chip(map, chip, adr);
1964 spin_unlock(chip->mutex);
1968 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
/*
 * MTD lock() entry: apply do_xxlock_oneblock with the LOCK cookie to
 * every block in [ofs, ofs+len); optionally dumps lock bits before and
 * after when DEBUG_LOCK_BITS is defined.
 */
1972 #ifdef DEBUG_LOCK_BITS
1973 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1974 __FUNCTION__, ofs, len);
1975 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1979 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1980 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1982 #ifdef DEBUG_LOCK_BITS
1983 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1985 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1992 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
/*
 * MTD unlock() entry: mirror of cfi_intelext_lock() using the UNLOCK
 * cookie.
 */
1996 #ifdef DEBUG_LOCK_BITS
1997 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1998 __FUNCTION__, ofs, len);
1999 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2003 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2004 ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2006 #ifdef DEBUG_LOCK_BITS
2007 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2009 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2016 #ifdef CONFIG_MTD_OTP
/*
 * OTP (one-time-programmable protection register) support.  otp_op_t is
 * the per-group worker signature used by cfi_intelext_otp_walk().
 * do_otp_read: switch the chip to JEDEC-query mode (0x90) and copy
 * `size` bytes from the protection-register area at chip->start+offset,
 * invalidating any cached array-mode data around the copy.
 */
2018 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2019 u_long data_offset, u_char *buf, u_int size,
2020 u_long prot_offset, u_int groupno, u_int groupsize);
2023 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2024 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2026 struct cfi_private *cfi = map->fldrv_priv;
2029 spin_lock(chip->mutex);
2030 ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2032 spin_unlock(chip->mutex);
2036 /* let's ensure we're not reading back cached data from array mode */
2037 INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2039 xip_disable(map, chip, chip->start);
2040 if (chip->state != FL_JEDEC_QUERY) {
2041 map_write(map, CMD(0x90), chip->start);
2042 chip->state = FL_JEDEC_QUERY;
2044 map_copy_from(map, buf, chip->start + offset, size);
2045 xip_enable(map, chip, chip->start);
2047 /* then ensure we don't keep OTP data in the cache */
2048 INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2050 put_chip(map, chip, chip->start);
2051 spin_unlock(chip->mutex);
2056 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2057 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
/*
 * Program OTP data one bus word at a time using do_write_oneword() in
 * FL_OTP_WRITE mode, padding partial words with 0xff so untouched bits
 * stay unprogrammed.
 */
2062 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2063 int gap = offset - bus_ofs;
2064 int n = min_t(int, size, map_bankwidth(map)-gap);
2065 map_word datum = map_word_ff(map);
2067 datum = map_word_load_partial(map, datum, buf, gap, n);
2068 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2081 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2082 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
/*
 * Permanently lock one OTP group: clear its bit in the protection lock
 * word at `prot` (clearing the bit via an OTP write is irreversible).
 */
2084 struct cfi_private *cfi = map->fldrv_priv;
2087 /* make sure area matches group boundaries */
2091 datum = map_word_ff(map);
2092 datum = map_word_clr(map, datum, CMD(1 << grpno));
2093 return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2096 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2097 size_t *retlen, u_char *buf,
2098 otp_op_t action, int user_regs)
/*
 * Walk the OTP protection-register regions described by the Intel
 * extended query table and apply `action` (read/write/lock) to each
 * group overlapping [from, from+len).  With action == NULL, fill `buf`
 * with struct otp_info records instead (locked flag derived from the
 * protection lock word).  `user_regs` selects user vs factory groups.
 * Geometry is fixed up for interleave/device_type; only real chips are
 * visited (chip_step derived from DevSize).
 */
2100 struct map_info *map = mtd->priv;
2101 struct cfi_private *cfi = map->fldrv_priv;
2102 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2103 struct flchip *chip;
2104 struct cfi_intelext_otpinfo *otp;
2105 u_long devsize, reg_prot_offset, data_offset;
2106 u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2107 u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2112 /* Check that we actually have some OTP registers */
2113 if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2116 /* we need real chips here not virtual ones */
2117 devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2118 chip_step = devsize >> cfi->chipshift;
2121 /* Some chips have OTP located in the _top_ partition only.
2122 For example: Intel 28F256L18T (T means top-parameter device) */
2123 if (cfi->mfr == MANUFACTURER_INTEL) {
2128 chip_num = chip_step - 1;
2132 for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2133 chip = &cfi->chips[chip_num];
2134 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2136 /* first OTP region */
2138 reg_prot_offset = extp->ProtRegAddr;
2139 reg_fact_groups = 1;
2140 reg_fact_size = 1 << extp->FactProtRegSize;
2141 reg_user_groups = 1;
2142 reg_user_size = 1 << extp->UserProtRegSize;
2145 /* flash geometry fixup */
2146 data_offset = reg_prot_offset + 1;
2147 data_offset *= cfi->interleave * cfi->device_type;
2148 reg_prot_offset *= cfi->interleave * cfi->device_type;
2149 reg_fact_size *= cfi->interleave;
2150 reg_user_size *= cfi->interleave;
2153 groups = reg_user_groups;
2154 groupsize = reg_user_size;
2155 /* skip over factory reg area */
2156 groupno = reg_fact_groups;
2157 data_offset += reg_fact_groups * reg_fact_size;
2159 groups = reg_fact_groups;
2160 groupsize = reg_fact_size;
2164 while (len > 0 && groups > 0) {
2167 * Special case: if action is NULL
2168 * we fill buf with otp_info records.
2170 struct otp_info *otpinfo;
2172 len -= sizeof(struct otp_info);
2175 ret = do_otp_read(map, chip,
2177 (u_char *)&lockword,
2182 otpinfo = (struct otp_info *)buf;
2183 otpinfo->start = from;
2184 otpinfo->length = groupsize;
2186 !map_word_bitsset(map, lockword,
2189 buf += sizeof(*otpinfo);
2190 *retlen += sizeof(*otpinfo);
2191 } else if (from >= groupsize) {
2193 data_offset += groupsize;
2195 int size = groupsize;
2196 data_offset += from;
2201 ret = action(map, chip, data_offset,
2202 buf, size, reg_prot_offset,
2203 groupno, groupsize);
2209 data_offset += size;
2215 /* next OTP region */
2216 if (++field == extp->NumProtectionFields)
2218 reg_prot_offset = otp->ProtRegAddr;
2219 reg_fact_groups = otp->FactGroups;
2220 reg_fact_size = 1 << otp->FactProtRegSize;
2221 reg_user_groups = otp->UserGroups;
2222 reg_user_size = 1 << otp->UserProtRegSize;
2230 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2231 size_t len, size_t *retlen,
2234 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2235 buf, do_otp_read, 0);
/*
 * Thin MTD-interface wrappers: each one maps an OTP entry point onto
 * cfi_intelext_otp_walk() with the matching worker and factory(0)/
 * user(1) selector.  The get_*_prot_info variants pass action == NULL
 * so the walk emits otp_info records, returning the record byte count
 * on success.
 */
2238 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2239 size_t len, size_t *retlen,
2242 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2243 buf, do_otp_read, 1);
2246 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2247 size_t len, size_t *retlen,
2250 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2251 buf, do_otp_write, 1);
2254 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2255 loff_t from, size_t len)
2258 return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2259 NULL, do_otp_lock, 1);
2262 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2263 struct otp_info *buf, size_t len)
2268 ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2269 return ret ? : retlen;
2272 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2273 struct otp_info *buf, size_t len)
2278 ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2279 return ret ? : retlen;
2284 static int cfi_intelext_suspend(struct mtd_info *mtd)
/*
 * Power-management suspend: move every idle chip to FL_PM_SUSPENDED.
 * A chip with a pending or active operation refuses suspend; on any
 * refusal the chips already suspended are rolled back to their prior
 * state (reverse walk at the bottom).
 */
2286 struct map_info *map = mtd->priv;
2287 struct cfi_private *cfi = map->fldrv_priv;
2289 struct flchip *chip;
2292 for (i=0; !ret && i<cfi->numchips; i++) {
2293 chip = &cfi->chips[i];
2295 spin_lock(chip->mutex);
2297 switch (chip->state) {
2301 case FL_JEDEC_QUERY:
2302 if (chip->oldstate == FL_READY) {
2303 chip->oldstate = chip->state;
2304 chip->state = FL_PM_SUSPENDED;
2305 /* No need to wake_up() on this state change -
2306 * as the whole point is that nobody can do anything
2307 * with the chip now anyway.
2310 /* There seems to be an operation pending. We must wait for it. */
2311 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2316 /* Should we actually wait? Once upon a time these routines weren't
2317 allowed to. Or should we return -EAGAIN, because the upper layers
2318 ought to have already shut down anything which was using the device
2319 anyway? The latter for now. */
2320 printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
2322 case FL_PM_SUSPENDED:
2325 spin_unlock(chip->mutex);
2328 /* Unlock the chips again */
2331 for (i--; i >=0; i--) {
2332 chip = &cfi->chips[i];
2334 spin_lock(chip->mutex);
2336 if (chip->state == FL_PM_SUSPENDED) {
2337 /* No need to force it into a known state here,
2338 because we're returning failure, and it didn't
2340 chip->state = chip->oldstate;
2341 chip->oldstate = FL_READY;
2344 spin_unlock(chip->mutex);
2351 static void cfi_intelext_resume(struct mtd_info *mtd)
/*
 * Power-management resume: for every suspended chip, reissue read-array
 * (0xff) — the part may have been power-cycled — and mark it FL_READY.
 */
2353 struct map_info *map = mtd->priv;
2354 struct cfi_private *cfi = map->fldrv_priv;
2356 struct flchip *chip;
2358 for (i=0; i<cfi->numchips; i++) {
2360 chip = &cfi->chips[i];
2362 spin_lock(chip->mutex);
2364 /* Go to known state. Chip may have been power cycled */
2365 if (chip->state == FL_PM_SUSPENDED) {
2366 map_write(map, CMD(0xFF), cfi->chips[i].start);
2367 chip->oldstate = chip->state = FL_READY;
2371 spin_unlock(chip->mutex);
2375 static int cfi_intelext_reset(struct mtd_info *mtd)
/*
 * Force every chip back into read-array mode (0xff) so a bootloader in
 * flash stays executable across a soft reboot; waits for any ongoing
 * operation first via get_chip(FL_SYNCING).
 */
2377 struct map_info *map = mtd->priv;
2378 struct cfi_private *cfi = map->fldrv_priv;
2381 for (i=0; i < cfi->numchips; i++) {
2382 struct flchip *chip = &cfi->chips[i];
2384 /* force the completion of any ongoing operation
2385 and switch to array mode so any bootloader in
2386 flash is accessible for soft reboot. */
2387 spin_lock(chip->mutex);
2388 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2390 map_write(map, CMD(0xff), chip->start);
2391 chip->state = FL_READY;
2393 spin_unlock(chip->mutex);
2399 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
/* Reboot notifier: recover the mtd_info from the embedded notifier and
 * reset the flash to array mode before the system restarts. */
2402 struct mtd_info *mtd;
2404 mtd = container_of(nb, struct mtd_info, reboot_notifier);
2405 cfi_intelext_reset(mtd);
2409 static void cfi_intelext_destroy(struct mtd_info *mtd)
/*
 * Driver teardown: reset the flash, drop the reboot notifier, and free
 * the command-set private data, per-chip private data, and the erase
 * region table allocated at probe time.
 */
2411 struct map_info *map = mtd->priv;
2412 struct cfi_private *cfi = map->fldrv_priv;
2413 cfi_intelext_reset(mtd);
2414 unregister_reboot_notifier(&mtd->reboot_notifier);
2415 kfree(cfi->cmdset_priv);
2417 kfree(cfi->chips[0].priv);
2419 kfree(mtd->eraseregions);
2422 static char im_name_0001[] = "cfi_cmdset_0001";
2423 static char im_name_0003[] = "cfi_cmdset_0003";
2424 static char im_name_0200[] = "cfi_cmdset_0200";
/*
 * Module init/exit: register the single cfi_cmdset_0001 probe routine
 * under all three inter-module names (0001, 0003 and 0200 all use the
 * Intel command set), and unregister them symmetrically on exit.
 */
2426 static int __init cfi_intelext_init(void)
2428 inter_module_register(im_name_0001, THIS_MODULE, &cfi_cmdset_0001);
2429 inter_module_register(im_name_0003, THIS_MODULE, &cfi_cmdset_0001);
2430 inter_module_register(im_name_0200, THIS_MODULE, &cfi_cmdset_0001);
2434 static void __exit cfi_intelext_exit(void)
2436 inter_module_unregister(im_name_0001);
2437 inter_module_unregister(im_name_0003);
2438 inter_module_unregister(im_name_0200);
2441 module_init(cfi_intelext_init);
2442 module_exit(cfi_intelext_exit);
2444 MODULE_LICENSE("GPL");
2445 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2446 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");