/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.185 2005/11/07 11:14:22 gleixner Exp $
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>
/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB		0x00ad
#define I82802AC		0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080		0x002F
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
				  size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};
/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;

	printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n", i);
	}
	printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n", i);
	}

	printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif
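/*
 * Illustrative note on the decoding just above: VccOptimal and VppOptimal
 * hold one digit per nibble (integer volts in the high nibble, tenths in
 * the low nibble), so a VccOptimal of 0x33 prints as "3.3 V" and a
 * VppOptimal of 0xc0 prints as "12.0 V".
 */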
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif
static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}
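/*
 * For reference (this is the generic CFI encoding, not specific to this
 * chip): each 32-bit EraseRegionInfo word holds the block size in units of
 * 256 bytes in its upper 16 bits and (number of blocks - 1) in its lower
 * 16 bits.  The fixup above therefore keeps the reported block size and
 * forces the block count of the second region to 0x3e + 1 = 63 blocks.
 */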
static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n");
		mtd->write = cfi_intelext_write_buffers;
		mtd->writev = cfi_intelext_writev;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,  fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,  fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It looks like the device IDs are as
	 * well.  This table picks up the cases where we know
	 * that is so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};
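/*
 * For orientation, cfi_fixup() (in cfi_util.c) walks such a table roughly
 * as sketched below: an entry fires when both the manufacturer and device
 * IDs match, with CFI_MFR_ANY/CFI_ID_ANY acting as wildcards.
 *
 *	for (f = fixups; f->fixup; f++) {
 *		if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
 *		    (f->id  == CFI_ID_ANY  || f->id  == cfi->id))
 *			f->fixup(mtd, f->param);
 *	}
 */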
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n", extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size-1];

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
 need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}
/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
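/*
 * Hedged sketch of the access pattern described above (illustrative only;
 * map drivers normally reach this entry point indirectly through
 * do_map_probe("cfi_probe", map) rather than calling it by hand):
 *
 *	struct mtd_info *(*cmdset)(struct map_info *, int);
 *
 *	cmdset = inter_module_get("cfi_cmdset_0001");
 *	if (cmdset)
 *		mtd = cmdset(map, 1);
 *	...
 *	inter_module_put("cfi_cmdset_0001");
 */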
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if (extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}
#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if (mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}
static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			MTD_PROGREGION_SIZE(mtd) = cfi->interleave << prinfo->ProgRegShift;
			MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
			MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
			mtd->flags |= MTD_PROGRAM_REGIONS;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, MTD_PROGREGION_SIZE(mtd),
			       MTD_PROGREGION_CTRLMODE_VALID(mtd),
			       MTD_PROGREGION_CTRLMODE_INVALID(mtd));
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk(KERN_ERR
			       "%s: bad number of hw partitions (%d)\n",
			       __FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
		       "--> %d partitions of %d KiB\n",
		       map->name, cfi->numchips, cfi->interleave,
		       newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}
/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read when its lock is taken.
		 * However any writes to it can only be made when the current
		 * owner's lock is also held.
		 */
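		/*
		 * Concrete illustration of the rules above (added example,
		 * not part of the original comment): if partition A of a
		 * real chip is erasing and partition B wants to write, B
		 * finds A as the contender below, takes A's lock and
		 * re-enters get_chip() on A so that A's erase is suspended
		 * (or B sleeps) before B may claim shared->writing.
		 */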
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		if (contender && contender != chip)
			spin_unlock(contender->mutex);
		spin_unlock(&shared->lock);
	}
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
				       map->name, status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		      (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;

		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}
#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */
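/*
 * Illustrative example of the rule above (the helper name is hypothetical,
 * not part of this driver): any function used on an XIP-disabled path must
 * itself run from RAM, so it would be declared as
 *
 *	static void __xipram my_poll_helper(struct map_info *map,
 *					    unsigned long adr);
 */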
static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}
/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at
 * that point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */
static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate, newstate;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}
#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)
/*
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with
 * the same chip state as it was entered with.  This is why there is no care
 * for the presence of add_wait_queue() or schedule() calls from within a
 * couple xip_disable()'d areas of code, like in do_erase_oneblock for
 * example.  The queueing and scheduling are always happening within
 * xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif
static int do_point_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}
static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	*mtdbuf = (void *)map->virt + from;
	*retlen = 0;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);
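	/*
	 * Worked example (illustrative values): with 16 MiB chips,
	 * cfi->chipshift is 24, so from = 0x1800000 gives chipnum = 1 and
	 * ofs = 0x800000, i.e. 8 MiB into the second chip.
	 */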
	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
static void cfi_intelext_unpoint(struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if (chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK, write_cmd;
	unsigned long timeo;
	int z, ret = 0;

	adr += chip->start;

	/* Let's determine those according to the interleave only once */
	status_OK = CMD(0x80);
	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != mode) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			xip_enable(map, chip, adr);
			printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		z++;
		UDELAY(map, chip, adr, 1);
	}
	if (!z) {
		chip->word_write_time--;
		if (!chip->word_write_time)
			chip->word_write_time = 1;
	}
	if (z > 1)
		chip->word_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for errors */
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_intelext_write_words(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while (len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK, write_cmd, datum;
	unsigned long cmd_adr, timeo;
	int wbufsize, z, ret=0, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);
	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS)
		map_write(map, CMD(0x70), cmd_adr);
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		map_write(map, write_cmd, cmd_adr);

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		UDELAY(map, chip, cmd_adr, 1);

		if (++z > 20) {
			/* Argh. Not ready for write to buffer */
			map_word Xstatus;
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, cmd_adr);
			/* Odd. Clear status bits */
			map_write(map, CMD(0x50), cmd_adr);
			map_write(map, CMD(0x70), cmd_adr);
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "%s: Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
			       map->name, status.x[0], Xstatus.x[0]);
			ret = -EIO;
			goto out;
		}
	}
	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
	if (!word_gap) {
		words--;
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}

	/* Write length of data to come */
	map_write(map, CMD(words), cmd_adr );

	/* Write data */
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		int n = map_bankwidth(map) - word_gap;
		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && len < map_bankwidth(map))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
					      vec->iov_base + vec_seek,
					      word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, adr);
			adr += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;
	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				cmd_adr, len,
				chip->buffer_write_time);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		z++;
		UDELAY(map, chip, cmd_adr, 1);
	}
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time = 1;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for errors */
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, cmd_adr);
 out:	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_intelext_writev(struct mtd_info *mtd, const struct kvec *vecs,
			       unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	size_t len = 0;

	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));
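		/*
		 * Worked example (illustrative numbers): with a 32-byte
		 * write buffer, a chunk starting at ofs 0x...14 may cover
		 * at most 32 - 20 = 12 bytes before the next buffer
		 * boundary, so that is all we hand to do_write_buffer().
		 */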
		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	} while (len);

	return 0;
}
static int cfi_intelext_write_buffers(struct mtd_info *mtd, loff_t to,
				      size_t len, size_t *retlen, const u_char *buf)
{
	struct kvec vec;

	vec.iov_base = (void *) buf;
	vec.iov_len = len;

	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
}
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

 retry:
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*1000/2);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			xip_enable(map, chip, adr);
			printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for errors */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
			ret = -EIO;
		} else if (chipstatus & 0x20 && retries--) {
			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
			timeo = jiffies + HZ;
			put_chip(map, chip, adr);
			spin_unlock(chip->mutex);
			goto retry;
		} else {
			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
			ret = -EIO;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
static void cfi_intelext_sync(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			chip->oldstate = FL_READY;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}
#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	adr += chip->start;
	xip_disable(map, chip, adr+(2*ofs_factor));
	map_write(map, CMD(0x90), adr+(2*ofs_factor));
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, status);
	return 0;
}
#endif
#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)

static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	int ret;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	/*
	 * If Instant Individual Block Locking supported then no need
	 * to delay.
	 */
	if (!extp || !(extp->FeatureSupport & (1 << 5)))
		UDELAY(map, chip, adr, 1000000/HZ);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			xip_enable(map, chip, adr);
			printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
			put_chip(map, chip, adr);
			spin_unlock(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return 0;
}
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	return ret;
}
#ifdef CONFIG_MTD_OTP

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);

static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	spin_unlock(chip->mutex);
	return 0;
}

static int
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

	while (size) {
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		offset += n;
		buf += n;
		size -= n;
	}

	return 0;
}

static int
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}
static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;
	chip_num = 0;
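	/*
	 * Worked example (illustrative): if the partition fixup split each
	 * 16 MiB physical chip into four 4 MiB virtual chips, cfi->chipshift
	 * is 22, so chip_step = 2^24 >> 22 = 4 and the loop below visits
	 * only the first virtual chip of each physical device.
	 */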
	/* Some chips have OTP located in the _top_ partition only.
	   For example: Intel 28F256L18T (T means top-parameter device) */
	if (cfi->mfr == MANUFACTURER_INTEL) {
		switch (cfi->id) {
		case 0x880b:
		case 0x880c:
		case 0x880d:
			chip_num = chip_step - 1;
		}
	}

	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {

			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}

			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					len -= sizeof(struct otp_info);
					if (len <= 0)
						break;
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}

			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}
static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}

static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}

static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_write, 1);
}

static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;
	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}

static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
	return ret ? : retlen;
}

static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
	return ret ? : retlen;
}

#endif
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
			ret = -EAGAIN;
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}
static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock(chip->mutex);
	}
}
static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i=0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_READY;
		}
		spin_unlock(chip->mutex);
	}

	return 0;
}

static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	kfree(mtd->eraseregions);
}
static char im_name_0001[] = "cfi_cmdset_0001";
static char im_name_0003[] = "cfi_cmdset_0003";
static char im_name_0200[] = "cfi_cmdset_0200";

static int __init cfi_intelext_init(void)
{
	inter_module_register(im_name_0001, THIS_MODULE, &cfi_cmdset_0001);
	inter_module_register(im_name_0003, THIS_MODULE, &cfi_cmdset_0001);
	inter_module_register(im_name_0200, THIS_MODULE, &cfi_cmdset_0001);
	return 0;
}
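/*
 * All three names are registered against the same entry point: the 0x0003
 * and 0x0200 vendor command sets are close variants of 0x0001 and are
 * handled by this code, with the 0x0200 differences covered by the
 * P_ID == 0x0200 special cases in do_write_oneword() and do_write_buffer()
 * above.
 */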
static void __exit cfi_intelext_exit(void)
{
	inter_module_unregister(im_name_0001);
	inter_module_unregister(im_name_0003);
	inter_module_unregister(im_name_0200);
}

module_init(cfi_intelext_init);
module_exit(cfi_intelext_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");