/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware of and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 * 21/03/2007	Rodolfo Giometti <giometti@linux.it>
 * 	- auto unlock sectors on resume for auto locking flash on power up
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>
/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB		0x00ad
#define I82802AC		0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080		0x002F
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);

static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);

static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
			size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);

#include "fwh_lock.h"
/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */
#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;

	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<4; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}
	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif
static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}
static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n" );
		mtd->write = cfi_intelext_write_buffers;
		mtd->writev = cfi_intelext_writev;
	}
}
/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_use_powerup_lock(struct mtd_info *mtd, void *param)
{
	printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
	mtd->flags |= MTD_STUPID_LOCK;
}
static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ MANUFACTURER_INTEL, 0x891c,	      fixup_use_powerup_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common, and it looks like the device IDs are
	 * as well.  This table picks up all the cases where
	 * we know that is so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};
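
/*
 * Read the Intel/Sharp extended query table.  Its tail is variable
 * length (OTP fields, burst read info, partition regions), so when the
 * first read turns out to be too short the buffer is freed and the
 * whole table is re-read at the larger size, bailing out if it grows
 * past 4KiB.
 */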
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n",  extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size-1];

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
 need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if (extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		if (cfi->cfiq->WordWriteTimeoutTyp)
			cfi->chips[i].word_write_time =
				1<<cfi->cfiq->WordWriteTimeoutTyp;
		else
			cfi->chips[i].word_write_time = 50000;

		if (cfi->cfiq->BufWriteTimeoutTyp)
			cfi->chips[i].buffer_write_time =
				1<<cfi->cfiq->BufWriteTimeoutTyp;
		/* No default; if it isn't specified, we won't use it */

		if (cfi->cfiq->BlockEraseTimeoutTyp)
			cfi->chips[i].erase_time =
				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
		else
			cfi->chips[i].erase_time = 2000000;

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
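
/*
 * Build the user-visible geometry: total size, the per-chip erase
 * region table (offset/erasesize/numblocks plus a lockmap used across
 * suspend/resume), and the optional OTP methods.
 */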
static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if (mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}
static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
			mtd->flags &= ~MTD_BIT_WRITEABLE;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, mtd->writesize,
			       cfi->interleave * prinfo->ControlValid,
			       cfi->interleave * prinfo->ControlInvalid);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
				  map->name, cfi->numchips, cfi->interleave,
				  newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}
/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
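
/*
 * get_chip() is called with chip->mutex held and, on success, returns
 * with it still held once the chip is usable for the requested 'mode',
 * suspending an in-progress erase if the chip advertises that ability.
 * put_chip() is its counterpart and resumes whatever was suspended on
 * the way in.
 */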
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
			spin_unlock(contender->mutex);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
				       map->name, status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;

		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}
#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}
/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function is polling for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long adr, unsigned int chip_op_time )
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long usec, suspended, start, done;
	flstate_t oldstate, newstate;

	start = xip_currtime();
	usec = chip_op_time * 8;
	if (usec == 0)
		usec = 500000;
	done = 0;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			usec -= done;
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return -EIO;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
		done = xip_elapsed_since(start);
	} while (!map_word_andequal(map, status, OK, OK)
		 && done < usec);

	return (done >= usec) ? -ETIME : 0;
}
/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
	xip_wait_for_operation(map, chip, cmd_adr, usec)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
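
/*
 * Non-XIP wait loop: poll the status register for SR.7 (WSM ready),
 * giving up after roughly eight times the typical operation time.
 * About half the expected delay is slept in one chunk, after which the
 * wait degrades to timer-tick-sized sleeps (or busy-waiting for
 * sub-tick delays) so completion is noticed reasonably promptly.
 */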
static int inval_cache_and_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
		unsigned int chip_op_time)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80);
	int chip_state = chip->state;
	unsigned int timeo, sleep_time;

	spin_unlock(chip->mutex);
	if (inval_len)
		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
	spin_lock(chip->mutex);

	/* set our timeout to 8 times the expected delay */
	timeo = chip_op_time * 8;
	if (!timeo)
		timeo = 500000;
	sleep_time = chip_op_time / 2;

	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		if (!timeo) {
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			return -ETIME;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		spin_unlock(chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		spin_lock(chip->mutex);

		while (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
		}
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	return 0;
}

#endif

#define WAIT_TIMEOUT(map, chip, adr, udelay) \
	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);
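
/*
 * point()/unpoint() hand out direct pointers into a linearly mapped
 * flash.  ref_point_counter counts nested point() calls per chip so
 * the chip only leaves FL_POINT when the last user unpoints.
 */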
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	*mtdbuf = (void *)map->virt + from;
	*retlen = 0;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if (chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
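
/*
 * Word (bus-width) programming.  0x40 is the classic word-program
 * command (0x41 on the 0x0200 command set) and 0xc0 programs the OTP
 * protection registers.  Errors come back in the status register:
 * SR.1 means the block is locked, SR.3 flags a VPP problem, SR.4 a
 * programming failure.
 */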
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd;
	int ret = 0;

	adr += chip->start;

	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, map_bankwidth(map),
				   chip->word_write_time);
	if (ret) {
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
					       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while (len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
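
/*
 * Buffered writes: the 0xe8 (0xe9 on the 0x0200 command set)
 * Write-to-Buffer sequence fills the chip's internal buffer straight
 * from the kvec array and is confirmed with 0xd0.  Unaligned edges are
 * padded with 0xff so untouched bytes keep their erased value.
 */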
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd, datum;
	unsigned long cmd_adr;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;
	unsigned long initial_adr;
	int initial_len = len;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	initial_adr = adr;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
	}
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;
	map_write(map, write_cmd, cmd_adr);
	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
	if (ret) {
		/* Argh. Not ready for write to buffer */
		map_word Xstatus = map_read(map, cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		status = map_read(map, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
		       map->name, Xstatus.x[0], status.x[0]);
		goto out;
	}

	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
	if (!word_gap) {
		words--;
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}

	/* Write length of data to come */
	map_write(map, CMD(words), cmd_adr );

	/* Write data */
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		int n = map_bankwidth(map) - word_gap;
		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && len < map_bankwidth(map))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
					      vec->iov_base + vec_seek,
					      word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, adr);
			adr += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
				   initial_adr, initial_len,
				   chip->buffer_write_time);
	if (ret) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, cmd_adr);
 out:	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	size_t len = 0;

	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}

		/* Be nice and reschedule with the chip in a usable state for other
		   processes. */
		cond_resched();

	} while (len);

	return 0;
}

static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct kvec vec;

	vec.iov_base = (void *) buf;
	vec.iov_len = len;

	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
}
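
/*
 * Block erase: 0x20 selects the erase, 0xD0 confirms it.  The status
 * register is decoded afterwards: SR.4|SR.5 together mean a bad command
 * sequence, SR.1 a locked block, SR.3 a VPP problem, and a plain erase
 * failure (SR.5) is retried a few times before giving up.
 */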
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;
	int retries = 3;
	int ret;

	adr += chip->start;

 retry:
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, len,
				   chip->erase_time);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
		goto out;
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for errors */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
			ret = -EIO;
		} else if (chipstatus & 0x20 && retries--) {
			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
			put_chip(map, chip, adr);
			spin_unlock(chip->mutex);
			goto retry;
		} else {
			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
			ret = -EIO;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}

int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			chip->oldstate = FL_READY;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}
static int __xipram do_getlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	adr += chip->start;
	xip_disable(map, chip, adr+(2*ofs_factor));
	map_write(map, CMD(0x90), adr+(2*ofs_factor));
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	return status;
}

#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
	return 0;
}
#endif
#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)
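
/*
 * Lock and unlock share the 0x60 setup command, followed by 0x01 to
 * set or 0xD0 to clear the block lock bit.  Chips with instant
 * individual block locking need no delay; everything else is given a
 * full timer tick to finish.
 */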
static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int udelay;
	int ret;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	/*
	 * If Instant Individual Block Locking supported then no need
	 * to delay.
	 */
	udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;

	ret = WAIT_TIMEOUT(map, chip, adr, udelay);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}
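
/*
 * OTP ("protection register") support.  The walker below iterates the
 * protection regions described in the extended query table; reads go
 * through JEDEC-query mode, writes reuse do_write_oneword() with the
 * 0xc0 command, and locking clears one bit in the region's protection
 * lock word.
 */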
#ifdef CONFIG_MTD_OTP

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);

static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	spin_unlock(chip->mutex);
	return 0;
}

static int
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

	while (size) {
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		offset += n;
		buf += n;
		size -= n;
	}

	return 0;
}

static int
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}
static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;
	chip_num = 0;

	/* Some chips have OTP located in the _top_ partition only.
	   For example: Intel 28F256L18T (T means top-parameter device) */
	if (cfi->mfr == MANUFACTURER_INTEL) {
		switch (cfi->id) {
		case 0x880b:
		case 0x880c:
		case 0x880d:
			chip_num = chip_step - 1;
		}
	}

	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {

			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}

			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					len -= sizeof(struct otp_info);
					if (len <= 0)
						return -ENOSPC;
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;

					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}

			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}
static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}

static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}

static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_write, 1);
}

static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;
	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}

static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
	return ret ? : retlen;
}

static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
	return ret ? : retlen;
}

#endif
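
/*
 * Power management.  On chips that power up with everything locked
 * (MTD_STUPID_LOCK) and support instant individual block locking, the
 * lock state of each block is recorded in the per-region lockmap
 * before suspend; on resume, blocks that were not locked are unlocked
 * again.
 */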
static void cfi_intelext_save_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, status, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++){
			len = region->erasesize;
			adr = region->offset + block * len;

			status = cfi_varsize_frob(mtd,
					do_getlockstatus_oneblock, adr, len, 0);
			if (status)
				set_bit(block, region->lockmap);
			else
				clear_bit(block, region->lockmap);
		}
	}
}
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	if ((mtd->flags & MTD_STUPID_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_save_locks(mtd);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				/* place the chip in a known state before suspend */
				map_write(map, CMD(0xFF), cfi->chips[i].start);
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
			ret = -EAGAIN;
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}
static void cfi_intelext_restore_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++) {
			len = region->erasesize;
			adr = region->offset + block * len;

			if (!test_bit(block, region->lockmap))
				cfi_intelext_unlock(mtd, adr, len);
		}
	}
}
static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock(chip->mutex);
	}

	if ((mtd->flags & MTD_STUPID_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_restore_locks(mtd);
}
static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i=0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_READY;
		}
		spin_unlock(chip->mutex);
	}

	return 0;
}

static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_erase_region_info *region;
	int i;

	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (region->lockmap)
			kfree(region->lockmap);
	}
	kfree(mtd->eraseregions);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");