/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 *	- completely revamped method functions so they are aware and
 *	  independent of the flash geometry (buswidth, interleave, etc.)
 *	- scalability vs code size is completely set at compile-time
 *	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *	- reworked lock/unlock/erase support for var size flash
 * 21/03/2007	Rodolfo Giometti <giometti@linux.it>
 *	- auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0
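
/* JEDEC manufacturer and device IDs referenced by the fixup tables and
   the OTP code below */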

#define MANUFACTURER_INTEL	0x0089
#define I82802AB		0x00ad
#define I82802AC		0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080		0x002F
#define M50FLW080A		0x0080
#define M50FLW080B		0x0081
#define AT49BV640D		0x02de

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, void **virt, resource_size_t *phys);
static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);

#include "fwh_lock.h"

/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;

	printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk(" - Chip Erase:             %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk(" - Suspend Erase:          %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk(" - Suspend Program:        %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk(" - Legacy Lock/Unlock:     %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk(" - Queued Erase:           %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk(" - Instant block lock:     %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk(" - Protection Bits:        %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk(" - Page-mode read:         %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk(" - Synchronous read:       %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk(" - Extended Flash Array:   %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk(" - Unknown Bit %X:      supported\n", i);
	}

	printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk(" - Unknown Bit %X:               supported\n", i);
	}

	printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk(" - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);
	}
	printk(" - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk(" - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);
	}

	printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;
	uint32_t features = 0;

	/* Reverse byteswapping */
	extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
	extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
	extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

	if (atmel_pri.Features & 0x01) /* chip erase supported */
		features |= (1<<0);
	if (atmel_pri.Features & 0x02) /* erase suspend supported */
		features |= (1<<1);
	if (atmel_pri.Features & 0x04) /* program suspend supported */
		features |= (1<<2);
	if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
		features |= (1<<9);
	if (atmel_pri.Features & 0x20) /* page mode read supported */
		features |= (1<<7);
	if (atmel_pri.Features & 0x40) /* queued erase supported */
		features |= (1<<4);
	if (atmel_pri.Features & 0x80) /* Protection bits supported */
		features |= (1<<6);

	extp->FeatureSupport = features;

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash parts prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n");
		mtd->write = cfi_intelext_write_buffers;
		mtd->writev = cfi_intelext_writev;
	}
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip->FeatureSupport&32) {
		printk(KERN_INFO "Using auto-unlock on power-up/resume\n");
		mtd->flags |= MTD_POWERUP_LOCK;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50FLW080A, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50FLW080B, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It looks like the device IDs are as
	 * well.  This table picks all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};
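
/*
 * For reference, cfi_fixup() (implemented in cfi_util.c) walks one of
 * these tables and calls every entry whose mfr/id pair matches the
 * probed chip, with CFI_MFR_ANY/CFI_ID_ANY acting as wildcards.
 * Roughly (a sketch, not the authoritative implementation):
 *
 *	for (f = fixups; f->fixup; f++)
 *		if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
 *		    (f->id  == CFI_ID_ANY  || f->id  == cfi->id))
 *			f->fixup(mtd, f->param);
 *
 * which is why every table above must end with a { 0, 0, NULL, NULL }
 * terminator.
 */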

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
		printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n", extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size-1];

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					map->name);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		if (cfi->cfiq->WordWriteTimeoutTyp)
			cfi->chips[i].word_write_time =
				1<<cfi->cfiq->WordWriteTimeoutTyp;
		else
			cfi->chips[i].word_write_time = 50000;

		if (cfi->cfiq->BufWriteTimeoutTyp)
			cfi->chips[i].buffer_write_time =
				1<<cfi->cfiq->BufWriteTimeoutTyp;
		/* No default; if it isn't specified, we won't use it */

		if (cfi->cfiq->BlockEraseTimeoutTyp)
			cfi->chips[i].erase_time =
				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
		else
			cfi->chips[i].erase_time = 2000000;

		if (cfi->cfiq->WordWriteTimeoutTyp &&
		    cfi->cfiq->WordWriteTimeoutMax)
			cfi->chips[i].word_write_time_max =
				1<<(cfi->cfiq->WordWriteTimeoutTyp +
				    cfi->cfiq->WordWriteTimeoutMax);
		else
			cfi->chips[i].word_write_time_max = 50000 * 8;

		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1<<(cfi->cfiq->BufWriteTimeoutTyp +
				    cfi->cfiq->BufWriteTimeoutMax);

		if (cfi->cfiq->BlockEraseTimeoutTyp &&
		    cfi->cfiq->BlockEraseTimeoutMax)
			cfi->chips[i].erase_time_max =
				1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
				       cfi->cfiq->BlockEraseTimeoutMax);
		else
			cfi->chips[i].erase_time_max = 2000000 * 8;

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
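
/*
 * Typical use (illustrative sketch only): a map driver probes its chip
 * with do_map_probe("cfi_probe", map); when the primary vendor command
 * set ID is 0x0001/0x0003/0x0200, the generic probe ends up calling
 * cfi_cmdset_0001() above to obtain a fully wired-up mtd_info, e.g.:
 *
 *	struct mtd_info *mtd = do_map_probe("cfi_probe", map);
 *	if (mtd)
 *		add_mtd_device(mtd);
 */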

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}
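
	/*
	 * Each 32-bit entry of cfiq->EraseRegionInfo packs the region
	 * geometry: bits 31:16 hold the block size in units of 256 bytes,
	 * bits 15:0 hold the number of blocks minus one.  For example, a
	 * (hypothetical) descriptor of 0x0200007F decodes to 128 blocks of
	 * 0x020000 bytes (128 KiB) each, scaled below by the interleave.
	 */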

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions; i++) {
		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i, mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if (mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		if (!numparts)
			numparts = 1;

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
			mtd->flags &= ~MTD_BIT_WRITEABLE;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, mtd->writesize,
			       cfi->interleave * prinfo->ControlValid,
			       cfi->interleave * prinfo->ControlInvalid);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);
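
		/*
		 * partshift worked example (hypothetical geometry): a chip
		 * with chipshift = 25 (32 MiB) split into 8 identical
		 * hardware partitions gives partshift = 25 - __ffs(8) =
		 * 25 - 3 = 22, i.e. one 4 MiB virtual chip per partition.
		 */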

		if ((1 << partshift) < mtd->erasesize) {
			printk(KERN_ERR
			       "%s: bad number of hw partitions (%d)\n",
			       map->name, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
		       "--> %d partitions of %d KiB\n",
		       map->name, cfi->numchips, cfi->interleave,
		       newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	unsigned long timeo = jiffies + HZ;

	/* Prevent setting state FL_SYNCING for chip in suspended state. */
	if (mode == FL_SYNCING && chip->oldstate != FL_READY)
		goto sleep;

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			return -EAGAIN;
		}
		/* Fall through */
	case FL_READY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		      (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;

		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting now, so no one can get the chip anymore */
		return -EIO;
	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		/* Fall through */
	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		return -EAGAIN;
	}
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	if (chip->priv &&
	    (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
	    || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = chip_ready(map, contender, contender->start, mode);
			spin_lock(chip->mutex);

			if (ret == -EAGAIN) {
				spin_unlock(contender->mutex);
				goto retry;
			}
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			spin_lock(&shared->lock);

			/* We should not own chip if it is already
			 * in FL_SYNCING state. Put contender and retry. */
			if (chip->state == FL_SYNCING) {
				put_chip(map, contender, contender->start);
				spin_unlock(contender->mutex);
				goto retry;
			}
			spin_unlock(contender->mutex);
		}

		/* Check if we already have suspended erase
		 * on this chip. Sleep. */
		if (mode == FL_ERASING && shared->erasing
		    && shared->erasing->oldstate == FL_ERASING) {
			spin_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		spin_unlock(&shared->lock);
	}

	ret = chip_ready(map, chip, adr, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Also, configuring MTD CFI
 * support for a single buswidth and a single interleave is recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long adr, unsigned int chip_op_time_max)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long usec, suspended, start, done;
	flstate_t oldstate, newstate;

	start = xip_currtime();
	usec = chip_op_time_max;
	if (usec == 0)
		usec = 500000;
	done = 0;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return -EIO;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			spin_unlock(chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
		done = xip_elapsed_since(start);
	} while (!map_word_andequal(map, status, OK, OK)
		 && done < usec);

	return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
	xip_wait_for_operation(map, chip, cmd_adr, usec_max)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

static int inval_cache_and_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
		unsigned int chip_op_time, unsigned int chip_op_time_max)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80);
	int chip_state = chip->state;
	unsigned int timeo, sleep_time, reset_timeo;

	spin_unlock(chip->mutex);
	if (inval_len)
		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
	spin_lock(chip->mutex);

	timeo = chip_op_time_max;
	if (!timeo)
		timeo = 500000;
	reset_timeo = timeo;
	sleep_time = chip_op_time / 2;

	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		if (!timeo) {
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			return -ETIME;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		spin_unlock(chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		spin_lock(chip->mutex);

		while (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
		}
		if (chip->erase_suspended || chip->write_suspended) {
			/* Suspend has occurred while sleeping: reset timeout */
			timeo = reset_timeo;
			chip->erase_suspended = 0;
			chip->write_suspended = 0;
		}
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	return 0;
}

#define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
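
/*
 * WAIT_TIMEOUT() is a degenerate use of the wait primitive above: it
 * polls the status register with no cache range to invalidate
 * (inval_adr/inval_len are 0); the udelay/udelay_max pair still bounds
 * the poll.
 */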

static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, void **virt, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs, last_end = 0;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*virt = map->virt + cfi->chips[chipnum].start + ofs;
	*retlen = 0;
	if (phys)
		*phys = map->phys + cfi->chips[chipnum].start + ofs;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = cfi->chips[chipnum].start;
		else if (cfi->chips[chipnum].start != last_end)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << cfi->chipshift;
		chipnum++;
	}
	return 0;
}

static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
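
/*
 * Word programming below uses the one-cycle setup command (0x40, or
 * 0x41 for the 0x0200 command set) followed by the data write, then
 * polls the status register until SR.7 (ready) is set.  Error bits are
 * checked afterwards: SR.1 means the block was locked (-EROFS), SR.3 a
 * VPP fault (-EIO); FL_OTP_WRITE uses the 0xC0 protection-program
 * command instead.
 */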

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd;
	int ret = 0;

	adr += chip->start;

	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, map_bankwidth(map),
				   chip->word_write_time,
				   chip->word_write_time_max);
	if (ret) {
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
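
/*
 * mtd->write path: the request is split into an unaligned head (padded
 * with 0xFF so untouched bytes stay erased), a bankwidth-aligned body,
 * and an unaligned tail, each funnelled through do_write_oneword().
 */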

static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}

static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd, datum;
	unsigned long cmd_adr;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;
	unsigned long initial_adr;
	int initial_len = len;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
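	/*
	 * e.g. (hypothetical geometry): two interleaved chips that each
	 * report MaxBufWriteSize = 5 (a 32-byte write buffer) give
	 * wbufsize = 2 << 5 = 64 bytes per buffered program cycle.
	 */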
	adr += chip->start;
	initial_adr = adr;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
	}
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;
	map_write(map, write_cmd, cmd_adr);
	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
	if (ret) {
		/* Argh. Not ready for write to buffer */
		map_word Xstatus = map_read(map, cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		status = map_read(map, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
		       map->name, Xstatus.x[0], status.x[0]);
		goto out;
	}

	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
	if (!word_gap) {
		words--;
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}

	/* Write length of data to come */
	map_write(map, CMD(words), cmd_adr );

	/* Write data */
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		int n = map_bankwidth(map) - word_gap;
		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && len < map_bankwidth(map))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
					      vec->iov_base + vec_seek,
					      word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, adr);
			adr += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
				   initial_adr, initial_len,
				   chip->buffer_write_time,
				   chip->buffer_write_time_max);
	if (ret) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, cmd_adr);
 out:	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	size_t len = 0;

	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}

		/* Be nice and reschedule with the chip in a usable state for other
		   processes. */
		cond_resched();

	} while (len);

	return 0;
}

static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct kvec vec;

	vec.iov_base = (void *) buf;
	vec.iov_len = len;

	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
}
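
/*
 * Block erase uses the two-cycle sequence from the Intel command set:
 * 0x20 (erase setup) followed by 0xD0 (erase confirm) at any address
 * within the block, then a status poll.  A failed erase reporting SR.5
 * is retried a few times below before giving up.
 */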

static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;
	int retries = 3;
	int ret;

	adr += chip->start;

 retry:
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, len,
				   chip->erase_time,
				   chip->erase_time_max);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
		goto out;
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for errors */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
			ret = -EIO;
		} else if (chipstatus & 0x20 && retries--) {
			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
			put_chip(map, chip, adr);
			spin_unlock(chip->mutex);
			goto retry;
		} else {
			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
			ret = -EIO;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			chip->oldstate = FL_READY;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}

static int __xipram do_getlockstatus_oneblock(struct map_info *map,
					      struct flchip *chip,
					      unsigned long adr,
					      int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	adr += chip->start;
	xip_disable(map, chip, adr+(2*ofs_factor));
	map_write(map, CMD(0x90), adr+(2*ofs_factor));
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	return status;
}

#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
	return 0;
}
#endif

#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)
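
/*
 * The lock/unlock worker is shared; cfi_varsize_frob() passes one of
 * the cookies above through its "thunk" argument to select the 0x01
 * (set lock bit) or 0xD0 (clear lock bit) confirm cycle that follows
 * the 0x60 lock setup command.
 */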

static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int udelay;
	int ret;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	/*
	 * If Instant Individual Block Locking supported then no need
	 * to delay.
	 */
	udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;

	ret = WAIT_TIMEOUT(map, chip, adr, udelay, udelay * 100);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __func__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __func__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}

#ifdef CONFIG_MTD_OTP

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);
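
/*
 * All OTP operations below share this callback signature so that
 * cfi_intelext_otp_walk() can iterate the protection register groups
 * once and apply reading, writing or locking uniformly; a NULL action
 * makes the walker emit struct otp_info records instead.
 */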

static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	spin_unlock(chip->mutex);
	return 0;
}
static int
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

	while (size) {
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;
		offset += n;
		buf += n;
		size -= n;
	}
	return 0;
}

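/*
 * Example (hypothetical numbers): with a 4-byte bus width, a 5-byte
 * write at OTP offset 2 becomes two programming cycles: bytes 0-1 of
 * the buffer merged at offsets 2-3 of the first bus word, then bytes
 * 2-4 at offsets 0-2 of the next.  The 0xff filler leaves the other
 * byte lanes unprogrammed, since programming can only clear bits.
 */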
static int
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}

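/*
 * Locking is itself a one-way OTP program: clearing bit grpno in the
 * lock word at prot permanently write-protects that group.  There is
 * no unlock.
 */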
static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;
	chip_num = 0;

	/* Some chips have OTP located in the _top_ partition only.
	   For example: Intel 28F256L18T (T means top-parameter device) */
	if (cfi->mfr == MANUFACTURER_INTEL) {
		switch (cfi->id) {
		case 0x880b:
		case 0x880c:
		case 0x880d:
			chip_num = chip_step - 1;
		}
	}

	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {

			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}

			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					len -= sizeof(struct otp_info);
					if (len <= 0)
						return -ENOSPC;
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}

			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}

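/*
 * Illustrative only: userspace reaches the walk above through the
 * mtdchar OTP ioctls.  A (hypothetical) dump of the user area:
 *
 *	int fd = open("/dev/mtd0", O_RDONLY);
 *	int mode = MTD_OTP_USER;
 *	struct otp_info info[4];
 *	ioctl(fd, OTPSELECT, &mode);
 *	ioctl(fd, OTPGETREGIONINFO, info);	one record per group
 *	read(fd, buf, info[0].length);		reads OTP, not the array
 */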
static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}

static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}

static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_write, 1);
}

static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;

	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}

static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
	return ret ? : retlen;
}

static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
	return ret ? : retlen;
}

#endif /* CONFIG_MTD_OTP */

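/*
 * Power management.  Parts with instant individual block locking
 * (FeatureSupport bit 5) lock every block on power-up, so before a
 * suspend we snapshot each block's lock bit into region->lockmap and
 * restore the unlocked ones on resume.
 */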
static void cfi_intelext_save_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, status, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++) {
			len = region->erasesize;
			adr = region->offset + block * len;

			status = cfi_varsize_frob(mtd,
					do_getlockstatus_oneblock, adr, len, NULL);
			if (status)
				set_bit(block, region->lockmap);
			else
				clear_bit(block, region->lockmap);
		}
	}
}

static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int ret = 0;
	int i;
	struct flchip *chip;

	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_save_locks(mtd);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				/* place the chip in a known state before suspend */
				map_write(map, CMD(0xFF), cfi->chips[i].start);
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway. */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
			ret = -EAGAIN;
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}

static void cfi_intelext_restore_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++) {
			len = region->erasesize;
			adr = region->offset + block * len;

			if (!test_bit(block, region->lockmap))
				cfi_intelext_unlock(mtd, adr, len);
		}
	}
}

static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock(chip->mutex);
	}

	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_restore_locks(mtd);
}

static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i=0; i < cfi->numchips; i++) {

		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_SHUTDOWN;
		}
		spin_unlock(chip->mutex);
	}

	return 0;
}

static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;
	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}

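/*
 * This notifier is hooked into the reboot path elsewhere in this
 * driver (mtd->reboot_notifier.notifier_call is presumably set when
 * the chip is probed, matching the unregister call in destroy below),
 * so the flash is back in array mode before a soft reboot jumps into
 * any bootloader stored in it.
 */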
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_erase_region_info *region;
	int i;

	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (region->lockmap)
			kfree(region->lockmap);
	}
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");