/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB	0x00ad
#define I82802AC	0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080	0x002F
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);

static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);

static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
			size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
/*
 * *********** SETUP AND PROBE BITS ***********
 */
static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};
/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);
	}
	printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);
	}

	printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif
static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}
static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}
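/*
 * For reference: each EraseRegionInfo word in the CFI query packs
 * (block size / 256 bytes) in its upper 16 bits and (number of erase
 * blocks - 1) in its lower 16 bits.  The fixup above therefore forces
 * region 1 of the M28W320CB to advertise 0x3e + 1 = 63 erase blocks
 * while leaving the block size the chip reported untouched.
 */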
static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}
static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n");
		mtd->write = cfi_intelext_write_buffers;
		mtd->writev = cfi_intelext_writev;
	}
}
static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common, and it looks like the device IDs are
	 * as well.  This table picks up all the cases where
	 * we know that to be the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};
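/*
 * Illustrative sketch, not part of this driver: cfi_fixup() walks one of
 * the tables above and invokes every entry whose manufacturer/device IDs
 * match the probed chip (CFI_MFR_ANY/CFI_ID_ANY match anything).  A
 * hypothetical board-specific quirk would be wired up along these lines:
 *
 *	static void fixup_my_board(struct mtd_info *mtd, void *param)
 *	{
 *		struct map_info *map = mtd->priv;
 *		struct cfi_private *cfi = map->fldrv_priv;
 *		cfi->cfiq->BufWriteTimeoutTyp = 0;  (disable buffered writes)
 *	}
 *
 *	static struct cfi_fixup my_fixup_table[] = {
 *		{ MANUFACTURER_INTEL, 0x0016, fixup_my_board, NULL },
 *		{ 0, 0, NULL, NULL }
 *	};
 */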
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
		printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n", extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size-1];

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
 need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		if (cfi->cfiq->WordWriteTimeoutTyp)
			cfi->chips[i].word_write_time =
				1<<cfi->cfiq->WordWriteTimeoutTyp;
		else
			cfi->chips[i].word_write_time = 50000;

		if (cfi->cfiq->BufWriteTimeoutTyp)
			cfi->chips[i].buffer_write_time =
				1<<cfi->cfiq->BufWriteTimeoutTyp;
		/* No default; if it isn't specified, we won't use it */

		if (cfi->cfiq->BlockEraseTimeoutTyp)
			cfi->chips[i].erase_time =
				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
		else
			cfi->chips[i].erase_time = 2000000;

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
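/*
 * Usage sketch (assumes a linear NOR mapping already set up by a map
 * driver): callers normally reach cfi_cmdset_0001() indirectly through
 * the generic CFI probe, which selects the command-set handler from the
 * primary vendor ID in the chip's query data:
 *
 *	struct mtd_info *mtd = do_map_probe("cfi_probe", &my_map);
 *	if (mtd)
 *		add_mtd_device(mtd);
 *
 * The cfi_cmdset_0003/cfi_cmdset_0200 aliases above let chips reporting
 * those command-set IDs resolve to this same implementation.
 */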
static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if (mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}
static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
			MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
			MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
			mtd->flags &= ~MTD_BIT_WRITEABLE;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, mtd->writesize,
			       MTD_PROGREGION_CTRLMODE_VALID(mtd),
			       MTD_PROGREGION_CTRLMODE_INVALID(mtd));
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk(KERN_ERR
			       "%s: bad number of hw partitions (%d)\n",
			       __FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
		       "--> %d partitions of %d KiB\n",
		       map->name, cfi->numchips, cfi->interleave,
		       newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}
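/*
 * Worked example with hypothetical numbers: a 32 MiB chip
 * (cfi->chipshift = 25) advertising 4 identical hardware partitions
 * yields partshift = 25 - __ffs(4) = 23, i.e. four virtual chips of
 * 8 MiB each, which is acceptable as long as 8 MiB >= mtd->erasesize.
 */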
/*
 * *********** CHIP ACCESS FUNCTIONS ***********
 */
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
		/*
		 * OK. We have the possibility of contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
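		/*
		 * Illustrative walk-through (hypothetical scenario):
		 * partition A holds shared->writing for a buffered write
		 * when partition B wants to erase.  B sees contender == A,
		 * takes A's mutex with spin_trylock(), drops the shared
		 * lock and recursively calls get_chip() on A, which either
		 * suspends A's write or puts B to sleep until A is done.
		 * Only then does B claim shared->writing (and
		 * shared->erasing, since an erase must own both) before
		 * issuing its own command.
		 */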
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
			spin_unlock(contender->mutex);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
				       map->name, status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;

		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode. Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}
#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Also, configuring MTD CFI
 * support for a single buswidth and a single interleave is recommended.
 */
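/*
 * Minimal sketch of the required bracketing (illustrative only, command
 * values as used elsewhere in this file):
 *
 *	xip_disable(map, chip, adr);
 *	map_write(map, CMD(0x40), adr);	  (flash leaves array mode here)
 *	map_write(map, datum, adr);
 *	...poll status using __xipram helpers only...
 *	xip_enable(map, chip, adr);	  (array mode and IRQs restored)
 *
 * Everything between the two calls runs with interrupts masked and must
 * itself execute from RAM (__xipram) when the kernel runs in place.
 */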
static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}
/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function is polling for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */
static int __xipram xip_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long adr, unsigned int chip_op_time )
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long usec, suspended, start, done;
	flstate_t oldstate, newstate;

	start = xip_currtime();
	usec = chip_op_time * 8;
	if (usec == 0)
		usec = 500000;
	done = 0;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return -EIO;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
		done = xip_elapsed_since(start);
	} while (!map_word_andequal(map, status, OK, OK)
		 && done < usec);

	return (done >= usec) ? -ETIME : 0;
}
/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec)  \
	xip_wait_for_operation(map, chip, cmd_adr, usec)
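/*
 * For reference, a caller in this file uses the macro as follows (see
 * do_write_oneword() below): the just-written range is invalidated and
 * the chip polled for up to 8x the typical word write time:
 *
 *	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
 *				   adr, map_bankwidth(map),
 *				   chip->word_write_time);
 *
 * In the XIP build this expands to xip_wait_for_operation(); otherwise
 * it maps to inval_cache_and_wait_for_operation() defined below.
 */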
#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
static int inval_cache_and_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
		unsigned int chip_op_time)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80);
	int chip_state = chip->state;
	unsigned int timeo, sleep_time;

	spin_unlock(chip->mutex);
	if (inval_len)
		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
	spin_lock(chip->mutex);

	/* set our timeout to 8 times the expected delay */
	timeo = chip_op_time * 8;
	if (!timeo)
		timeo = 500000;
	sleep_time = chip_op_time / 2;

	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		if (!timeo) {
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			return -ETIME;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		spin_unlock(chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		spin_lock(chip->mutex);

		while (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
		}
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	return 0;
}

#endif
#define WAIT_TIMEOUT(map, chip, adr, udelay) \
	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay)
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	*mtdbuf = (void *)map->virt + from;
	*retlen = 0;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;
		*retlen += thislen;
		len -= thislen;
		ofs = 0;
		chipnum++;
	}
	return ret;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd;
	int ret=0;

	adr += chip->start;

	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, map_bankwidth(map),
				   chip->word_write_time);
	if (ret) {
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
					       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd, datum;
	unsigned long cmd_adr;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
	}
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;
	map_write(map, write_cmd, cmd_adr);
	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
	if (ret) {
		/* Argh. Not ready for write to buffer */
		map_word Xstatus = map_read(map, cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		status = map_read(map, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
		       map->name, Xstatus.x[0], status.x[0]);
		goto out;
	}

	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
	if (!word_gap) {
		words--;
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}

	/* Write length of data to come */
	map_write(map, CMD(words), cmd_adr );
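	/*
	 * Worked example with hypothetical numbers: with
	 * map_bankwidth(map) == 4 and adr ending in ...0x02, word_gap
	 * starts as (-adr & 3) = 2, so a 7-byte write gives
	 * words = (7 - 2 + 3) / 4 = 2.  The else branch above then turns
	 * word_gap into 4 - 2 = 2 bytes of 0xff padding and rewinds adr
	 * to the bus boundary.  Either way "words" ends up holding the
	 * N-1 count the Write-to-Buffer command expects (3 bus words here).
	 */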
	/* Write data */
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		int n = map_bankwidth(map) - word_gap;
		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && len < map_bankwidth(map))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
					      vec->iov_base + vec_seek,
					      word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, adr);
			adr += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
				   adr, len,
				   chip->buffer_write_time);
	if (ret) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, cmd_adr);
 out:	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	size_t len = 0;

	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}

		/* Be nice and reschedule with the chip in a usable state for other
		   processes. */
		cond_resched();

	} while (len);

	return 0;
}
static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct kvec vec;

	vec.iov_base = (void *) buf;
	vec.iov_len = len;

	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
}
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;
	int retries = 3;
	int ret;

	adr += chip->start;

 retry:
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, len,
				   chip->erase_time);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
		goto out;
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for errors */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
			ret = -EIO;
		} else if (chipstatus & 0x20 && retries--) {
			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
			put_chip(map, chip, adr);
			spin_unlock(chip->mutex);
			goto retry;
		} else {
			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
			ret = -EIO;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			chip->oldstate = FL_READY;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}
#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	adr += chip->start;
	xip_disable(map, chip, adr+(2*ofs_factor));
	map_write(map, CMD(0x90), adr+(2*ofs_factor));
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, status);
	return 0;
}
#endif
#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)

static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int udelay;
	int ret;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	/*
	 * If Instant Individual Block Locking supported then no need
	 * to delay.
	 */
	udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;

	ret = WAIT_TIMEOUT(map, chip, adr, udelay);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	return ret;
}
#ifdef CONFIG_MTD_OTP

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);
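/*
 * All three OTP primitives below (do_otp_read/do_otp_write/do_otp_lock)
 * share this signature so cfi_intelext_otp_walk() can dispatch them
 * interchangeably, along these lines (sketch):
 *
 *	otp_op_t action = do_otp_read;
 *	ret = action(map, chip, data_offset, buf, size,
 *		     reg_prot_offset, groupno, groupsize);
 *
 * A NULL action instead makes the walker emit struct otp_info records
 * describing the regions.
 */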
static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	spin_unlock(chip->mutex);
	return 0;
}

static int
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

	while (size) {
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		offset += n;
		buf += n;
		size -= n;
	}

	return 0;
}

static int
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}
static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;
	chip_num = 0;

	/* Some chips have OTP located in the _top_ partition only.
	   For example: Intel 28F256L18T (T means top-parameter device) */
	if (cfi->mfr == MANUFACTURER_INTEL) {
		switch (cfi->id) {
		case 0x880b:
		case 0x880c:
		case 0x880d:
			chip_num = chip_step - 1;
		}
	}

	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {
			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;
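			/*
			 * Worked example with hypothetical numbers: a x16
			 * chip (device_type = 2) in a 2-way interleave
			 * reporting ProtRegAddr = 0x80 puts the lock word
			 * at map offset 0x80 * 2 * 2 = 0x200 and the data
			 * area at (0x80 + 1) * 4 = 0x204, while group
			 * sizes are scaled by the interleave alone.
			 */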
			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}

			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					len -= sizeof(struct otp_info);
					if (len <= 0)
						return -ENOSPC;
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}

			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}
static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}

static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}

static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_write, 1);
}

static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;
	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}

static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
	return ret ? : retlen;
}

static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
	return ret ? : retlen;
}

#endif
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				/* place the chip in a known state before suspend */
				map_write(map, CMD(0xFF), cfi->chips[i].start);
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
			ret = -EAGAIN;
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}
static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock(chip->mutex);
	}
}
static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i=0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_READY;
		}
		spin_unlock(chip->mutex);
	}

	return 0;
}
static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	kfree(mtd->eraseregions);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");