/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@cam.org>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 */
19
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
24 #include <linux/init.h>
25 #include <asm/io.h>
26 #include <asm/byteorder.h>
27
28 #include <linux/errno.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/interrupt.h>
32 #include <linux/reboot.h>
33 #include <linux/mtd/xip.h>
34 #include <linux/mtd/map.h>
35 #include <linux/mtd/mtd.h>
36 #include <linux/mtd/compatmac.h>
37 #include <linux/mtd/cfi.h>
38
39 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
40 /* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
41
42 // debugging, turns off buffer write mode if set to 1
43 #define FORCE_WORD_WRITE 0
44
45 #define MANUFACTURER_INTEL      0x0089
46 #define I82802AB        0x00ad
47 #define I82802AC        0x00ac
48 #define MANUFACTURER_ST         0x0020
49 #define M50LPW080       0x002F
50
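/*
 * The JEDEC IDs above identify the Intel 82802AB/AC firmware hub parts and
 * the ST M50LPW080; they are matched against jedec_fixup_table below so that
 * these FWH-style devices get the lock/unlock handlers from fwh_lock.h.
 */
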
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
                        size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};
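
/*
 * Note: .probe is NULL because this command set driver is not probed by
 * name; the generic CFI probe identifies command set 0x0001 and calls
 * cfi_cmdset_0001() below directly.  Only .destroy and module ownership
 * are needed here.
 */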

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<4; i++) {   /* bits 2-3 are unnamed here */
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n" );
                mtd->write = cfi_intelext_write_buffers;
                mtd->writev = cfi_intelext_writev;
        }
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor ids and the JEDEC vendor IDs appear
         * to be common.  It is likely that the device ids are
         * common as well.  This table picks up the cases where
         * we know that to be true.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};
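
/*
 * How these tables are applied (see cfi_cmdset_0001() below): cfi_fixup()
 * walks a table in order and runs a fixup whenever its manufacturer/device
 * id matches the probed chip, with CFI_MFR_ANY/CFI_ID_ANY acting as
 * wildcards.  cfi_fixup_table is used for real CFI chips, jedec_fixup_table
 * for chips probed in JEDEC mode, and fixup_table unconditionally as a
 * final pass.
 */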

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_pri_intelext *extp;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
                unsigned int extra_size = 0;
                int nb_parts, i;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size-1];

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __FUNCTION__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if(extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i=0; i< cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
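
/*
 * Command set IDs 0x0003 and 0x0200 are, for the purposes of this driver,
 * handled identically to 0x0001, so the entry points below are simple
 * linker aliases of cfi_cmdset_0001().
 */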
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i,mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if(mtd) {
                kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __FUNCTION__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        unsigned long timeo;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
                /*
                 * OK. We have possibility for contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        int ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        ret = get_chip(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        timeo = jiffies + HZ;
                        spin_lock(&shared->lock);
                        spin_unlock(contender->mutex);
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                spin_unlock(&shared->lock);
        }

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
                                       map->name, status.x[0]);
                                return -EIO;
                        }
                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }

        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;

        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                goto resettime;
        }
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        spin_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                spin_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP-disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */
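
/*
 * Sketch of the usage pattern the pair below enforces (illustrative only;
 * the real call sites are in the write/erase paths further down):
 *
 *      xip_disable(map, chip, adr);    // IRQs off, flash may leave array mode
 *      ... issue flash commands, poll status via __xipram helpers ...
 *      xip_enable(map, chip, adr);     // back to array mode, IRQs on
 */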

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}
/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */
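
/*
 * For reference, the Intel command opcodes used in the suspend/resume
 * dance below: 0xb0 = suspend, 0xd0 = resume, 0x70 = read status,
 * 0xff = read array.
 */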

static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, unsigned int chip_op_time )
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
        usec = chip_op_time * 8;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return -EIO;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        asm volatile (".rep 8; nop; .endr");
                        local_irq_enable();
                        spin_unlock(chip->mutex);
                        asm volatile (".rep 8; nop; .endr");
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        spin_lock(chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                spin_unlock(chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                spin_lock(chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
                done = xip_elapsed_since(start);
        } while (!map_word_andequal(map, status, OK, OK)
                 && done < usec);

        return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
        xip_wait_for_operation(map, chip, cmd_adr, usec)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

static int inval_cache_and_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
                unsigned int chip_op_time)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80);
        int chip_state = chip->state;
        unsigned int timeo, sleep_time;

        spin_unlock(chip->mutex);
        if (inval_len)
                INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
        spin_lock(chip->mutex);

        /* set our timeout to 8 times the expected delay */
        timeo = chip_op_time * 8;
        if (!timeo)
                timeo = 500000;
        sleep_time = chip_op_time / 2;

        for (;;) {
                status = map_read(map, cmd_adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                if (!timeo) {
                        map_write(map, CMD(0x70), cmd_adr);
                        chip->state = FL_STATUS;
                        return -ETIME;
                }

                /* OK Still waiting. Drop the lock, wait a while and retry. */
                spin_unlock(chip->mutex);
                if (sleep_time >= 1000000/HZ) {
                        /*
                         * Half of the normal delay still remaining
                         * can be performed with a sleeping delay instead
                         * of busy waiting.
                         */
                        msleep(sleep_time/1000);
                        timeo -= sleep_time;
                        sleep_time = 1000000/HZ;
                } else {
                        udelay(1);
                        cond_resched();
                        timeo--;
                }
                spin_lock(chip->mutex);

                while (chip->state != chip_state) {
                        /* Someone's suspended the operation: sleep */
                        DECLARE_WAITQUEUE(wait, current);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        spin_lock(chip->mutex);
                }
        }

        /* Done and happy. */
        chip->state = FL_STATUS;
        return 0;
}

#endif

#define WAIT_TIMEOUT(map, chip, adr, udelay) \
        INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);
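
/*
 * WAIT_TIMEOUT is a convenience wrapper for callers that only need to wait
 * for the chip to report ready: it passes a zero-length invalidate range,
 * so no cache invalidation is performed.
 */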


static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);

        ret = get_chip(map, chip, cmd_addr, FL_POINT);

        if (!ret) {
                if (chip->state != FL_POINT && chip->state != FL_READY)
                        map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_POINT;
                chip->ref_point_counter++;
        }
        spin_unlock(chip->mutex);

        return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        if (!map->virt || (from + len > mtd->size))
                return -EINVAL;

        *mtdbuf = (void *)map->virt + from;
        *retlen = 0;

        /* Now lock the chip(s) to POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;

                ofs = 0;
                chipnum++;
        }
        return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;

        /* Now unlock the chip(s) POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum <<  cfi->chipshift);

        while (len) {
                unsigned long thislen;
                struct flchip *chip;

                if (chipnum >= cfi->numchips)
                        break;
                chip = &cfi->chips[chipnum];

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                spin_lock(chip->mutex);
                if (chip->state == FL_POINT) {
                        chip->ref_point_counter--;
                        if(chip->ref_point_counter == 0)
                                chip->state = FL_READY;
                } else
                        printk(KERN_ERR "%s: Warning: unpoint called on non-pointed region\n", map->name); /* Should this give an error? */

                put_chip(map, chip, chip->start);
                spin_unlock(chip->mutex);

                len -= thislen;
                ofs = 0;
                chipnum++;
        }
}
1237
1238 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1239 {
1240         unsigned long cmd_addr;
1241         struct cfi_private *cfi = map->fldrv_priv;
1242         int ret;
1243
1244         adr += chip->start;
1245
1246         /* Ensure cmd read/writes are aligned. */
1247         cmd_addr = adr & ~(map_bankwidth(map)-1);
1248
1249         spin_lock(chip->mutex);
1250         ret = get_chip(map, chip, cmd_addr, FL_READY);
1251         if (ret) {
1252                 spin_unlock(chip->mutex);
1253                 return ret;
1254         }
1255
1256         if (chip->state != FL_POINT && chip->state != FL_READY) {
1257                 map_write(map, CMD(0xff), cmd_addr);
1258
1259                 chip->state = FL_READY;
1260         }
1261
1262         map_copy_from(map, buf, adr, len);
1263
1264         put_chip(map, chip, cmd_addr);
1265
1266         spin_unlock(chip->mutex);
1267         return 0;
1268 }
1269
1270 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1271 {
1272         struct map_info *map = mtd->priv;
1273         struct cfi_private *cfi = map->fldrv_priv;
1274         unsigned long ofs;
1275         int chipnum;
1276         int ret = 0;
1277
1278         /* ofs: offset within the first chip that the first read should start */
1279         chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);
1281
1282         *retlen = 0;
1283
1284         while (len) {
1285                 unsigned long thislen;
1286
1287                 if (chipnum >= cfi->numchips)
1288                         break;
1289
		if ((len + ofs - 1) >> cfi->chipshift)
1291                         thislen = (1<<cfi->chipshift) - ofs;
1292                 else
1293                         thislen = len;
1294
1295                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1296                 if (ret)
1297                         break;
1298
1299                 *retlen += thislen;
1300                 len -= thislen;
1301                 buf += thislen;
1302
1303                 ofs = 0;
1304                 chipnum++;
1305         }
1306         return ret;
1307 }
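
/*
 * The chip-split arithmetic above, traced for a hypothetical read
 * with cfi->chipshift == 23 (8MiB chips), from == 0x7ffff0 and
 * len == 0x20:
 *
 *   chipnum = 0x7ffff0 >> 23 = 0,  ofs = 0x7ffff0
 *   (len + ofs - 1) >> 23 != 0, so thislen = 0x800000 - 0x7ffff0 = 0x10
 *   second pass: chipnum = 1, ofs = 0, thislen = remaining 0x10
 */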
1308
1309 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1310                                      unsigned long adr, map_word datum, int mode)
1311 {
1312         struct cfi_private *cfi = map->fldrv_priv;
1313         map_word status, write_cmd;
	int ret = 0;
1315
1316         adr += chip->start;
1317
1318         switch (mode) {
1319         case FL_WRITING:
1320                 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1321                 break;
1322         case FL_OTP_WRITE:
1323                 write_cmd = CMD(0xc0);
1324                 break;
1325         default:
1326                 return -EINVAL;
1327         }
1328
1329         spin_lock(chip->mutex);
1330         ret = get_chip(map, chip, adr, mode);
1331         if (ret) {
1332                 spin_unlock(chip->mutex);
1333                 return ret;
1334         }
1335
1336         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1337         ENABLE_VPP(map);
1338         xip_disable(map, chip, adr);
1339         map_write(map, write_cmd, adr);
1340         map_write(map, datum, adr);
1341         chip->state = mode;
1342
1343         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1344                                    adr, map_bankwidth(map),
1345                                    chip->word_write_time);
1346         if (ret) {
1347                 xip_enable(map, chip, adr);
1348                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1349                 goto out;
1350         }
1351
1352         /* check for errors */
1353         status = map_read(map, adr);
1354         if (map_word_bitsset(map, status, CMD(0x1a))) {
1355                 unsigned long chipstatus = MERGESTATUS(status);
1356
1357                 /* reset status */
1358                 map_write(map, CMD(0x50), adr);
1359                 map_write(map, CMD(0x70), adr);
1360                 xip_enable(map, chip, adr);
1361
1362                 if (chipstatus & 0x02) {
1363                         ret = -EROFS;
1364                 } else if (chipstatus & 0x08) {
1365                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1366                         ret = -EIO;
1367                 } else {
1368                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1369                         ret = -EINVAL;
1370                 }
1371
1372                 goto out;
1373         }
1374
1375         xip_enable(map, chip, adr);
1376  out:   put_chip(map, chip, adr);
1377         spin_unlock(chip->mutex);
1378         return ret;
1379 }
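
/*
 * The error decode above follows the standard Intel status register
 * layout: SR.7 ready, SR.5 erase error, SR.4 program error, SR.3 VPP
 * low, SR.1 block locked.  CMD(0x1a) thus tests SR.4|SR.3|SR.1 on
 * every interleaved device at once, and MERGESTATUS() folds the
 * per-device copies back into a single value so the 0x02 (locked)
 * and 0x08 (VPP) checks work for any interleave.
 */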
1380
1381
static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1383 {
1384         struct map_info *map = mtd->priv;
1385         struct cfi_private *cfi = map->fldrv_priv;
1386         int ret = 0;
1387         int chipnum;
1388         unsigned long ofs;
1389
1390         *retlen = 0;
1391         if (!len)
1392                 return 0;
1393
1394         chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
1396
1397         /* If it's not bus-aligned, do the first byte write */
1398         if (ofs & (map_bankwidth(map)-1)) {
1399                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1400                 int gap = ofs - bus_ofs;
1401                 int n;
1402                 map_word datum;
1403
1404                 n = min_t(int, len, map_bankwidth(map)-gap);
1405                 datum = map_word_ff(map);
1406                 datum = map_word_load_partial(map, datum, buf, gap, n);
1407
1408                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1409                                                bus_ofs, datum, FL_WRITING);
1410                 if (ret)
1411                         return ret;
1412
1413                 len -= n;
1414                 ofs += n;
1415                 buf += n;
1416                 (*retlen) += n;
1417
1418                 if (ofs >> cfi->chipshift) {
			chipnum++;
1420                         ofs = 0;
1421                         if (chipnum == cfi->numchips)
1422                                 return 0;
1423                 }
1424         }
1425
	while (len >= map_bankwidth(map)) {
1427                 map_word datum = map_word_load(map, buf);
1428
1429                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1430                                        ofs, datum, FL_WRITING);
1431                 if (ret)
1432                         return ret;
1433
1434                 ofs += map_bankwidth(map);
1435                 buf += map_bankwidth(map);
1436                 (*retlen) += map_bankwidth(map);
1437                 len -= map_bankwidth(map);
1438
1439                 if (ofs >> cfi->chipshift) {
			chipnum++;
1441                         ofs = 0;
1442                         if (chipnum == cfi->numchips)
1443                                 return 0;
1444                 }
1445         }
1446
1447         if (len & (map_bankwidth(map)-1)) {
1448                 map_word datum;
1449
1450                 datum = map_word_ff(map);
1451                 datum = map_word_load_partial(map, datum, buf, 0, len);
1452
1453                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1454                                        ofs, datum, FL_WRITING);
1455                 if (ret)
1456                         return ret;
1457
1458                 (*retlen) += len;
1459         }
1460
1461         return 0;
1462 }
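
/*
 * A minimal sketch of the head/body/tail split performed above,
 * assuming map_bankwidth(map) == 4 and an 11-byte write at offset 2:
 *
 *   head: bus_ofs = 0, gap = 2, n = 2  -> one padded word write
 *   body: two full words at offsets 4 and 8
 *   tail: 1 byte left, padded          -> one more word write
 *
 * Padding with map_word_ff() is harmless because programming can
 * only clear flash bits; the 0xff filler leaves its bytes untouched.
 */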
1463
1464
1465 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1466                                     unsigned long adr, const struct kvec **pvec,
1467                                     unsigned long *pvec_seek, int len)
1468 {
1469         struct cfi_private *cfi = map->fldrv_priv;
1470         map_word status, write_cmd, datum;
1471         unsigned long cmd_adr;
	int ret, wbufsize, word_gap, words;
	unsigned long initial_adr;
	int initial_len = len;
1473         const struct kvec *vec;
1474         unsigned long vec_seek;
1475
1476         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	initial_adr = adr;
1478         cmd_adr = adr & ~(wbufsize-1);
1479
1480         /* Let's determine this according to the interleave only once */
1481         write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1482
1483         spin_lock(chip->mutex);
1484         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1485         if (ret) {
1486                 spin_unlock(chip->mutex);
1487                 return ret;
1488         }
1489
1490         XIP_INVAL_CACHED_RANGE(map, adr, len);
1491         ENABLE_VPP(map);
1492         xip_disable(map, chip, cmd_adr);
1493
	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1495            [...], the device will not accept any more Write to Buffer commands".
1496            So we must check here and reset those bits if they're set. Otherwise
1497            we're just pissing in the wind */
1498         if (chip->state != FL_STATUS) {
1499                 map_write(map, CMD(0x70), cmd_adr);
1500                 chip->state = FL_STATUS;
1501         }
1502         status = map_read(map, cmd_adr);
1503         if (map_word_bitsset(map, status, CMD(0x30))) {
1504                 xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "%s: SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", map->name, status.x[0]);
1506                 xip_disable(map, chip, cmd_adr);
1507                 map_write(map, CMD(0x50), cmd_adr);
1508                 map_write(map, CMD(0x70), cmd_adr);
1509         }
1510
1511         chip->state = FL_WRITING_TO_BUFFER;
1512         map_write(map, write_cmd, cmd_adr);
1513         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
1514         if (ret) {
1515                 /* Argh. Not ready for write to buffer */
1516                 map_word Xstatus = map_read(map, cmd_adr);
1517                 map_write(map, CMD(0x70), cmd_adr);
1518                 chip->state = FL_STATUS;
1519                 status = map_read(map, cmd_adr);
1520                 map_write(map, CMD(0x50), cmd_adr);
1521                 map_write(map, CMD(0x70), cmd_adr);
1522                 xip_enable(map, chip, cmd_adr);
1523                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1524                                 map->name, Xstatus.x[0], status.x[0]);
1525                 goto out;
1526         }
1527
1528         /* Figure out the number of words to write */
1529         word_gap = (-adr & (map_bankwidth(map)-1));
1530         words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
1531         if (!word_gap) {
1532                 words--;
1533         } else {
1534                 word_gap = map_bankwidth(map) - word_gap;
1535                 adr -= word_gap;
1536                 datum = map_word_ff(map);
1537         }
1538
1539         /* Write length of data to come */
	map_write(map, CMD(words), cmd_adr);
1541
1542         /* Write data */
1543         vec = *pvec;
1544         vec_seek = *pvec_seek;
1545         do {
1546                 int n = map_bankwidth(map) - word_gap;
1547                 if (n > vec->iov_len - vec_seek)
1548                         n = vec->iov_len - vec_seek;
1549                 if (n > len)
1550                         n = len;
1551
1552                 if (!word_gap && len < map_bankwidth(map))
1553                         datum = map_word_ff(map);
1554
1555                 datum = map_word_load_partial(map, datum,
1556                                               vec->iov_base + vec_seek,
1557                                               word_gap, n);
1558
1559                 len -= n;
1560                 word_gap += n;
1561                 if (!len || word_gap == map_bankwidth(map)) {
1562                         map_write(map, datum, adr);
1563                         adr += map_bankwidth(map);
1564                         word_gap = 0;
1565                 }
1566
1567                 vec_seek += n;
1568                 if (vec_seek == vec->iov_len) {
1569                         vec++;
1570                         vec_seek = 0;
1571                 }
1572         } while (len);
1573         *pvec = vec;
1574         *pvec_seek = vec_seek;
1575
1576         /* GO GO GO */
1577         map_write(map, CMD(0xd0), cmd_adr);
1578         chip->state = FL_WRITING;
1579
	/* adr and len were consumed by the copy loop above, so pass the
	   saved initial values for the cache invalidation range. */
	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
				   initial_adr, initial_len,
				   chip->buffer_write_time);
1583         if (ret) {
1584                 map_write(map, CMD(0x70), cmd_adr);
1585                 chip->state = FL_STATUS;
1586                 xip_enable(map, chip, cmd_adr);
1587                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1588                 goto out;
1589         }
1590
1591         /* check for errors */
1592         status = map_read(map, cmd_adr);
1593         if (map_word_bitsset(map, status, CMD(0x1a))) {
1594                 unsigned long chipstatus = MERGESTATUS(status);
1595
1596                 /* reset status */
1597                 map_write(map, CMD(0x50), cmd_adr);
1598                 map_write(map, CMD(0x70), cmd_adr);
1599                 xip_enable(map, chip, cmd_adr);
1600
1601                 if (chipstatus & 0x02) {
1602                         ret = -EROFS;
1603                 } else if (chipstatus & 0x08) {
1604                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1605                         ret = -EIO;
1606                 } else {
1607                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1608                         ret = -EINVAL;
1609                 }
1610
1611                 goto out;
1612         }
1613
1614         xip_enable(map, chip, cmd_adr);
1615  out:   put_chip(map, chip, cmd_adr);
1616         spin_unlock(chip->mutex);
1617         return ret;
1618 }
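
/*
 * The handshake above follows the Intel buffer-write sequence:
 * 0xe8 (0xe9 for 0x0200-style parts) requests the write buffer and
 * WAIT_TIMEOUT() polls until the chip reports it available; the
 * count programmed at cmd_adr is N-1 for N bus words, which is why
 * `words` is decremented in the aligned case; 0xd0 confirms the
 * buffer and starts the actual program operation.
 */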
1619
1620 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1621                                 unsigned long count, loff_t to, size_t *retlen)
1622 {
1623         struct map_info *map = mtd->priv;
1624         struct cfi_private *cfi = map->fldrv_priv;
1625         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1626         int ret = 0;
1627         int chipnum;
1628         unsigned long ofs, vec_seek, i;
1629         size_t len = 0;
1630
1631         for (i = 0; i < count; i++)
1632                 len += vecs[i].iov_len;
1633
1634         *retlen = 0;
1635         if (!len)
1636                 return 0;
1637
1638         chipnum = to >> cfi->chipshift;
1639         ofs = to - (chipnum << cfi->chipshift);
1640         vec_seek = 0;
1641
1642         do {
1643                 /* We must not cross write block boundaries */
1644                 int size = wbufsize - (ofs & (wbufsize-1));
1645
1646                 if (size > len)
1647                         size = len;
1648                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1649                                       ofs, &vecs, &vec_seek, size);
1650                 if (ret)
1651                         return ret;
1652
1653                 ofs += size;
1654                 (*retlen) += size;
1655                 len -= size;
1656
1657                 if (ofs >> cfi->chipshift) {
			chipnum++;
1659                         ofs = 0;
1660                         if (chipnum == cfi->numchips)
1661                                 return 0;
1662                 }
1663
1664                 /* Be nice and reschedule with the chip in a usable state for other
1665                    processes. */
1666                 cond_resched();
1667
1668         } while (len);
1669
1670         return 0;
1671 }
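
/*
 * Chunking example for the loop above, assuming wbufsize == 32: a
 * 100-byte write starting at ofs 0x1c is issued as 4 + 32 + 32 + 32
 * byte calls, so no single do_write_buffer() invocation crosses a
 * write-buffer boundary.  vecs and vec_seek advance inside
 * do_write_buffer() itself, which is why they are passed by
 * reference.
 */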
1672
1673 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1674                                        size_t len, size_t *retlen, const u_char *buf)
1675 {
1676         struct kvec vec;
1677
1678         vec.iov_base = (void *) buf;
1679         vec.iov_len = len;
1680
1681         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1682 }
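
/*
 * cfi_intelext_write_buffers() is the single-buffer front end: it
 * wraps buf/len in a one-element kvec so that vectored and plain
 * writes share the cfi_intelext_writev() path above.
 */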
1683
1684 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1685                                       unsigned long adr, int len, void *thunk)
1686 {
1687         struct cfi_private *cfi = map->fldrv_priv;
1688         map_word status;
1689         int retries = 3;
1690         int ret;
1691
1692         adr += chip->start;
1693
1694  retry:
1695         spin_lock(chip->mutex);
1696         ret = get_chip(map, chip, adr, FL_ERASING);
1697         if (ret) {
1698                 spin_unlock(chip->mutex);
1699                 return ret;
1700         }
1701
1702         XIP_INVAL_CACHED_RANGE(map, adr, len);
1703         ENABLE_VPP(map);
1704         xip_disable(map, chip, adr);
1705
1706         /* Clear the status register first */
1707         map_write(map, CMD(0x50), adr);
1708
1709         /* Now erase */
1710         map_write(map, CMD(0x20), adr);
1711         map_write(map, CMD(0xD0), adr);
1712         chip->state = FL_ERASING;
1713         chip->erase_suspended = 0;
1714
1715         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1716                                    adr, len,
1717                                    chip->erase_time);
1718         if (ret) {
1719                 map_write(map, CMD(0x70), adr);
1720                 chip->state = FL_STATUS;
1721                 xip_enable(map, chip, adr);
1722                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1723                 goto out;
1724         }
1725
1726         /* We've broken this before. It doesn't hurt to be safe */
1727         map_write(map, CMD(0x70), adr);
1728         chip->state = FL_STATUS;
1729         status = map_read(map, adr);
1730
1731         /* check for errors */
1732         if (map_word_bitsset(map, status, CMD(0x3a))) {
1733                 unsigned long chipstatus = MERGESTATUS(status);
1734
1735                 /* Reset the error bits */
1736                 map_write(map, CMD(0x50), adr);
1737                 map_write(map, CMD(0x70), adr);
1738                 xip_enable(map, chip, adr);
1739
1740                 if ((chipstatus & 0x30) == 0x30) {
1741                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1742                         ret = -EINVAL;
1743                 } else if (chipstatus & 0x02) {
1744                         /* Protection bit set */
1745                         ret = -EROFS;
		} else if (chipstatus & 0x08) {
1747                         /* Voltage */
1748                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1749                         ret = -EIO;
		} else if ((chipstatus & 0x20) && retries--) {
1751                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1752                         put_chip(map, chip, adr);
1753                         spin_unlock(chip->mutex);
1754                         goto retry;
1755                 } else {
1756                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1757                         ret = -EIO;
1758                 }
1759
1760                 goto out;
1761         }
1762
1763         xip_enable(map, chip, adr);
1764  out:   put_chip(map, chip, adr);
1765         spin_unlock(chip->mutex);
1766         return ret;
1767 }
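
/*
 * Erase follows the Intel two-cycle sequence: 0x20 (erase setup) then
 * 0xd0 (confirm) at any address within the block.  The error mask
 * CMD(0x3a) adds SR.5 (erase error) to the bits checked after word
 * writes; SR.4 and SR.5 set together signal a botched command
 * sequence, while a lone SR.5 is retried a few times before the
 * block is given up on.
 */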
1768
1769 int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1770 {
1771         unsigned long ofs, len;
1772         int ret;
1773
1774         ofs = instr->addr;
1775         len = instr->len;
1776
1777         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1778         if (ret)
1779                 return ret;
1780
1781         instr->state = MTD_ERASE_DONE;
1782         mtd_erase_callback(instr);
1783
1784         return 0;
1785 }
1786
1787 static void cfi_intelext_sync (struct mtd_info *mtd)
1788 {
1789         struct map_info *map = mtd->priv;
1790         struct cfi_private *cfi = map->fldrv_priv;
1791         int i;
1792         struct flchip *chip;
1793         int ret = 0;
1794
1795         for (i=0; !ret && i<cfi->numchips; i++) {
1796                 chip = &cfi->chips[i];
1797
1798                 spin_lock(chip->mutex);
1799                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1800
1801                 if (!ret) {
1802                         chip->oldstate = chip->state;
1803                         chip->state = FL_SYNCING;
1804                         /* No need to wake_up() on this state change -
1805                          * as the whole point is that nobody can do anything
1806                          * with the chip now anyway.
1807                          */
1808                 }
1809                 spin_unlock(chip->mutex);
1810         }
1811
1812         /* Unlock the chips again */
1813
	for (i--; i >= 0; i--) {
1815                 chip = &cfi->chips[i];
1816
1817                 spin_lock(chip->mutex);
1818
1819                 if (chip->state == FL_SYNCING) {
1820                         chip->state = chip->oldstate;
1821                         chip->oldstate = FL_READY;
1822                         wake_up(&chip->wq);
1823                 }
1824                 spin_unlock(chip->mutex);
1825         }
1826 }
1827
1828 #ifdef DEBUG_LOCK_BITS
1829 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1830                                                 struct flchip *chip,
1831                                                 unsigned long adr,
1832                                                 int len, void *thunk)
1833 {
1834         struct cfi_private *cfi = map->fldrv_priv;
1835         int status, ofs_factor = cfi->interleave * cfi->device_type;
1836
1837         adr += chip->start;
1838         xip_disable(map, chip, adr+(2*ofs_factor));
1839         map_write(map, CMD(0x90), adr+(2*ofs_factor));
1840         chip->state = FL_JEDEC_QUERY;
1841         status = cfi_read_query(map, adr+(2*ofs_factor));
1842         xip_enable(map, chip, 0);
1843         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1844                adr, status);
1845         return 0;
1846 }
1847 #endif
1848
1849 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
1850 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
1851
1852 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1853                                        unsigned long adr, int len, void *thunk)
1854 {
1855         struct cfi_private *cfi = map->fldrv_priv;
1856         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1857         int udelay;
1858         int ret;
1859
1860         adr += chip->start;
1861
1862         spin_lock(chip->mutex);
1863         ret = get_chip(map, chip, adr, FL_LOCKING);
1864         if (ret) {
1865                 spin_unlock(chip->mutex);
1866                 return ret;
1867         }
1868
1869         ENABLE_VPP(map);
1870         xip_disable(map, chip, adr);
1871
1872         map_write(map, CMD(0x60), adr);
1873         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1874                 map_write(map, CMD(0x01), adr);
1875                 chip->state = FL_LOCKING;
1876         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1877                 map_write(map, CMD(0xD0), adr);
1878                 chip->state = FL_UNLOCKING;
1879         } else
1880                 BUG();
1881
1882         /*
1883          * If Instant Individual Block Locking supported then no need
1884          * to delay.
1885          */
1886         udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
1887
1888         ret = WAIT_TIMEOUT(map, chip, adr, udelay);
1889         if (ret) {
1890                 map_write(map, CMD(0x70), adr);
1891                 chip->state = FL_STATUS;
1892                 xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
1894                 goto out;
1895         }
1896
1897         xip_enable(map, chip, adr);
1898 out:    put_chip(map, chip, adr);
1899         spin_unlock(chip->mutex);
1900         return ret;
1901 }
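
/*
 * Lock-bit manipulation per the Intel command set: 0x60 (setup)
 * followed by 0x01 sets the block lock bit, 0x60 followed by 0xd0
 * clears it.  Without Instant Individual Block Locking (extended
 * query FeatureSupport bit 5) the operation takes real time, hence
 * the 1000000/HZ microsecond wait handed to WAIT_TIMEOUT().
 */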
1902
1903 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1904 {
1905         int ret;
1906
1907 #ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%zx\n",
	       __FUNCTION__, ofs, len);
1910         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1911                 ofs, len, 0);
1912 #endif
1913
1914         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1915                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1916
1917 #ifdef DEBUG_LOCK_BITS
1918         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1919                __FUNCTION__, ret);
1920         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1921                 ofs, len, 0);
1922 #endif
1923
1924         return ret;
1925 }
1926
1927 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1928 {
1929         int ret;
1930
1931 #ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%zx\n",
	       __FUNCTION__, ofs, len);
1934         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1935                 ofs, len, 0);
1936 #endif
1937
1938         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1939                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1940
1941 #ifdef DEBUG_LOCK_BITS
1942         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1943                __FUNCTION__, ret);
1944         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1945                 ofs, len, 0);
1946 #endif
1947
1948         return ret;
1949 }
1950
1951 #ifdef CONFIG_MTD_OTP
1952
1953 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
1954                         u_long data_offset, u_char *buf, u_int size,
1955                         u_long prot_offset, u_int groupno, u_int groupsize);
1956
1957 static int __xipram
1958 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
1959             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
1960 {
1961         struct cfi_private *cfi = map->fldrv_priv;
1962         int ret;
1963
1964         spin_lock(chip->mutex);
1965         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
1966         if (ret) {
1967                 spin_unlock(chip->mutex);
1968                 return ret;
1969         }
1970
1971         /* let's ensure we're not reading back cached data from array mode */
1972         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
1973
1974         xip_disable(map, chip, chip->start);
1975         if (chip->state != FL_JEDEC_QUERY) {
1976                 map_write(map, CMD(0x90), chip->start);
1977                 chip->state = FL_JEDEC_QUERY;
1978         }
1979         map_copy_from(map, buf, chip->start + offset, size);
1980         xip_enable(map, chip, chip->start);
1981
1982         /* then ensure we don't keep OTP data in the cache */
1983         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
1984
1985         put_chip(map, chip, chip->start);
1986         spin_unlock(chip->mutex);
1987         return 0;
1988 }
1989
1990 static int
1991 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
1992              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
1993 {
1994         int ret;
1995
1996         while (size) {
1997                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
1998                 int gap = offset - bus_ofs;
1999                 int n = min_t(int, size, map_bankwidth(map)-gap);
2000                 map_word datum = map_word_ff(map);
2001
2002                 datum = map_word_load_partial(map, datum, buf, gap, n);
2003                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2004                 if (ret)
2005                         return ret;
2006
2007                 offset += n;
2008                 buf += n;
2009                 size -= n;
2010         }
2011
2012         return 0;
2013 }
2014
2015 static int
2016 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2017             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2018 {
2019         struct cfi_private *cfi = map->fldrv_priv;
2020         map_word datum;
2021
2022         /* make sure area matches group boundaries */
2023         if (size != grpsz)
2024                 return -EXDEV;
2025
2026         datum = map_word_ff(map);
2027         datum = map_word_clr(map, datum, CMD(1 << grpno));
2028         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2029 }
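
/*
 * Locking an OTP group means clearing its bit in the protection lock
 * word: map_word_clr() drops bit `grpno` from an all-ones pattern and
 * the result is programmed at the lock register offset.  Since flash
 * programming can only turn 1 bits into 0, this is inherently a
 * one-way, one-time operation.
 */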
2030
2031 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2032                                  size_t *retlen, u_char *buf,
2033                                  otp_op_t action, int user_regs)
2034 {
2035         struct map_info *map = mtd->priv;
2036         struct cfi_private *cfi = map->fldrv_priv;
2037         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2038         struct flchip *chip;
2039         struct cfi_intelext_otpinfo *otp;
2040         u_long devsize, reg_prot_offset, data_offset;
2041         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2042         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2043         int ret;
2044
2045         *retlen = 0;
2046
2047         /* Check that we actually have some OTP registers */
2048         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2049                 return -ENODATA;
2050
2051         /* we need real chips here not virtual ones */
2052         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2053         chip_step = devsize >> cfi->chipshift;
2054         chip_num = 0;
2055
2056         /* Some chips have OTP located in the _top_ partition only.
2057            For example: Intel 28F256L18T (T means top-parameter device) */
2058         if (cfi->mfr == MANUFACTURER_INTEL) {
2059                 switch (cfi->id) {
2060                 case 0x880b:
2061                 case 0x880c:
2062                 case 0x880d:
2063                         chip_num = chip_step - 1;
2064                 }
2065         }
2066
2067         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2068                 chip = &cfi->chips[chip_num];
2069                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2070
2071                 /* first OTP region */
2072                 field = 0;
2073                 reg_prot_offset = extp->ProtRegAddr;
2074                 reg_fact_groups = 1;
2075                 reg_fact_size = 1 << extp->FactProtRegSize;
2076                 reg_user_groups = 1;
2077                 reg_user_size = 1 << extp->UserProtRegSize;
2078
2079                 while (len > 0) {
2080                         /* flash geometry fixup */
2081                         data_offset = reg_prot_offset + 1;
2082                         data_offset *= cfi->interleave * cfi->device_type;
2083                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2084                         reg_fact_size *= cfi->interleave;
2085                         reg_user_size *= cfi->interleave;
2086
2087                         if (user_regs) {
2088                                 groups = reg_user_groups;
2089                                 groupsize = reg_user_size;
2090                                 /* skip over factory reg area */
2091                                 groupno = reg_fact_groups;
2092                                 data_offset += reg_fact_groups * reg_fact_size;
2093                         } else {
2094                                 groups = reg_fact_groups;
2095                                 groupsize = reg_fact_size;
2096                                 groupno = 0;
2097                         }
2098
2099                         while (len > 0 && groups > 0) {
2100                                 if (!action) {
2101                                         /*
2102                                          * Special case: if action is NULL
2103                                          * we fill buf with otp_info records.
2104                                          */
2105                                         struct otp_info *otpinfo;
2106                                         map_word lockword;
					if (len <= sizeof(struct otp_info))
						return -ENOSPC;
					len -= sizeof(struct otp_info);
2110                                         ret = do_otp_read(map, chip,
2111                                                           reg_prot_offset,
2112                                                           (u_char *)&lockword,
2113                                                           map_bankwidth(map),
2114                                                           0, 0,  0);
2115                                         if (ret)
2116                                                 return ret;
2117                                         otpinfo = (struct otp_info *)buf;
2118                                         otpinfo->start = from;
2119                                         otpinfo->length = groupsize;
2120                                         otpinfo->locked =
2121                                            !map_word_bitsset(map, lockword,
2122                                                              CMD(1 << groupno));
2123                                         from += groupsize;
2124                                         buf += sizeof(*otpinfo);
2125                                         *retlen += sizeof(*otpinfo);
2126                                 } else if (from >= groupsize) {
2127                                         from -= groupsize;
2128                                         data_offset += groupsize;
2129                                 } else {
2130                                         int size = groupsize;
2131                                         data_offset += from;
2132                                         size -= from;
2133                                         from = 0;
2134                                         if (size > len)
2135                                                 size = len;
2136                                         ret = action(map, chip, data_offset,
2137                                                      buf, size, reg_prot_offset,
2138                                                      groupno, groupsize);
2139                                         if (ret < 0)
2140                                                 return ret;
2141                                         buf += size;
2142                                         len -= size;
2143                                         *retlen += size;
2144                                         data_offset += size;
2145                                 }
2146                                 groupno++;
2147                                 groups--;
2148                         }
2149
2150                         /* next OTP region */
2151                         if (++field == extp->NumProtectionFields)
2152                                 break;
2153                         reg_prot_offset = otp->ProtRegAddr;
2154                         reg_fact_groups = otp->FactGroups;
2155                         reg_fact_size = 1 << otp->FactProtRegSize;
2156                         reg_user_groups = otp->UserGroups;
2157                         reg_user_size = 1 << otp->UserProtRegSize;
2158                         otp++;
2159                 }
2160         }
2161
2162         return 0;
2163 }
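
/*
 * The geometry fixup at the top of the while loop converts datasheet
 * addresses into map offsets: register addresses scale by
 * interleave * device_type (device address units to bus bytes),
 * while register sizes scale by the interleave alone, since each
 * interleaved device contributes its own copy of the register.
 */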
2164
2165 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2166                                            size_t len, size_t *retlen,
2167                                             u_char *buf)
2168 {
2169         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2170                                      buf, do_otp_read, 0);
2171 }
2172
2173 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2174                                            size_t len, size_t *retlen,
2175                                             u_char *buf)
2176 {
2177         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2178                                      buf, do_otp_read, 1);
2179 }
2180
2181 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2182                                             size_t len, size_t *retlen,
2183                                              u_char *buf)
2184 {
2185         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2186                                      buf, do_otp_write, 1);
2187 }
2188
2189 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2190                                            loff_t from, size_t len)
2191 {
2192         size_t retlen;
2193         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2194                                      NULL, do_otp_lock, 1);
2195 }
2196
2197 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2198                                            struct otp_info *buf, size_t len)
2199 {
2200         size_t retlen;
2201         int ret;
2202
2203         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2204         return ret ? : retlen;
2205 }
2206
2207 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2208                                            struct otp_info *buf, size_t len)
2209 {
2210         size_t retlen;
2211         int ret;
2212
2213         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2214         return ret ? : retlen;
2215 }
2216
2217 #endif
2218
2219 static int cfi_intelext_suspend(struct mtd_info *mtd)
2220 {
2221         struct map_info *map = mtd->priv;
2222         struct cfi_private *cfi = map->fldrv_priv;
2223         int i;
2224         struct flchip *chip;
2225         int ret = 0;
2226
2227         for (i=0; !ret && i<cfi->numchips; i++) {
2228                 chip = &cfi->chips[i];
2229
2230                 spin_lock(chip->mutex);
2231
2232                 switch (chip->state) {
2233                 case FL_READY:
2234                 case FL_STATUS:
2235                 case FL_CFI_QUERY:
2236                 case FL_JEDEC_QUERY:
2237                         if (chip->oldstate == FL_READY) {
2238                                 /* place the chip in a known state before suspend */
2239                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2240                                 chip->oldstate = chip->state;
2241                                 chip->state = FL_PM_SUSPENDED;
2242                                 /* No need to wake_up() on this state change -
2243                                  * as the whole point is that nobody can do anything
2244                                  * with the chip now anyway.
2245                                  */
2246                         } else {
2247                                 /* There seems to be an operation pending. We must wait for it. */
2248                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2249                                 ret = -EAGAIN;
2250                         }
2251                         break;
2252                 default:
2253                         /* Should we actually wait? Once upon a time these routines weren't
2254                            allowed to. Or should we return -EAGAIN, because the upper layers
2255                            ought to have already shut down anything which was using the device
2256                            anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
			ret = -EAGAIN;
			/* fall through */
		case FL_PM_SUSPENDED:
2260                         break;
2261                 }
2262                 spin_unlock(chip->mutex);
2263         }
2264
2265         /* Unlock the chips again */
2266
2267         if (ret) {
		for (i--; i >= 0; i--) {
2269                         chip = &cfi->chips[i];
2270
2271                         spin_lock(chip->mutex);
2272
2273                         if (chip->state == FL_PM_SUSPENDED) {
2274                                 /* No need to force it into a known state here,
2275                                    because we're returning failure, and it didn't
2276                                    get power cycled */
2277                                 chip->state = chip->oldstate;
2278                                 chip->oldstate = FL_READY;
2279                                 wake_up(&chip->wq);
2280                         }
2281                         spin_unlock(chip->mutex);
2282                 }
2283         }
2284
2285         return ret;
2286 }
2287
2288 static void cfi_intelext_resume(struct mtd_info *mtd)
2289 {
2290         struct map_info *map = mtd->priv;
2291         struct cfi_private *cfi = map->fldrv_priv;
2292         int i;
2293         struct flchip *chip;
2294
2295         for (i=0; i<cfi->numchips; i++) {
2296
2297                 chip = &cfi->chips[i];
2298
2299                 spin_lock(chip->mutex);
2300
2301                 /* Go to known state. Chip may have been power cycled */
2302                 if (chip->state == FL_PM_SUSPENDED) {
2303                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2304                         chip->oldstate = chip->state = FL_READY;
2305                         wake_up(&chip->wq);
2306                 }
2307
2308                 spin_unlock(chip->mutex);
2309         }
2310 }
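
/*
 * Resume deliberately mirrors suspend: a chip left in
 * FL_PM_SUSPENDED may have been power cycled while asleep, so it is
 * unconditionally sent 0xFF (read array) to put it back into a known
 * state before the waitqueue is woken.
 */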
2311
2312 static int cfi_intelext_reset(struct mtd_info *mtd)
2313 {
2314         struct map_info *map = mtd->priv;
2315         struct cfi_private *cfi = map->fldrv_priv;
2316         int i, ret;
2317
2318         for (i=0; i < cfi->numchips; i++) {
2319                 struct flchip *chip = &cfi->chips[i];
2320
2321                 /* force the completion of any ongoing operation
2322                    and switch to array mode so any bootloader in
2323                    flash is accessible for soft reboot. */
2324                 spin_lock(chip->mutex);
2325                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2326                 if (!ret) {
2327                         map_write(map, CMD(0xff), chip->start);
2328                         chip->state = FL_READY;
2329                 }
2330                 spin_unlock(chip->mutex);
2331         }
2332
2333         return 0;
2334 }
2335
2336 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2337                                void *v)
2338 {
2339         struct mtd_info *mtd;
2340
2341         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2342         cfi_intelext_reset(mtd);
2343         return NOTIFY_DONE;
2344 }
2345
2346 static void cfi_intelext_destroy(struct mtd_info *mtd)
2347 {
2348         struct map_info *map = mtd->priv;
2349         struct cfi_private *cfi = map->fldrv_priv;
2350         cfi_intelext_reset(mtd);
2351         unregister_reboot_notifier(&mtd->reboot_notifier);
2352         kfree(cfi->cmdset_priv);
2353         kfree(cfi->cfiq);
2354         kfree(cfi->chips[0].priv);
2355         kfree(cfi);
2356         kfree(mtd->eraseregions);
2357 }
2358
2359 MODULE_LICENSE("GPL");
2360 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2361 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2362 MODULE_ALIAS("cfi_cmdset_0003");
2363 MODULE_ALIAS("cfi_cmdset_0200");