/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@cam.org>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL      0x0089
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define MANUFACTURER_ST         0x0020
#define M50LPW080       0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
                        size_t len);

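/*
 * get_chip()/put_chip() bracket every flash command sequence in this
 * driver: they arbitrate exclusive access to a chip (or to a hardware
 * partition masquerading as a chip), suspending or waiting out whatever
 * operation currently owns it.
 */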
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<4; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
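        /* 0x3e below means 0x3e + 1 == 63 erase blocks in the second
           region: the low 16 bits encode number-of-blocks - 1 */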
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n");
                mtd->write = cfi_intelext_write_buffers;
                mtd->writev = cfi_intelext_writev;
        }
}

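/*
 * Fixup tables: each entry is { manufacturer id, device id, handler, param }.
 * cfi_fixup() walks a table and calls every handler whose IDs match the
 * probed chip, with CFI_MFR_ANY/CFI_ID_ANY acting as wildcards; a NULL
 * handler terminates the table.
 */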
static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common.  It looks like the device IDs are as
         * well.  This table picks up all the cases where we
         * know that is so.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};

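/*
 * From version 1.3 on, the extended query table carries a variable-size
 * tail (protection register info, burst read info, partition regions).
 * Read the fixed-size part first; whenever parsing shows the tail to be
 * larger than what was fetched, re-read the whole table with the bigger
 * size.
 */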
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_pri_intelext *extp;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
                unsigned int extra_size = 0;
                int nb_parts, i;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size-1];

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __FUNCTION__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        memset(mtd, 0, sizeof(*mtd));
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if (extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i=0; i<cfi->numchips; i++) {
                cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
                cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
                cfi->chips[i].erase_time = 1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
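/* Command sets 0x0003 and 0x0200 are handled by the same code as 0x0001 */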
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
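                /* CFI encodes each erase region as: bits 31-16 = block
                   size in units of 256 bytes, bits 15-0 = number of
                   blocks - 1 */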
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i,mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if (mtd) {
                kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
                        MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               MTD_PROGREGION_CTRLMODE_VALID(mtd),
                               MTD_PROGREGION_CTRLMODE_INVALID(mtd));
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
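                /* numparts is expected to be a power of two here, so
                   __ffs(numparts) is effectively log2(numparts) */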
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk(KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __FUNCTION__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
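        /* status_OK tests SR.7, the Write State Machine ready bit;
           status_PWS tests SR.0, used below as a partition write status
           indication on multi-partition chips */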
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        unsigned long timeo;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
                /*
                 * OK. There is a possibility of contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it out in the partition which
                 * currently has authority over the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine needed to perform the desired operation
                         * on this partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        int ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        ret = get_chip(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        timeo = jiffies + HZ;
                        spin_lock(&shared->lock);
                        spin_unlock(contender->mutex);
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                spin_unlock(&shared->lock);
        }

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
                                       map->name, status.x[0]);
                                return -EIO;
                        }
                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }
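                /* fall through -- the chip is now ready */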

        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;

                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;

        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                goto resettime;
        }
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        spin_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                spin_unlock(&shared->lock);
        }

        switch (chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP-disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, unsigned int chip_op_time)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
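        /* allow up to 8 times the typical operation time before timing
           out (same policy as inval_cache_and_wait_for_operation below) */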
        usec = chip_op_time * 8;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return -EIO;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        asm volatile (".rep 8; nop; .endr");
                        local_irq_enable();
                        spin_unlock(chip->mutex);
                        asm volatile (".rep 8; nop; .endr");
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        spin_lock(chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                spin_unlock(chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                spin_lock(chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
                done = xip_elapsed_since(start);
        } while (!map_word_andequal(map, status, OK, OK)
                 && done < usec);

        return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * an XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
        xip_wait_for_operation(map, chip, cmd_adr, usec)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

static int inval_cache_and_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
                unsigned int chip_op_time)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80);
        int chip_state = chip->state;
        unsigned int timeo, sleep_time;

        spin_unlock(chip->mutex);
        if (inval_len)
                INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
        spin_lock(chip->mutex);

        /* set our timeout to 8 times the expected delay */
        timeo = chip_op_time * 8;
        if (!timeo)
                timeo = 500000;
        sleep_time = chip_op_time / 2;

        for (;;) {
                status = map_read(map, cmd_adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                if (!timeo) {
                        map_write(map, CMD(0x70), cmd_adr);
                        chip->state = FL_STATUS;
                        return -ETIME;
                }

                /* OK Still waiting. Drop the lock, wait a while and retry. */
                spin_unlock(chip->mutex);
                if (sleep_time >= 1000000/HZ) {
                        /*
                         * Half of the normal delay still remaining
                         * can be performed with a sleeping delay instead
                         * of busy waiting.
                         */
                        msleep(sleep_time/1000);
                        timeo -= sleep_time;
                        sleep_time = 1000000/HZ;
                } else {
                        udelay(1);
                        cond_resched();
                        timeo--;
                }
                spin_lock(chip->mutex);

                while (chip->state != chip_state) {
                        /* Someone's suspended the operation: sleep */
                        DECLARE_WAITQUEUE(wait, current);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        spin_lock(chip->mutex);
                }
        }

        /* Done and happy. */
        chip->state = FL_STATUS;
        return 0;
}

#endif

#define WAIT_TIMEOUT(map, chip, adr, udelay) \
        INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);

static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);

        ret = get_chip(map, chip, cmd_addr, FL_POINT);

        if (!ret) {
                if (chip->state != FL_POINT && chip->state != FL_READY)
                        map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_POINT;
                chip->ref_point_counter++;
        }
        spin_unlock(chip->mutex);

        return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        if (!map->virt || (from + len > mtd->size))
                return -EINVAL;

        *mtdbuf = (void *)map->virt + from;
        *retlen = 0;

        /* Now lock the chip(s) to POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;

                ofs = 0;
                chipnum++;
        }
        return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;

        /* Now unlock the chip(s) POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum <<  cfi->chipshift);

        while (len) {
                unsigned long thislen;
                struct flchip *chip;

                if (chipnum >= cfi->numchips)
                        break;
                chip = &cfi->chips[chipnum];

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                spin_lock(chip->mutex);
                if (chip->state == FL_POINT) {
                        chip->ref_point_counter--;
                        if (chip->ref_point_counter == 0)
                                chip->state = FL_READY;
                } else
                        printk(KERN_ERR "%s: Warning: unpoint called on non-pointed region\n", map->name); /* Should this give an error? */

                put_chip(map, chip, chip->start);
                spin_unlock(chip->mutex);

                len -= thislen;
                ofs = 0;
                chipnum++;
        }
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, cmd_addr, FL_READY);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_READY;
        }

        map_copy_from(map, buf, adr, len);

        put_chip(map, chip, cmd_addr);

        spin_unlock(chip->mutex);
        return 0;
}
1258
1259 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1260 {
1261         struct map_info *map = mtd->priv;
1262         struct cfi_private *cfi = map->fldrv_priv;
1263         unsigned long ofs;
1264         int chipnum;
1265         int ret = 0;
1266
1267         /* ofs: offset within the first chip that the first read should start */
1268         chipnum = (from >> cfi->chipshift);
1269         ofs = from - (chipnum << cfi->chipshift);
1270
1271         *retlen = 0;
1272
1273         while (len) {
1274                 unsigned long thislen;
1275
1276                 if (chipnum >= cfi->numchips)
1277                         break;
1278
1279                 if ((len + ofs - 1) >> cfi->chipshift)
1280                         thislen = (1<<cfi->chipshift) - ofs;
1281                 else
1282                         thislen = len;
1283
1284                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1285                 if (ret)
1286                         break;
1287
1288                 *retlen += thislen;
1289                 len -= thislen;
1290                 buf += thislen;
1291
1292                 ofs = 0;
1293                 chipnum++;
1294         }
1295         return ret;
1296 }
1297
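/* Program a single bus-wide word.  Used both for regular data writes
   and for OTP register writes; only the command differs (0x40/0x41
   vs. 0xc0). */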
1298 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1299                                      unsigned long adr, map_word datum, int mode)
1300 {
1301         struct cfi_private *cfi = map->fldrv_priv;
1302         map_word status, write_cmd;
1303         int ret=0;
1304
1305         adr += chip->start;
1306
1307         switch (mode) {
1308         case FL_WRITING:
1309                 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1310                 break;
1311         case FL_OTP_WRITE:
1312                 write_cmd = CMD(0xc0);
1313                 break;
1314         default:
1315                 return -EINVAL;
1316         }
1317
1318         spin_lock(chip->mutex);
1319         ret = get_chip(map, chip, adr, mode);
1320         if (ret) {
1321                 spin_unlock(chip->mutex);
1322                 return ret;
1323         }
1324
1325         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1326         ENABLE_VPP(map);
1327         xip_disable(map, chip, adr);
1328         map_write(map, write_cmd, adr);
1329         map_write(map, datum, adr);
1330         chip->state = mode;
1331
1332         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1333                                    adr, map_bankwidth(map),
1334                                    chip->word_write_time);
1335         if (ret) {
1336                 xip_enable(map, chip, adr);
1337                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1338                 goto out;
1339         }
1340
1341         /* check for errors */
1342         status = map_read(map, adr);
1343         if (map_word_bitsset(map, status, CMD(0x1a))) {
1344                 unsigned long chipstatus = MERGESTATUS(status);
1345
1346                 /* reset status */
1347                 map_write(map, CMD(0x50), adr);
1348                 map_write(map, CMD(0x70), adr);
1349                 xip_enable(map, chip, adr);
1350
1351                 if (chipstatus & 0x02) {
1352                         ret = -EROFS;
1353                 } else if (chipstatus & 0x08) {
1354                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1355                         ret = -EIO;
1356                 } else {
1357                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1358                         ret = -EINVAL;
1359                 }
1360
1361                 goto out;
1362         }
1363
1364         xip_enable(map, chip, adr);
1365  out:   put_chip(map, chip, adr);
1366         spin_unlock(chip->mutex);
1367         return ret;
1368 }
1369
1370
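/* MTD write method for word-at-a-time programming: an unaligned head
   and tail are handled as 0xff-padded partial words, and the aligned
   middle is written one bus word at a time. */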
1371 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1372 {
1373         struct map_info *map = mtd->priv;
1374         struct cfi_private *cfi = map->fldrv_priv;
1375         int ret = 0;
1376         int chipnum;
1377         unsigned long ofs;
1378
1379         *retlen = 0;
1380         if (!len)
1381                 return 0;
1382
1383         chipnum = to >> cfi->chipshift;
1384         ofs = to - (chipnum << cfi->chipshift);
1385
1386         /* If it's not bus-aligned, do a padded partial-word write first */
1387         if (ofs & (map_bankwidth(map)-1)) {
1388                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1389                 int gap = ofs - bus_ofs;
1390                 int n;
1391                 map_word datum;
1392
1393                 n = min_t(int, len, map_bankwidth(map)-gap);
1394                 datum = map_word_ff(map);
1395                 datum = map_word_load_partial(map, datum, buf, gap, n);
1396
1397                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1398                                                bus_ofs, datum, FL_WRITING);
1399                 if (ret)
1400                         return ret;
1401
1402                 len -= n;
1403                 ofs += n;
1404                 buf += n;
1405                 (*retlen) += n;
1406
1407                 if (ofs >> cfi->chipshift) {
1408                         chipnum++;
1409                         ofs = 0;
1410                         if (chipnum == cfi->numchips)
1411                                 return 0;
1412                 }
1413         }
1414
1415         while (len >= map_bankwidth(map)) {
1416                 map_word datum = map_word_load(map, buf);
1417
1418                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1419                                        ofs, datum, FL_WRITING);
1420                 if (ret)
1421                         return ret;
1422
1423                 ofs += map_bankwidth(map);
1424                 buf += map_bankwidth(map);
1425                 (*retlen) += map_bankwidth(map);
1426                 len -= map_bankwidth(map);
1427
1428                 if (ofs >> cfi->chipshift) {
1429                         chipnum++;
1430                         ofs = 0;
1431                         if (chipnum == cfi->numchips)
1432                                 return 0;
1433                 }
1434         }
1435
1436         if (len & (map_bankwidth(map)-1)) {
1437                 map_word datum;
1438
1439                 datum = map_word_ff(map);
1440                 datum = map_word_load_partial(map, datum, buf, 0, len);
1441
1442                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1443                                        ofs, datum, FL_WRITING);
1444                 if (ret)
1445                         return ret;
1446
1447                 (*retlen) += len;
1448         }
1449
1450         return 0;
1451 }
1452
1453
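/* Program up to one write buffer's worth of data (wbufsize bytes,
   buffer-aligned) from the supplied kvec array, using the Write to
   Buffer command sequence. */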
1454 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1455                                     unsigned long adr, const struct kvec **pvec,
1456                                     unsigned long *pvec_seek, int len)
1457 {
1458         struct cfi_private *cfi = map->fldrv_priv;
1459         map_word status, write_cmd, datum;
1460         unsigned long cmd_adr, initial_adr;
1461         int ret, wbufsize, word_gap, words, initial_len = len;
1462         const struct kvec *vec;
1463         unsigned long vec_seek;
1464
1465         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1466         adr += chip->start;
             initial_adr = adr;
1467         cmd_adr = adr & ~(wbufsize-1);
1468
1469         /* Let's determine this according to the interleave only once */
1470         write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1471
1472         spin_lock(chip->mutex);
1473         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1474         if (ret) {
1475                 spin_unlock(chip->mutex);
1476                 return ret;
1477         }
1478
1479         XIP_INVAL_CACHED_RANGE(map, adr, len);
1480         ENABLE_VPP(map);
1481         xip_disable(map, chip, cmd_adr);
1482
1483         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1484            [...], the device will not accept any more Write to Buffer commands".
1485            So we must check here and reset those bits if they're set. Otherwise
1486            we're just pissing in the wind */
1487         if (chip->state != FL_STATUS) {
1488                 map_write(map, CMD(0x70), cmd_adr);
1489                 chip->state = FL_STATUS;
1490         }
1491         status = map_read(map, cmd_adr);
1492         if (map_word_bitsset(map, status, CMD(0x30))) {
1493                 xip_enable(map, chip, cmd_adr);
1494                 printk(KERN_WARNING "%s: SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", map->name, status.x[0]);
1495                 xip_disable(map, chip, cmd_adr);
1496                 map_write(map, CMD(0x50), cmd_adr);
1497                 map_write(map, CMD(0x70), cmd_adr);
1498         }
1499
1500         chip->state = FL_WRITING_TO_BUFFER;
1501         map_write(map, write_cmd, cmd_adr);
1502         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
1503         if (ret) {
1504                 /* Argh. Not ready for write to buffer */
1505                 map_word Xstatus = map_read(map, cmd_adr);
1506                 map_write(map, CMD(0x70), cmd_adr);
1507                 chip->state = FL_STATUS;
1508                 status = map_read(map, cmd_adr);
1509                 map_write(map, CMD(0x50), cmd_adr);
1510                 map_write(map, CMD(0x70), cmd_adr);
1511                 xip_enable(map, chip, cmd_adr);
1512                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1513                                 map->name, Xstatus.x[0], status.x[0]);
1514                 goto out;
1515         }
1516
1517         /* Figure out the number of words to write */
1518         word_gap = (-adr & (map_bankwidth(map)-1));
1519         words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
1520         if (!word_gap) {
1521                 words--;
1522         } else {
1523                 word_gap = map_bankwidth(map) - word_gap;
1524                 adr -= word_gap;
1525                 datum = map_word_ff(map);
1526         }
1527
1528         /* Write the word count (encoded as N-1, per the Write to Buffer protocol) */
1529         map_write(map, CMD(words), cmd_adr);
1530
1531         /* Write data */
1532         vec = *pvec;
1533         vec_seek = *pvec_seek;
1534         do {
1535                 int n = map_bankwidth(map) - word_gap;
1536                 if (n > vec->iov_len - vec_seek)
1537                         n = vec->iov_len - vec_seek;
1538                 if (n > len)
1539                         n = len;
1540
1541                 if (!word_gap && len < map_bankwidth(map))
1542                         datum = map_word_ff(map);
1543
1544                 datum = map_word_load_partial(map, datum,
1545                                               vec->iov_base + vec_seek,
1546                                               word_gap, n);
1547
1548                 len -= n;
1549                 word_gap += n;
1550                 if (!len || word_gap == map_bankwidth(map)) {
1551                         map_write(map, datum, adr);
1552                         adr += map_bankwidth(map);
1553                         word_gap = 0;
1554                 }
1555
1556                 vec_seek += n;
1557                 if (vec_seek == vec->iov_len) {
1558                         vec++;
1559                         vec_seek = 0;
1560                 }
1561         } while (len);
1562         *pvec = vec;
1563         *pvec_seek = vec_seek;
1564
1565         /* GO GO GO */
1566         map_write(map, CMD(0xd0), cmd_adr);
1567         chip->state = FL_WRITING;
1568
             /* adr and len were consumed by the fill loop above; use the saved
                values so the invalidation covers the range actually written. */
1569         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1570                                    initial_adr, initial_len,
1571                                    chip->buffer_write_time);
1572         if (ret) {
1573                 map_write(map, CMD(0x70), cmd_adr);
1574                 chip->state = FL_STATUS;
1575                 xip_enable(map, chip, cmd_adr);
1576                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1577                 goto out;
1578         }
1579
1580         /* check for errors */
1581         status = map_read(map, cmd_adr);
1582         if (map_word_bitsset(map, status, CMD(0x1a))) {
1583                 unsigned long chipstatus = MERGESTATUS(status);
1584
1585                 /* reset status */
1586                 map_write(map, CMD(0x50), cmd_adr);
1587                 map_write(map, CMD(0x70), cmd_adr);
1588                 xip_enable(map, chip, cmd_adr);
1589
1590                 if (chipstatus & 0x02) {
1591                         ret = -EROFS;
1592                 } else if (chipstatus & 0x08) {
1593                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1594                         ret = -EIO;
1595                 } else {
1596                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1597                         ret = -EINVAL;
1598                 }
1599
1600                 goto out;
1601         }
1602
1603         xip_enable(map, chip, cmd_adr);
1604  out:   put_chip(map, chip, cmd_adr);
1605         spin_unlock(chip->mutex);
1606         return ret;
1607 }
1608
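/* MTD writev method: feed the iovec to do_write_buffer() in chunks that
   never cross a write-buffer boundary. */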
1609 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1610                                 unsigned long count, loff_t to, size_t *retlen)
1611 {
1612         struct map_info *map = mtd->priv;
1613         struct cfi_private *cfi = map->fldrv_priv;
1614         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1615         int ret = 0;
1616         int chipnum;
1617         unsigned long ofs, vec_seek, i;
1618         size_t len = 0;
1619
1620         for (i = 0; i < count; i++)
1621                 len += vecs[i].iov_len;
1622
1623         *retlen = 0;
1624         if (!len)
1625                 return 0;
1626
1627         chipnum = to >> cfi->chipshift;
1628         ofs = to - (chipnum << cfi->chipshift);
1629         vec_seek = 0;
1630
1631         do {
1632                 /* We must not cross write block boundaries */
1633                 int size = wbufsize - (ofs & (wbufsize-1));
1634
1635                 if (size > len)
1636                         size = len;
1637                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1638                                       ofs, &vecs, &vec_seek, size);
1639                 if (ret)
1640                         return ret;
1641
1642                 ofs += size;
1643                 (*retlen) += size;
1644                 len -= size;
1645
1646                 if (ofs >> cfi->chipshift) {
1647                         chipnum++;
1648                         ofs = 0;
1649                         if (chipnum == cfi->numchips)
1650                                 return 0;
1651                 }
1652
1653                 /* Be nice and reschedule with the chip in a usable state for other
1654                    processes. */
1655                 cond_resched();
1656
1657         } while (len);
1658
1659         return 0;
1660 }
1661
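/* MTD write method when buffer writes are available: wrap the single
   buffer in a kvec and reuse the writev path. */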
1662 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1663                                        size_t len, size_t *retlen, const u_char *buf)
1664 {
1665         struct kvec vec;
1666
1667         vec.iov_base = (void *) buf;
1668         vec.iov_len = len;
1669
1670         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1671 }
1672
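/* Erase one block: issue Block Erase (0x20/0xD0), wait for completion,
   then decode the status register, retrying a few times on a transient
   erase failure (SR.5). */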
1673 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1674                                       unsigned long adr, int len, void *thunk)
1675 {
1676         struct cfi_private *cfi = map->fldrv_priv;
1677         map_word status;
1678         int retries = 3;
1679         int ret;
1680
1681         adr += chip->start;
1682
1683  retry:
1684         spin_lock(chip->mutex);
1685         ret = get_chip(map, chip, adr, FL_ERASING);
1686         if (ret) {
1687                 spin_unlock(chip->mutex);
1688                 return ret;
1689         }
1690
1691         XIP_INVAL_CACHED_RANGE(map, adr, len);
1692         ENABLE_VPP(map);
1693         xip_disable(map, chip, adr);
1694
1695         /* Clear the status register first */
1696         map_write(map, CMD(0x50), adr);
1697
1698         /* Now erase */
1699         map_write(map, CMD(0x20), adr);
1700         map_write(map, CMD(0xD0), adr);
1701         chip->state = FL_ERASING;
1702         chip->erase_suspended = 0;
1703
1704         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1705                                    adr, len,
1706                                    chip->erase_time);
1707         if (ret) {
1708                 map_write(map, CMD(0x70), adr);
1709                 chip->state = FL_STATUS;
1710                 xip_enable(map, chip, adr);
1711                 printk(KERN_ERR "%s: block erase error (status timeout)\n", map->name);
1712                 goto out;
1713         }
1714
1715         /* We've broken this before. It doesn't hurt to be safe */
1716         map_write(map, CMD(0x70), adr);
1717         chip->state = FL_STATUS;
1718         status = map_read(map, adr);
1719
1720         /* check for errors */
1721         if (map_word_bitsset(map, status, CMD(0x3a))) {
1722                 unsigned long chipstatus = MERGESTATUS(status);
1723
1724                 /* Reset the error bits */
1725                 map_write(map, CMD(0x50), adr);
1726                 map_write(map, CMD(0x70), adr);
1727                 xip_enable(map, chip, adr);
1728
1729                 if ((chipstatus & 0x30) == 0x30) {
1730                         printk(KERN_ERR "%s: block erase error (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1731                         ret = -EINVAL;
1732                 } else if (chipstatus & 0x02) {
1733                         /* Protection bit set */
1734                         ret = -EROFS;
1735                 } else if (chipstatus & 0x08) {
1736                         /* Voltage */
1737                         printk(KERN_ERR "%s: block erase error (bad VPP)\n", map->name);
1738                         ret = -EIO;
1739                 } else if (chipstatus & 0x20 && retries--) {
1740                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1741                         put_chip(map, chip, adr);
1742                         spin_unlock(chip->mutex);
1743                         goto retry;
1744                 } else {
1745                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1746                         ret = -EIO;
1747                 }
1748
1749                 goto out;
1750         }
1751
1752         xip_enable(map, chip, adr);
1753  out:   put_chip(map, chip, adr);
1754         spin_unlock(chip->mutex);
1755         return ret;
1756 }
1757
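/* MTD erase method: walk the (possibly variable-size) erase regions via
   cfi_varsize_frob() and erase each block in turn. */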
1758 int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1759 {
1760         unsigned long ofs, len;
1761         int ret;
1762
1763         ofs = instr->addr;
1764         len = instr->len;
1765
1766         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1767         if (ret)
1768                 return ret;
1769
1770         instr->state = MTD_ERASE_DONE;
1771         mtd_erase_callback(instr);
1772
1773         return 0;
1774 }
1775
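/* MTD sync method: wait for every chip to go idle and park it in
   FL_SYNCING so nothing new can start, then release them all again. */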
1776 static void cfi_intelext_sync (struct mtd_info *mtd)
1777 {
1778         struct map_info *map = mtd->priv;
1779         struct cfi_private *cfi = map->fldrv_priv;
1780         int i;
1781         struct flchip *chip;
1782         int ret = 0;
1783
1784         for (i=0; !ret && i<cfi->numchips; i++) {
1785                 chip = &cfi->chips[i];
1786
1787                 spin_lock(chip->mutex);
1788                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1789
1790                 if (!ret) {
1791                         chip->oldstate = chip->state;
1792                         chip->state = FL_SYNCING;
1793                         /* No need to wake_up() on this state change -
1794                          * as the whole point is that nobody can do anything
1795                          * with the chip now anyway.
1796                          */
1797                 }
1798                 spin_unlock(chip->mutex);
1799         }
1800
1801         /* Unlock the chips again */
1802
1803         for (i--; i >= 0; i--) {
1804                 chip = &cfi->chips[i];
1805
1806                 spin_lock(chip->mutex);
1807
1808                 if (chip->state == FL_SYNCING) {
1809                         chip->state = chip->oldstate;
1810                         chip->oldstate = FL_READY;
1811                         wake_up(&chip->wq);
1812                 }
1813                 spin_unlock(chip->mutex);
1814         }
1815 }
1816
1817 #ifdef DEBUG_LOCK_BITS
1818 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1819                                                 struct flchip *chip,
1820                                                 unsigned long adr,
1821                                                 int len, void *thunk)
1822 {
1823         struct cfi_private *cfi = map->fldrv_priv;
1824         int status, ofs_factor = cfi->interleave * cfi->device_type;
1825
1826         adr += chip->start;
1827         xip_disable(map, chip, adr+(2*ofs_factor));
1828         map_write(map, CMD(0x90), adr+(2*ofs_factor));
1829         chip->state = FL_JEDEC_QUERY;
1830         status = cfi_read_query(map, adr+(2*ofs_factor));
1831         xip_enable(map, chip, 0);
1832         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1833                adr, status);
1834         return 0;
1835 }
1836 #endif
1837
1838 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
1839 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
1840
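/* Lock or unlock one block, selected by the thunk argument.  Chips
   without Instant Individual Block Locking get a short settling delay
   before the status poll. */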
1841 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1842                                        unsigned long adr, int len, void *thunk)
1843 {
1844         struct cfi_private *cfi = map->fldrv_priv;
1845         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1846         int udelay;
1847         int ret;
1848
1849         adr += chip->start;
1850
1851         spin_lock(chip->mutex);
1852         ret = get_chip(map, chip, adr, FL_LOCKING);
1853         if (ret) {
1854                 spin_unlock(chip->mutex);
1855                 return ret;
1856         }
1857
1858         ENABLE_VPP(map);
1859         xip_disable(map, chip, adr);
1860
1861         map_write(map, CMD(0x60), adr);
1862         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1863                 map_write(map, CMD(0x01), adr);
1864                 chip->state = FL_LOCKING;
1865         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1866                 map_write(map, CMD(0xD0), adr);
1867                 chip->state = FL_UNLOCKING;
1868         } else
1869                 BUG();
1870
1871         /*
1872          * If Instant Individual Block Locking supported then no need
1873          * to delay.
1874          */
1875         udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
1876
1877         ret = WAIT_TIMEOUT(map, chip, adr, udelay);
1878         if (ret) {
1879                 map_write(map, CMD(0x70), adr);
1880                 chip->state = FL_STATUS;
1881                 xip_enable(map, chip, adr);
1882                 printk(KERN_ERR "%s: block lock/unlock error (status timeout)\n", map->name);
1883                 goto out;
1884         }
1885
1886         xip_enable(map, chip, adr);
1887 out:    put_chip(map, chip, adr);
1888         spin_unlock(chip->mutex);
1889         return ret;
1890 }
1891
1892 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1893 {
1894         int ret;
1895
1896 #ifdef DEBUG_LOCK_BITS
1897         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
1898                __FUNCTION__, (unsigned long long)ofs, len);
1899         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1900                 ofs, len, 0);
1901 #endif
1902
1903         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1904                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1905
1906 #ifdef DEBUG_LOCK_BITS
1907         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1908                __FUNCTION__, ret);
1909         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1910                 ofs, len, 0);
1911 #endif
1912
1913         return ret;
1914 }
1915
1916 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1917 {
1918         int ret;
1919
1920 #ifdef DEBUG_LOCK_BITS
1921         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
1922                __FUNCTION__, (unsigned long long)ofs, len);
1923         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1924                 ofs, len, 0);
1925 #endif
1926
1927         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1928                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1929
1930 #ifdef DEBUG_LOCK_BITS
1931         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1932                __FUNCTION__, ret);
1933         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1934                 ofs, len, 0);
1935 #endif
1936
1937         return ret;
1938 }
1939
1940 #ifdef CONFIG_MTD_OTP
1941
1942 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
1943                         u_long data_offset, u_char *buf, u_int size,
1944                         u_long prot_offset, u_int groupno, u_int groupsize);
1945
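/* Read OTP register contents: switch the chip into ID/query mode (0x90)
   and copy straight out of the protection register area, invalidating
   the cache around the access. */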
1946 static int __xipram
1947 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
1948             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
1949 {
1950         struct cfi_private *cfi = map->fldrv_priv;
1951         int ret;
1952
1953         spin_lock(chip->mutex);
1954         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
1955         if (ret) {
1956                 spin_unlock(chip->mutex);
1957                 return ret;
1958         }
1959
1960         /* let's ensure we're not reading back cached data from array mode */
1961         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
1962
1963         xip_disable(map, chip, chip->start);
1964         if (chip->state != FL_JEDEC_QUERY) {
1965                 map_write(map, CMD(0x90), chip->start);
1966                 chip->state = FL_JEDEC_QUERY;
1967         }
1968         map_copy_from(map, buf, chip->start + offset, size);
1969         xip_enable(map, chip, chip->start);
1970
1971         /* then ensure we don't keep OTP data in the cache */
1972         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
1973
1974         put_chip(map, chip, chip->start);
1975         spin_unlock(chip->mutex);
1976         return 0;
1977 }
1978
1979 static int
1980 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
1981              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
1982 {
1983         int ret;
1984
1985         while (size) {
1986                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
1987                 int gap = offset - bus_ofs;
1988                 int n = min_t(int, size, map_bankwidth(map)-gap);
1989                 map_word datum = map_word_ff(map);
1990
1991                 datum = map_word_load_partial(map, datum, buf, gap, n);
1992                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
1993                 if (ret)
1994                         return ret;
1995
1996                 offset += n;
1997                 buf += n;
1998                 size -= n;
1999         }
2000
2001         return 0;
2002 }
2003
2004 static int
2005 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2006             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2007 {
2008         struct cfi_private *cfi = map->fldrv_priv;
2009         map_word datum;
2010
2011         /* make sure area matches group boundaries */
2012         if (size != grpsz)
2013                 return -EXDEV;
2014
2015         datum = map_word_ff(map);
2016         datum = map_word_clr(map, datum, CMD(1 << grpno));
2017         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2018 }
2019
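/* Walk the OTP protection registers, applying `action' to each group
   that intersects the requested range.  A NULL action means "fill buf
   with otp_info records" instead. */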
2020 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2021                                  size_t *retlen, u_char *buf,
2022                                  otp_op_t action, int user_regs)
2023 {
2024         struct map_info *map = mtd->priv;
2025         struct cfi_private *cfi = map->fldrv_priv;
2026         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2027         struct flchip *chip;
2028         struct cfi_intelext_otpinfo *otp;
2029         u_long devsize, reg_prot_offset, data_offset;
2030         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2031         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2032         int ret;
2033
2034         *retlen = 0;
2035
2036         /* Check that we actually have some OTP registers */
2037         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2038                 return -ENODATA;
2039
2040         /* we need real chips here, not virtual ones */
2041         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2042         chip_step = devsize >> cfi->chipshift;
2043         chip_num = 0;
2044
2045         /* Some chips have OTP located in the _top_ partition only.
2046            For example: Intel 28F256L18T (T means top-parameter device) */
2047         if (cfi->mfr == MANUFACTURER_INTEL) {
2048                 switch (cfi->id) {
2049                 case 0x880b:
2050                 case 0x880c:
2051                 case 0x880d:
2052                         chip_num = chip_step - 1;
2053                 }
2054         }
2055
2056         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2057                 chip = &cfi->chips[chip_num];
2058                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2059
2060                 /* first OTP region */
2061                 field = 0;
2062                 reg_prot_offset = extp->ProtRegAddr;
2063                 reg_fact_groups = 1;
2064                 reg_fact_size = 1 << extp->FactProtRegSize;
2065                 reg_user_groups = 1;
2066                 reg_user_size = 1 << extp->UserProtRegSize;
2067
2068                 while (len > 0) {
2069                         /* flash geometry fixup */
2070                         data_offset = reg_prot_offset + 1;
2071                         data_offset *= cfi->interleave * cfi->device_type;
2072                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2073                         reg_fact_size *= cfi->interleave;
2074                         reg_user_size *= cfi->interleave;
2075
2076                         if (user_regs) {
2077                                 groups = reg_user_groups;
2078                                 groupsize = reg_user_size;
2079                                 /* skip over factory reg area */
2080                                 groupno = reg_fact_groups;
2081                                 data_offset += reg_fact_groups * reg_fact_size;
2082                         } else {
2083                                 groups = reg_fact_groups;
2084                                 groupsize = reg_fact_size;
2085                                 groupno = 0;
2086                         }
2087
2088                         while (len > 0 && groups > 0) {
2089                                 if (!action) {
2090                                         /*
2091                                          * Special case: if action is NULL
2092                                          * we fill buf with otp_info records.
2093                                          */
2094                                         struct otp_info *otpinfo;
2095                                         map_word lockword;
2096                                         if (len < sizeof(struct otp_info))
2097                                                 return -ENOSPC;
2098                                         len -= sizeof(struct otp_info);
2099                                         ret = do_otp_read(map, chip,
2100                                                           reg_prot_offset,
2101                                                           (u_char *)&lockword,
2102                                                           map_bankwidth(map),
2103                                                           0, 0,  0);
2104                                         if (ret)
2105                                                 return ret;
2106                                         otpinfo = (struct otp_info *)buf;
2107                                         otpinfo->start = from;
2108                                         otpinfo->length = groupsize;
2109                                         otpinfo->locked =
2110                                            !map_word_bitsset(map, lockword,
2111                                                              CMD(1 << groupno));
2112                                         from += groupsize;
2113                                         buf += sizeof(*otpinfo);
2114                                         *retlen += sizeof(*otpinfo);
2115                                 } else if (from >= groupsize) {
2116                                         from -= groupsize;
2117                                         data_offset += groupsize;
2118                                 } else {
2119                                         int size = groupsize;
2120                                         data_offset += from;
2121                                         size -= from;
2122                                         from = 0;
2123                                         if (size > len)
2124                                                 size = len;
2125                                         ret = action(map, chip, data_offset,
2126                                                      buf, size, reg_prot_offset,
2127                                                      groupno, groupsize);
2128                                         if (ret < 0)
2129                                                 return ret;
2130                                         buf += size;
2131                                         len -= size;
2132                                         *retlen += size;
2133                                         data_offset += size;
2134                                 }
2135                                 groupno++;
2136                                 groups--;
2137                         }
2138
2139                         /* next OTP region */
2140                         if (++field == extp->NumProtectionFields)
2141                                 break;
2142                         reg_prot_offset = otp->ProtRegAddr;
2143                         reg_fact_groups = otp->FactGroups;
2144                         reg_fact_size = 1 << otp->FactProtRegSize;
2145                         reg_user_groups = otp->UserGroups;
2146                         reg_user_size = 1 << otp->UserProtRegSize;
2147                         otp++;
2148                 }
2149         }
2150
2151         return 0;
2152 }
2153
2154 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2155                                            size_t len, size_t *retlen,
2156                                             u_char *buf)
2157 {
2158         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2159                                      buf, do_otp_read, 0);
2160 }
2161
2162 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2163                                            size_t len, size_t *retlen,
2164                                             u_char *buf)
2165 {
2166         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2167                                      buf, do_otp_read, 1);
2168 }
2169
2170 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2171                                             size_t len, size_t *retlen,
2172                                              u_char *buf)
2173 {
2174         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2175                                      buf, do_otp_write, 1);
2176 }
2177
2178 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2179                                            loff_t from, size_t len)
2180 {
2181         size_t retlen;
2182         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2183                                      NULL, do_otp_lock, 1);
2184 }
2185
2186 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2187                                            struct otp_info *buf, size_t len)
2188 {
2189         size_t retlen;
2190         int ret;
2191
2192         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2193         return ret ? : retlen;
2194 }
2195
2196 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2197                                            struct otp_info *buf, size_t len)
2198 {
2199         size_t retlen;
2200         int ret;
2201
2202         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2203         return ret ? : retlen;
2204 }
2205
2206 #endif
2207
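/* Power-management suspend: park every idle chip in FL_PM_SUSPENDED; if
   any chip is busy, undo the ones already suspended and report -EAGAIN. */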
2208 static int cfi_intelext_suspend(struct mtd_info *mtd)
2209 {
2210         struct map_info *map = mtd->priv;
2211         struct cfi_private *cfi = map->fldrv_priv;
2212         int i;
2213         struct flchip *chip;
2214         int ret = 0;
2215
2216         for (i=0; !ret && i<cfi->numchips; i++) {
2217                 chip = &cfi->chips[i];
2218
2219                 spin_lock(chip->mutex);
2220
2221                 switch (chip->state) {
2222                 case FL_READY:
2223                 case FL_STATUS:
2224                 case FL_CFI_QUERY:
2225                 case FL_JEDEC_QUERY:
2226                         if (chip->oldstate == FL_READY) {
2227                                 chip->oldstate = chip->state;
2228                                 chip->state = FL_PM_SUSPENDED;
2229                                 /* No need to wake_up() on this state change -
2230                                  * as the whole point is that nobody can do anything
2231                                  * with the chip now anyway.
2232                                  */
2233                         } else {
2234                                 /* There seems to be an operation pending. We must wait for it. */
2235                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2236                                 ret = -EAGAIN;
2237                         }
2238                         break;
2239                 default:
2240                         /* Should we actually wait? Once upon a time these routines weren't
2241                            allowed to. Or should we return -EAGAIN, because the upper layers
2242                            ought to have already shut down anything which was using the device
2243                            anyway? The latter for now. */
2244                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2245                         ret = -EAGAIN;
2246                 case FL_PM_SUSPENDED:
2247                         break;
2248                 }
2249                 spin_unlock(chip->mutex);
2250         }
2251
2252         /* Unlock the chips again */
2253
2254         if (ret) {
2255                 for (i--; i >=0; i--) {
2256                         chip = &cfi->chips[i];
2257
2258                         spin_lock(chip->mutex);
2259
2260                         if (chip->state == FL_PM_SUSPENDED) {
2261                                 /* No need to force it into a known state here,
2262                                    because we're returning failure, and it didn't
2263                                    get power cycled */
2264                                 chip->state = chip->oldstate;
2265                                 chip->oldstate = FL_READY;
2266                                 wake_up(&chip->wq);
2267                         }
2268                         spin_unlock(chip->mutex);
2269                 }
2270         }
2271
2272         return ret;
2273 }
2274
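/* Power-management resume: the chips may have been power cycled, so put
   any suspended chip back into a known array-mode state. */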
2275 static void cfi_intelext_resume(struct mtd_info *mtd)
2276 {
2277         struct map_info *map = mtd->priv;
2278         struct cfi_private *cfi = map->fldrv_priv;
2279         int i;
2280         struct flchip *chip;
2281
2282         for (i=0; i<cfi->numchips; i++) {
2283
2284                 chip = &cfi->chips[i];
2285
2286                 spin_lock(chip->mutex);
2287
2288                 /* Go to known state. Chip may have been power cycled */
2289                 if (chip->state == FL_PM_SUSPENDED) {
2290                         map_write(map, CMD(0xFF), chip->start);
2291                         chip->oldstate = chip->state = FL_READY;
2292                         wake_up(&chip->wq);
2293                 }
2294
2295                 spin_unlock(chip->mutex);
2296         }
2297 }
2298
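/* Force every chip back into array (read) mode, used on reboot and on
   teardown. */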
2299 static int cfi_intelext_reset(struct mtd_info *mtd)
2300 {
2301         struct map_info *map = mtd->priv;
2302         struct cfi_private *cfi = map->fldrv_priv;
2303         int i, ret;
2304
2305         for (i=0; i < cfi->numchips; i++) {
2306                 struct flchip *chip = &cfi->chips[i];
2307
2308                 /* force the completion of any ongoing operation
2309                    and switch to array mode so any bootloader in
2310                    flash is accessible for soft reboot. */
2311                 spin_lock(chip->mutex);
2312                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2313                 if (!ret) {
2314                         map_write(map, CMD(0xff), chip->start);
2315                         chip->state = FL_READY;
2316                 }
2317                 spin_unlock(chip->mutex);
2318         }
2319
2320         return 0;
2321 }
2322
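/* Reboot notifier: flip the flash back to array mode so code in flash
   (e.g. a bootloader) is readable across a soft reboot. */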
2323 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2324                                void *v)
2325 {
2326         struct mtd_info *mtd;
2327
2328         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2329         cfi_intelext_reset(mtd);
2330         return NOTIFY_DONE;
2331 }
2332
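/* Teardown: reset the chips, drop the reboot notifier and free all
   driver-private allocations. */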
2333 static void cfi_intelext_destroy(struct mtd_info *mtd)
2334 {
2335         struct map_info *map = mtd->priv;
2336         struct cfi_private *cfi = map->fldrv_priv;
2337         cfi_intelext_reset(mtd);
2338         unregister_reboot_notifier(&mtd->reboot_notifier);
2339         kfree(cfi->cmdset_priv);
2340         kfree(cfi->cfiq);
2341         kfree(cfi->chips[0].priv);
2342         kfree(cfi);
2343         kfree(mtd->eraseregions);
2344 }
2345
2346 MODULE_LICENSE("GPL");
2347 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2348 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2349 MODULE_ALIAS("cfi_cmdset_0003");
2350 MODULE_ALIAS("cfi_cmdset_0200");