/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@cam.org>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL      0x0089
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define MANUFACTURER_ST         0x0020
#define M50LPW080       0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
                        size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<4; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
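        /* Force the block count of the second erase region to 0x3e + 1 = 63
           blocks; the region size field in the upper 16 bits is untouched. */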
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n" );
                mtd->write = cfi_intelext_write_buffers;
                mtd->writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_use_powerup_lock(struct mtd_info *mtd, void *param)
{
        printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
        mtd->flags |= MTD_STUPID_LOCK;
}

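/*
 * Fixup tables: each entry matches on a (manufacturer, device) id pair,
 * with CFI_MFR_ANY/CFI_ID_ANY acting as wildcards, and cfi_fixup() runs
 * the hook of every matching entry until it reaches the terminating
 * { 0, 0, NULL, NULL } sentinel.
 */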
static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { MANUFACTURER_INTEL, 0x891c,         fixup_use_powerup_lock, NULL, },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear to be
         * common.  It is likely the device IDs are as well.  This
         * table picks all cases where we know that is the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};

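/*
 * Read and parse the Intel/Sharp extended query table.  Its tail (OTP
 * info, burst read info, partition region info) is variable-length, so
 * the total size is only known once parsed: we start with the fixed
 * part and re-read with a progressively larger buffer (the
 * need_more/again loop below) until everything fits, giving up beyond
 * 4KiB.
 */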
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_pri_intelext *extp;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
                unsigned int extra_size = 0;
                int nb_parts, i;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size-1];

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __FUNCTION__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

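/*
 * Primary entry point, called by the CFI probe layer once a chip using
 * command set 0x0001 (or the 0x0003/0x0200 aliases declared below) has
 * been identified.  It fills in the default mtd operations, reads the
 * extended query table when in CFI mode, and applies the fixup tables
 * and per-chip timeout defaults before handing off to
 * cfi_intelext_setup().
 */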
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if(extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

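        /*
         * CFI timeout fields are log2 values: the typical word write time
         * is 1<<WordWriteTimeoutTyp us and the typical block erase time is
         * 1<<BlockEraseTimeoutTyp ms, hence the 1000<< below to get us.
         */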
        for (i=0; i< cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
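                /*
                 * CFI erase region info word: bits 31..16 hold the block
                 * size in 256-byte units, bits 15..0 the block count
                 * minus one.  E.g. 0x0100003e decodes to 0x3f blocks of
                 * 0x10000 bytes each (times the interleave).
                 */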
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i,mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if(mtd) {
                kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
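        /*
         * Example: one 32MiB chip (chipshift = 25) advertising 4 identical
         * hardware partitions is recreated below as 4 virtual chips of
         * 8MiB each (partshift = 25 - __ffs(4) = 23).
         */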
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __FUNCTION__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

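/*
 * Wait until the chip (or the hardware partition backing it) is free and
 * claim it for the given operation mode; SR bit 7 (0x80) is the ready bit
 * polled throughout.  Returns 0, with chip->mutex still held, once the
 * caller may proceed.  An erase in progress is suspended first when the
 * chip advertises that capability.  put_chip() releases the claim.
 */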
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        unsigned long timeo;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE || mode == FL_SHUTDOWN)) {
                /*
                 * OK. We have possibility for contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        int ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        ret = get_chip(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        timeo = jiffies + HZ;
                        spin_lock(&shared->lock);
                        spin_unlock(contender->mutex);
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                spin_unlock(&shared->lock);
        }

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
                                       map->name, status.x[0]);
                                return -EIO;
                        }
                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }

        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so no one can get the chip anymore */
                return -EIO;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* otherwise fall through and sleep until woken */
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                goto resettime;
        }
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        spin_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                spin_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function is polling for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, unsigned int chip_op_time)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
        usec = chip_op_time * 8;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return -EIO;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        asm volatile (".rep 8; nop; .endr");
                        local_irq_enable();
                        spin_unlock(chip->mutex);
                        asm volatile (".rep 8; nop; .endr");
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        spin_lock(chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                spin_unlock(chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                spin_lock(chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
                done = xip_elapsed_since(start);
        } while (!map_word_andequal(map, status, OK, OK)
                 && done < usec);

        return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
        xip_wait_for_operation(map, chip, cmd_adr, usec)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

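/*
 * Poll the status register until the operation completes.  The total
 * timeout is 8x the typical operation time (500ms when no typical time
 * is given).  We first sleep for half the typical time, then in
 * timer-tick sized chunks, and fall back to 1us busy waits once less
 * than a tick remains.  If someone suspends the operation meanwhile
 * (chip->state changes under us), wait on the chip's queue until it
 * is resumed.
 */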
static int inval_cache_and_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
                unsigned int chip_op_time)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80);
        int chip_state = chip->state;
        unsigned int timeo, sleep_time;

        spin_unlock(chip->mutex);
        if (inval_len)
                INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
        spin_lock(chip->mutex);

        /* set our timeout to 8 times the expected delay */
        timeo = chip_op_time * 8;
        if (!timeo)
                timeo = 500000;
        sleep_time = chip_op_time / 2;

        for (;;) {
                status = map_read(map, cmd_adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                if (!timeo) {
                        map_write(map, CMD(0x70), cmd_adr);
                        chip->state = FL_STATUS;
                        return -ETIME;
                }

                /* OK Still waiting. Drop the lock, wait a while and retry. */
                spin_unlock(chip->mutex);
                if (sleep_time >= 1000000/HZ) {
                        /*
                         * Half of the normal delay still remaining
                         * can be performed with a sleeping delay instead
                         * of busy waiting.
                         */
                        msleep(sleep_time/1000);
                        timeo -= sleep_time;
                        sleep_time = 1000000/HZ;
                } else {
                        udelay(1);
                        cond_resched();
                        timeo--;
                }
                spin_lock(chip->mutex);

                while (chip->state != chip_state) {
                        /* Someone's suspended the operation: sleep */
                        DECLARE_WAITQUEUE(wait, current);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        spin_lock(chip->mutex);
                }
        }

        /* Done and happy. */
        chip->state = FL_STATUS;
        return 0;
}

#endif

#define WAIT_TIMEOUT(map, chip, adr, udelay) \
        INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);


static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);

        ret = get_chip(map, chip, cmd_addr, FL_POINT);

        if (!ret) {
                if (chip->state != FL_POINT && chip->state != FL_READY)
                        map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_POINT;
                chip->ref_point_counter++;
        }
        spin_unlock(chip->mutex);

        return ret;
}

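/*
 * point() hands the caller a direct pointer into the memory-mapped flash
 * (only possible on linear maps, see fixup_use_point above).  Each chip
 * touched is put in read-array mode and pinned in FL_POINT state via
 * ref_point_counter until a matching unpoint() releases it.
 */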
static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs, last_end = 0;
        int chipnum;
        int ret = 0;

        if (!map->virt || (from + len > mtd->size))
                return -EINVAL;

        /* Now lock the chip(s) to POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        *mtdbuf = (void *)map->virt + cfi->chips[chipnum].start + ofs;
        *retlen = 0;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                /* We cannot point across chips that are virtually disjoint */
                if (!last_end)
                        last_end = cfi->chips[chipnum].start;
                else if (cfi->chips[chipnum].start != last_end)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;

                ofs = 0;
                last_end += 1 << cfi->chipshift;
                chipnum++;
        }
        return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;

        /* Now unlock the chip(s) POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum <<  cfi->chipshift);

        while (len) {
                unsigned long thislen;
                struct flchip *chip;

                chip = &cfi->chips[chipnum];
                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                spin_lock(chip->mutex);
                if (chip->state == FL_POINT) {
                        chip->ref_point_counter--;
                        if(chip->ref_point_counter == 0)
                                chip->state = FL_READY;
                } else
1251                         printk(KERN_ERR "%s: Warning: unpoint called on non-pointed region\n", map->name); /* Should this give an error? */
1252
1253                 put_chip(map, chip, chip->start);
1254                 spin_unlock(chip->mutex);
1255
1256                 len -= thislen;
1257                 ofs = 0;
1258                 chipnum++;
1259         }
1260 }
1261
1262 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1263 {
1264         unsigned long cmd_addr;
1265         struct cfi_private *cfi = map->fldrv_priv;
1266         int ret;
1267
1268         adr += chip->start;
1269
1270         /* Ensure cmd read/writes are aligned. */
1271         cmd_addr = adr & ~(map_bankwidth(map)-1);
1272
1273         spin_lock(chip->mutex);
1274         ret = get_chip(map, chip, cmd_addr, FL_READY);
1275         if (ret) {
1276                 spin_unlock(chip->mutex);
1277                 return ret;
1278         }
1279
1280         if (chip->state != FL_POINT && chip->state != FL_READY) {
1281                 map_write(map, CMD(0xff), cmd_addr);
1282
1283                 chip->state = FL_READY;
1284         }
1285
1286         map_copy_from(map, buf, adr, len);
1287
1288         put_chip(map, chip, cmd_addr);
1289
1290         spin_unlock(chip->mutex);
1291         return 0;
1292 }
1293
1294 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1295 {
1296         struct map_info *map = mtd->priv;
1297         struct cfi_private *cfi = map->fldrv_priv;
1298         unsigned long ofs;
1299         int chipnum;
1300         int ret = 0;
1301
1302         /* ofs: offset within the first chip that the first read should start */
1303         chipnum = (from >> cfi->chipshift);
1304         ofs = from - (chipnum << cfi->chipshift);
1305
1306         *retlen = 0;
1307
1308         while (len) {
1309                 unsigned long thislen;
1310
1311                 if (chipnum >= cfi->numchips)
1312                         break;
1313
1314                 if ((len + ofs - 1) >> cfi->chipshift)
1315                         thislen = (1<<cfi->chipshift) - ofs;
1316                 else
1317                         thislen = len;
1318
1319                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1320                 if (ret)
1321                         break;
1322
1323                 *retlen += thislen;
1324                 len -= thislen;
1325                 buf += thislen;
1326
1327                 ofs = 0;
1328                 chipnum++;
1329         }
1330         return ret;
1331 }
1332
1333 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1334                                      unsigned long adr, map_word datum, int mode)
1335 {
1336         struct cfi_private *cfi = map->fldrv_priv;
1337         map_word status, write_cmd;
1338         int ret=0;
1339
1340         adr += chip->start;
1341
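             /* Select the program opcode: 0x40 is the classic word-program
                setup (command set 0x0200 parts use 0x41 instead), and 0xc0
                programs the protection/OTP registers */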
1342         switch (mode) {
1343         case FL_WRITING:
1344                 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1345                 break;
1346         case FL_OTP_WRITE:
1347                 write_cmd = CMD(0xc0);
1348                 break;
1349         default:
1350                 return -EINVAL;
1351         }
1352
1353         spin_lock(chip->mutex);
1354         ret = get_chip(map, chip, adr, mode);
1355         if (ret) {
1356                 spin_unlock(chip->mutex);
1357                 return ret;
1358         }
1359
1360         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1361         ENABLE_VPP(map);
1362         xip_disable(map, chip, adr);
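             /* two-cycle program: setup opcode, then the datum itself;
                completion is then polled via the status register */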
1363         map_write(map, write_cmd, adr);
1364         map_write(map, datum, adr);
1365         chip->state = mode;
1366
1367         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1368                                    adr, map_bankwidth(map),
1369                                    chip->word_write_time);
1370         if (ret) {
1371                 xip_enable(map, chip, adr);
1372                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1373                 goto out;
1374         }
1375
1376         /* check for errors */
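             /* 0x1a masks SR.4 (program error), SR.3 (VPP low) and
                SR.1 (block locked) */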
1377         status = map_read(map, adr);
1378         if (map_word_bitsset(map, status, CMD(0x1a))) {
1379                 unsigned long chipstatus = MERGESTATUS(status);
1380
1381                 /* reset status */
1382                 map_write(map, CMD(0x50), adr);
1383                 map_write(map, CMD(0x70), adr);
1384                 xip_enable(map, chip, adr);
1385
1386                 if (chipstatus & 0x02) {
1387                         ret = -EROFS;
1388                 } else if (chipstatus & 0x08) {
1389                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1390                         ret = -EIO;
1391                 } else {
1392                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1393                         ret = -EINVAL;
1394                 }
1395
1396                 goto out;
1397         }
1398
1399         xip_enable(map, chip, adr);
1400  out:   put_chip(map, chip, adr);
1401         spin_unlock(chip->mutex);
1402         return ret;
1403 }
1404
1405
1406 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1407 {
1408         struct map_info *map = mtd->priv;
1409         struct cfi_private *cfi = map->fldrv_priv;
1410         int ret = 0;
1411         int chipnum;
1412         unsigned long ofs;
1413
1414         *retlen = 0;
1415         if (!len)
1416                 return 0;
1417
1418         chipnum = to >> cfi->chipshift;
1419         ofs = to - (chipnum << cfi->chipshift);
1420
1421         /* If it's not bus-aligned, do the first byte write */
1422         if (ofs & (map_bankwidth(map)-1)) {
1423                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1424                 int gap = ofs - bus_ofs;
1425                 int n;
1426                 map_word datum;
1427
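                     /* Pad the partial word with 0xff: programming only ever
                        clears bits, so all-ones lanes leave the neighbouring
                        bytes untouched */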
1428                 n = min_t(int, len, map_bankwidth(map)-gap);
1429                 datum = map_word_ff(map);
1430                 datum = map_word_load_partial(map, datum, buf, gap, n);
1431
1432                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1433                                                bus_ofs, datum, FL_WRITING);
1434                 if (ret)
1435                         return ret;
1436
1437                 len -= n;
1438                 ofs += n;
1439                 buf += n;
1440                 (*retlen) += n;
1441
1442                 if (ofs >> cfi->chipshift) {
1443                         chipnum++;
1444                         ofs = 0;
1445                         if (chipnum == cfi->numchips)
1446                                 return 0;
1447                 }
1448         }
1449
1450         while(len >= map_bankwidth(map)) {
1451                 map_word datum = map_word_load(map, buf);
1452
1453                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1454                                        ofs, datum, FL_WRITING);
1455                 if (ret)
1456                         return ret;
1457
1458                 ofs += map_bankwidth(map);
1459                 buf += map_bankwidth(map);
1460                 (*retlen) += map_bankwidth(map);
1461                 len -= map_bankwidth(map);
1462
1463                 if (ofs >> cfi->chipshift) {
1464                         chipnum++;
1465                         ofs = 0;
1466                         if (chipnum == cfi->numchips)
1467                                 return 0;
1468                 }
1469         }
1470
1471         if (len & (map_bankwidth(map)-1)) {
1472                 map_word datum;
1473
1474                 datum = map_word_ff(map);
1475                 datum = map_word_load_partial(map, datum, buf, 0, len);
1476
1477                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1478                                        ofs, datum, FL_WRITING);
1479                 if (ret)
1480                         return ret;
1481
1482                 (*retlen) += len;
1483         }
1484
1485         return 0;
1486 }
1487
1488
1489 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1490                                     unsigned long adr, const struct kvec **pvec,
1491                                     unsigned long *pvec_seek, int len)
1492 {
1493         struct cfi_private *cfi = map->fldrv_priv;
1494         map_word status, write_cmd, datum;
1495         unsigned long cmd_adr;
1496         int ret, wbufsize, word_gap, words;
1497         const struct kvec *vec;
1498         unsigned long vec_seek;
             unsigned long initial_adr;
             int initial_len = len;
1499
1500         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1501         adr += chip->start;
             initial_adr = adr;
1502         cmd_adr = adr & ~(wbufsize-1);
1503
1504         /* Pick the Write-to-Buffer opcode (0xe8, or 0xe9 for command set
                0x0200) according to the interleave, and do it only once */
1505         write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1506
1507         spin_lock(chip->mutex);
1508         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1509         if (ret) {
1510                 spin_unlock(chip->mutex);
1511                 return ret;
1512         }
1513
1514         XIP_INVAL_CACHED_RANGE(map, adr, len);
1515         ENABLE_VPP(map);
1516         xip_disable(map, chip, cmd_adr);
1517
1518         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1519            [...], the device will not accept any more Write to Buffer commands".
1520            So we must check here and reset those bits if they're set. Otherwise
1521            we're just pissing in the wind */
1522         if (chip->state != FL_STATUS) {
1523                 map_write(map, CMD(0x70), cmd_adr);
1524                 chip->state = FL_STATUS;
1525         }
1526         status = map_read(map, cmd_adr);
1527         if (map_word_bitsset(map, status, CMD(0x30))) {
1528                 xip_enable(map, chip, cmd_adr);
1529                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1530                 xip_disable(map, chip, cmd_adr);
1531                 map_write(map, CMD(0x50), cmd_adr);
1532                 map_write(map, CMD(0x70), cmd_adr);
1533         }
1534
1535         chip->state = FL_WRITING_TO_BUFFER;
1536         map_write(map, write_cmd, cmd_adr);
1537         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
1538         if (ret) {
1539                 /* Argh. Not ready for write to buffer */
1540                 map_word Xstatus = map_read(map, cmd_adr);
1541                 map_write(map, CMD(0x70), cmd_adr);
1542                 chip->state = FL_STATUS;
1543                 status = map_read(map, cmd_adr);
1544                 map_write(map, CMD(0x50), cmd_adr);
1545                 map_write(map, CMD(0x70), cmd_adr);
1546                 xip_enable(map, chip, cmd_adr);
1547                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1548                                 map->name, Xstatus.x[0], status.x[0]);
1549                 goto out;
1550         }
1551
1552         /* Figure out the number of words to write */
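             /* The chip wants the count encoded as N-1.  word_gap starts as
                the distance up to the next bus-word boundary; if adr is
                aligned, words holds the full count and is decremented below,
                and if not, the first (padded) word was never counted, so
                words is already N-1.  word_gap then becomes the number of
                0xff pad bytes at the front of that first word. */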
1553         word_gap = (-adr & (map_bankwidth(map)-1));
1554         words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
1555         if (!word_gap) {
1556                 words--;
1557         } else {
1558                 word_gap = map_bankwidth(map) - word_gap;
1559                 adr -= word_gap;
1560                 datum = map_word_ff(map);
1561         }
1562
1563         /* Write length of data to come */
1564         map_write(map, CMD(words), cmd_adr);
1565
1566         /* Write data */
1567         vec = *pvec;
1568         vec_seek = *pvec_seek;
1569         do {
1570                 int n = map_bankwidth(map) - word_gap;
1571                 if (n > vec->iov_len - vec_seek)
1572                         n = vec->iov_len - vec_seek;
1573                 if (n > len)
1574                         n = len;
1575
1576                 if (!word_gap && len < map_bankwidth(map))
1577                         datum = map_word_ff(map);
1578
1579                 datum = map_word_load_partial(map, datum,
1580                                               vec->iov_base + vec_seek,
1581                                               word_gap, n);
1582
1583                 len -= n;
1584                 word_gap += n;
1585                 if (!len || word_gap == map_bankwidth(map)) {
1586                         map_write(map, datum, adr);
1587                         adr += map_bankwidth(map);
1588                         word_gap = 0;
1589                 }
1590
1591                 vec_seek += n;
1592                 if (vec_seek == vec->iov_len) {
1593                         vec++;
1594                         vec_seek = 0;
1595                 }
1596         } while (len);
1597         *pvec = vec;
1598         *pvec_seek = vec_seek;
1599
1600         /* GO GO GO */
1601         map_write(map, CMD(0xd0), cmd_adr);
1602         chip->state = FL_WRITING;
1603
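             /* adr and len were consumed by the fill loop above; wait (and
                invalidate caches) over the range we actually wrote */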
1604         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1605                                    initial_adr, initial_len,
1606                                    chip->buffer_write_time);
1607         if (ret) {
1608                 map_write(map, CMD(0x70), cmd_adr);
1609                 chip->state = FL_STATUS;
1610                 xip_enable(map, chip, cmd_adr);
1611                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1612                 goto out;
1613         }
1614
1615         /* check for errors */
1616         status = map_read(map, cmd_adr);
1617         if (map_word_bitsset(map, status, CMD(0x1a))) {
1618                 unsigned long chipstatus = MERGESTATUS(status);
1619
1620                 /* reset status */
1621                 map_write(map, CMD(0x50), cmd_adr);
1622                 map_write(map, CMD(0x70), cmd_adr);
1623                 xip_enable(map, chip, cmd_adr);
1624
1625                 if (chipstatus & 0x02) {
1626                         ret = -EROFS;
1627                 } else if (chipstatus & 0x08) {
1628                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1629                         ret = -EIO;
1630                 } else {
1631                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1632                         ret = -EINVAL;
1633                 }
1634
1635                 goto out;
1636         }
1637
1638         xip_enable(map, chip, cmd_adr);
1639  out:   put_chip(map, chip, cmd_adr);
1640         spin_unlock(chip->mutex);
1641         return ret;
1642 }
1643
1644 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1645                                 unsigned long count, loff_t to, size_t *retlen)
1646 {
1647         struct map_info *map = mtd->priv;
1648         struct cfi_private *cfi = map->fldrv_priv;
1649         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1650         int ret = 0;
1651         int chipnum;
1652         unsigned long ofs, vec_seek, i;
1653         size_t len = 0;
1654
1655         for (i = 0; i < count; i++)
1656                 len += vecs[i].iov_len;
1657
1658         *retlen = 0;
1659         if (!len)
1660                 return 0;
1661
1662         chipnum = to >> cfi->chipshift;
1663         ofs = to - (chipnum << cfi->chipshift);
1664         vec_seek = 0;
1665
1666         do {
1667                 /* We must not cross write block boundaries */
1668                 int size = wbufsize - (ofs & (wbufsize-1));
1669
1670                 if (size > len)
1671                         size = len;
1672                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1673                                       ofs, &vecs, &vec_seek, size);
1674                 if (ret)
1675                         return ret;
1676
1677                 ofs += size;
1678                 (*retlen) += size;
1679                 len -= size;
1680
1681                 if (ofs >> cfi->chipshift) {
1682                         chipnum++;
1683                         ofs = 0;
1684                         if (chipnum == cfi->numchips)
1685                                 return 0;
1686                 }
1687
1688                 /* Be nice and reschedule with the chip in a usable state for other
1689                    processes. */
1690                 cond_resched();
1691
1692         } while (len);
1693
1694         return 0;
1695 }
1696
1697 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1698                                        size_t len, size_t *retlen, const u_char *buf)
1699 {
1700         struct kvec vec;
1701
1702         vec.iov_base = (void *) buf;
1703         vec.iov_len = len;
1704
1705         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1706 }
1707
1708 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1709                                       unsigned long adr, int len, void *thunk)
1710 {
1711         struct cfi_private *cfi = map->fldrv_priv;
1712         map_word status;
1713         int retries = 3;
1714         int ret;
1715
1716         adr += chip->start;
1717
1718  retry:
1719         spin_lock(chip->mutex);
1720         ret = get_chip(map, chip, adr, FL_ERASING);
1721         if (ret) {
1722                 spin_unlock(chip->mutex);
1723                 return ret;
1724         }
1725
1726         XIP_INVAL_CACHED_RANGE(map, adr, len);
1727         ENABLE_VPP(map);
1728         xip_disable(map, chip, adr);
1729
1730         /* Clear the status register first */
1731         map_write(map, CMD(0x50), adr);
1732
1733         /* Now erase: 0x20 setup, 0xD0 confirm */
1734         map_write(map, CMD(0x20), adr);
1735         map_write(map, CMD(0xD0), adr);
1736         chip->state = FL_ERASING;
1737         chip->erase_suspended = 0;
1738
1739         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1740                                    adr, len,
1741                                    chip->erase_time);
1742         if (ret) {
1743                 map_write(map, CMD(0x70), adr);
1744                 chip->state = FL_STATUS;
1745                 xip_enable(map, chip, adr);
1746                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1747                 goto out;
1748         }
1749
1750         /* We've broken this before. It doesn't hurt to be safe */
1751         map_write(map, CMD(0x70), adr);
1752         chip->state = FL_STATUS;
1753         status = map_read(map, adr);
1754
1755         /* check for errors */
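             /* 0x3a masks SR.5 (erase error), SR.4 (program error), SR.3
                (VPP low) and SR.1 (block locked); SR.4 and SR.5 set together
                indicate a bad command sequence */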
1756         if (map_word_bitsset(map, status, CMD(0x3a))) {
1757                 unsigned long chipstatus = MERGESTATUS(status);
1758
1759                 /* Reset the error bits */
1760                 map_write(map, CMD(0x50), adr);
1761                 map_write(map, CMD(0x70), adr);
1762                 xip_enable(map, chip, adr);
1763
1764                 if ((chipstatus & 0x30) == 0x30) {
1765                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1766                         ret = -EINVAL;
1767                 } else if (chipstatus & 0x02) {
1768                         /* Protection bit set */
1769                         ret = -EROFS;
1770                 } else if (chipstatus & 0x08) {
1771                         /* Voltage */
1772                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1773                         ret = -EIO;
1774                 } else if (chipstatus & 0x20 && retries--) {
1775                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1776                         put_chip(map, chip, adr);
1777                         spin_unlock(chip->mutex);
1778                         goto retry;
1779                 } else {
1780                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1781                         ret = -EIO;
1782                 }
1783
1784                 goto out;
1785         }
1786
1787         xip_enable(map, chip, adr);
1788  out:   put_chip(map, chip, adr);
1789         spin_unlock(chip->mutex);
1790         return ret;
1791 }
1792
1793 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1794 {
1795         unsigned long ofs, len;
1796         int ret;
1797
1798         ofs = instr->addr;
1799         len = instr->len;
1800
1801         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1802         if (ret)
1803                 return ret;
1804
1805         instr->state = MTD_ERASE_DONE;
1806         mtd_erase_callback(instr);
1807
1808         return 0;
1809 }
1810
1811 static void cfi_intelext_sync (struct mtd_info *mtd)
1812 {
1813         struct map_info *map = mtd->priv;
1814         struct cfi_private *cfi = map->fldrv_priv;
1815         int i;
1816         struct flchip *chip;
1817         int ret = 0;
1818
1819         for (i=0; !ret && i<cfi->numchips; i++) {
1820                 chip = &cfi->chips[i];
1821
1822                 spin_lock(chip->mutex);
1823                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1824
1825                 if (!ret) {
1826                         chip->oldstate = chip->state;
1827                         chip->state = FL_SYNCING;
1828                         /* No need to wake_up() on this state change -
1829                          * as the whole point is that nobody can do anything
1830                          * with the chip now anyway.
1831                          */
1832                 }
1833                 spin_unlock(chip->mutex);
1834         }
1835
1836         /* Unlock the chips again */
1837
1838         for (i--; i >=0; i--) {
1839                 chip = &cfi->chips[i];
1840
1841                 spin_lock(chip->mutex);
1842
1843                 if (chip->state == FL_SYNCING) {
1844                         chip->state = chip->oldstate;
1845                         chip->oldstate = FL_READY;
1846                         wake_up(&chip->wq);
1847                 }
1848                 spin_unlock(chip->mutex);
1849         }
1850 }
1851
1852 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
1853                                                 struct flchip *chip,
1854                                                 unsigned long adr,
1855                                                 int len, void *thunk)
1856 {
1857         struct cfi_private *cfi = map->fldrv_priv;
1858         int status, ofs_factor = cfi->interleave * cfi->device_type;
1859
1860         adr += chip->start;
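             /* In read-identifier (0x90) mode the word at block base + 2
                reports the block's lock status in bit 0 */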
1861         xip_disable(map, chip, adr+(2*ofs_factor));
1862         map_write(map, CMD(0x90), adr+(2*ofs_factor));
1863         chip->state = FL_JEDEC_QUERY;
1864         status = cfi_read_query(map, adr+(2*ofs_factor));
1865         xip_enable(map, chip, 0);
1866         return status;
1867 }
1868
1869 #ifdef DEBUG_LOCK_BITS
1870 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1871                                                 struct flchip *chip,
1872                                                 unsigned long adr,
1873                                                 int len, void *thunk)
1874 {
1875         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1876                adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
1877         return 0;
1878 }
1879 #endif
1880
1881 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
1882 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
1883
1884 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1885                                        unsigned long adr, int len, void *thunk)
1886 {
1887         struct cfi_private *cfi = map->fldrv_priv;
1888         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1889         int udelay;
1890         int ret;
1891
1892         adr += chip->start;
1893
1894         spin_lock(chip->mutex);
1895         ret = get_chip(map, chip, adr, FL_LOCKING);
1896         if (ret) {
1897                 spin_unlock(chip->mutex);
1898                 return ret;
1899         }
1900
1901         ENABLE_VPP(map);
1902         xip_disable(map, chip, adr);
1903
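             /* 0x60 is lock setup; 0x01 confirms a set-lock operation,
                0xD0 a clear-locks operation */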
1904         map_write(map, CMD(0x60), adr);
1905         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1906                 map_write(map, CMD(0x01), adr);
1907                 chip->state = FL_LOCKING;
1908         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1909                 map_write(map, CMD(0xD0), adr);
1910                 chip->state = FL_UNLOCKING;
1911         } else
1912                 BUG();
1913
1914         /*
1915          * If Instant Individual Block Locking is supported, there is
1916          * no need to delay.
1917          */
1918         udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
1919
1920         ret = WAIT_TIMEOUT(map, chip, adr, udelay);
1921         if (ret) {
1922                 map_write(map, CMD(0x70), adr);
1923                 chip->state = FL_STATUS;
1924                 xip_enable(map, chip, adr);
1925                 printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
1926                 goto out;
1927         }
1928
1929         xip_enable(map, chip, adr);
1930 out:    put_chip(map, chip, adr);
1931         spin_unlock(chip->mutex);
1932         return ret;
1933 }
1934
1935 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1936 {
1937         int ret;
1938
1939 #ifdef DEBUG_LOCK_BITS
1940         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
1941                __func__, ofs, len);
1942         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1943                 ofs, len, NULL);
1944 #endif
1945
1946         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1947                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1948
1949 #ifdef DEBUG_LOCK_BITS
1950         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1951                __func__, ret);
1952         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1953                 ofs, len, NULL);
1954 #endif
1955
1956         return ret;
1957 }
1958
1959 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1960 {
1961         int ret;
1962
1963 #ifdef DEBUG_LOCK_BITS
1964         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
1965                __func__, ofs, len);
1966         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1967                 ofs, len, NULL);
1968 #endif
1969
1970         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1971                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1972
1973 #ifdef DEBUG_LOCK_BITS
1974         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1975                __func__, ret);
1976         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1977                 ofs, len, NULL);
1978 #endif
1979
1980         return ret;
1981 }
1982
1983 #ifdef CONFIG_MTD_OTP
1984
1985 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
1986                         u_long data_offset, u_char *buf, u_int size,
1987                         u_long prot_offset, u_int groupno, u_int groupsize);
1988
1989 static int __xipram
1990 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
1991             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
1992 {
1993         struct cfi_private *cfi = map->fldrv_priv;
1994         int ret;
1995
1996         spin_lock(chip->mutex);
1997         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
1998         if (ret) {
1999                 spin_unlock(chip->mutex);
2000                 return ret;
2001         }
2002
2003         /* let's ensure we're not reading back cached data from array mode */
2004         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2005
2006         xip_disable(map, chip, chip->start);
2007         if (chip->state != FL_JEDEC_QUERY) {
2008                 map_write(map, CMD(0x90), chip->start);
2009                 chip->state = FL_JEDEC_QUERY;
2010         }
2011         map_copy_from(map, buf, chip->start + offset, size);
2012         xip_enable(map, chip, chip->start);
2013
2014         /* then ensure we don't keep OTP data in the cache */
2015         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2016
2017         put_chip(map, chip, chip->start);
2018         spin_unlock(chip->mutex);
2019         return 0;
2020 }
2021
2022 static int
2023 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2024              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2025 {
2026         int ret;
2027
2028         while (size) {
2029                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2030                 int gap = offset - bus_ofs;
2031                 int n = min_t(int, size, map_bankwidth(map)-gap);
2032                 map_word datum = map_word_ff(map);
2033
2034                 datum = map_word_load_partial(map, datum, buf, gap, n);
2035                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2036                 if (ret)
2037                         return ret;
2038
2039                 offset += n;
2040                 buf += n;
2041                 size -= n;
2042         }
2043
2044         return 0;
2045 }
2046
2047 static int
2048 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2049             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2050 {
2051         struct cfi_private *cfi = map->fldrv_priv;
2052         map_word datum;
2053
2054         /* make sure area matches group boundaries */
2055         if (size != grpsz)
2056                 return -EXDEV;
2057
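             /* Locking is itself an OTP program operation: clear this
                group's bit in the protection lock word (OTP bits can only
                go from 1 to 0) */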
2058         datum = map_word_ff(map);
2059         datum = map_word_clr(map, datum, CMD(1 << grpno));
2060         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2061 }
2062
2063 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2064                                  size_t *retlen, u_char *buf,
2065                                  otp_op_t action, int user_regs)
2066 {
2067         struct map_info *map = mtd->priv;
2068         struct cfi_private *cfi = map->fldrv_priv;
2069         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2070         struct flchip *chip;
2071         struct cfi_intelext_otpinfo *otp;
2072         u_long devsize, reg_prot_offset, data_offset;
2073         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2074         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2075         int ret;
2076
2077         *retlen = 0;
2078
2079         /* Check that we actually have some OTP registers */
2080         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2081                 return -ENODATA;
2082
2083         /* we need real chips here, not virtual ones */
2084         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2085         chip_step = devsize >> cfi->chipshift;
2086         chip_num = 0;
2087
2088         /* Some chips have OTP located in the _top_ partition only.
2089            For example: Intel 28F256L18T (T means top-parameter device) */
2090         if (cfi->mfr == MANUFACTURER_INTEL) {
2091                 switch (cfi->id) {
2092                 case 0x880b:
2093                 case 0x880c:
2094                 case 0x880d:
2095                         chip_num = chip_step - 1;
2096                 }
2097         }
2098
2099         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2100                 chip = &cfi->chips[chip_num];
2101                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2102
2103                 /* first OTP region */
2104                 field = 0;
2105                 reg_prot_offset = extp->ProtRegAddr;
2106                 reg_fact_groups = 1;
2107                 reg_fact_size = 1 << extp->FactProtRegSize;
2108                 reg_user_groups = 1;
2109                 reg_user_size = 1 << extp->UserProtRegSize;
2110
2111                 while (len > 0) {
2112                         /* flash geometry fixup: CFI reports per-device byte offsets, so scale them by interleave and device width */
2113                         data_offset = reg_prot_offset + 1;
2114                         data_offset *= cfi->interleave * cfi->device_type;
2115                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2116                         reg_fact_size *= cfi->interleave;
2117                         reg_user_size *= cfi->interleave;
2118
2119                         if (user_regs) {
2120                                 groups = reg_user_groups;
2121                                 groupsize = reg_user_size;
2122                                 /* skip over factory reg area */
2123                                 groupno = reg_fact_groups;
2124                                 data_offset += reg_fact_groups * reg_fact_size;
2125                         } else {
2126                                 groups = reg_fact_groups;
2127                                 groupsize = reg_fact_size;
2128                                 groupno = 0;
2129                         }
2130
2131                         while (len > 0 && groups > 0) {
2132                                 if (!action) {
2133                                         /*
2134                                          * Special case: if action is NULL
2135                                          * we fill buf with otp_info records.
2136                                          */
2137                                         struct otp_info *otpinfo;
2138                                         map_word lockword;
2139                                         if (len < sizeof(struct otp_info))
2140                                                 return -ENOSPC;
2141                                         len -= sizeof(struct otp_info);
2142                                         ret = do_otp_read(map, chip,
2143                                                           reg_prot_offset,
2144                                                           (u_char *)&lockword,
2145                                                           map_bankwidth(map),
2146                                                           0, 0,  0);
2147                                         if (ret)
2148                                                 return ret;
2149                                         otpinfo = (struct otp_info *)buf;
2150                                         otpinfo->start = from;
2151                                         otpinfo->length = groupsize;
2152                                         otpinfo->locked =
2153                                            !map_word_bitsset(map, lockword,
2154                                                              CMD(1 << groupno));
2155                                         from += groupsize;
2156                                         buf += sizeof(*otpinfo);
2157                                         *retlen += sizeof(*otpinfo);
2158                                 } else if (from >= groupsize) {
2159                                         from -= groupsize;
2160                                         data_offset += groupsize;
2161                                 } else {
2162                                         int size = groupsize;
2163                                         data_offset += from;
2164                                         size -= from;
2165                                         from = 0;
2166                                         if (size > len)
2167                                                 size = len;
2168                                         ret = action(map, chip, data_offset,
2169                                                      buf, size, reg_prot_offset,
2170                                                      groupno, groupsize);
2171                                         if (ret < 0)
2172                                                 return ret;
2173                                         buf += size;
2174                                         len -= size;
2175                                         *retlen += size;
2176                                         data_offset += size;
2177                                 }
2178                                 groupno++;
2179                                 groups--;
2180                         }
2181
2182                         /* next OTP region */
2183                         if (++field == extp->NumProtectionFields)
2184                                 break;
2185                         reg_prot_offset = otp->ProtRegAddr;
2186                         reg_fact_groups = otp->FactGroups;
2187                         reg_fact_size = 1 << otp->FactProtRegSize;
2188                         reg_user_groups = otp->UserGroups;
2189                         reg_user_size = 1 << otp->UserProtRegSize;
2190                         otp++;
2191                 }
2192         }
2193
2194         return 0;
2195 }
2196
2197 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2198                                            size_t len, size_t *retlen,
2199                                             u_char *buf)
2200 {
2201         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2202                                      buf, do_otp_read, 0);
2203 }
2204
2205 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2206                                            size_t len, size_t *retlen,
2207                                             u_char *buf)
2208 {
2209         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2210                                      buf, do_otp_read, 1);
2211 }
2212
2213 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2214                                             size_t len, size_t *retlen,
2215                                              u_char *buf)
2216 {
2217         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2218                                      buf, do_otp_write, 1);
2219 }
2220
2221 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2222                                            loff_t from, size_t len)
2223 {
2224         size_t retlen;
2225         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2226                                      NULL, do_otp_lock, 1);
2227 }
2228
2229 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2230                                            struct otp_info *buf, size_t len)
2231 {
2232         size_t retlen;
2233         int ret;
2234
2235         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2236         return ret ? : retlen;
2237 }
2238
2239 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2240                                            struct otp_info *buf, size_t len)
2241 {
2242         size_t retlen;
2243         int ret;
2244
2245         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2246         return ret ? : retlen;
2247 }
2248
2249 #endif
2250
2251 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2252 {
2253         struct mtd_erase_region_info *region;
2254         int block, status, i;
2255         unsigned long adr;
2256         size_t len;
2257
2258         for (i = 0; i < mtd->numeraseregions; i++) {
2259                 region = &mtd->eraseregions[i];
2260                 if (!region->lockmap)
2261                         continue;
2262
2263                 for (block = 0; block < region->numblocks; block++){
2264                         len = region->erasesize;
2265                         adr = region->offset + block * len;
2266
2267                         status = cfi_varsize_frob(mtd,
2268                                         do_getlockstatus_oneblock, adr, len, NULL);
2269                         if (status)
2270                                 set_bit(block, region->lockmap);
2271                         else
2272                                 clear_bit(block, region->lockmap);
2273                 }
2274         }
2275 }
2276
2277 static int cfi_intelext_suspend(struct mtd_info *mtd)
2278 {
2279         struct map_info *map = mtd->priv;
2280         struct cfi_private *cfi = map->fldrv_priv;
2281         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2282         int i;
2283         struct flchip *chip;
2284         int ret = 0;
2285
2286         if ((mtd->flags & MTD_STUPID_LOCK)
2287             && extp && (extp->FeatureSupport & (1 << 5)))
2288                 cfi_intelext_save_locks(mtd);
2289
2290         for (i=0; !ret && i<cfi->numchips; i++) {
2291                 chip = &cfi->chips[i];
2292
2293                 spin_lock(chip->mutex);
2294
2295                 switch (chip->state) {
2296                 case FL_READY:
2297                 case FL_STATUS:
2298                 case FL_CFI_QUERY:
2299                 case FL_JEDEC_QUERY:
2300                         if (chip->oldstate == FL_READY) {
2301                                 /* place the chip in a known state before suspend */
2302                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2303                                 chip->oldstate = chip->state;
2304                                 chip->state = FL_PM_SUSPENDED;
2305                                 /* No need to wake_up() on this state change -
2306                                  * as the whole point is that nobody can do anything
2307                                  * with the chip now anyway.
2308                                  */
2309                         } else {
2310                                 /* There seems to be an operation pending. We must wait for it. */
2311                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2312                                 ret = -EAGAIN;
2313                         }
2314                         break;
2315                 default:
2316                         /* Should we actually wait? Once upon a time these routines weren't
2317                            allowed to. Or should we return -EAGAIN, because the upper layers
2318                            ought to have already shut down anything which was using the device
2319                            anyway? The latter for now. */
2320                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2321                         ret = -EAGAIN;
                             break;
2322                 case FL_PM_SUSPENDED:
2323                         break;
2324                 }
2325                 spin_unlock(chip->mutex);
2326         }
2327
2328         /* Unlock the chips again */
2329
2330         if (ret) {
2331                 for (i--; i >=0; i--) {
2332                         chip = &cfi->chips[i];
2333
2334                         spin_lock(chip->mutex);
2335
2336                         if (chip->state == FL_PM_SUSPENDED) {
2337                                 /* No need to force it into a known state here,
2338                                    because we're returning failure, and it didn't
2339                                    get power cycled */
2340                                 chip->state = chip->oldstate;
2341                                 chip->oldstate = FL_READY;
2342                                 wake_up(&chip->wq);
2343                         }
2344                         spin_unlock(chip->mutex);
2345                 }
2346         }
2347
2348         return ret;
2349 }
2350
2351 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2352 {
2353         struct mtd_erase_region_info *region;
2354         int block, i;
2355         unsigned long adr;
2356         size_t len;
2357
2358         for (i = 0; i < mtd->numeraseregions; i++) {
2359                 region = &mtd->eraseregions[i];
2360                 if (!region->lockmap)
2361                         continue;
2362
2363                 for (block = 0; block < region->numblocks; block++) {
2364                         len = region->erasesize;
2365                         adr = region->offset + block * len;
2366
2367                         if (!test_bit(block, region->lockmap))
2368                                 cfi_intelext_unlock(mtd, adr, len);
2369                 }
2370         }
2371 }
2372
2373 static void cfi_intelext_resume(struct mtd_info *mtd)
2374 {
2375         struct map_info *map = mtd->priv;
2376         struct cfi_private *cfi = map->fldrv_priv;
2377         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2378         int i;
2379         struct flchip *chip;
2380
2381         for (i=0; i<cfi->numchips; i++) {
2382
2383                 chip = &cfi->chips[i];
2384
2385                 spin_lock(chip->mutex);
2386
2387                 /* Go to known state. Chip may have been power cycled */
2388                 if (chip->state == FL_PM_SUSPENDED) {
2389                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2390                         chip->oldstate = chip->state = FL_READY;
2391                         wake_up(&chip->wq);
2392                 }
2393
2394                 spin_unlock(chip->mutex);
2395         }
2396
2397         if ((mtd->flags & MTD_STUPID_LOCK)
2398             && extp && (extp->FeatureSupport & (1 << 5)))
2399                 cfi_intelext_restore_locks(mtd);
2400 }
2401
2402 static int cfi_intelext_reset(struct mtd_info *mtd)
2403 {
2404         struct map_info *map = mtd->priv;
2405         struct cfi_private *cfi = map->fldrv_priv;
2406         int i, ret;
2407
2408         for (i=0; i < cfi->numchips; i++) {
2409                 struct flchip *chip = &cfi->chips[i];
2410
2411                 /* force the completion of any ongoing operation
2412                    and switch to array mode so any bootloader in
2413                    flash is accessible for soft reboot. */
2414                 spin_lock(chip->mutex);
2415                 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2416                 if (!ret) {
2417                         map_write(map, CMD(0xff), chip->start);
2418                         chip->state = FL_SHUTDOWN;
2419                 }
2420                 spin_unlock(chip->mutex);
2421         }
2422
2423         return 0;
2424 }
2425
2426 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2427                                void *v)
2428 {
2429         struct mtd_info *mtd;
2430
2431         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2432         cfi_intelext_reset(mtd);
2433         return NOTIFY_DONE;
2434 }
2435
2436 static void cfi_intelext_destroy(struct mtd_info *mtd)
2437 {
2438         struct map_info *map = mtd->priv;
2439         struct cfi_private *cfi = map->fldrv_priv;
2440         struct mtd_erase_region_info *region;
2441         int i;
2442         cfi_intelext_reset(mtd);
2443         unregister_reboot_notifier(&mtd->reboot_notifier);
2444         kfree(cfi->cmdset_priv);
2445         kfree(cfi->cfiq);
2446         kfree(cfi->chips[0].priv);
2447         kfree(cfi);
2448         for (i = 0; i < mtd->numeraseregions; i++) {
2449                 region = &mtd->eraseregions[i];
2450                 kfree(region->lockmap);
2452         }
2453         kfree(mtd->eraseregions);
2454 }
2455
2456 MODULE_LICENSE("GPL");
2457 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2458 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2459 MODULE_ALIAS("cfi_cmdset_0003");
2460 MODULE_ALIAS("cfi_cmdset_0200");