/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@cam.org>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL      0x0089
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define MANUFACTURER_ST         0x0020
#define M50LPW080       0x002F

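/*
 * Frequently used Intel command and status bytes in this file:
 *   0xff = read array, 0x70 = read status, 0xb0 = suspend,
 *   0xd0 = resume/confirm, 0x40/0x41 = word program, 0xc0 = OTP program.
 * In the status register, 0x80 is SR.7 (write state machine ready).
 */
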
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
                        size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<4; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n" );
                mtd->write = cfi_intelext_write_buffers;
                mtd->writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power up with all sectors locked by default.
 */
static void fixup_use_powerup_lock(struct mtd_info *mtd, void *param)
{
        printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
        mtd->flags |= MTD_STUPID_LOCK;
}

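/*
 * Fixup tables: cfi_fixup() runs every entry whose manufacturer and
 * device ids match the probed chip; CFI_MFR_ANY and CFI_ID_ANY act
 * as wildcards.
 */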
static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { MANUFACTURER_INTEL, 0x891c,         fixup_use_powerup_lock, NULL, },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor ids and the JEDEC vendor IDs appear
         * to be common.  It is likely that the device ids are
         * as well.  This table picks out all the cases where
         * we know that is the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};

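/*
 * Read the Intel/Sharp extended query table.  The structure has a
 * variable-sized tail, so read the fixed part first, work out how much
 * extra data the chip advertises, and retry with a bigger buffer until
 * everything fits.
 */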
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_pri_intelext *extp;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
                unsigned int extra_size = 0;
                int nb_parts, i;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size-1];

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __FUNCTION__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if(extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

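        /* CFI timeout fields are log2-encoded: typical word-write time is
           2^n microseconds and typical block-erase time 2^n milliseconds,
           hence the shifts below. */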
        for (i=0; i< cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
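
/* Command sets 0x0003 and 0x0200 are handled by the same code as
   0x0001, so just provide them as aliases of cfi_cmdset_0001(). */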
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        /* printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips); */

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
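                /* Per CFI, each EraseRegionInfo word packs (block size / 256)
                   in bits 31..16 and (number of blocks - 1) in bits 15..0. */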
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i,mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if(mtd) {
                kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
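                /* numparts should be a power of two here, making __ffs()
                   a log2: each virtual chip spans 1 << partshift bytes. */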
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __FUNCTION__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
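        /* SR.7 (0x80) set means the write state machine is ready; the 0x01
           bit is used below as the per-partition write status on
           multi-partition chips. */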
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        unsigned long timeo;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
                /*
                 * OK. We have possibility for contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        int ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        ret = get_chip(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        timeo = jiffies + HZ;
                        spin_lock(&shared->lock);
                        spin_unlock(contender->mutex);
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                spin_unlock(&shared->lock);
        }

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
                                       map->name, status.x[0]);
                                return -EIO;
                        }
                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }

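        /* Fall through: the status loop above leaves the chip ready */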
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;

        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                goto resettime;
        }
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        spin_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                spin_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, unsigned int chip_op_time )
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
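        /* Allow 8x the typical operation time before giving up, matching
           inval_cache_and_wait_for_operation() below. */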
        usec = chip_op_time * 8;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return -EIO;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        asm volatile (".rep 8; nop; .endr");
                        local_irq_enable();
                        spin_unlock(chip->mutex);
                        asm volatile (".rep 8; nop; .endr");
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        spin_lock(chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                spin_unlock(chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                spin_lock(chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
                done = xip_elapsed_since(start);
        } while (!map_word_andequal(map, status, OK, OK)
                 && done < usec);

        return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
        xip_wait_for_operation(map, chip, cmd_adr, usec)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

static int inval_cache_and_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
                unsigned int chip_op_time)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80);
        int chip_state = chip->state;
        unsigned int timeo, sleep_time;

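        /* Cache invalidation can take a while, so do it without holding
           the chip lock. */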
        spin_unlock(chip->mutex);
        if (inval_len)
                INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
        spin_lock(chip->mutex);

        /* set our timeout to 8 times the expected delay */
        timeo = chip_op_time * 8;
        if (!timeo)
                timeo = 500000;
        sleep_time = chip_op_time / 2;

        for (;;) {
                status = map_read(map, cmd_adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                if (!timeo) {
                        map_write(map, CMD(0x70), cmd_adr);
                        chip->state = FL_STATUS;
                        return -ETIME;
                }

                /* OK Still waiting. Drop the lock, wait a while and retry. */
                spin_unlock(chip->mutex);
                if (sleep_time >= 1000000/HZ) {
                        /*
                         * Half of the normal delay still remaining
                         * can be performed with a sleeping delay instead
                         * of busy waiting.
                         */
                        msleep(sleep_time/1000);
                        timeo -= sleep_time;
                        sleep_time = 1000000/HZ;
                } else {
                        udelay(1);
                        cond_resched();
                        timeo--;
                }
                spin_lock(chip->mutex);

                while (chip->state != chip_state) {
                        /* Someone's suspended the operation: sleep */
                        DECLARE_WAITQUEUE(wait, current);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        spin_lock(chip->mutex);
                }
        }

        /* Done and happy. */
        chip->state = FL_STATUS;
        return 0;
}

#endif

#define WAIT_TIMEOUT(map, chip, adr, udelay) \
        INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);


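/*
 * "Point" hands the caller a direct pointer into the flash mapping, so
 * the chip must stay in (or return to) read array mode while references
 * are outstanding; ref_point_counter tracks how many there are.
 */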
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);

        ret = get_chip(map, chip, cmd_addr, FL_POINT);

        if (!ret) {
                if (chip->state != FL_POINT && chip->state != FL_READY)
                        map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_POINT;
                chip->ref_point_counter++;
        }
        spin_unlock(chip->mutex);

        return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        if (!map->virt || (from + len > mtd->size))
                return -EINVAL;

        *mtdbuf = (void *)map->virt + from;
        *retlen = 0;

        /* Now lock the chip(s) to POINT state */

        /* ofs: offset within the first chip at which the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;

        /* Now unlock the chip(s) POINT state */

        /* ofs: offset within the first chip at which the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum <<  cfi->chipshift);

        while (len) {
                unsigned long thislen;
                struct flchip *chip;

                if (chipnum >= cfi->numchips)
                        break;
                chip = &cfi->chips[chipnum];

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                spin_lock(chip->mutex);
                if (chip->state == FL_POINT) {
                        chip->ref_point_counter--;
                        if(chip->ref_point_counter == 0)
                                chip->state = FL_READY;
                } else
                        printk(KERN_ERR "%s: Warning: unpoint called on non-pointed region\n", map->name); /* Should this give an error? */

                put_chip(map, chip, chip->start);
                spin_unlock(chip->mutex);

                len -= thislen;
                ofs = 0;
                chipnum++;
        }
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, cmd_addr, FL_READY);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_READY;
        }

        map_copy_from(map, buf, adr, len);

        put_chip(map, chip, cmd_addr);

        spin_unlock(chip->mutex);
        return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

1292         /* ofs: offset within the first chip that the first read should start */
1293         chipnum = (from >> cfi->chipshift);
1294         ofs = from - (chipnum <<  cfi->chipshift);
1295
1296         *retlen = 0;
1297
1298         while (len) {
1299                 unsigned long thislen;
1300
1301                 if (chipnum >= cfi->numchips)
1302                         break;
1303
1304                 if ((len + ofs -1) >> cfi->chipshift)
1305                         thislen = (1<<cfi->chipshift) - ofs;
1306                 else
1307                         thislen = len;
1308
1309                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1310                 if (ret)
1311                         break;
1312
1313                 *retlen += thislen;
1314                 len -= thislen;
1315                 buf += thislen;
1316
1317                 ofs = 0;
1318                 chipnum++;
1319         }
1320         return ret;
1321 }
1322
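/*
 * Program one aligned bus word.  This is the standard Intel sequence:
 * program setup (0x40, or 0x41 on command-set 0x0200 parts, 0xc0 for
 * OTP cells), then the datum, then poll the status register.  The
 * error bits checked afterwards are SR.1 (block locked, -EROFS),
 * SR.3 (VPP low, -EIO) and SR.4 (program failure); 0x50 clears the
 * sticky error bits and 0x70 re-selects status mode.
 */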
1323 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1324                                      unsigned long adr, map_word datum, int mode)
1325 {
1326         struct cfi_private *cfi = map->fldrv_priv;
1327         map_word status, write_cmd;
1328         int ret=0;
1329
1330         adr += chip->start;
1331
1332         switch (mode) {
1333         case FL_WRITING:
1334                 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1335                 break;
1336         case FL_OTP_WRITE:
1337                 write_cmd = CMD(0xc0);
1338                 break;
1339         default:
1340                 return -EINVAL;
1341         }
1342
1343         spin_lock(chip->mutex);
1344         ret = get_chip(map, chip, adr, mode);
1345         if (ret) {
1346                 spin_unlock(chip->mutex);
1347                 return ret;
1348         }
1349
1350         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1351         ENABLE_VPP(map);
1352         xip_disable(map, chip, adr);
1353         map_write(map, write_cmd, adr);
1354         map_write(map, datum, adr);
1355         chip->state = mode;
1356
1357         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1358                                    adr, map_bankwidth(map),
1359                                    chip->word_write_time);
1360         if (ret) {
1361                 xip_enable(map, chip, adr);
1362                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1363                 goto out;
1364         }
1365
1366         /* check for errors */
1367         status = map_read(map, adr);
1368         if (map_word_bitsset(map, status, CMD(0x1a))) {
1369                 unsigned long chipstatus = MERGESTATUS(status);
1370
1371                 /* reset status */
1372                 map_write(map, CMD(0x50), adr);
1373                 map_write(map, CMD(0x70), adr);
1374                 xip_enable(map, chip, adr);
1375
1376                 if (chipstatus & 0x02) {
1377                         ret = -EROFS;
1378                 } else if (chipstatus & 0x08) {
1379                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1380                         ret = -EIO;
1381                 } else {
1382                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1383                         ret = -EINVAL;
1384                 }
1385
1386                 goto out;
1387         }
1388
1389         xip_enable(map, chip, adr);
1390  out:   put_chip(map, chip, adr);
1391         spin_unlock(chip->mutex);
1392         return ret;
1393 }
1394
1395
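/*
 * Word-write entry point: a leading unaligned fragment, the aligned
 * bulk, then a trailing fragment.  Partial words are padded with 0xff
 * via map_word_load_partial(); since programming can only turn 1 bits
 * into 0, the padding leaves the neighbouring bytes untouched.
 */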
1396 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1397 {
1398         struct map_info *map = mtd->priv;
1399         struct cfi_private *cfi = map->fldrv_priv;
1400         int ret = 0;
1401         int chipnum;
1402         unsigned long ofs;
1403
1404         *retlen = 0;
1405         if (!len)
1406                 return 0;
1407
1408         chipnum = to >> cfi->chipshift;
1409         ofs = to  - (chipnum << cfi->chipshift);
1410
1411         /* If it's not bus-aligned, do the first byte write */
1412         if (ofs & (map_bankwidth(map)-1)) {
1413                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1414                 int gap = ofs - bus_ofs;
1415                 int n;
1416                 map_word datum;
1417
1418                 n = min_t(int, len, map_bankwidth(map)-gap);
1419                 datum = map_word_ff(map);
1420                 datum = map_word_load_partial(map, datum, buf, gap, n);
1421
1422                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1423                                                bus_ofs, datum, FL_WRITING);
1424                 if (ret)
1425                         return ret;
1426
1427                 len -= n;
1428                 ofs += n;
1429                 buf += n;
1430                 (*retlen) += n;
1431
1432                 if (ofs >> cfi->chipshift) {
1433                         chipnum ++;
1434                         ofs = 0;
1435                         if (chipnum == cfi->numchips)
1436                                 return 0;
1437                 }
1438         }
1439
1440         while(len >= map_bankwidth(map)) {
1441                 map_word datum = map_word_load(map, buf);
1442
1443                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1444                                        ofs, datum, FL_WRITING);
1445                 if (ret)
1446                         return ret;
1447
1448                 ofs += map_bankwidth(map);
1449                 buf += map_bankwidth(map);
1450                 (*retlen) += map_bankwidth(map);
1451                 len -= map_bankwidth(map);
1452
1453                 if (ofs >> cfi->chipshift) {
1454                         chipnum ++;
1455                         ofs = 0;
1456                         if (chipnum == cfi->numchips)
1457                                 return 0;
1458                 }
1459         }
1460
1461         if (len & (map_bankwidth(map)-1)) {
1462                 map_word datum;
1463
1464                 datum = map_word_ff(map);
1465                 datum = map_word_load_partial(map, datum, buf, 0, len);
1466
1467                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1468                                        ofs, datum, FL_WRITING);
1469                 if (ret)
1470                         return ret;
1471
1472                 (*retlen) += len;
1473         }
1474
1475         return 0;
1476 }
1477
1478
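/*
 * Buffered write within one wbufsize-aligned window.  The protocol:
 * issue 0xe8 (0xe9 on command-set 0x0200 parts), poll until the chip
 * reports the buffer available, write the word count minus one, stream
 * in the data words, then confirm with 0xd0 and wait on the status
 * register.  Data is gathered directly from the caller's kvec array,
 * with the (vec, vec_seek) cursor carried across calls.
 */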
1479 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1480                                     unsigned long adr, const struct kvec **pvec,
1481                                     unsigned long *pvec_seek, int len)
1482 {
1483         struct cfi_private *cfi = map->fldrv_priv;
1484         map_word status, write_cmd, datum;
1485         unsigned long cmd_adr;
1486         int ret, wbufsize, word_gap, words;
1487         const struct kvec *vec;
1488         unsigned long vec_seek;
1489
1490         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1491         adr += chip->start;
1492         cmd_adr = adr & ~(wbufsize-1);
1493
1494         /* Let's determine this according to the interleave only once */
1495         write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1496
1497         spin_lock(chip->mutex);
1498         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1499         if (ret) {
1500                 spin_unlock(chip->mutex);
1501                 return ret;
1502         }
1503
1504         XIP_INVAL_CACHED_RANGE(map, adr, len);
1505         ENABLE_VPP(map);
1506         xip_disable(map, chip, cmd_adr);
1507
1508         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1509            [...], the device will not accept any more Write to Buffer commands".
1510            So we must check here and reset those bits if they're set. Otherwise
1511            we're just pissing in the wind */
1512         if (chip->state != FL_STATUS) {
1513                 map_write(map, CMD(0x70), cmd_adr);
1514                 chip->state = FL_STATUS;
1515         }
1516         status = map_read(map, cmd_adr);
1517         if (map_word_bitsset(map, status, CMD(0x30))) {
1518                 xip_enable(map, chip, cmd_adr);
1519                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1520                 xip_disable(map, chip, cmd_adr);
1521                 map_write(map, CMD(0x50), cmd_adr);
1522                 map_write(map, CMD(0x70), cmd_adr);
1523         }
1524
1525         chip->state = FL_WRITING_TO_BUFFER;
1526         map_write(map, write_cmd, cmd_adr);
1527         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
1528         if (ret) {
1529                 /* Argh. Not ready for write to buffer */
1530                 map_word Xstatus = map_read(map, cmd_adr);
1531                 map_write(map, CMD(0x70), cmd_adr);
1532                 chip->state = FL_STATUS;
1533                 status = map_read(map, cmd_adr);
1534                 map_write(map, CMD(0x50), cmd_adr);
1535                 map_write(map, CMD(0x70), cmd_adr);
1536                 xip_enable(map, chip, cmd_adr);
1537                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1538                                 map->name, Xstatus.x[0], status.x[0]);
1539                 goto out;
1540         }
1541
1542         /* Figure out the number of words to write */
1543         word_gap = (-adr & (map_bankwidth(map)-1));
1544         words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
1545         if (!word_gap) {
1546                 words--;
1547         } else {
1548                 word_gap = map_bankwidth(map) - word_gap;
1549                 adr -= word_gap;
1550                 datum = map_word_ff(map);
1551         }
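        /*
         * Worked example with illustrative values: bankwidth 4, adr
         * ending in ...02, len = 5.  Then word_gap = (-adr & 3) = 2 and
         * words = (5 - 2 + 3) / 4 = 1; word_gap becomes 4 - 2 = 2, adr
         * is aligned down by 2, and datum is preset to all-0xff so the
         * two pad bytes program as harmless 1s.  Two bus words reach
         * the flash, and CMD(words) below sends N-1 = 1 as expected.
         */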
1552
1553         /* Write the word count minus one, which is what the command expects */
1554         map_write(map, CMD(words), cmd_adr);
1555
1556         /* Write data */
1557         vec = *pvec;
1558         vec_seek = *pvec_seek;
1559         do {
1560                 int n = map_bankwidth(map) - word_gap;
1561                 if (n > vec->iov_len - vec_seek)
1562                         n = vec->iov_len - vec_seek;
1563                 if (n > len)
1564                         n = len;
1565
1566                 if (!word_gap && len < map_bankwidth(map))
1567                         datum = map_word_ff(map);
1568
1569                 datum = map_word_load_partial(map, datum,
1570                                               vec->iov_base + vec_seek,
1571                                               word_gap, n);
1572
1573                 len -= n;
1574                 word_gap += n;
1575                 if (!len || word_gap == map_bankwidth(map)) {
1576                         map_write(map, datum, adr);
1577                         adr += map_bankwidth(map);
1578                         word_gap = 0;
1579                 }
1580
1581                 vec_seek += n;
1582                 if (vec_seek == vec->iov_len) {
1583                         vec++;
1584                         vec_seek = 0;
1585                 }
1586         } while (len);
1587         *pvec = vec;
1588         *pvec_seek = vec_seek;
1589
1590         /* GO GO GO */
1591         map_write(map, CMD(0xd0), cmd_adr);
1592         chip->state = FL_WRITING;
1593
1594         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1595                                    adr, len,
1596                                    chip->buffer_write_time);
1597         if (ret) {
1598                 map_write(map, CMD(0x70), cmd_adr);
1599                 chip->state = FL_STATUS;
1600                 xip_enable(map, chip, cmd_adr);
1601                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1602                 goto out;
1603         }
1604
1605         /* check for errors */
1606         status = map_read(map, cmd_adr);
1607         if (map_word_bitsset(map, status, CMD(0x1a))) {
1608                 unsigned long chipstatus = MERGESTATUS(status);
1609
1610                 /* reset status */
1611                 map_write(map, CMD(0x50), cmd_adr);
1612                 map_write(map, CMD(0x70), cmd_adr);
1613                 xip_enable(map, chip, cmd_adr);
1614
1615                 if (chipstatus & 0x02) {
1616                         ret = -EROFS;
1617                 } else if (chipstatus & 0x08) {
1618                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1619                         ret = -EIO;
1620                 } else {
1621                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1622                         ret = -EINVAL;
1623                 }
1624
1625                 goto out;
1626         }
1627
1628         xip_enable(map, chip, cmd_adr);
1629  out:   put_chip(map, chip, cmd_adr);
1630         spin_unlock(chip->mutex);
1631         return ret;
1632 }
1633
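/*
 * Scatter-gather write entry point.  do_write_buffer() must stay
 * inside a single wbufsize-aligned window, so the request is chopped
 * at write-buffer boundaries (and at chip boundaries) and fed to the
 * chips one buffer-load at a time.
 */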
1634 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1635                                 unsigned long count, loff_t to, size_t *retlen)
1636 {
1637         struct map_info *map = mtd->priv;
1638         struct cfi_private *cfi = map->fldrv_priv;
1639         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1640         int ret = 0;
1641         int chipnum;
1642         unsigned long ofs, vec_seek, i;
1643         size_t len = 0;
1644
1645         for (i = 0; i < count; i++)
1646                 len += vecs[i].iov_len;
1647
1648         *retlen = 0;
1649         if (!len)
1650                 return 0;
1651
1652         chipnum = to >> cfi->chipshift;
1653         ofs = to - (chipnum << cfi->chipshift);
1654         vec_seek = 0;
1655
1656         do {
1657                 /* We must not cross write block boundaries */
1658                 int size = wbufsize - (ofs & (wbufsize-1));
1659
1660                 if (size > len)
1661                         size = len;
1662                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1663                                       ofs, &vecs, &vec_seek, size);
1664                 if (ret)
1665                         return ret;
1666
1667                 ofs += size;
1668                 (*retlen) += size;
1669                 len -= size;
1670
1671                 if (ofs >> cfi->chipshift) {
1672                         chipnum ++;
1673                         ofs = 0;
1674                         if (chipnum == cfi->numchips)
1675                                 return 0;
1676                 }
1677
1678                 /* Be nice and reschedule with the chip in a usable state for other
1679                    processes. */
1680                 cond_resched();
1681
1682         } while (len);
1683
1684         return 0;
1685 }
1686
1687 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1688                                        size_t len, size_t *retlen, const u_char *buf)
1689 {
1690         struct kvec vec;
1691
1692         vec.iov_base = (void *) buf;
1693         vec.iov_len = len;
1694
1695         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1696 }
1697
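/*
 * Erase one block: 0x50 to clear stale status bits, then the 0x20/0xd0
 * erase sequence, then wait.  In the status register, SR.4 and SR.5
 * together mean a bad command sequence, SR.1 a locked block (-EROFS),
 * SR.3 a VPP fault (-EIO); a plain SR.5 erase failure is retried up to
 * three times before we give up.
 */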
1698 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1699                                       unsigned long adr, int len, void *thunk)
1700 {
1701         struct cfi_private *cfi = map->fldrv_priv;
1702         map_word status;
1703         int retries = 3;
1704         int ret;
1705
1706         adr += chip->start;
1707
1708  retry:
1709         spin_lock(chip->mutex);
1710         ret = get_chip(map, chip, adr, FL_ERASING);
1711         if (ret) {
1712                 spin_unlock(chip->mutex);
1713                 return ret;
1714         }
1715
1716         XIP_INVAL_CACHED_RANGE(map, adr, len);
1717         ENABLE_VPP(map);
1718         xip_disable(map, chip, adr);
1719
1720         /* Clear the status register first */
1721         map_write(map, CMD(0x50), adr);
1722
1723         /* Now erase */
1724         map_write(map, CMD(0x20), adr);
1725         map_write(map, CMD(0xD0), adr);
1726         chip->state = FL_ERASING;
1727         chip->erase_suspended = 0;
1728
1729         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1730                                    adr, len,
1731                                    chip->erase_time);
1732         if (ret) {
1733                 map_write(map, CMD(0x70), adr);
1734                 chip->state = FL_STATUS;
1735                 xip_enable(map, chip, adr);
1736                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1737                 goto out;
1738         }
1739
1740         /* We've broken this before. It doesn't hurt to be safe */
1741         map_write(map, CMD(0x70), adr);
1742         chip->state = FL_STATUS;
1743         status = map_read(map, adr);
1744
1745         /* check for errors */
1746         if (map_word_bitsset(map, status, CMD(0x3a))) {
1747                 unsigned long chipstatus = MERGESTATUS(status);
1748
1749                 /* Reset the error bits */
1750                 map_write(map, CMD(0x50), adr);
1751                 map_write(map, CMD(0x70), adr);
1752                 xip_enable(map, chip, adr);
1753
1754                 if ((chipstatus & 0x30) == 0x30) {
1755                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1756                         ret = -EINVAL;
1757                 } else if (chipstatus & 0x02) {
1758                         /* Protection bit set */
1759                         ret = -EROFS;
1760                 } else if (chipstatus & 0x8) {
1761                         /* Voltage */
1762                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1763                         ret = -EIO;
1764                 } else if (chipstatus & 0x20 && retries--) {
1765                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1766                         put_chip(map, chip, adr);
1767                         spin_unlock(chip->mutex);
1768                         goto retry;
1769                 } else {
1770                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1771                         ret = -EIO;
1772                 }
1773
1774                 goto out;
1775         }
1776
1777         xip_enable(map, chip, adr);
1778  out:   put_chip(map, chip, adr);
1779         spin_unlock(chip->mutex);
1780         return ret;
1781 }
1782
1783 int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1784 {
1785         unsigned long ofs, len;
1786         int ret;
1787
1788         ofs = instr->addr;
1789         len = instr->len;
1790
1791         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1792         if (ret)
1793                 return ret;
1794
1795         instr->state = MTD_ERASE_DONE;
1796         mtd_erase_callback(instr);
1797
1798         return 0;
1799 }
1800
1801 static void cfi_intelext_sync (struct mtd_info *mtd)
1802 {
1803         struct map_info *map = mtd->priv;
1804         struct cfi_private *cfi = map->fldrv_priv;
1805         int i;
1806         struct flchip *chip;
1807         int ret = 0;
1808
1809         for (i=0; !ret && i<cfi->numchips; i++) {
1810                 chip = &cfi->chips[i];
1811
1812                 spin_lock(chip->mutex);
1813                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1814
1815                 if (!ret) {
1816                         chip->oldstate = chip->state;
1817                         chip->state = FL_SYNCING;
1818                         /* No need to wake_up() on this state change -
1819                          * as the whole point is that nobody can do anything
1820                          * with the chip now anyway.
1821                          */
1822                 }
1823                 spin_unlock(chip->mutex);
1824         }
1825
1826         /* Unlock the chips again */
1827
1828         for (i--; i >=0; i--) {
1829                 chip = &cfi->chips[i];
1830
1831                 spin_lock(chip->mutex);
1832
1833                 if (chip->state == FL_SYNCING) {
1834                         chip->state = chip->oldstate;
1835                         chip->oldstate = FL_READY;
1836                         wake_up(&chip->wq);
1837                 }
1838                 spin_unlock(chip->mutex);
1839         }
1840 }
1841
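/*
 * Fetch the lock status of one block: 0x90 switches the chip to
 * read-identifier mode, where the word at block base + 2 (scaled by
 * interleave and device width, hence ofs_factor) holds that block's
 * lock bit.
 */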
1842 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
1843                                                 struct flchip *chip,
1844                                                 unsigned long adr,
1845                                                 int len, void *thunk)
1846 {
1847         struct cfi_private *cfi = map->fldrv_priv;
1848         int status, ofs_factor = cfi->interleave * cfi->device_type;
1849
1850         adr += chip->start;
1851         xip_disable(map, chip, adr+(2*ofs_factor));
1852         map_write(map, CMD(0x90), adr+(2*ofs_factor));
1853         chip->state = FL_JEDEC_QUERY;
1854         status = cfi_read_query(map, adr+(2*ofs_factor));
1855         xip_enable(map, chip, 0);
1856         return status;
1857 }
1858
1859 #ifdef DEBUG_LOCK_BITS
1860 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1861                                                 struct flchip *chip,
1862                                                 unsigned long adr,
1863                                                 int len, void *thunk)
1864 {
1865         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1866                adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
1867         return 0;
1868 }
1869 #endif
1870
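/*
 * Block locking: both operations start with the 0x60 setup command,
 * then 0x01 sets the lock bit and 0xd0 clears it.  Chips advertising
 * instant individual block locking (FeatureSupport bit 5) complete
 * immediately, so the one-jiffy settle delay is skipped for them.
 */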
1871 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
1872 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
1873
1874 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1875                                        unsigned long adr, int len, void *thunk)
1876 {
1877         struct cfi_private *cfi = map->fldrv_priv;
1878         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1879         int udelay;
1880         int ret;
1881
1882         adr += chip->start;
1883
1884         spin_lock(chip->mutex);
1885         ret = get_chip(map, chip, adr, FL_LOCKING);
1886         if (ret) {
1887                 spin_unlock(chip->mutex);
1888                 return ret;
1889         }
1890
1891         ENABLE_VPP(map);
1892         xip_disable(map, chip, adr);
1893
1894         map_write(map, CMD(0x60), adr);
1895         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1896                 map_write(map, CMD(0x01), adr);
1897                 chip->state = FL_LOCKING;
1898         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1899                 map_write(map, CMD(0xD0), adr);
1900                 chip->state = FL_UNLOCKING;
1901         } else
1902                 BUG();
1903
1904         /*
1905          * If Instant Individual Block Locking supported then no need
1906          * to delay.
1907          */
1908         udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
1909
1910         ret = WAIT_TIMEOUT(map, chip, adr, udelay);
1911         if (ret) {
1912                 map_write(map, CMD(0x70), adr);
1913                 chip->state = FL_STATUS;
1914                 xip_enable(map, chip, adr);
1915                 printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
1916                 goto out;
1917         }
1918
1919         xip_enable(map, chip, adr);
1920 out:    put_chip(map, chip, adr);
1921         spin_unlock(chip->mutex);
1922         return ret;
1923 }
1924
1925 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1926 {
1927         int ret;
1928
1929 #ifdef DEBUG_LOCK_BITS
1930         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
1931                __FUNCTION__, (unsigned long long)ofs, len);
1932         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1933                 ofs, len, NULL);
1934 #endif
1935
1936         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1937                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1938
1939 #ifdef DEBUG_LOCK_BITS
1940         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1941                __FUNCTION__, ret);
1942         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1943                 ofs, len, NULL);
1944 #endif
1945
1946         return ret;
1947 }
1948
1949 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1950 {
1951         int ret;
1952
1953 #ifdef DEBUG_LOCK_BITS
1954         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
1955                __FUNCTION__, (unsigned long long)ofs, len);
1956         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1957                 ofs, len, NULL);
1958 #endif
1959
1960         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1961                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1962
1963 #ifdef DEBUG_LOCK_BITS
1964         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1965                __FUNCTION__, ret);
1966         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1967                 ofs, len, NULL);
1968 #endif
1969
1970         return ret;
1971 }
1972
1973 #ifdef CONFIG_MTD_OTP
1974
1975 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
1976                         u_long data_offset, u_char *buf, u_int size,
1977                         u_long prot_offset, u_int groupno, u_int groupsize);
1978
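/*
 * The OTP primitives below all match the otp_op_t shape.  Reads use
 * 0x90 (read identifier) mode, where the protection registers appear
 * in the address space; writes reuse do_write_oneword() with the 0xc0
 * OTP program command; locking a group clears its bit in the
 * protection-lock word.
 */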
1979 static int __xipram
1980 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
1981             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
1982 {
1983         struct cfi_private *cfi = map->fldrv_priv;
1984         int ret;
1985
1986         spin_lock(chip->mutex);
1987         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
1988         if (ret) {
1989                 spin_unlock(chip->mutex);
1990                 return ret;
1991         }
1992
1993         /* let's ensure we're not reading back cached data from array mode */
1994         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
1995
1996         xip_disable(map, chip, chip->start);
1997         if (chip->state != FL_JEDEC_QUERY) {
1998                 map_write(map, CMD(0x90), chip->start);
1999                 chip->state = FL_JEDEC_QUERY;
2000         }
2001         map_copy_from(map, buf, chip->start + offset, size);
2002         xip_enable(map, chip, chip->start);
2003
2004         /* then ensure we don't keep OTP data in the cache */
2005         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2006
2007         put_chip(map, chip, chip->start);
2008         spin_unlock(chip->mutex);
2009         return 0;
2010 }
2011
2012 static int
2013 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2014              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2015 {
2016         int ret;
2017
2018         while (size) {
2019                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2020                 int gap = offset - bus_ofs;
2021                 int n = min_t(int, size, map_bankwidth(map)-gap);
2022                 map_word datum = map_word_ff(map);
2023
2024                 datum = map_word_load_partial(map, datum, buf, gap, n);
2025                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2026                 if (ret)
2027                         return ret;
2028
2029                 offset += n;
2030                 buf += n;
2031                 size -= n;
2032         }
2033
2034         return 0;
2035 }
2036
2037 static int
2038 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2039             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2040 {
2041         struct cfi_private *cfi = map->fldrv_priv;
2042         map_word datum;
2043
2044         /* make sure area matches group boundaries */
2045         if (size != grpsz)
2046                 return -EXDEV;
2047
2048         datum = map_word_ff(map);
2049         datum = map_word_clr(map, datum, CMD(1 << grpno));
2050         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2051 }
2052
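/*
 * Walk the protection register fields, applying 'action' to the slice
 * selected by (from, len).  Geometry comes from the Intel extended
 * query table: the first field from the primary header, subsequent
 * fields from the cfi_intelext_otpinfo records in extp->extra.  With a
 * NULL action the walk instead fills buf with one otp_info record per
 * group, reading each group's lock bit from the protection-lock word.
 */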
2053 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2054                                  size_t *retlen, u_char *buf,
2055                                  otp_op_t action, int user_regs)
2056 {
2057         struct map_info *map = mtd->priv;
2058         struct cfi_private *cfi = map->fldrv_priv;
2059         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2060         struct flchip *chip;
2061         struct cfi_intelext_otpinfo *otp;
2062         u_long devsize, reg_prot_offset, data_offset;
2063         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2064         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2065         int ret;
2066
2067         *retlen = 0;
2068
2069         /* Check that we actually have some OTP registers */
2070         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2071                 return -ENODATA;
2072
2073         /* we need real chips here not virtual ones */
2074         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2075         chip_step = devsize >> cfi->chipshift;
2076         chip_num = 0;
2077
2078         /* Some chips have OTP located in the _top_ partition only.
2079            For example: Intel 28F256L18T (T means top-parameter device) */
2080         if (cfi->mfr == MANUFACTURER_INTEL) {
2081                 switch (cfi->id) {
2082                 case 0x880b:
2083                 case 0x880c:
2084                 case 0x880d:
2085                         chip_num = chip_step - 1;
2086                 }
2087         }
2088
2089         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2090                 chip = &cfi->chips[chip_num];
2091                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2092
2093                 /* first OTP region */
2094                 field = 0;
2095                 reg_prot_offset = extp->ProtRegAddr;
2096                 reg_fact_groups = 1;
2097                 reg_fact_size = 1 << extp->FactProtRegSize;
2098                 reg_user_groups = 1;
2099                 reg_user_size = 1 << extp->UserProtRegSize;
2100
2101                 while (len > 0) {
2102                         /* flash geometry fixup */
2103                         data_offset = reg_prot_offset + 1;
2104                         data_offset *= cfi->interleave * cfi->device_type;
2105                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2106                         reg_fact_size *= cfi->interleave;
2107                         reg_user_size *= cfi->interleave;
2108
2109                         if (user_regs) {
2110                                 groups = reg_user_groups;
2111                                 groupsize = reg_user_size;
2112                                 /* skip over factory reg area */
2113                                 groupno = reg_fact_groups;
2114                                 data_offset += reg_fact_groups * reg_fact_size;
2115                         } else {
2116                                 groups = reg_fact_groups;
2117                                 groupsize = reg_fact_size;
2118                                 groupno = 0;
2119                         }
2120
2121                         while (len > 0 && groups > 0) {
2122                                 if (!action) {
2123                                         /*
2124                                          * Special case: if action is NULL
2125                                          * we fill buf with otp_info records.
2126                                          */
2127                                         struct otp_info *otpinfo;
2128                                         map_word lockword;
2129                                         if (len < sizeof(struct otp_info))
2130                                                 return -ENOSPC;
2131                                         len -= sizeof(struct otp_info);
2132                                         ret = do_otp_read(map, chip,
2133                                                           reg_prot_offset,
2134                                                           (u_char *)&lockword,
2135                                                           map_bankwidth(map),
2136                                                           0, 0,  0);
2137                                         if (ret)
2138                                                 return ret;
2139                                         otpinfo = (struct otp_info *)buf;
2140                                         otpinfo->start = from;
2141                                         otpinfo->length = groupsize;
2142                                         otpinfo->locked =
2143                                            !map_word_bitsset(map, lockword,
2144                                                              CMD(1 << groupno));
2145                                         from += groupsize;
2146                                         buf += sizeof(*otpinfo);
2147                                         *retlen += sizeof(*otpinfo);
2148                                 } else if (from >= groupsize) {
2149                                         from -= groupsize;
2150                                         data_offset += groupsize;
2151                                 } else {
2152                                         int size = groupsize;
2153                                         data_offset += from;
2154                                         size -= from;
2155                                         from = 0;
2156                                         if (size > len)
2157                                                 size = len;
2158                                         ret = action(map, chip, data_offset,
2159                                                      buf, size, reg_prot_offset,
2160                                                      groupno, groupsize);
2161                                         if (ret < 0)
2162                                                 return ret;
2163                                         buf += size;
2164                                         len -= size;
2165                                         *retlen += size;
2166                                         data_offset += size;
2167                                 }
2168                                 groupno++;
2169                                 groups--;
2170                         }
2171
2172                         /* next OTP region */
2173                         if (++field == extp->NumProtectionFields)
2174                                 break;
2175                         reg_prot_offset = otp->ProtRegAddr;
2176                         reg_fact_groups = otp->FactGroups;
2177                         reg_fact_size = 1 << otp->FactProtRegSize;
2178                         reg_user_groups = otp->UserGroups;
2179                         reg_user_size = 1 << otp->UserProtRegSize;
2180                         otp++;
2181                 }
2182         }
2183
2184         return 0;
2185 }
2186
2187 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2188                                            size_t len, size_t *retlen,
2189                                             u_char *buf)
2190 {
2191         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2192                                      buf, do_otp_read, 0);
2193 }
2194
2195 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2196                                            size_t len, size_t *retlen,
2197                                             u_char *buf)
2198 {
2199         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2200                                      buf, do_otp_read, 1);
2201 }
2202
2203 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2204                                             size_t len, size_t *retlen,
2205                                              u_char *buf)
2206 {
2207         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2208                                      buf, do_otp_write, 1);
2209 }
2210
2211 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2212                                            loff_t from, size_t len)
2213 {
2214         size_t retlen;
2215         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2216                                      NULL, do_otp_lock, 1);
2217 }
2218
2219 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2220                                            struct otp_info *buf, size_t len)
2221 {
2222         size_t retlen;
2223         int ret;
2224
2225         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2226         return ret ? : retlen;
2227 }
2228
2229 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2230                                            struct otp_info *buf, size_t len)
2231 {
2232         size_t retlen;
2233         int ret;
2234
2235         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2236         return ret ? : retlen;
2237 }
2238
2239 #endif
2240
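/*
 * For parts that power up with every block locked (MTD_STUPID_LOCK)
 * and support instant individual block locking, suspend records each
 * block's lock bit in the per-region lockmap so that resume can
 * re-unlock exactly the blocks that were unlocked before.
 */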
2241 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2242 {
2243         struct mtd_erase_region_info *region;
2244         int block, status, i;
2245         unsigned long adr;
2246         size_t len;
2247
2248         for (i = 0; i < mtd->numeraseregions; i++) {
2249                 region = &mtd->eraseregions[i];
2250                 if (!region->lockmap)
2251                         continue;
2252
2253                 for (block = 0; block < region->numblocks; block++){
2254                         len = region->erasesize;
2255                         adr = region->offset + block * len;
2256
2257                         status = cfi_varsize_frob(mtd,
2258                                         do_getlockstatus_oneblock, adr, len, 0);
2259                         if (status)
2260                                 set_bit(block, region->lockmap);
2261                         else
2262                                 clear_bit(block, region->lockmap);
2263                 }
2264         }
2265 }
2266
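/*
 * Power-management suspend: park each idle chip in FL_PM_SUSPENDED
 * after a 0xff reset to array mode.  A chip with an operation pending
 * or in flight makes the whole suspend fail with -EAGAIN, in which
 * case the chips already parked are rolled back to their previous
 * state.
 */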
2267 static int cfi_intelext_suspend(struct mtd_info *mtd)
2268 {
2269         struct map_info *map = mtd->priv;
2270         struct cfi_private *cfi = map->fldrv_priv;
2271         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2272         int i;
2273         struct flchip *chip;
2274         int ret = 0;
2275
2276         if ((mtd->flags & MTD_STUPID_LOCK)
2277             && extp && (extp->FeatureSupport & (1 << 5)))
2278                 cfi_intelext_save_locks(mtd);
2279
2280         for (i=0; !ret && i<cfi->numchips; i++) {
2281                 chip = &cfi->chips[i];
2282
2283                 spin_lock(chip->mutex);
2284
2285                 switch (chip->state) {
2286                 case FL_READY:
2287                 case FL_STATUS:
2288                 case FL_CFI_QUERY:
2289                 case FL_JEDEC_QUERY:
2290                         if (chip->oldstate == FL_READY) {
2291                                 /* place the chip in a known state before suspend */
2292                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2293                                 chip->oldstate = chip->state;
2294                                 chip->state = FL_PM_SUSPENDED;
2295                                 /* No need to wake_up() on this state change -
2296                                  * as the whole point is that nobody can do anything
2297                                  * with the chip now anyway.
2298                                  */
2299                         } else {
2300                                 /* There seems to be an operation pending. We must wait for it. */
2301                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2302                                 ret = -EAGAIN;
2303                         }
2304                         break;
2305                 default:
2306                         /* Should we actually wait? Once upon a time these routines weren't
2307                            allowed to. Or should we return -EAGAIN, because the upper layers
2308                            ought to have already shut down anything which was using the device
2309                            anyway? The latter for now. */
2310                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2311                         ret = -EAGAIN;
2312                 case FL_PM_SUSPENDED:
2313                         break;
2314                 }
2315                 spin_unlock(chip->mutex);
2316         }
2317
2318         /* Unlock the chips again */
2319
2320         if (ret) {
2321                 for (i--; i >=0; i--) {
2322                         chip = &cfi->chips[i];
2323
2324                         spin_lock(chip->mutex);
2325
2326                         if (chip->state == FL_PM_SUSPENDED) {
2327                                 /* No need to force it into a known state here,
2328                                    because we're returning failure, and it didn't
2329                                    get power cycled */
2330                                 chip->state = chip->oldstate;
2331                                 chip->oldstate = FL_READY;
2332                                 wake_up(&chip->wq);
2333                         }
2334                         spin_unlock(chip->mutex);
2335                 }
2336         }
2337
2338         return ret;
2339 }
2340
2341 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2342 {
2343         struct mtd_erase_region_info *region;
2344         int block, i;
2345         unsigned long adr;
2346         size_t len;
2347
2348         for (i = 0; i < mtd->numeraseregions; i++) {
2349                 region = &mtd->eraseregions[i];
2350                 if (!region->lockmap)
2351                         continue;
2352
2353                 for (block = 0; block < region->numblocks; block++) {
2354                         len = region->erasesize;
2355                         adr = region->offset + block * len;
2356
2357                         if (!test_bit(block, region->lockmap))
2358                                 cfi_intelext_unlock(mtd, adr, len);
2359                 }
2360         }
2361 }
2362
2363 static void cfi_intelext_resume(struct mtd_info *mtd)
2364 {
2365         struct map_info *map = mtd->priv;
2366         struct cfi_private *cfi = map->fldrv_priv;
2367         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2368         int i;
2369         struct flchip *chip;
2370
2371         for (i=0; i<cfi->numchips; i++) {
2372
2373                 chip = &cfi->chips[i];
2374
2375                 spin_lock(chip->mutex);
2376
2377                 /* Go to known state. Chip may have been power cycled */
2378                 if (chip->state == FL_PM_SUSPENDED) {
2379                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2380                         chip->oldstate = chip->state = FL_READY;
2381                         wake_up(&chip->wq);
2382                 }
2383
2384                 spin_unlock(chip->mutex);
2385         }
2386
2387         if ((mtd->flags & MTD_STUPID_LOCK)
2388             && extp && (extp->FeatureSupport & (1 << 5)))
2389                 cfi_intelext_restore_locks(mtd);
2390 }
2391
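/*
 * Used by the reboot notifier and by destroy: get_chip() with
 * FL_SYNCING waits out anything still in flight, so by the time 0xff
 * is written each chip is idle and back in array mode, leaving any
 * bootloader stored in this flash readable after a soft reboot.
 */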
2392 static int cfi_intelext_reset(struct mtd_info *mtd)
2393 {
2394         struct map_info *map = mtd->priv;
2395         struct cfi_private *cfi = map->fldrv_priv;
2396         int i, ret;
2397
2398         for (i=0; i < cfi->numchips; i++) {
2399                 struct flchip *chip = &cfi->chips[i];
2400
2401                 /* force the completion of any ongoing operation
2402                    and switch to array mode so any bootloader in
2403                    flash is accessible for soft reboot. */
2404                 spin_lock(chip->mutex);
2405                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2406                 if (!ret) {
2407                         map_write(map, CMD(0xff), chip->start);
2408                         chip->state = FL_READY;
2409                 }
2410                 spin_unlock(chip->mutex);
2411         }
2412
2413         return 0;
2414 }
2415
2416 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2417                                void *v)
2418 {
2419         struct mtd_info *mtd;
2420
2421         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2422         cfi_intelext_reset(mtd);
2423         return NOTIFY_DONE;
2424 }
2425
2426 static void cfi_intelext_destroy(struct mtd_info *mtd)
2427 {
2428         struct map_info *map = mtd->priv;
2429         struct cfi_private *cfi = map->fldrv_priv;
2430         struct mtd_erase_region_info *region;
2431         int i;
2432         cfi_intelext_reset(mtd);
2433         unregister_reboot_notifier(&mtd->reboot_notifier);
2434         kfree(cfi->cmdset_priv);
2435         kfree(cfi->cfiq);
2436         kfree(cfi->chips[0].priv);
2437         kfree(cfi);
2438         for (i = 0; i < mtd->numeraseregions; i++) {
2439                 region = &mtd->eraseregions[i];
2440                 kfree(region->lockmap);
2442         }
2443         kfree(mtd->eraseregions);
2444 }
2445
2446 MODULE_LICENSE("GPL");
2447 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2448 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2449 MODULE_ALIAS("cfi_cmdset_0003");
2450 MODULE_ALIAS("cfi_cmdset_0200");