/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 * 10/10/2000   Nicolas Pitre <nico@cam.org>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */
 
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>
 
/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL      0x0089
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define MANUFACTURER_ST         0x0020
#define M50LPW080       0x002F
#define AT49BV640D      0x02de
 
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);

static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);

static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
                        size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
 
/*
 *  *********** SETUP AND PROBE BITS  ***********
 */
 
static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};
 
/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */
 
#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<3; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif
 
/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;
        uint32_t features = 0;

        /* Reverse byteswapping */
        extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
        extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
        extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

        if (atmel_pri.Features & 0x01) /* chip erase supported */
                features |= (1<<0);
        if (atmel_pri.Features & 0x02) /* erase suspend supported */
                features |= (1<<1);
        if (atmel_pri.Features & 0x04) /* program suspend supported */
                features |= (1<<2);
        if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
                features |= (1<<9);
        if (atmel_pri.Features & 0x20) /* page mode read supported */
                features |= (1<<7);
        if (atmel_pri.Features & 0x40) /* queued erase supported */
                features |= (1<<4);
        if (atmel_pri.Features & 0x80) /* Protection bits supported */
                features |= (1<<6);

        extp->FeatureSupport = features;

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}
 
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif
 
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif
 
static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}
 
static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}
 
static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}
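
/* point/unpoint give callers zero-copy access to the flash contents.  That
   is only safe when the map is linearly addressable in the kernel's address
   space, hence the map_is_linear() check above. */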
 
static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n" );
                mtd->write = cfi_intelext_write_buffers;
                mtd->writev = cfi_intelext_writev;
        }
}
 
/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip->FeatureSupport&32) {
                printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
                mtd->flags |= MTD_POWERUP_LOCK;
        }
}
 
static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
        { 0, 0, NULL, NULL }
};
 
static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};
 
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor ids and the JEDEC vendor IDs appear
         * to be common.  It looks like the device IDs are as
         * well.  This table picks all cases where
         * we know that is the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};
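
/*
 * cfi_fixup() walks a table and invokes every entry whose manufacturer and
 * device IDs match the probed chip; CFI_MFR_ANY and CFI_ID_ANY act as
 * wildcards, and matching entries run in table order.
 */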
 
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_pri_intelext *extp;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
                unsigned int extra_size = 0;
                int nb_parts, i;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size-1];

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __FUNCTION__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}
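
/*
 * Note on the parser above: cfi_read_pri() first fetches a fixed-size
 * cfi_pri_intelext header.  For extension versions 1.3 and later the
 * variable-length tail (protection register info, burst read info,
 * partition region descriptors) is measured field by field; whenever the
 * buffer turns out to be too small it is freed and re-read with the grown
 * size ("need_more"), bailing out past the 4096-byte sanity limit.
 */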
 
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if(extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);
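
        /*
         * CFI encodes typical operation times as powers of two: word and
         * buffer write timeouts in microseconds (1 << n), block erase in
         * milliseconds (hence the 1000 << n below).  For example, a chip
         * reporting WordWriteTimeoutTyp = 4 gets a 16us typical word write
         * time, and BlockEraseTimeoutTyp = 10 gives roughly one second.
         */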
 
        for (i=0; i< cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
 
static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i,mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if (mtd) {
                kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        return NULL;
}
 
static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
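
        /*
         * For example (illustrative numbers): an 8 MiB chip
         * (chipshift = 23) advertising 8 identical partitions ends up with
         * partshift = 23 - __ffs(8) = 20, i.e. eight 1 MiB virtual chips,
         * each with its own flchip state machine below.
         */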
 
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __FUNCTION__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}
 
/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
 
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;
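        /* status_OK (0x80) is SR.7, the write state machine "ready" bit;
           status_PWS (0x01) is the partition status bit consulted on
           multi-partition chips.  CMD() replicates the value across all
           interleaved devices on the bus. */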
 
        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }

        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;

                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so no one can get the chip anymore */
                return -EIO;
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                return -EAGAIN;
        }
}
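
/*
 * Typical calling sequence (as used by the read/write/erase paths below):
 *
 *	spin_lock(chip->mutex);
 *	ret = get_chip(map, chip, adr, FL_WRITING);
 *	if (ret) {
 *		spin_unlock(chip->mutex);
 *		return ret;
 *	}
 *	... issue commands and wait for completion ...
 *	put_chip(map, chip, adr);
 *	spin_unlock(chip->mutex);
 *
 * get_chip() may transparently suspend a conflicting operation in another
 * partition or put the caller to sleep; put_chip() resumes whatever was
 * suspended and wakes up any waiters.
 */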
 
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING
                           || mode == FL_OTP_WRITE || mode == FL_SHUTDOWN)) {
                /*
                 * OK. We have possibility for contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);

                        if (ret == -EAGAIN) {
                                spin_unlock(contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        spin_lock(&shared->lock);
                        spin_unlock(contender->mutex);
                }

                /* Check if we already have suspended erase
                 * on this chip. Sleep. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        spin_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        spin_lock(chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                spin_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}
 
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        spin_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                spin_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}
 
#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Also, configuring MTD CFI
 * support to a single buswidth and a single interleave is recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}
 
/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and for pending (but still masked) hardware interrupts.  Whenever there
 * is an interrupt pending then the flash erase or write operation is
 * suspended, array mode restored and interrupts unmasked.  Task scheduling
 * might also happen at that point.  The CPU eventually returns from the
 * interrupt or the call to schedule() and the suspended flash operation is
 * resumed for the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */
 
static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, unsigned int chip_op_time )
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
        usec = chip_op_time * 8;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return -EIO;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        asm volatile (".rep 8; nop; .endr");
                        local_irq_enable();
                        spin_unlock(chip->mutex);
                        asm volatile (".rep 8; nop; .endr");
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        spin_lock(chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                spin_unlock(chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                spin_lock(chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
                done = xip_elapsed_since(start);
        } while (!map_word_andequal(map, status, OK, OK)
                 && done < usec);

        return (done >= usec) ? -ETIME : 0;
}
 
/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
        xip_wait_for_operation(map, chip, cmd_adr, usec)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
 
static int inval_cache_and_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
                unsigned int chip_op_time)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80);
        int chip_state = chip->state;
        unsigned int timeo, sleep_time;

        spin_unlock(chip->mutex);
        if (inval_len)
                INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
        spin_lock(chip->mutex);

        /* set our timeout to 8 times the expected delay */
        timeo = chip_op_time * 8;
        if (!timeo)
                timeo = 500000;
        sleep_time = chip_op_time / 2;

        for (;;) {
                status = map_read(map, cmd_adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                if (!timeo) {
                        map_write(map, CMD(0x70), cmd_adr);
                        chip->state = FL_STATUS;
                        return -ETIME;
                }

                /* OK Still waiting. Drop the lock, wait a while and retry. */
                spin_unlock(chip->mutex);
                if (sleep_time >= 1000000/HZ) {
                        /*
                         * Half of the normal delay still remaining
                         * can be performed with a sleeping delay instead
                         * of busy waiting.
                         */
                        msleep(sleep_time/1000);
                        timeo -= sleep_time;
                        sleep_time = 1000000/HZ;
                } else {
                        udelay(1);
                        cond_resched();
                        timeo--;
                }
                spin_lock(chip->mutex);

                while (chip->state != chip_state) {
                        /* Someone's suspended the operation: sleep */
                        DECLARE_WAITQUEUE(wait, current);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        spin_lock(chip->mutex);
                }
        }

        /* Done and happy. */
        chip->state = FL_STATUS;
        return 0;
}

#endif
 
#define WAIT_TIMEOUT(map, chip, adr, udelay) \
        INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);
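
/* A pure status poll: the 0/0 inval_adr/inval_len arguments make
   INVAL_CACHE_AND_WAIT skip the cache invalidation step. */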
 
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);

        ret = get_chip(map, chip, cmd_addr, FL_POINT);

        if (!ret) {
                if (chip->state != FL_POINT && chip->state != FL_READY)
                        map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_POINT;
                chip->ref_point_counter++;
        }
        spin_unlock(chip->mutex);

        return ret;
}
 
static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs, last_end = 0;
        int chipnum;
        int ret = 0;

        if (!map->virt || (from + len > mtd->size))
                return -EINVAL;

        /* Now lock the chip(s) to POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        *mtdbuf = (void *)map->virt + cfi->chips[chipnum].start + ofs;
        *retlen = 0;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                /* We cannot point across chips that are virtually disjoint */
                if (!last_end)
                        last_end = cfi->chips[chipnum].start;
                else if (cfi->chips[chipnum].start != last_end)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;

                ofs = 0;
                last_end += 1 << cfi->chipshift;
                chipnum++;
        }
        return 0;
}
 
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;

        /* Now unlock the chip(s) POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum <<  cfi->chipshift);

        while (len) {
                unsigned long thislen;
                struct flchip *chip;

                chip = &cfi->chips[chipnum];
                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                spin_lock(chip->mutex);
                if (chip->state == FL_POINT) {
                        chip->ref_point_counter--;
                        if(chip->ref_point_counter == 0)
                                chip->state = FL_READY;
                } else
                        printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */

                put_chip(map, chip, chip->start);
                spin_unlock(chip->mutex);

                len -= thislen;
                ofs = 0;
                chipnum++;
        }
}
 
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, cmd_addr, FL_READY);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_READY;
        }

        map_copy_from(map, buf, adr, len);

        put_chip(map, chip, cmd_addr);

        spin_unlock(chip->mutex);
        return 0;
}
 
static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum <<  cfi->chipshift);

        *retlen = 0;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}
 
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
                                     unsigned long adr, map_word datum, int mode)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, write_cmd;
        int ret=0;

        adr += chip->start;

        switch (mode) {
        case FL_WRITING:
                write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
                break;
        case FL_OTP_WRITE:
                write_cmd = CMD(0xc0);
                break;
        default:
                return -EINVAL;
        }

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, adr, mode);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
        ENABLE_VPP(map);
        xip_disable(map, chip, adr);
        map_write(map, write_cmd, adr);
        map_write(map, datum, adr);
        chip->state = mode;

        ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
                                   adr, map_bankwidth(map),
                                   chip->word_write_time);
        if (ret) {
                xip_enable(map, chip, adr);
                printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
                goto out;
        }

        /* check for errors */
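        /* In the status register, SR.1 (0x02) means the block was locked,
           SR.3 (0x08) means VPP was low, and SR.4 (0x10) flags a program
           failure; 0x1a tests all three on every interleaved chip at once. */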
 
        status = map_read(map, adr);
        if (map_word_bitsset(map, status, CMD(0x1a))) {
                unsigned long chipstatus = MERGESTATUS(status);

                /* reset status */
                map_write(map, CMD(0x50), adr);
                map_write(map, CMD(0x70), adr);
                xip_enable(map, chip, adr);

                if (chipstatus & 0x02) {
                        ret = -EROFS;
                } else if (chipstatus & 0x08) {
                        printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
                        ret = -EIO;
                } else {
                        printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
                        ret = -EINVAL;
                }

                goto out;
        }

        xip_enable(map, chip, adr);
 out:   put_chip(map, chip, adr);
        spin_unlock(chip->mutex);
        return ret;
}
 
static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;
        int chipnum;
        unsigned long ofs;

        *retlen = 0;
        if (!len)
                return 0;

        chipnum = to >> cfi->chipshift;
        ofs = to  - (chipnum << cfi->chipshift);
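
        /*
         * The sub-word writes below pad with 0xff via map_word_ff():
         * programming NOR flash can only clear bits (1 -> 0), so all-ones
         * padding leaves the neighbouring bytes in the bus word untouched.
         */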
 
1493         /* If it's not bus-aligned, do the first byte write */
 
1494         if (ofs & (map_bankwidth(map)-1)) {
 
1495                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
 
1496                 int gap = ofs - bus_ofs;
 
1500                 n = min_t(int, len, map_bankwidth(map)-gap);
 
1501                 datum = map_word_ff(map);
 
1502                 datum = map_word_load_partial(map, datum, buf, gap, n);
 
1504                 ret = do_write_oneword(map, &cfi->chips[chipnum],
 
1505                                                bus_ofs, datum, FL_WRITING);
 
1514                 if (ofs >> cfi->chipshift) {
 
1517                         if (chipnum == cfi->numchips)
 
1522         while(len >= map_bankwidth(map)) {
 
1523                 map_word datum = map_word_load(map, buf);
 
1525                 ret = do_write_oneword(map, &cfi->chips[chipnum],
 
1526                                        ofs, datum, FL_WRITING);
 
1530                 ofs += map_bankwidth(map);
 
1531                 buf += map_bankwidth(map);
 
1532                 (*retlen) += map_bankwidth(map);
 
1533                 len -= map_bankwidth(map);
 
1535                 if (ofs >> cfi->chipshift) {
 
1538                         if (chipnum == cfi->numchips)
 
1543         if (len & (map_bankwidth(map)-1)) {
 
1546                 datum = map_word_ff(map);
 
1547                 datum = map_word_load_partial(map, datum, buf, 0, len);
 
1549                 ret = do_write_oneword(map, &cfi->chips[chipnum],
 
1550                                        ofs, datum, FL_WRITING);
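
/*
 * Illustrative sketch of the 0xFF padding used by the unaligned cases
 * above (hypothetical helper, byte-wise for simplicity).  NOR flash
 * programming can only clear bits, so any byte left at 0xFF inside the
 * bus word passes through the program operation unchanged.
 */
static void example_pad_partial(unsigned char *word, int wordsize,
				const unsigned char *buf, int gap, int n)
{
	int i;

	for (i = 0; i < wordsize; i++)
		word[i] = 0xFF;			/* 0xFF programs as "no change" */
	for (i = 0; i < n; i++)
		word[gap + i] = buf[i];		/* live data in the gap..gap+n window */
}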
 
1561 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 
1562                                     unsigned long adr, const struct kvec **pvec,
 
1563                                     unsigned long *pvec_seek, int len)
 
1565         struct cfi_private *cfi = map->fldrv_priv;
 
1566         map_word status, write_cmd, datum;
 
1567         unsigned long cmd_adr;
 
1568         int ret, wbufsize, word_gap, words;
 
1569         const struct kvec *vec;
 
1570         unsigned long vec_seek;
 
1571         unsigned long initial_adr;
 
1572         int initial_len = len;
 
1574         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
 
1577         cmd_adr = adr & ~(wbufsize-1);
 
1579         /* Build the (interleave-replicated) write command only once */
 
1580         write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
 
1582         spin_lock(chip->mutex);
 
1583         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
 
1585                 spin_unlock(chip->mutex);
 
1589         XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
 
1591         xip_disable(map, chip, cmd_adr);
 
1593         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set

1594            [...], the device will not accept any more Write to Buffer commands".

1595            So we must check those bits here and reset them if they're set; otherwise

1596            the buffer write below would be silently ignored */
 
1597         if (chip->state != FL_STATUS) {
 
1598                 map_write(map, CMD(0x70), cmd_adr);
 
1599                 chip->state = FL_STATUS;
 
1601         status = map_read(map, cmd_adr);
 
1602         if (map_word_bitsset(map, status, CMD(0x30))) {
 
1603                 xip_enable(map, chip, cmd_adr);
 
1604                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
 
1605                 xip_disable(map, chip, cmd_adr);
 
1606                 map_write(map, CMD(0x50), cmd_adr);
 
1607                 map_write(map, CMD(0x70), cmd_adr);
 
1610         chip->state = FL_WRITING_TO_BUFFER;
 
1611         map_write(map, write_cmd, cmd_adr);
 
1612         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
 
1614                 /* Argh. Not ready for write to buffer */
 
1615                 map_word Xstatus = map_read(map, cmd_adr);
 
1616                 map_write(map, CMD(0x70), cmd_adr);
 
1617                 chip->state = FL_STATUS;
 
1618                 status = map_read(map, cmd_adr);
 
1619                 map_write(map, CMD(0x50), cmd_adr);
 
1620                 map_write(map, CMD(0x70), cmd_adr);
 
1621                 xip_enable(map, chip, cmd_adr);
 
1622                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
 
1623                                 map->name, Xstatus.x[0], status.x[0]);
 
1627         /* Figure out the number of words to write */
 
1628         word_gap = (-adr & (map_bankwidth(map)-1));
 
1629         words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
 
1633                 word_gap = map_bankwidth(map) - word_gap;
 
1635                 datum = map_word_ff(map);
 
1638         /* Write length of data to come */
 
1639         map_write(map, CMD(words), cmd_adr);
 
1643         vec_seek = *pvec_seek;
 
1645                 int n = map_bankwidth(map) - word_gap;
 
1646                 if (n > vec->iov_len - vec_seek)
 
1647                         n = vec->iov_len - vec_seek;
 
1651                 if (!word_gap && len < map_bankwidth(map))
 
1652                         datum = map_word_ff(map);
 
1654                 datum = map_word_load_partial(map, datum,
 
1655                                               vec->iov_base + vec_seek,
 
1660                 if (!len || word_gap == map_bankwidth(map)) {
 
1661                         map_write(map, datum, adr);
 
1662                         adr += map_bankwidth(map);
 
1667                 if (vec_seek == vec->iov_len) {
 
1673         *pvec_seek = vec_seek;
 
1676         map_write(map, CMD(0xd0), cmd_adr);
 
1677         chip->state = FL_WRITING;
 
1679         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
 
1680                                    initial_adr, initial_len,
 
1681                                    chip->buffer_write_time);
 
1683                 map_write(map, CMD(0x70), cmd_adr);
 
1684                 chip->state = FL_STATUS;
 
1685                 xip_enable(map, chip, cmd_adr);
 
1686                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
 
1690         /* check for errors */
 
1691         status = map_read(map, cmd_adr);
 
1692         if (map_word_bitsset(map, status, CMD(0x1a))) {
 
1693                 unsigned long chipstatus = MERGESTATUS(status);
 
1696                 map_write(map, CMD(0x50), cmd_adr);
 
1697                 map_write(map, CMD(0x70), cmd_adr);
 
1698                 xip_enable(map, chip, cmd_adr);
 
1700                 if (chipstatus & 0x02) {
 
1702                 } else if (chipstatus & 0x08) {
 
1703                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
 
1706                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
 
1713         xip_enable(map, chip, cmd_adr);
 
1714  out:   put_chip(map, chip, cmd_adr);
 
1715         spin_unlock(chip->mutex);
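
/*
 * Sketch of the chunking rule enforced by the writev path below
 * (assumed helper name): a buffer program must not cross a write-buffer
 * boundary, so each chunk is clipped to the end of the current
 * wbufsize-aligned window.  E.g. wbufsize = 32, ofs = 0x1c, len = 100
 * yields chunks of 4, 32, 32 and 32 bytes.
 */
static int example_next_chunk(unsigned long ofs, int len, int wbufsize)
{
	int size = wbufsize - (ofs & (wbufsize - 1));	/* room left in this window */

	return size > len ? len : size;
}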
 
1719 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
 
1720                                 unsigned long count, loff_t to, size_t *retlen)
 
1722         struct map_info *map = mtd->priv;
 
1723         struct cfi_private *cfi = map->fldrv_priv;
 
1724         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
 
1727         unsigned long ofs, vec_seek, i;
 
1730         for (i = 0; i < count; i++)
 
1731                 len += vecs[i].iov_len;
 
1737         chipnum = to >> cfi->chipshift;
 
1738         ofs = to - (chipnum << cfi->chipshift);
 
1742                 /* We must not cross write block boundaries */
 
1743                 int size = wbufsize - (ofs & (wbufsize-1));
 
1747                 ret = do_write_buffer(map, &cfi->chips[chipnum],
 
1748                                       ofs, &vecs, &vec_seek, size);
 
1756                 if (ofs >> cfi->chipshift) {
 
1759                         if (chipnum == cfi->numchips)
 
1763                 /* Be nice and reschedule with the chip in a usable state for other processes */
 
1772 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
 
1773                                        size_t len, size_t *retlen, const u_char *buf)
 
1777         vec.iov_base = (void *) buf;
 
1780         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
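
/*
 * Hypothetical usage sketch (caller names made up, error handling
 * elided): gathering two fragments into a single flash write through
 * the writev entry point, instead of copying them into one bounce
 * buffer first.
 */
static int example_gather_write(struct mtd_info *mtd, loff_t to,
				const u_char *hdr, size_t hdrlen,
				const u_char *payload, size_t paylen)
{
	struct kvec vec[2];
	size_t retlen;

	vec[0].iov_base = (void *)hdr;
	vec[0].iov_len  = hdrlen;
	vec[1].iov_base = (void *)payload;
	vec[1].iov_len  = paylen;

	return cfi_intelext_writev(mtd, vec, 2, to, &retlen);
}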
 
1783 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
 
1784                                       unsigned long adr, int len, void *thunk)
 
1786         struct cfi_private *cfi = map->fldrv_priv;
 
1794         spin_lock(chip->mutex);
 
1795         ret = get_chip(map, chip, adr, FL_ERASING);
 
1797                 spin_unlock(chip->mutex);
 
1801         XIP_INVAL_CACHED_RANGE(map, adr, len);
 
1803         xip_disable(map, chip, adr);
 
1805         /* Clear the status register first */
 
1806         map_write(map, CMD(0x50), adr);
 
1809         map_write(map, CMD(0x20), adr);
 
1810         map_write(map, CMD(0xD0), adr);
 
1811         chip->state = FL_ERASING;
 
1812         chip->erase_suspended = 0;
 
1814         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
 
1818                 map_write(map, CMD(0x70), adr);
 
1819                 chip->state = FL_STATUS;
 
1820                 xip_enable(map, chip, adr);
 
1821                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
 
1825         /* We've broken this before. It doesn't hurt to be safe */
 
1826         map_write(map, CMD(0x70), adr);
 
1827         chip->state = FL_STATUS;
 
1828         status = map_read(map, adr);
 
1830         /* check for errors */
 
1831         if (map_word_bitsset(map, status, CMD(0x3a))) {
 
1832                 unsigned long chipstatus = MERGESTATUS(status);
 
1834                 /* Reset the error bits */
 
1835                 map_write(map, CMD(0x50), adr);
 
1836                 map_write(map, CMD(0x70), adr);
 
1837                 xip_enable(map, chip, adr);
 
1839                 if ((chipstatus & 0x30) == 0x30) {
 
1840                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
 
1842                 } else if (chipstatus & 0x02) {
 
1843                         /* Protection bit set */
 
1845                 } else if (chipstatus & 0x08) {
 
1847                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
 
1849                 } else if ((chipstatus & 0x20) && retries--) {
 
1850                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
 
1851                         put_chip(map, chip, adr);
 
1852                         spin_unlock(chip->mutex);
 
1855                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
 
1862         xip_enable(map, chip, adr);
 
1863  out:   put_chip(map, chip, adr);
 
1864         spin_unlock(chip->mutex);
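
/*
 * Minimal sketch of the two-cycle erase handshake driven above, reduced
 * to raw command values (illustrative only: a single x8 chip, no
 * timeout or error handling; the real code goes through map_write() and
 * the chip state machine).  0x20 arms the erase, 0xD0 confirms it, then
 * the status register is polled until SR.7 reports completion.
 */
static void example_erase_sequence(volatile unsigned char *blk)
{
	*blk = 0x50;			/* clear status register */
	*blk = 0x20;			/* erase setup */
	*blk = 0xD0;			/* erase confirm */
	while (!(*blk & 0x80))		/* SR.7: write state machine ready */
		;
	*blk = 0xFF;			/* back to read-array mode */
}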
 
1868 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
 
1870         unsigned long ofs, len;
 
1876         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
 
1880         instr->state = MTD_ERASE_DONE;
 
1881         mtd_erase_callback(instr);
 
1886 static void cfi_intelext_sync (struct mtd_info *mtd)
 
1888         struct map_info *map = mtd->priv;
 
1889         struct cfi_private *cfi = map->fldrv_priv;
 
1891         struct flchip *chip;
 
1894         for (i=0; !ret && i<cfi->numchips; i++) {
 
1895                 chip = &cfi->chips[i];
 
1897                 spin_lock(chip->mutex);
 
1898                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
 
1901                         chip->oldstate = chip->state;
 
1902                         chip->state = FL_SYNCING;
 
1903                         /* No need to wake_up() on this state change -
 
1904                          * as the whole point is that nobody can do anything
 
1905                          * with the chip now anyway.
 
1908                 spin_unlock(chip->mutex);
 
1911         /* Unlock the chips again */
 
1913         for (i--; i >= 0; i--) {
 
1914                 chip = &cfi->chips[i];
 
1916                 spin_lock(chip->mutex);
 
1918                 if (chip->state == FL_SYNCING) {
 
1919                         chip->state = chip->oldstate;
 
1920                         chip->oldstate = FL_READY;
 
1923                 spin_unlock(chip->mutex);
 
1927 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
 
1928                                                 struct flchip *chip,
 
1930                                                 int len, void *thunk)
 
1932         struct cfi_private *cfi = map->fldrv_priv;
 
1933         int status, ofs_factor = cfi->interleave * cfi->device_type;
 
1936         xip_disable(map, chip, adr+(2*ofs_factor));
 
1937         map_write(map, CMD(0x90), adr+(2*ofs_factor));
 
1938         chip->state = FL_JEDEC_QUERY;
 
1939         status = cfi_read_query(map, adr+(2*ofs_factor));
 
1940         xip_enable(map, chip, 0);
 
1944 #ifdef DEBUG_LOCK_BITS
 
1945 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
 
1946                                                 struct flchip *chip,
 
1948                                                 int len, void *thunk)
 
1950         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
 
1951                adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
 
1956 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
 
1957 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
 
1959 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
 
1960                                        unsigned long adr, int len, void *thunk)
 
1962         struct cfi_private *cfi = map->fldrv_priv;
 
1963         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
 
1969         spin_lock(chip->mutex);
 
1970         ret = get_chip(map, chip, adr, FL_LOCKING);
 
1972                 spin_unlock(chip->mutex);
 
1977         xip_disable(map, chip, adr);
 
1979         map_write(map, CMD(0x60), adr);
 
1980         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
 
1981                 map_write(map, CMD(0x01), adr);
 
1982                 chip->state = FL_LOCKING;
 
1983         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
 
1984                 map_write(map, CMD(0xD0), adr);
 
1985                 chip->state = FL_UNLOCKING;
 
1990          * If Instant Individual Block Locking is supported then no need to delay
 
1993         udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
 
1995         ret = WAIT_TIMEOUT(map, chip, adr, udelay);
 
1997                 map_write(map, CMD(0x70), adr);
 
1998                 chip->state = FL_STATUS;
 
1999                 xip_enable(map, chip, adr);
 
2000                 printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
 
2004         xip_enable(map, chip, adr);
 
2005 out:    put_chip(map, chip, adr);
 
2006         spin_unlock(chip->mutex);
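
/*
 * Companion sketch to do_xxlock_oneblock() above, reduced to raw
 * command values (illustrative: single x8 chip, no timeout handling).
 * 0x60 is the lock-bit setup command; 0x01 then sets the block's lock
 * bit, while 0xD0 clears it.
 */
static void example_set_block_lock(volatile unsigned char *blk, int lock)
{
	*blk = 0x60;			/* lock-bit setup */
	*blk = lock ? 0x01 : 0xD0;	/* set vs. clear the lock bit */
	*blk = 0x70;			/* switch to status reads */
	while (!(*blk & 0x80))		/* SR.7: operation complete */
		;
	*blk = 0xFF;			/* read-array mode */
}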
 
2010 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
 
2014 #ifdef DEBUG_LOCK_BITS
 
2015         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
 
2016                __FUNCTION__, ofs, len);
 
2017         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
 
2021         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
 
2022                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
 
2024 #ifdef DEBUG_LOCK_BITS
 
2025         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
 
2027         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
 
2034 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
 
2038 #ifdef DEBUG_LOCK_BITS
 
2039         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
 
2040                __FUNCTION__, ofs, len);
 
2041         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
 
2045         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
 
2046                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
 
2048 #ifdef DEBUG_LOCK_BITS
 
2049         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
 
2051         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
 
2058 #ifdef CONFIG_MTD_OTP
 
2060 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
 
2061                         u_long data_offset, u_char *buf, u_int size,
 
2062                         u_long prot_offset, u_int groupno, u_int groupsize);
 
2065 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
 
2066             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
 
2068         struct cfi_private *cfi = map->fldrv_priv;
 
2071         spin_lock(chip->mutex);
 
2072         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
 
2074                 spin_unlock(chip->mutex);
 
2078         /* let's ensure we're not reading back cached data from array mode */
 
2079         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
 
2081         xip_disable(map, chip, chip->start);
 
2082         if (chip->state != FL_JEDEC_QUERY) {
 
2083                 map_write(map, CMD(0x90), chip->start);
 
2084                 chip->state = FL_JEDEC_QUERY;
 
2086         map_copy_from(map, buf, chip->start + offset, size);
 
2087         xip_enable(map, chip, chip->start);
 
2089         /* then ensure we don't keep OTP data in the cache */
 
2090         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
 
2092         put_chip(map, chip, chip->start);
 
2093         spin_unlock(chip->mutex);
 
2098 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
 
2099              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
 
2104                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
 
2105                 int gap = offset - bus_ofs;
 
2106                 int n = min_t(int, size, map_bankwidth(map)-gap);
 
2107                 map_word datum = map_word_ff(map);
 
2109                 datum = map_word_load_partial(map, datum, buf, gap, n);
 
2110                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
 
2123 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
 
2124             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
 
2126         struct cfi_private *cfi = map->fldrv_priv;
 
2129         /* make sure area matches group boundaries */
 
2133         datum = map_word_ff(map);
 
2134         datum = map_word_clr(map, datum, CMD(1 << grpno));
 
2135         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
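
/*
 * Worked example (illustrative) of the lock word computed above: OTP
 * protection bits program from 1 to 0, so locking group N means
 * clearing bit N in an otherwise all-ones word.
 */
static unsigned int example_otp_lock_word(unsigned int grpno)
{
	return 0xFFFFFFFFu & ~(1u << grpno);	/* e.g. grpno 2 -> 0xFFFFFFFB */
}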
 
2138 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
 
2139                                  size_t *retlen, u_char *buf,
 
2140                                  otp_op_t action, int user_regs)
 
2142         struct map_info *map = mtd->priv;
 
2143         struct cfi_private *cfi = map->fldrv_priv;
 
2144         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
 
2145         struct flchip *chip;
 
2146         struct cfi_intelext_otpinfo *otp;
 
2147         u_long devsize, reg_prot_offset, data_offset;
 
2148         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
 
2149         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
 
2154         /* Check that we actually have some OTP registers */
 
2155         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
 
2158         /* we need real chips here, not virtual ones */
 
2159         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
 
2160         chip_step = devsize >> cfi->chipshift;
 
2163         /* Some chips have OTP located in the _top_ partition only.
 
2164            For example: Intel 28F256L18T (T means top-parameter device) */
 
2165         if (cfi->mfr == MANUFACTURER_INTEL) {
 
2170                         chip_num = chip_step - 1;
 
2174         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
 
2175                 chip = &cfi->chips[chip_num];
 
2176                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
 
2178                 /* first OTP region */
 
2180                 reg_prot_offset = extp->ProtRegAddr;
 
2181                 reg_fact_groups = 1;
 
2182                 reg_fact_size = 1 << extp->FactProtRegSize;
 
2183                 reg_user_groups = 1;
 
2184                 reg_user_size = 1 << extp->UserProtRegSize;
 
2187                         /* flash geometry fixup */
 
2188                         data_offset = reg_prot_offset + 1;
 
2189                         data_offset *= cfi->interleave * cfi->device_type;
 
2190                         reg_prot_offset *= cfi->interleave * cfi->device_type;
 
2191                         reg_fact_size *= cfi->interleave;
 
2192                         reg_user_size *= cfi->interleave;
 
2195                                 groups = reg_user_groups;
 
2196                                 groupsize = reg_user_size;
 
2197                                 /* skip over factory reg area */
 
2198                                 groupno = reg_fact_groups;
 
2199                                 data_offset += reg_fact_groups * reg_fact_size;
 
2201                                 groups = reg_fact_groups;
 
2202                                 groupsize = reg_fact_size;
 
2206                         while (len > 0 && groups > 0) {
 
2209                                          * Special case: if action is NULL
 
2210                                          * we fill buf with otp_info records.
 
2212                                         struct otp_info *otpinfo;
 
2214                                         len -= sizeof(struct otp_info);
 
2217                                         ret = do_otp_read(map, chip,
 
2219                                                           (u_char *)&lockword,
 
2224                                         otpinfo = (struct otp_info *)buf;
 
2225                                         otpinfo->start = from;
 
2226                                         otpinfo->length = groupsize;
 
2228                                            !map_word_bitsset(map, lockword,
 
2231                                         buf += sizeof(*otpinfo);
 
2232                                         *retlen += sizeof(*otpinfo);
 
2233                                 } else if (from >= groupsize) {
 
2235                                         data_offset += groupsize;
 
2237                                         int size = groupsize;
 
2238                                         data_offset += from;
 
2243                                         ret = action(map, chip, data_offset,
 
2244                                                      buf, size, reg_prot_offset,
 
2245                                                      groupno, groupsize);
 
2251                                         data_offset += size;
 
2257                         /* next OTP region */
 
2258                         if (++field == extp->NumProtectionFields)
 
2260                         reg_prot_offset = otp->ProtRegAddr;
 
2261                         reg_fact_groups = otp->FactGroups;
 
2262                         reg_fact_size = 1 << otp->FactProtRegSize;
 
2263                         reg_user_groups = otp->UserGroups;
 
2264                         reg_user_size = 1 << otp->UserProtRegSize;
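
/*
 * Sketch of the geometry fixup applied in the walk above (example
 * values assumed): register offsets in the CFI tables are in
 * device-internal units, so with two interleaved x16 chips
 * (interleave = 2, device_type = 2) a table offset must be scaled by
 * interleave * device_type = 4 to become a byte offset in the map,
 * while register sizes scale by the interleave alone.
 */
static unsigned long example_scale_reg_offset(unsigned long cfi_offset,
					      int interleave, int device_type)
{
	return cfi_offset * interleave * device_type;	/* CFI units -> map bytes */
}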
 
2272 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
 
2273                                            size_t len, size_t *retlen,
 
2276         return cfi_intelext_otp_walk(mtd, from, len, retlen,
 
2277                                      buf, do_otp_read, 0);
 
2280 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
 
2281                                            size_t len, size_t *retlen,
 
2284         return cfi_intelext_otp_walk(mtd, from, len, retlen,
 
2285                                      buf, do_otp_read, 1);
 
2288 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
 
2289                                             size_t len, size_t *retlen,
 
2292         return cfi_intelext_otp_walk(mtd, from, len, retlen,
 
2293                                      buf, do_otp_write, 1);
 
2296 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
 
2297                                            loff_t from, size_t len)
 
2300         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
 
2301                                      NULL, do_otp_lock, 1);
 
2304 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
 
2305                                            struct otp_info *buf, size_t len)
 
2310         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
 
2311         return ret ? : retlen;
 
2314 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
 
2315                                            struct otp_info *buf, size_t len)
 
2320         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
 
2321         return ret ? : retlen;
 
2326 static void cfi_intelext_save_locks(struct mtd_info *mtd)
 
2328         struct mtd_erase_region_info *region;
 
2329         int block, status, i;
 
2333         for (i = 0; i < mtd->numeraseregions; i++) {
 
2334                 region = &mtd->eraseregions[i];
 
2335                 if (!region->lockmap)
 
2338                 for (block = 0; block < region->numblocks; block++){
 
2339                         len = region->erasesize;
 
2340                         adr = region->offset + block * len;
 
2342                         status = cfi_varsize_frob(mtd,
 
2343                                         do_getlockstatus_oneblock, adr, len, NULL);
 
2345                                 set_bit(block, region->lockmap);
 
2347                                 clear_bit(block, region->lockmap);
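
/*
 * Illustrative sketch of the lockmap bookkeeping above, using plain C
 * bit arithmetic in place of the kernel's set_bit()/clear_bit(): one
 * bit per erase block, recorded before suspend so that resume can
 * unlock exactly the blocks that were unlocked before.
 */
static void example_lockmap_update(unsigned long *lockmap, int block, int locked)
{
	const int bpl = 8 * sizeof(unsigned long);	/* bits per map word */

	if (locked)
		lockmap[block / bpl] |= 1UL << (block % bpl);
	else
		lockmap[block / bpl] &= ~(1UL << (block % bpl));
}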
 
2352 static int cfi_intelext_suspend(struct mtd_info *mtd)
 
2354         struct map_info *map = mtd->priv;
 
2355         struct cfi_private *cfi = map->fldrv_priv;
 
2356         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
 
2358         struct flchip *chip;
 
2361         if ((mtd->flags & MTD_POWERUP_LOCK)
 
2362             && extp && (extp->FeatureSupport & (1 << 5)))
 
2363                 cfi_intelext_save_locks(mtd);
 
2365         for (i=0; !ret && i<cfi->numchips; i++) {
 
2366                 chip = &cfi->chips[i];
 
2368                 spin_lock(chip->mutex);
 
2370                 switch (chip->state) {
 
2374                 case FL_JEDEC_QUERY:
 
2375                         if (chip->oldstate == FL_READY) {
 
2376                                 /* place the chip in a known state before suspend */
 
2377                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
 
2378                                 chip->oldstate = chip->state;
 
2379                                 chip->state = FL_PM_SUSPENDED;
 
2380                                 /* No need to wake_up() on this state change -
 
2381                                  * as the whole point is that nobody can do anything
 
2382                                  * with the chip now anyway.
 
2385                                 /* There seems to be an operation pending. We must wait for it. */
 
2386                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
 
2391                         /* Should we actually wait? Once upon a time these routines weren't
 
2392                            allowed to. Or should we return -EAGAIN, because the upper layers
 
2393                            ought to have already shut down anything which was using the device
 
2394                            anyway? The latter for now. */
 
2395                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
 
2397                 case FL_PM_SUSPENDED:
 
2400                 spin_unlock(chip->mutex);
 
2403         /* Unlock the chips again */
 
2406                 for (i--; i >= 0; i--) {
 
2407                         chip = &cfi->chips[i];
 
2409                         spin_lock(chip->mutex);
 
2411                         if (chip->state == FL_PM_SUSPENDED) {
 
2412                                 /* No need to force it into a known state here,
 
2413                                    because we're returning failure, and it didn't
 
2415                                 chip->state = chip->oldstate;
 
2416                                 chip->oldstate = FL_READY;
 
2419                         spin_unlock(chip->mutex);
 
2426 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
 
2428         struct mtd_erase_region_info *region;
 
2433         for (i = 0; i < mtd->numeraseregions; i++) {
 
2434                 region = &mtd->eraseregions[i];
 
2435                 if (!region->lockmap)
 
2438                 for (block = 0; block < region->numblocks; block++) {
 
2439                         len = region->erasesize;
 
2440                         adr = region->offset + block * len;
 
2442                         if (!test_bit(block, region->lockmap))
 
2443                                 cfi_intelext_unlock(mtd, adr, len);
 
2448 static void cfi_intelext_resume(struct mtd_info *mtd)
 
2450         struct map_info *map = mtd->priv;
 
2451         struct cfi_private *cfi = map->fldrv_priv;
 
2452         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
 
2454         struct flchip *chip;
 
2456         for (i=0; i<cfi->numchips; i++) {
 
2458                 chip = &cfi->chips[i];
 
2460                 spin_lock(chip->mutex);
 
2462                 /* Go to known state. Chip may have been power cycled */
 
2463                 if (chip->state == FL_PM_SUSPENDED) {
 
2464                         map_write(map, CMD(0xFF), cfi->chips[i].start);
 
2465                         chip->oldstate = chip->state = FL_READY;
 
2469                 spin_unlock(chip->mutex);
 
2472         if ((mtd->flags & MTD_POWERUP_LOCK)
 
2473             && extp && (extp->FeatureSupport & (1 << 5)))
 
2474                 cfi_intelext_restore_locks(mtd);
 
2477 static int cfi_intelext_reset(struct mtd_info *mtd)
 
2479         struct map_info *map = mtd->priv;
 
2480         struct cfi_private *cfi = map->fldrv_priv;
 
2483         for (i=0; i < cfi->numchips; i++) {
 
2484                 struct flchip *chip = &cfi->chips[i];
 
2486                 /* force the completion of any ongoing operation
 
2487                    and switch to array mode so any bootloader in
 
2488                    flash is accessible for soft reboot. */
 
2489                 spin_lock(chip->mutex);
 
2490                 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
 
2492                         map_write(map, CMD(0xff), chip->start);
 
2493                         chip->state = FL_SHUTDOWN;
 
2495                 spin_unlock(chip->mutex);
 
2501 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
 
2504         struct mtd_info *mtd;
 
2506         mtd = container_of(nb, struct mtd_info, reboot_notifier);
 
2507         cfi_intelext_reset(mtd);
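
/*
 * Sketch (assumed, but consistent with the container_of() above) of how
 * the notifier is wired up at setup time: the notifier_block lives
 * inside struct mtd_info, so the callback can recover its mtd without
 * any global state.
 */
static void example_register_reboot(struct mtd_info *mtd)
{
	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
	register_reboot_notifier(&mtd->reboot_notifier);	/* <linux/reboot.h> */
}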
 
2511 static void cfi_intelext_destroy(struct mtd_info *mtd)
 
2513         struct map_info *map = mtd->priv;
 
2514         struct cfi_private *cfi = map->fldrv_priv;
 
2515         struct mtd_erase_region_info *region;
 
2517         cfi_intelext_reset(mtd);
 
2518         unregister_reboot_notifier(&mtd->reboot_notifier);
 
2519         kfree(cfi->cmdset_priv);
 
2521         kfree(cfi->chips[0].priv);
 
2523         for (i = 0; i < mtd->numeraseregions; i++) {
 
2524                 region = &mtd->eraseregions[i];
 
2525                 kfree(region->lockmap);
 
2528         kfree(mtd->eraseregions);
 
2531 MODULE_LICENSE("GPL");
 
2532 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
 
2533 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
 
2534 MODULE_ALIAS("cfi_cmdset_0003");
 
2535 MODULE_ALIAS("cfi_cmdset_0200");