/*
 * Common Flash Interface support:
 *   Generic utility functions not dependent on command set
 *
 * Copyright (C) 2002 Red Hat
 * Copyright (C) 2003 STMicroelectronics Limited
 *
 * This code is covered by the GPL.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/compatmac.h>
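
/*
 * Check whether the chip at 'base' is currently answering in CFI query
 * mode: per the CFI specification the identification string "QRY" is
 * exposed at query offsets 0x10, 0x11 and 0x12 (scaled by interleave
 * and device width). Returns 1 if "QRY" is present, 0 otherwise.
 */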
int __xipram cfi_qry_present(struct map_info *map, __u32 base,
			     struct cfi_private *cfi)
{
	int osf = cfi->interleave * cfi->device_type;	/* scale factor */
	map_word val[3];
	map_word qry[3];

	qry[0] = cfi_build_cmd('Q', map, cfi);
	qry[1] = cfi_build_cmd('R', map, cfi);
	qry[2] = cfi_build_cmd('Y', map, cfi);

	val[0] = map_read(map, base + osf*0x10);
	val[1] = map_read(map, base + osf*0x11);
	val[2] = map_read(map, base + osf*0x12);

	if (!map_word_equal(map, qry[0], val[0]))
		return 0;

	if (!map_word_equal(map, qry[1], val[1]))
		return 0;

	if (!map_word_equal(map, qry[2], val[2]))
		return 0;

	return 1;	/* "QRY" found */
}
EXPORT_SYMBOL_GPL(cfi_qry_present);
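
/*
 * Put the chip into CFI query mode. The standard reset + query (0x98 at
 * address 0x55) sequence is tried first; if "QRY" does not appear, a
 * couple of alternative sequences known to help non-conforming parts
 * are attempted. Returns 1 on success, 0 if the chip never shows "QRY".
 */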
int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
			     struct cfi_private *cfi)
{
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* QRY not found; probably we are dealing with some odd CFI chips */
	/* Some revisions of some old Intel chips? */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* ST M29DW chips */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* QRY not found */
	return 0;
}
EXPORT_SYMBOL_GPL(cfi_qry_mode_on);
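
/* Leave query mode and return the chip to normal array (read) mode. */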
void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
			       struct cfi_private *cfi)
{
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
}
EXPORT_SYMBOL_GPL(cfi_qry_mode_off);
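
/*
 * Read 'size' bytes of the vendor-specific extended query table that
 * starts at query offset 'adr' (typically the primary table, hence the
 * name) into a freshly allocated buffer. Returns the buffer, which the
 * caller must kfree, or NULL if adr is zero or allocation fails.
 */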
struct cfi_extquery *
__xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name)
{
	struct cfi_private *cfi = map->fldrv_priv;
	__u32 base = 0; // cfi->chips[0].start;
	int ofs_factor = cfi->interleave * cfi->device_type;
	int i;
	struct cfi_extquery *extp = NULL;

	printk(" %s Extended Query Table at 0x%4.4X\n", name, adr);
	if (!adr)
		goto out;

	extp = kmalloc(size, GFP_KERNEL);
	if (!extp) {
		printk(KERN_ERR "Failed to allocate memory\n");
		goto out;
	}

#ifdef CONFIG_MTD_XIP
	local_irq_disable();
#endif

	/* Switch it into Query Mode */
	cfi_qry_mode_on(base, map, cfi);
	/* Read in the Extended Query Table */
	for (i = 0; i < size; i++) {
		((unsigned char *)extp)[i] =
			cfi_read_query(map, base + ((adr + i) * ofs_factor));
	}

	/* Make sure it returns to read mode */
	cfi_qry_mode_off(base, map, cfi);

#ifdef CONFIG_MTD_XIP
	(void) map_read(map, base);
	xip_iprefetch();
	local_irq_enable();
#endif

 out:	return extp;
}

EXPORT_SYMBOL(cfi_read_pri);
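
/*
 * Walk a NULL-terminated table of fixups and invoke every entry whose
 * manufacturer and device IDs match the probed chip (CFI_MFR_ANY and
 * CFI_ID_ANY act as wildcards).
 *
 * Illustrative sketch of a caller-side table; fixup_foo() and the use
 * of wildcard IDs are hypothetical, not part of this file:
 *
 *	static struct cfi_fixup fixup_table[] = {
 *		{ CFI_MFR_ANY, CFI_ID_ANY, fixup_foo, NULL },
 *		{ 0, 0, NULL, NULL }	(terminating entry: .fixup == NULL)
 *	};
 *	cfi_fixup(mtd, fixup_table);
 */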
void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_fixup *f;

	for (f = fixups; f->fixup; f++) {
		if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
		    ((f->id  == CFI_ID_ANY)  || (f->id  == cfi->id))) {
			f->fixup(mtd, f->param);
		}
	}
}

EXPORT_SYMBOL(cfi_fixup);
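
/*
 * Apply the callback 'frob' (typically an erase or lock/unlock helper)
 * to every erase block covered by [ofs, ofs + len), taking variable-size
 * erase regions and chip boundaries into account. Both ends of the
 * range must be aligned to the erase size in effect at that address.
 */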
int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
		     loff_t ofs, size_t len, void *thunk)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/
	while (i < mtd->numeraseregions && ofs >= regions[i].offset)
		i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/
	if (ofs & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */
	while (i < mtd->numeraseregions && (ofs + len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((ofs + len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	i = first;

	while (len) {
		int size = regions[i].erasesize;

		ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);
		if (ret)
			return ret;

		adr += size;
		ofs += size;
		len -= size;

		if (ofs == regions[i].offset + size * regions[i].numblocks)
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	return 0;
}

EXPORT_SYMBOL(cfi_varsize_frob);

MODULE_LICENSE("GPL");