video1394: small optimizations to frame retrieval codepath
[linux-2.6] / drivers / mtd / chips / amd_flash.c
1 /*
2  * MTD map driver for AMD compatible flash chips (non-CFI)
3  *
4  * Author: Jonas Holmberg <jonas.holmberg@axis.com>
5  *
6  * $Id: amd_flash.c,v 1.28 2005/11/07 11:14:22 gleixner Exp $
7  *
8  * Copyright (c) 2001 Axis Communications AB
9  *
10  * This file is under GPL.
11  *
12  */
13
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/kernel.h>
17 #include <linux/sched.h>
18 #include <linux/errno.h>
19 #include <linux/slab.h>
20 #include <linux/delay.h>
21 #include <linux/interrupt.h>
22 #include <linux/init.h>
23 #include <linux/mtd/map.h>
24 #include <linux/mtd/mtd.h>
25 #include <linux/mtd/flashchip.h>
26
/* There's no limit. It exists only to avoid realloc. */
#define MAX_AMD_CHIPS 8

/* Device data-bus width, expressed in bytes. */
#define DEVICE_TYPE_X8  (8 / 8)
#define DEVICE_TYPE_X16 (16 / 8)
#define DEVICE_TYPE_X32 (32 / 8)

/* Addresses (in device words; callers scale by map->buswidth) */
#define ADDR_MANUFACTURER               0x0000
#define ADDR_DEVICE_ID                  0x0001
#define ADDR_SECTOR_LOCK                0x0002
#define ADDR_HANDSHAKE                  0x0003
#define ADDR_UNLOCK_1                   0x0555
#define ADDR_UNLOCK_2                   0x02AA

/* Commands (AMD-style embedded command set) */
#define CMD_UNLOCK_DATA_1               0x00AA
#define CMD_UNLOCK_DATA_2               0x0055
#define CMD_MANUFACTURER_UNLOCK_DATA    0x0090
#define CMD_UNLOCK_BYPASS_MODE          0x0020
#define CMD_PROGRAM_UNLOCK_DATA         0x00A0
#define CMD_RESET_DATA                  0x00F0
#define CMD_SECTOR_ERASE_UNLOCK_DATA    0x0080
#define CMD_SECTOR_ERASE_UNLOCK_DATA_2  0x0030

#define CMD_UNLOCK_SECTOR               0x0060

/* Manufacturers (JEDEC manufacturer ids) */
#define MANUFACTURER_AMD        0x0001
#define MANUFACTURER_ATMEL      0x001F
#define MANUFACTURER_FUJITSU    0x0004
#define MANUFACTURER_ST         0x0020
#define MANUFACTURER_SST        0x00BF
#define MANUFACTURER_TOSHIBA    0x0098

/* AMD device ids */
#define AM29F800BB      0x2258
#define AM29F800BT      0x22D6
#define AM29LV800BB     0x225B
#define AM29LV800BT     0x22DA
#define AM29LV160DT     0x22C4
#define AM29LV160DB     0x2249
#define AM29BDS323D     0x22D1

/* Atmel */
#define AT49xV16x       0x00C0
#define AT49xV16xT      0x00C2

/* Fujitsu */
#define MBM29LV160TE    0x22C4
#define MBM29LV160BE    0x2249
#define MBM29LV800BB    0x225B

/* ST - www.st.com */
#define M29W800T        0x00D7
#define M29W160DT       0x22C4
#define M29W160DB       0x2249

/* SST */
#define SST39LF800      0x2781
#define SST39LF160      0x2782

/* Toshiba */
#define TC58FVT160      0x00C2
#define TC58FVB160      0x0043

/* DQ6 status bit; flash_is_busy() compares it across two successive reads
 * (it toggles while an embedded operation is in progress). */
#define D6_MASK 0x40
94
95 struct amd_flash_private {
96         int device_type;
97         int interleave;
98         int numchips;
99         unsigned long chipshift;
100         struct flchip chips[0];
101 };
102
/*
 * Static description of one supported (non-CFI) flash part: JEDEC ids,
 * human-readable name, total size and the erase-region layout (at most
 * four regions per part in this driver).
 */
struct amd_flash_info {
	const __u16 mfr_id;		/* JEDEC manufacturer id */
	const __u16 dev_id;		/* JEDEC device id */
	const char *name;		/* printed at probe time */
	const u_long size;		/* device size in bytes */
	const int numeraseregions;	/* used entries in regions[] */
	const struct mtd_erase_region_info regions[4];
};
111
112
113
114 static int amd_flash_read(struct mtd_info *, loff_t, size_t, size_t *,
115                           u_char *);
116 static int amd_flash_write(struct mtd_info *, loff_t, size_t, size_t *,
117                            const u_char *);
118 static int amd_flash_erase(struct mtd_info *, struct erase_info *);
119 static void amd_flash_sync(struct mtd_info *);
120 static int amd_flash_suspend(struct mtd_info *);
121 static void amd_flash_resume(struct mtd_info *);
122 static void amd_flash_destroy(struct mtd_info *);
123 static struct mtd_info *amd_flash_probe(struct map_info *map);
124
125
/* Chip-driver descriptor: exposes amd_flash_probe()/amd_flash_destroy()
 * to the MTD map layer under the name "amd_flash". */
static struct mtd_chip_driver amd_flash_chipdrv = {
	.probe = amd_flash_probe,
	.destroy = amd_flash_destroy,
	.name = "amd_flash",
	.module = THIS_MODULE
};
132
133 static inline __u32 wide_read(struct map_info *map, __u32 addr)
134 {
135         if (map->buswidth == 1) {
136                 return map_read8(map, addr);
137         } else if (map->buswidth == 2) {
138                 return map_read16(map, addr);
139         } else if (map->buswidth == 4) {
140                 return map_read32(map, addr);
141         }
142
143         return 0;
144 }
145
146 static inline void wide_write(struct map_info *map, __u32 val, __u32 addr)
147 {
148         if (map->buswidth == 1) {
149                 map_write8(map, val, addr);
150         } else if (map->buswidth == 2) {
151                 map_write16(map, val, addr);
152         } else if (map->buswidth == 4) {
153                 map_write32(map, val, addr);
154         }
155 }
156
157 static inline __u32 make_cmd(struct map_info *map, __u32 cmd)
158 {
159         const struct amd_flash_private *private = map->fldrv_priv;
160         if ((private->interleave == 2) &&
161             (private->device_type == DEVICE_TYPE_X16)) {
162                 cmd |= (cmd << 16);
163         }
164
165         return cmd;
166 }
167
168 static inline void send_unlock(struct map_info *map, unsigned long base)
169 {
170         wide_write(map, (CMD_UNLOCK_DATA_1 << 16) | CMD_UNLOCK_DATA_1,
171                    base + (map->buswidth * ADDR_UNLOCK_1));
172         wide_write(map, (CMD_UNLOCK_DATA_2 << 16) | CMD_UNLOCK_DATA_2,
173                    base + (map->buswidth * ADDR_UNLOCK_2));
174 }
175
176 static inline void send_cmd(struct map_info *map, unsigned long base, __u32 cmd)
177 {
178         send_unlock(map, base);
179         wide_write(map, make_cmd(map, cmd),
180                    base + (map->buswidth * ADDR_UNLOCK_1));
181 }
182
183 static inline void send_cmd_to_addr(struct map_info *map, unsigned long base,
184                                     __u32 cmd, unsigned long addr)
185 {
186         send_unlock(map, base);
187         wide_write(map, make_cmd(map, cmd), addr);
188 }
189
190 static inline int flash_is_busy(struct map_info *map, unsigned long addr,
191                                 int interleave)
192 {
193
194         if ((interleave == 2) && (map->buswidth == 4)) {
195                 __u32 read1, read2;
196
197                 read1 = wide_read(map, addr);
198                 read2 = wide_read(map, addr);
199
200                 return (((read1 >> 16) & D6_MASK) !=
201                         ((read2 >> 16) & D6_MASK)) ||
202                        (((read1 & 0xffff) & D6_MASK) !=
203                         ((read2 & 0xffff) & D6_MASK));
204         }
205
206         return ((wide_read(map, addr) & D6_MASK) !=
207                 (wide_read(map, addr) & D6_MASK));
208 }
209
210 static inline void unlock_sector(struct map_info *map, unsigned long sect_addr,
211                                  int unlock)
212 {
213         /* Sector lock address. A6 = 1 for unlock, A6 = 0 for lock */
214         int SLA = unlock ?
215                 (sect_addr |  (0x40 * map->buswidth)) :
216                 (sect_addr & ~(0x40 * map->buswidth)) ;
217
218         __u32 cmd = make_cmd(map, CMD_UNLOCK_SECTOR);
219
220         wide_write(map, make_cmd(map, CMD_RESET_DATA), 0);
221         wide_write(map, cmd, SLA); /* 1st cycle: write cmd to any address */
222         wide_write(map, cmd, SLA); /* 2nd cycle: write cmd to any address */
223         wide_write(map, cmd, SLA); /* 3rd cycle: write cmd to SLA */
224 }
225
226 static inline int is_sector_locked(struct map_info *map,
227                                    unsigned long sect_addr)
228 {
229         int status;
230
231         wide_write(map, CMD_RESET_DATA, 0);
232         send_cmd(map, sect_addr, CMD_MANUFACTURER_UNLOCK_DATA);
233
234         /* status is 0x0000 for unlocked and 0x0001 for locked */
235         status = wide_read(map, sect_addr + (map->buswidth * ADDR_SECTOR_LOCK));
236         wide_write(map, CMD_RESET_DATA, 0);
237         return status;
238 }
239
240 static int amd_flash_do_unlock(struct mtd_info *mtd, loff_t ofs, size_t len,
241                                int is_unlock)
242 {
243         struct map_info *map;
244         struct mtd_erase_region_info *merip;
245         int eraseoffset, erasesize, eraseblocks;
246         int i;
247         int retval = 0;
248         int lock_status;
249
250         map = mtd->priv;
251
252         /* Pass the whole chip through sector by sector and check for each
253            sector if the sector and the given interval overlap */
254         for(i = 0; i < mtd->numeraseregions; i++) {
255                 merip = &mtd->eraseregions[i];
256
257                 eraseoffset = merip->offset;
258                 erasesize = merip->erasesize;
259                 eraseblocks = merip->numblocks;
260
261                 if (ofs > eraseoffset + erasesize)
262                         continue;
263
264                 while (eraseblocks > 0) {
265                         if (ofs < eraseoffset + erasesize && ofs + len > eraseoffset) {
266                                 unlock_sector(map, eraseoffset, is_unlock);
267
268                                 lock_status = is_sector_locked(map, eraseoffset);
269
270                                 if (is_unlock && lock_status) {
271                                         printk("Cannot unlock sector at address %x length %xx\n",
272                                                eraseoffset, merip->erasesize);
273                                         retval = -1;
274                                 } else if (!is_unlock && !lock_status) {
275                                         printk("Cannot lock sector at address %x length %x\n",
276                                                eraseoffset, merip->erasesize);
277                                         retval = -1;
278                                 }
279                         }
280                         eraseoffset += erasesize;
281                         eraseblocks --;
282                 }
283         }
284         return retval;
285 }
286
/* MTD ->unlock hook: unlock all sectors overlapping [ofs, ofs + len). */
static int amd_flash_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return amd_flash_do_unlock(mtd, ofs, len, 1);
}
291
/* MTD ->lock hook: lock all sectors overlapping [ofs, ofs + len). */
static int amd_flash_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return amd_flash_do_unlock(mtd, ofs, len, 0);
}
296
297
/*
 * Reads JEDEC manufacturer ID and device ID and returns the index of the first
 * matching table entry (-1 if not found or alias for already found chip).
 *
 * When @chips is NULL only mtd->size / mtd->numeraseregions are updated
 * (first probe at base 0); otherwise a new flchip entry is appended and
 * @private->numchips is bumped.  The chip is left out of autoselect mode
 * on every return path except the alias case noted below.
 */
static int probe_new_chip(struct mtd_info *mtd, __u32 base,
			  struct flchip *chips,
			  struct amd_flash_private *private,
			  const struct amd_flash_info *table, int table_size)
{
	__u32 mfr_id;
	__u32 dev_id;
	struct map_info *map = mtd->priv;
	struct amd_flash_private temp;
	int i;

	/* Probe with a provisional interleave-2/x16 configuration so that
	 * make_cmd() replicates commands; corrected below if wrong. */
	temp.device_type = DEVICE_TYPE_X16;	// Assume X16 (FIXME)
	temp.interleave = 2;
	map->fldrv_priv = &temp;

	/* Enter autoselect mode. */
	send_cmd(map, base, CMD_RESET_DATA);
	send_cmd(map, base, CMD_MANUFACTURER_UNLOCK_DATA);

	mfr_id = wide_read(map, base + (map->buswidth * ADDR_MANUFACTURER));
	dev_id = wide_read(map, base + (map->buswidth * ADDR_DEVICE_ID));

	/* If both halfwords of a 32-bit read carry the same id, two x16
	 * chips really are interleaved; otherwise assume a single chip. */
	if ((map->buswidth == 4) && ((mfr_id >> 16) == (mfr_id & 0xffff)) &&
	    ((dev_id >> 16) == (dev_id & 0xffff))) {
		mfr_id &= 0xffff;
		dev_id &= 0xffff;
	} else {
		temp.interleave = 1;
	}

	for (i = 0; i < table_size; i++) {
		if ((mfr_id == table[i].mfr_id) &&
		    (dev_id == table[i].dev_id)) {
			if (chips) {
				int j;

				/* Is this an alias for an already found chip?
				 * In that case that chip should be in
				 * autoselect mode now.
				 */
				for (j = 0; j < private->numchips; j++) {
					__u32 mfr_id_other;
					__u32 dev_id_other;

					mfr_id_other =
						wide_read(map, chips[j].start +
							       (map->buswidth *
								ADDR_MANUFACTURER
							       ));
					dev_id_other =
						wide_read(map, chips[j].start +
							       (map->buswidth *
								ADDR_DEVICE_ID));
					if (temp.interleave == 2) {
						mfr_id_other &= 0xffff;
						dev_id_other &= 0xffff;
					}
					if ((mfr_id_other == mfr_id) &&
					    (dev_id_other == dev_id)) {

						/* Exit autoselect mode. */
						send_cmd(map, base,
							 CMD_RESET_DATA);

						return -1;
					}
				}

				if (private->numchips == MAX_AMD_CHIPS) {
					printk(KERN_WARNING
					       "%s: Too many flash chips "
					       "detected. Increase "
					       "MAX_AMD_CHIPS from %d.\n",
					       map->name, MAX_AMD_CHIPS);

					return -1;
				}

				/* Record the new chip as ready for use. */
				chips[private->numchips].start = base;
				chips[private->numchips].state = FL_READY;
				chips[private->numchips].mutex =
					&chips[private->numchips]._spinlock;
				private->numchips++;
			}

			printk("%s: Found %d x %ldMiB %s at 0x%x\n", map->name,
			       temp.interleave, (table[i].size)/(1024*1024),
			       table[i].name, base);

			/* Account the chip's capacity and erase layout. */
			mtd->size += table[i].size * temp.interleave;
			mtd->numeraseregions += table[i].numeraseregions;

			break;
		}
	}

	/* Exit autoselect mode. */
	send_cmd(map, base, CMD_RESET_DATA);

	if (i == table_size) {
		printk(KERN_DEBUG "%s: unknown flash device at 0x%x, "
		       "mfr id 0x%x, dev id 0x%x\n", map->name,
		       base, mfr_id, dev_id);
		map->fldrv_priv = NULL;

		return -1;
	}

	/* Commit the detected geometry to the caller's private struct. */
	private->device_type = temp.device_type;
	private->interleave = temp.interleave;

	return i;
}
415
416
417
418 static struct mtd_info *amd_flash_probe(struct map_info *map)
419 {
420         static const struct amd_flash_info table[] = {
421         {
422                 .mfr_id = MANUFACTURER_AMD,
423                 .dev_id = AM29LV160DT,
424                 .name = "AMD AM29LV160DT",
425                 .size = 0x00200000,
426                 .numeraseregions = 4,
427                 .regions = {
428                         { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
429                         { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks =  1 },
430                         { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks =  2 },
431                         { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks =  1 }
432                 }
433         }, {
434                 .mfr_id = MANUFACTURER_AMD,
435                 .dev_id = AM29LV160DB,
436                 .name = "AMD AM29LV160DB",
437                 .size = 0x00200000,
438                 .numeraseregions = 4,
439                 .regions = {
440                         { .offset = 0x000000, .erasesize = 0x04000, .numblocks =  1 },
441                         { .offset = 0x004000, .erasesize = 0x02000, .numblocks =  2 },
442                         { .offset = 0x008000, .erasesize = 0x08000, .numblocks =  1 },
443                         { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
444                 }
445         }, {
446                 .mfr_id = MANUFACTURER_TOSHIBA,
447                 .dev_id = TC58FVT160,
448                 .name = "Toshiba TC58FVT160",
449                 .size = 0x00200000,
450                 .numeraseregions = 4,
451                 .regions = {
452                         { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
453                         { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks =  1 },
454                         { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks =  2 },
455                         { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks =  1 }
456                 }
457         }, {
458                 .mfr_id = MANUFACTURER_FUJITSU,
459                 .dev_id = MBM29LV160TE,
460                 .name = "Fujitsu MBM29LV160TE",
461                 .size = 0x00200000,
462                 .numeraseregions = 4,
463                 .regions = {
464                         { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
465                         { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks =  1 },
466                         { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks =  2 },
467                         { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks =  1 }
468                 }
469         }, {
470                 .mfr_id = MANUFACTURER_TOSHIBA,
471                 .dev_id = TC58FVB160,
472                 .name = "Toshiba TC58FVB160",
473                 .size = 0x00200000,
474                 .numeraseregions = 4,
475                 .regions = {
476                         { .offset = 0x000000, .erasesize = 0x04000, .numblocks =  1 },
477                         { .offset = 0x004000, .erasesize = 0x02000, .numblocks =  2 },
478                         { .offset = 0x008000, .erasesize = 0x08000, .numblocks =  1 },
479                         { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
480                 }
481         }, {
482                 .mfr_id = MANUFACTURER_FUJITSU,
483                 .dev_id = MBM29LV160BE,
484                 .name = "Fujitsu MBM29LV160BE",
485                 .size = 0x00200000,
486                 .numeraseregions = 4,
487                 .regions = {
488                         { .offset = 0x000000, .erasesize = 0x04000, .numblocks =  1 },
489                         { .offset = 0x004000, .erasesize = 0x02000, .numblocks =  2 },
490                         { .offset = 0x008000, .erasesize = 0x08000, .numblocks =  1 },
491                         { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
492                 }
493         }, {
494                 .mfr_id = MANUFACTURER_AMD,
495                 .dev_id = AM29LV800BB,
496                 .name = "AMD AM29LV800BB",
497                 .size = 0x00100000,
498                 .numeraseregions = 4,
499                 .regions = {
500                         { .offset = 0x000000, .erasesize = 0x04000, .numblocks =  1 },
501                         { .offset = 0x004000, .erasesize = 0x02000, .numblocks =  2 },
502                         { .offset = 0x008000, .erasesize = 0x08000, .numblocks =  1 },
503                         { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
504                 }
505         }, {
506                 .mfr_id = MANUFACTURER_AMD,
507                 .dev_id = AM29F800BB,
508                 .name = "AMD AM29F800BB",
509                 .size = 0x00100000,
510                 .numeraseregions = 4,
511                 .regions = {
512                         { .offset = 0x000000, .erasesize = 0x04000, .numblocks =  1 },
513                         { .offset = 0x004000, .erasesize = 0x02000, .numblocks =  2 },
514                         { .offset = 0x008000, .erasesize = 0x08000, .numblocks =  1 },
515                         { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
516                 }
517         }, {
518                 .mfr_id = MANUFACTURER_AMD,
519                 .dev_id = AM29LV800BT,
520                 .name = "AMD AM29LV800BT",
521                 .size = 0x00100000,
522                 .numeraseregions = 4,
523                 .regions = {
524                         { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
525                         { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks =  1 },
526                         { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks =  2 },
527                         { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks =  1 }
528                 }
529         }, {
530                 .mfr_id = MANUFACTURER_AMD,
531                 .dev_id = AM29F800BT,
532                 .name = "AMD AM29F800BT",
533                 .size = 0x00100000,
534                 .numeraseregions = 4,
535                 .regions = {
536                         { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
537                         { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks =  1 },
538                         { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks =  2 },
539                         { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks =  1 }
540                 }
541         }, {
542                 .mfr_id = MANUFACTURER_AMD,
543                 .dev_id = AM29LV800BB,
544                 .name = "AMD AM29LV800BB",
545                 .size = 0x00100000,
546                 .numeraseregions = 4,
547                 .regions = {
548                         { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
549                         { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks =  1 },
550                         { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks =  2 },
551                         { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks =  1 }
552                 }
553         }, {
554                 .mfr_id = MANUFACTURER_FUJITSU,
555                 .dev_id = MBM29LV800BB,
556                 .name = "Fujitsu MBM29LV800BB",
557                 .size = 0x00100000,
558                 .numeraseregions = 4,
559                 .regions = {
560                         { .offset = 0x000000, .erasesize = 0x04000, .numblocks =  1 },
561                         { .offset = 0x004000, .erasesize = 0x02000, .numblocks =  2 },
562                         { .offset = 0x008000, .erasesize = 0x08000, .numblocks =  1 },
563                         { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
564                 }
565         }, {
566                 .mfr_id = MANUFACTURER_ST,
567                 .dev_id = M29W800T,
568                 .name = "ST M29W800T",
569                 .size = 0x00100000,
570                 .numeraseregions = 4,
571                 .regions = {
572                         { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
573                         { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks =  1 },
574                         { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks =  2 },
575                         { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks =  1 }
576                 }
577         }, {
578                 .mfr_id = MANUFACTURER_ST,
579                 .dev_id = M29W160DT,
580                 .name = "ST M29W160DT",
581                 .size = 0x00200000,
582                 .numeraseregions = 4,
583                 .regions = {
584                         { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
585                         { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks =  1 },
586                         { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks =  2 },
587                         { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks =  1 }
588                 }
589         }, {
590                 .mfr_id = MANUFACTURER_ST,
591                 .dev_id = M29W160DB,
592                 .name = "ST M29W160DB",
593                 .size = 0x00200000,
594                 .numeraseregions = 4,
595                 .regions = {
596                         { .offset = 0x000000, .erasesize = 0x04000, .numblocks =  1 },
597                         { .offset = 0x004000, .erasesize = 0x02000, .numblocks =  2 },
598                         { .offset = 0x008000, .erasesize = 0x08000, .numblocks =  1 },
599                         { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
600                 }
601         }, {
602                 .mfr_id = MANUFACTURER_AMD,
603                 .dev_id = AM29BDS323D,
604                 .name = "AMD AM29BDS323D",
605                 .size = 0x00400000,
606                 .numeraseregions = 3,
607                 .regions = {
608                         { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 48 },
609                         { .offset = 0x300000, .erasesize = 0x10000, .numblocks = 15 },
610                         { .offset = 0x3f0000, .erasesize = 0x02000, .numblocks =  8 },
611                 }
612         }, {
613                 .mfr_id = MANUFACTURER_ATMEL,
614                 .dev_id = AT49xV16x,
615                 .name = "Atmel AT49xV16x",
616                 .size = 0x00200000,
617                 .numeraseregions = 2,
618                 .regions = {
619                         { .offset = 0x000000, .erasesize = 0x02000, .numblocks =  8 },
620                         { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
621                 }
622         }, {
623                 .mfr_id = MANUFACTURER_ATMEL,
624                 .dev_id = AT49xV16xT,
625                 .name = "Atmel AT49xV16xT",
626                 .size = 0x00200000,
627                 .numeraseregions = 2,
628                 .regions = {
629                         { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
630                         { .offset = 0x1F0000, .erasesize = 0x02000, .numblocks =  8 }
631                 }
632         }
633         };
634
635         struct mtd_info *mtd;
636         struct flchip chips[MAX_AMD_CHIPS];
637         int table_pos[MAX_AMD_CHIPS];
638         struct amd_flash_private temp;
639         struct amd_flash_private *private;
640         u_long size;
641         unsigned long base;
642         int i;
643         int reg_idx;
644         int offset;
645
646         mtd = (struct mtd_info*)kmalloc(sizeof(*mtd), GFP_KERNEL);
647         if (!mtd) {
648                 printk(KERN_WARNING
649                        "%s: kmalloc failed for info structure\n", map->name);
650                 return NULL;
651         }
652         memset(mtd, 0, sizeof(*mtd));
653         mtd->priv = map;
654
655         memset(&temp, 0, sizeof(temp));
656
657         printk("%s: Probing for AMD compatible flash...\n", map->name);
658
659         if ((table_pos[0] = probe_new_chip(mtd, 0, NULL, &temp, table,
660                                            ARRAY_SIZE(table)))
661             == -1) {
662                 printk(KERN_WARNING
663                        "%s: Found no AMD compatible device at location zero\n",
664                        map->name);
665                 kfree(mtd);
666
667                 return NULL;
668         }
669
670         chips[0].start = 0;
671         chips[0].state = FL_READY;
672         chips[0].mutex = &chips[0]._spinlock;
673         temp.numchips = 1;
674         for (size = mtd->size; size > 1; size >>= 1) {
675                 temp.chipshift++;
676         }
677         switch (temp.interleave) {
678                 case 2:
679                         temp.chipshift += 1;
680                         break;
681                 case 4:
682                         temp.chipshift += 2;
683                         break;
684         }
685
686         /* Find out if there are any more chips in the map. */
687         for (base = (1 << temp.chipshift);
688              base < map->size;
689              base += (1 << temp.chipshift)) {
690                 int numchips = temp.numchips;
691                 table_pos[numchips] = probe_new_chip(mtd, base, chips,
692                         &temp, table, ARRAY_SIZE(table));
693         }
694
695         mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) *
696                                     mtd->numeraseregions, GFP_KERNEL);
697         if (!mtd->eraseregions) {
698                 printk(KERN_WARNING "%s: Failed to allocate "
699                        "memory for MTD erase region info\n", map->name);
700                 kfree(mtd);
701                 map->fldrv_priv = NULL;
702                 return NULL;
703         }
704
705         reg_idx = 0;
706         offset = 0;
707         for (i = 0; i < temp.numchips; i++) {
708                 int dev_size;
709                 int j;
710
711                 dev_size = 0;
712                 for (j = 0; j < table[table_pos[i]].numeraseregions; j++) {
713                         mtd->eraseregions[reg_idx].offset = offset +
714                                 (table[table_pos[i]].regions[j].offset *
715                                  temp.interleave);
716                         mtd->eraseregions[reg_idx].erasesize =
717                                 table[table_pos[i]].regions[j].erasesize *
718                                 temp.interleave;
719                         mtd->eraseregions[reg_idx].numblocks =
720                                 table[table_pos[i]].regions[j].numblocks;
721                         if (mtd->erasesize <
722                             mtd->eraseregions[reg_idx].erasesize) {
723                                 mtd->erasesize =
724                                         mtd->eraseregions[reg_idx].erasesize;
725                         }
726                         dev_size += mtd->eraseregions[reg_idx].erasesize *
727                                     mtd->eraseregions[reg_idx].numblocks;
728                         reg_idx++;
729                 }
730                 offset += dev_size;
731         }
732         mtd->type = MTD_NORFLASH;
733         mtd->writesize = 1;
734         mtd->flags = MTD_CAP_NORFLASH;
735         mtd->name = map->name;
736         mtd->erase = amd_flash_erase;
737         mtd->read = amd_flash_read;
738         mtd->write = amd_flash_write;
739         mtd->sync = amd_flash_sync;
740         mtd->suspend = amd_flash_suspend;
741         mtd->resume = amd_flash_resume;
742         mtd->lock = amd_flash_lock;
743         mtd->unlock = amd_flash_unlock;
744
745         private = kmalloc(sizeof(*private) + (sizeof(struct flchip) *
746                                               temp.numchips), GFP_KERNEL);
747         if (!private) {
748                 printk(KERN_WARNING
749                        "%s: kmalloc failed for private structure\n", map->name);
750                 kfree(mtd);
751                 map->fldrv_priv = NULL;
752                 return NULL;
753         }
754         memcpy(private, &temp, sizeof(temp));
755         memcpy(private->chips, chips,
756                sizeof(struct flchip) * private->numchips);
757         for (i = 0; i < private->numchips; i++) {
758                 init_waitqueue_head(&private->chips[i].wq);
759                 spin_lock_init(&private->chips[i]._spinlock);
760         }
761
762         map->fldrv_priv = private;
763
764         map->fldrv = &amd_flash_chipdrv;
765
766         __module_get(THIS_MODULE);
767         return mtd;
768 }
769
770
771
/*
 * Read 'len' bytes starting at chip-relative offset 'adr' from a single
 * chip into 'buf'.
 *
 * Sleeps (uninterruptibly, but aborts on a pending signal with -EINTR)
 * until the chip reaches FL_READY, then performs the copy under
 * chip->mutex.  Returns 0 on success.
 */
static inline int read_one_chip(struct map_info *map, struct flchip *chip,
			       loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;

retry:
	spin_lock_bh(chip->mutex);

	if (chip->state != FL_READY){
		printk(KERN_INFO "%s: waiting for chip to read, state = %d\n",
		       map->name, chip->state);
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		/* Drop the lock before sleeping so the current operation
		 * can finish and wake us. */
		spin_unlock_bh(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);

		if(signal_pending(current)) {
			return -EINTR;
		}

		timeo = jiffies + HZ;

		/* State may have changed again while we slept; recheck
		 * from the top with the lock held. */
		goto retry;
	}

	adr += chip->start;	/* translate to an absolute map offset */

	chip->state = FL_READY;

	map_copy_from(map, buf, adr, len);

	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);

	return 0;
}
812
813
814
815 static int amd_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
816                           size_t *retlen, u_char *buf)
817 {
818         struct map_info *map = mtd->priv;
819         struct amd_flash_private *private = map->fldrv_priv;
820         unsigned long ofs;
821         int chipnum;
822         int ret = 0;
823
824         if ((from + len) > mtd->size) {
825                 printk(KERN_WARNING "%s: read request past end of device "
826                        "(0x%lx)\n", map->name, (unsigned long)from + len);
827
828                 return -EINVAL;
829         }
830
831         /* Offset within the first chip that the first read should start. */
832         chipnum = (from >> private->chipshift);
833         ofs = from - (chipnum <<  private->chipshift);
834
835         *retlen = 0;
836
837         while (len) {
838                 unsigned long this_len;
839
840                 if (chipnum >= private->numchips) {
841                         break;
842                 }
843
844                 if ((len + ofs - 1) >> private->chipshift) {
845                         this_len = (1 << private->chipshift) - ofs;
846                 } else {
847                         this_len = len;
848                 }
849
850                 ret = read_one_chip(map, &private->chips[chipnum], ofs,
851                                     this_len, buf);
852                 if (ret) {
853                         break;
854                 }
855
856                 *retlen += this_len;
857                 len -= this_len;
858                 buf += this_len;
859
860                 ofs = 0;
861                 chipnum++;
862         }
863
864         return ret;
865 }
866
867
868
869 static int write_one_word(struct map_info *map, struct flchip *chip,
870                           unsigned long adr, __u32 datum)
871 {
872         unsigned long timeo = jiffies + HZ;
873         struct amd_flash_private *private = map->fldrv_priv;
874         DECLARE_WAITQUEUE(wait, current);
875         int ret = 0;
876         int times_left;
877
878 retry:
879         spin_lock_bh(chip->mutex);
880
881         if (chip->state != FL_READY){
882                 printk("%s: waiting for chip to write, state = %d\n",
883                        map->name, chip->state);
884                 set_current_state(TASK_UNINTERRUPTIBLE);
885                 add_wait_queue(&chip->wq, &wait);
886
887                 spin_unlock_bh(chip->mutex);
888
889                 schedule();
890                 remove_wait_queue(&chip->wq, &wait);
891                 printk(KERN_INFO "%s: woke up to write\n", map->name);
892                 if(signal_pending(current))
893                         return -EINTR;
894
895                 timeo = jiffies + HZ;
896
897                 goto retry;
898         }
899
900         chip->state = FL_WRITING;
901
902         adr += chip->start;
903         ENABLE_VPP(map);
904         send_cmd(map, chip->start, CMD_PROGRAM_UNLOCK_DATA);
905         wide_write(map, datum, adr);
906
907         times_left = 500000;
908         while (times_left-- && flash_is_busy(map, adr, private->interleave)) {
909                 if (need_resched()) {
910                         spin_unlock_bh(chip->mutex);
911                         schedule();
912                         spin_lock_bh(chip->mutex);
913                 }
914         }
915
916         if (!times_left) {
917                 printk(KERN_WARNING "%s: write to 0x%lx timed out!\n",
918                        map->name, adr);
919                 ret = -EIO;
920         } else {
921                 __u32 verify;
922                 if ((verify = wide_read(map, adr)) != datum) {
923                         printk(KERN_WARNING "%s: write to 0x%lx failed. "
924                                "datum = %x, verify = %x\n",
925                                map->name, adr, datum, verify);
926                         ret = -EIO;
927                 }
928         }
929
930         DISABLE_VPP(map);
931         chip->state = FL_READY;
932         wake_up(&chip->wq);
933         spin_unlock_bh(chip->mutex);
934
935         return ret;
936 }
937
938
939
940 static int amd_flash_write(struct mtd_info *mtd, loff_t to , size_t len,
941                            size_t *retlen, const u_char *buf)
942 {
943         struct map_info *map = mtd->priv;
944         struct amd_flash_private *private = map->fldrv_priv;
945         int ret = 0;
946         int chipnum;
947         unsigned long ofs;
948         unsigned long chipstart;
949
950         *retlen = 0;
951         if (!len) {
952                 return 0;
953         }
954
955         chipnum = to >> private->chipshift;
956         ofs = to  - (chipnum << private->chipshift);
957         chipstart = private->chips[chipnum].start;
958
959         /* If it's not bus-aligned, do the first byte write. */
960         if (ofs & (map->buswidth - 1)) {
961                 unsigned long bus_ofs = ofs & ~(map->buswidth - 1);
962                 int i = ofs - bus_ofs;
963                 int n = 0;
964                 u_char tmp_buf[4];
965                 __u32 datum;
966
967                 map_copy_from(map, tmp_buf,
968                                bus_ofs + private->chips[chipnum].start,
969                                map->buswidth);
970                 while (len && i < map->buswidth)
971                         tmp_buf[i++] = buf[n++], len--;
972
973                 if (map->buswidth == 2) {
974                         datum = *(__u16*)tmp_buf;
975                 } else if (map->buswidth == 4) {
976                         datum = *(__u32*)tmp_buf;
977                 } else {
978                         return -EINVAL;  /* should never happen, but be safe */
979                 }
980
981                 ret = write_one_word(map, &private->chips[chipnum], bus_ofs,
982                                      datum);
983                 if (ret) {
984                         return ret;
985                 }
986
987                 ofs += n;
988                 buf += n;
989                 (*retlen) += n;
990
991                 if (ofs >> private->chipshift) {
992                         chipnum++;
993                         ofs = 0;
994                         if (chipnum == private->numchips) {
995                                 return 0;
996                         }
997                 }
998         }
999
1000         /* We are now aligned, write as much as possible. */
1001         while(len >= map->buswidth) {
1002                 __u32 datum;
1003
1004                 if (map->buswidth == 1) {
1005                         datum = *(__u8*)buf;
1006                 } else if (map->buswidth == 2) {
1007                         datum = *(__u16*)buf;
1008                 } else if (map->buswidth == 4) {
1009                         datum = *(__u32*)buf;
1010                 } else {
1011                         return -EINVAL;
1012                 }
1013
1014                 ret = write_one_word(map, &private->chips[chipnum], ofs, datum);
1015
1016                 if (ret) {
1017                         return ret;
1018                 }
1019
1020                 ofs += map->buswidth;
1021                 buf += map->buswidth;
1022                 (*retlen) += map->buswidth;
1023                 len -= map->buswidth;
1024
1025                 if (ofs >> private->chipshift) {
1026                         chipnum++;
1027                         ofs = 0;
1028                         if (chipnum == private->numchips) {
1029                                 return 0;
1030                         }
1031                         chipstart = private->chips[chipnum].start;
1032                 }
1033         }
1034
1035         if (len & (map->buswidth - 1)) {
1036                 int i = 0, n = 0;
1037                 u_char tmp_buf[2];
1038                 __u32 datum;
1039
1040                 map_copy_from(map, tmp_buf,
1041                                ofs + private->chips[chipnum].start,
1042                                map->buswidth);
1043                 while (len--) {
1044                         tmp_buf[i++] = buf[n++];
1045                 }
1046
1047                 if (map->buswidth == 2) {
1048                         datum = *(__u16*)tmp_buf;
1049                 } else if (map->buswidth == 4) {
1050                         datum = *(__u32*)tmp_buf;
1051                 } else {
1052                         return -EINVAL;  /* should never happen, but be safe */
1053                 }
1054
1055                 ret = write_one_word(map, &private->chips[chipnum], ofs, datum);
1056
1057                 if (ret) {
1058                         return ret;
1059                 }
1060
1061                 (*retlen) += n;
1062         }
1063
1064         return 0;
1065 }
1066
1067
1068
/*
 * Erase one block of 'size' bytes at chip-relative address 'adr'.
 *
 * Waits for FL_READY, issues the sector-erase command sequence, then
 * polls the device with chip->mutex dropped across sleeps.  The wait
 * loop also tolerates an externally suspended erase (chip->state
 * changed away from FL_ERASING) by sleeping on the chip wait queue.
 * After the device reports ready, every byte of the block is read back
 * and checked for 0xFF.
 *
 * Returns 0 on success, -EINTR if interrupted by a signal, -EIO on
 * timeout or verify failure.
 */
static inline int erase_one_block(struct map_info *map, struct flchip *chip,
				  unsigned long adr, u_long size)
{
	unsigned long timeo = jiffies + HZ;
	struct amd_flash_private *private = map->fldrv_priv;
	DECLARE_WAITQUEUE(wait, current);

retry:
	spin_lock_bh(chip->mutex);

	if (chip->state != FL_READY){
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock_bh(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);

		if (signal_pending(current)) {
			return -EINTR;
		}

		timeo = jiffies + HZ;

		goto retry;
	}

	chip->state = FL_ERASING;

	adr += chip->start;	/* translate to an absolute map offset */
	ENABLE_VPP(map);
	send_cmd(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA);
	send_cmd_to_addr(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA_2, adr);

	timeo = jiffies + (HZ * 20);

	/* Give the erase a head start before polling; drop the lock so
	 * others are not blocked for the whole second. */
	spin_unlock_bh(chip->mutex);
	msleep(1000);
	spin_lock_bh(chip->mutex);

	while (flash_is_busy(map, adr, private->interleave)) {

		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			spin_unlock_bh(chip->mutex);
			printk(KERN_INFO "%s: erase suspended. Sleeping\n",
			       map->name);
			schedule();
			remove_wait_queue(&chip->wq, &wait);

			if (signal_pending(current)) {
				return -EINTR;
			}

			timeo = jiffies + (HZ*2); /* FIXME */
			spin_lock_bh(chip->mutex);
			continue;
		}

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_READY;
			spin_unlock_bh(chip->mutex);
			printk(KERN_WARNING "%s: waiting for erase to complete "
			       "timed out.\n", map->name);
			DISABLE_VPP(map);

			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);

		if (need_resched())
			schedule();
		else
			udelay(1);

		spin_lock_bh(chip->mutex);
	}

	/* Verify every single word */
	{
		int address;
		int error = 0;
		__u8 verify;

		/* NOTE(review): 'address' is an int while 'adr' is an
		 * unsigned long — could truncate on very large maps. */
		for (address = adr; address < (adr + size); address++) {
			if ((verify = map_read8(map, address)) != 0xFF) {
				error = 1;
				break;
			}
		}
		if (error) {
			chip->state = FL_READY;
			spin_unlock_bh(chip->mutex);
			printk(KERN_WARNING
			       "%s: verify error at 0x%x, size %ld.\n",
			       map->name, address, size);
			DISABLE_VPP(map);

			return -EIO;
		}
	}

	DISABLE_VPP(map);
	chip->state = FL_READY;
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);

	return 0;
}
1185
1186
1187
/*
 * MTD erase entry point.
 *
 * Validates that the requested range lies within the device and that
 * both its start and end are aligned to the erase size of the erase
 * region they fall in, then erases block by block via erase_one_block(),
 * stepping across erase regions and chips as needed.  Signals
 * completion through instr->state / mtd_erase_callback().
 */
static int amd_flash_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct amd_flash_private *private = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum;
	int ret = 0;
	int i;
	int first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (instr->addr > mtd->size) {
		return -EINVAL;
	}

	if ((instr->len + instr->addr) > mtd->size) {
		return -EINVAL;
	}

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while ((i < mtd->numeraseregions) &&
	       (instr->addr >= regions[i].offset)) {
	       i++;
	}
	i--;

	/* OK, now i is pointing at the erase region in which this
	 * erase request starts. Check the start of the requested
	 * erase range is aligned with the erase size which is in
	 * effect here.
	 */

	if (instr->addr & (regions[i].erasesize-1)) {
		return -EINVAL;
	}

	/* Remember the erase region we start on. */

	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while ((i < mtd->numeraseregions) &&
	       ((instr->addr + instr->len) >= regions[i].offset)) {
		i++;
	}

	/* As before, drop back one to point at the region in which
	 * the address actually falls.
	 */

	i--;

	if ((instr->addr + instr->len) & (regions[i].erasesize-1)) {
		return -EINVAL;
	}

	/* Split the erase address into a chip number and a
	 * chip-relative offset. */
	chipnum = instr->addr >> private->chipshift;
	adr = instr->addr - (chipnum << private->chipshift);
	len = instr->len;

	i = first;

	while (len) {
		ret = erase_one_block(map, &private->chips[chipnum], adr,
				      regions[i].erasesize);

		if (ret) {
			return ret;
		}

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		/* Move to the next erase region once we pass the end of
		 * the current one (compared chip-relative, since region
		 * offsets repeat per chip in an interleaved setup). */
		if ((adr % (1 << private->chipshift)) ==
		    ((regions[i].offset + (regions[i].erasesize *
					   regions[i].numblocks))
		     % (1 << private->chipshift))) {
			i++;
		}

		/* Wrap to the start of the next chip. */
		if (adr >> private->chipshift) {
			adr = 0;
			chipnum++;
			if (chipnum >= private->numchips) {
				break;
			}
		}
	}

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
1296
1297
1298
/*
 * MTD sync entry point.  Waits for every chip to reach an idle state,
 * marks them all FL_SYNCING so no new operation can start, then
 * restores each chip's previous state and wakes any waiters.
 */
static void amd_flash_sync(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct amd_flash_private *private = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i = 0; !ret && (i < private->numchips); i++) {
		chip = &private->chips[i];

	retry:
		spin_lock_bh(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			/* fall through - FL_SYNCING just needs the unlock */
		case FL_SYNCING:
			spin_unlock_bh(chip->mutex);
			break;

		default:
			/* Not an idle state */
			add_wait_queue(&chip->wq, &wait);

			spin_unlock_bh(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */
	for (i--; i >= 0; i--) {
		chip = &private->chips[i];

		spin_lock_bh(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock_bh(chip->mutex);
	}
}
1356
1357
1358
1359 static int amd_flash_suspend(struct mtd_info *mtd)
1360 {
1361 printk("amd_flash_suspend(): not implemented!\n");
1362         return -EINVAL;
1363 }
1364
1365
1366
1367 static void amd_flash_resume(struct mtd_info *mtd)
1368 {
1369 printk("amd_flash_resume(): not implemented!\n");
1370 }
1371
1372
1373
1374 static void amd_flash_destroy(struct mtd_info *mtd)
1375 {
1376         struct map_info *map = mtd->priv;
1377         struct amd_flash_private *private = map->fldrv_priv;
1378         kfree(private);
1379 }
1380
/* Module init: register this chip driver with the MTD core. */
int __init amd_flash_init(void)
{
	register_mtd_chip_driver(&amd_flash_chipdrv);
	return 0;
}
1386
/* Module exit: unregister the chip driver from the MTD core. */
void __exit amd_flash_exit(void)
{
	unregister_mtd_chip_driver(&amd_flash_chipdrv);
}
1391
/* Standard module entry/exit hookup and metadata. */
module_init(amd_flash_init);
module_exit(amd_flash_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jonas Holmberg <jonas.holmberg@axis.com>");
MODULE_DESCRIPTION("Old MTD chip driver for AMD flash chips");