/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <asm/dma.h>

#include <asm/arch/pxa-regs.h>
#include <asm/arch/pxa3xx_nand.h>

#define CHIP_DELAY_TIMEOUT      (2 * HZ/10)

/* registers and bit definitions */
#define NDCR            (0x00) /* Control register */
#define NDTR0CS0        (0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0        (0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR            (0x14) /* Status Register */
#define NDPCR           (0x18) /* Page Count Register */
#define NDBDR0          (0x1C) /* Bad Block Register 0 */
#define NDBDR1          (0x20) /* Bad Block Register 1 */
#define NDDB            (0x40) /* Data Buffer */
#define NDCB0           (0x48) /* Command Buffer0 */
#define NDCB1           (0x4C) /* Command Buffer1 */
#define NDCB2           (0x50) /* Command Buffer2 */

#define NDCR_SPARE_EN           (0x1 << 31)
#define NDCR_ECC_EN             (0x1 << 30)
#define NDCR_DMA_EN             (0x1 << 29)
#define NDCR_ND_RUN             (0x1 << 28)
#define NDCR_DWIDTH_C           (0x1 << 27)
#define NDCR_DWIDTH_M           (0x1 << 26)
#define NDCR_PAGE_SZ            (0x1 << 24)
#define NDCR_NCSX               (0x1 << 23)
#define NDCR_ND_MODE            (0x3 << 21)
#define NDCR_NAND_MODE          (0x0)
#define NDCR_CLR_PG_CNT         (0x1 << 20)
#define NDCR_CLR_ECC            (0x1 << 19)
#define NDCR_RD_ID_CNT_MASK     (0x7 << 16)
#define NDCR_RD_ID_CNT(x)       (((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START           (0x1 << 15)
#define NDCR_PG_PER_BLK         (0x1 << 14)
#define NDCR_ND_ARB_EN          (0x1 << 12)

#define NDSR_MASK               (0xfff)
#define NDSR_RDY                (0x1 << 11)
#define NDSR_CS0_PAGED          (0x1 << 10)
#define NDSR_CS1_PAGED          (0x1 << 9)
#define NDSR_CS0_CMDD           (0x1 << 8)
#define NDSR_CS1_CMDD           (0x1 << 7)
#define NDSR_CS0_BBD            (0x1 << 6)
#define NDSR_CS1_BBD            (0x1 << 5)
#define NDSR_DBERR              (0x1 << 4)
#define NDSR_SBERR              (0x1 << 3)
#define NDSR_WRDREQ             (0x1 << 2)
#define NDSR_RDDREQ             (0x1 << 1)
#define NDSR_WRCMDREQ           (0x1)

#define NDCB0_AUTO_RS           (0x1 << 25)
#define NDCB0_CSEL              (0x1 << 24)
#define NDCB0_CMD_TYPE_MASK     (0x7 << 21)
#define NDCB0_CMD_TYPE(x)       (((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC                (0x1 << 20)
#define NDCB0_DBC               (0x1 << 19)
#define NDCB0_ADDR_CYC_MASK     (0x7 << 16)
#define NDCB0_ADDR_CYC(x)       (((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK         (0xff << 8)
#define NDCB0_CMD1_MASK         (0xff)
#define NDCB0_ADDR_CYC_SHIFT    (16)

/* dma-able I/O address for the NAND data and commands */
#define NDCB0_DMA_ADDR          (0x43100048)
#define NDDB_DMA_ADDR           (0x43100040)

/* macros for registers read/write */
#define nand_writel(info, off, val)     \
        __raw_writel((val), (info)->mmio_base + (off))

#define nand_readl(info, off)           \
        __raw_readl((info)->mmio_base + (off))

/* error code and state */
enum {
        ERR_NONE        = 0,
        ERR_DMABUSERR   = -1,
        ERR_SENDCMD     = -2,
        ERR_DBERR       = -3,
        ERR_BBERR       = -4,
};

enum {
        STATE_READY     = 0,
        STATE_CMD_HANDLE,
        STATE_DMA_READING,
        STATE_DMA_WRITING,
        STATE_DMA_DONE,
        STATE_PIO_READING,
        STATE_PIO_WRITING,
};

struct pxa3xx_nand_timing {
        unsigned int    tCH;  /* Enable signal hold time */
        unsigned int    tCS;  /* Enable signal setup time */
        unsigned int    tWH;  /* ND_nWE high duration */
        unsigned int    tWP;  /* ND_nWE pulse time */
        unsigned int    tRH;  /* ND_nRE high duration */
        unsigned int    tRP;  /* ND_nRE pulse width */
        unsigned int    tR;   /* ND_nWE high to ND_nRE low for read */
        unsigned int    tWHR; /* ND_nWE high to ND_nRE low for status read */
        unsigned int    tAR;  /* ND_ALE low to ND_nRE low delay */
};

struct pxa3xx_nand_cmdset {
        uint16_t        read1;
        uint16_t        read2;
        uint16_t        program;
        uint16_t        read_status;
        uint16_t        read_id;
        uint16_t        erase;
        uint16_t        reset;
        uint16_t        lock;
        uint16_t        unlock;
        uint16_t        lock_status;
};

struct pxa3xx_nand_flash {
        struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
        struct pxa3xx_nand_cmdset *cmdset;

        uint32_t page_per_block;/* Pages per block (PG_PER_BLK) */
        uint32_t page_size;     /* Page size in bytes (PAGE_SZ) */
        uint32_t flash_width;   /* Width of Flash memory (DWIDTH_M) */
        uint32_t dfc_width;     /* Width of flash controller (DWIDTH_C) */
        uint32_t num_blocks;    /* Number of physical blocks in Flash */
        uint32_t chip_id;

        /* NOTE: these are automatically calculated, do not define */
        size_t          oob_size;
        size_t          read_id_bytes;

        unsigned int    col_addr_cycles;
        unsigned int    row_addr_cycles;
};

struct pxa3xx_nand_info {
        struct nand_chip        nand_chip;

        struct platform_device   *pdev;
        struct pxa3xx_nand_flash *flash_info;

        struct clk              *clk;
        void __iomem            *mmio_base;

        unsigned int            buf_start;
        unsigned int            buf_count;

        /* DMA information */
        int                     drcmr_dat;
        int                     drcmr_cmd;

        unsigned char           *data_buff;
        dma_addr_t              data_buff_phys;
        size_t                  data_buff_size;
        int                     data_dma_ch;
        struct pxa_dma_desc     *data_desc;
        dma_addr_t              data_desc_addr;

        uint32_t                reg_ndcr;

        /* saved column/page_addr during CMD_SEQIN */
        int                     seqin_column;
        int                     seqin_page_addr;

        /* relate to the command */
        unsigned int            state;

        int                     use_ecc;        /* use HW ECC ? */
        int                     use_dma;        /* use DMA ? */

        size_t                  data_size;      /* data size in FIFO */
        int                     retcode;
        struct completion       cmd_complete;

        /* generated NDCBx register values */
        uint32_t                ndcb0;
        uint32_t                ndcb1;
        uint32_t                ndcb2;
};

static int use_dma = 1;
module_param(use_dma, bool, 0444);
MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");

static struct pxa3xx_nand_cmdset smallpage_cmdset = {
        .read1          = 0x0000,
        .read2          = 0x0050,
        .program        = 0x1080,
        .read_status    = 0x0070,
        .read_id        = 0x0090,
        .erase          = 0xD060,
        .reset          = 0x00FF,
        .lock           = 0x002A,
        .unlock         = 0x2423,
        .lock_status    = 0x007A,
};

static struct pxa3xx_nand_cmdset largepage_cmdset = {
        .read1          = 0x3000,
        .read2          = 0x0050,
        .program        = 0x1080,
        .read_status    = 0x0070,
        .read_id        = 0x0090,
        .erase          = 0xD060,
        .reset          = 0x00FF,
        .lock           = 0x002A,
        .unlock         = 0x2423,
        .lock_status    = 0x007A,
};

static struct pxa3xx_nand_timing samsung512MbX16_timing = {
        .tCH    = 10,
        .tCS    = 0,
        .tWH    = 20,
        .tWP    = 40,
        .tRH    = 30,
        .tRP    = 40,
        .tR     = 11123,
        .tWHR   = 110,
        .tAR    = 10,
};

static struct pxa3xx_nand_flash samsung512MbX16 = {
        .timing         = &samsung512MbX16_timing,
        .cmdset         = &smallpage_cmdset,
        .page_per_block = 32,
        .page_size      = 512,
        .flash_width    = 16,
        .dfc_width      = 16,
        .num_blocks     = 4096,
        .chip_id        = 0x46ec,
};

static struct pxa3xx_nand_timing micron_timing = {
        .tCH    = 10,
        .tCS    = 25,
        .tWH    = 15,
        .tWP    = 25,
        .tRH    = 15,
        .tRP    = 25,
        .tR     = 25000,
        .tWHR   = 60,
        .tAR    = 10,
};

static struct pxa3xx_nand_flash micron1GbX8 = {
        .timing         = &micron_timing,
        .cmdset         = &largepage_cmdset,
        .page_per_block = 64,
        .page_size      = 2048,
        .flash_width    = 8,
        .dfc_width      = 8,
        .num_blocks     = 1024,
        .chip_id        = 0xa12c,
};

static struct pxa3xx_nand_flash micron1GbX16 = {
        .timing         = &micron_timing,
        .cmdset         = &largepage_cmdset,
        .page_per_block = 64,
        .page_size      = 2048,
        .flash_width    = 16,
        .dfc_width      = 16,
        .num_blocks     = 1024,
        .chip_id        = 0xb12c,
};

static struct pxa3xx_nand_flash *builtin_flash_types[] = {
        &samsung512MbX16,
        &micron1GbX8,
        &micron1GbX16,
};

#define NDTR0_tCH(c)    (min((c), 7) << 19)
#define NDTR0_tCS(c)    (min((c), 7) << 16)
#define NDTR0_tWH(c)    (min((c), 7) << 11)
#define NDTR0_tWP(c)    (min((c), 7) << 8)
#define NDTR0_tRH(c)    (min((c), 7) << 3)
#define NDTR0_tRP(c)    (min((c), 7) << 0)

#define NDTR1_tR(c)     (min((c), 65535) << 16)
#define NDTR1_tWHR(c)   (min((c), 15) << 4)
#define NDTR1_tAR(c)    (min((c), 15) << 0)

/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk)       (int)(((ns) * (clk / 1000000) / 1000) + 1)

static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
                                   struct pxa3xx_nand_timing *t)
{
        unsigned long nand_clk = clk_get_rate(info->clk);
        uint32_t ndtr0, ndtr1;

        ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
                NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
                NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
                NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
                NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
                NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

        ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
                NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
                NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

        nand_writel(info, NDTR0CS0, ndtr0);
        nand_writel(info, NDTR1CS0, ndtr1);
}

#define WAIT_EVENT_TIMEOUT      10

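/*
 * Busy-wait for any of the given NDSR event bits to assert, polling in
 * 10us steps for up to WAIT_EVENT_TIMEOUT iterations; the observed
 * status bits are acknowledged by writing them back before returning.
 */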
static int wait_for_event(struct pxa3xx_nand_info *info, uint32_t event)
{
        int timeout = WAIT_EVENT_TIMEOUT;
        uint32_t ndsr;

        while (timeout--) {
                ndsr = nand_readl(info, NDSR) & NDSR_MASK;
                if (ndsr & event) {
                        nand_writel(info, NDSR, ndsr);
                        return 0;
                }
                udelay(10);
        }

        return -ETIMEDOUT;
}

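/*
 * Build the NDCB0..NDCB2 command-buffer words for a page read or page
 * program: derive the FIFO data size from the page size and ECC mode,
 * pack the column and page address into the address cycles, and flag
 * program commands for automatic status read (NDCB0_AUTO_RS).
 */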
static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info,
                        uint16_t cmd, int column, int page_addr)
{
        struct pxa3xx_nand_flash *f = info->flash_info;
        struct pxa3xx_nand_cmdset *cmdset = f->cmdset;

        /* calculate data size */
        switch (f->page_size) {
        case 2048:
                info->data_size = (info->use_ecc) ? 2088 : 2112;
                break;
        case 512:
                info->data_size = (info->use_ecc) ? 520 : 528;
                break;
        default:
                return -EINVAL;
        }

        /* generate values for NDCBx registers */
        info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
        info->ndcb1 = 0;
        info->ndcb2 = 0;
        info->ndcb0 |= NDCB0_ADDR_CYC(f->row_addr_cycles + f->col_addr_cycles);

        if (f->col_addr_cycles == 2) {
                /* large block: 2 cycles for the column address,
                 * row address starts from the 3rd cycle
                 */
                info->ndcb1 |= (page_addr << 16) | (column & 0xffff);
                if (f->row_addr_cycles == 3)
                        info->ndcb2 = (page_addr >> 16) & 0xff;
        } else
                /* small block: 1 cycle for the column address,
                 * row address starts from the 2nd cycle
                 */
                info->ndcb1 = (page_addr << 8) | (column & 0xff);

        if (cmd == cmdset->program)
                info->ndcb0 |= NDCB0_CMD_TYPE(1) | NDCB0_AUTO_RS;

        return 0;
}

static int prepare_erase_cmd(struct pxa3xx_nand_info *info,
                        uint16_t cmd, int page_addr)
{
        info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
        info->ndcb0 |= NDCB0_CMD_TYPE(2) | NDCB0_AUTO_RS | NDCB0_ADDR_CYC(3);
        info->ndcb1 = page_addr;
        info->ndcb2 = 0;
        return 0;
}

static int prepare_other_cmd(struct pxa3xx_nand_info *info, uint16_t cmd)
{
        struct pxa3xx_nand_cmdset *cmdset = info->flash_info->cmdset;

        info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0);
        info->ndcb1 = 0;
        info->ndcb2 = 0;

        if (cmd == cmdset->read_id) {
                info->ndcb0 |= NDCB0_CMD_TYPE(3);
                info->data_size = 8;
        } else if (cmd == cmdset->read_status) {
                info->ndcb0 |= NDCB0_CMD_TYPE(4);
                info->data_size = 8;
        } else if (cmd == cmdset->reset || cmd == cmdset->lock ||
                   cmd == cmdset->unlock) {
                info->ndcb0 |= NDCB0_CMD_TYPE(5);
        } else
                return -EINVAL;

        return 0;
}

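/*
 * NDCR holds interrupt *mask* bits: clearing a bit enables the
 * corresponding interrupt, setting it masks the interrupt.  Hence
 * enable_int() clears int_mask and disable_int() sets it.
 */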
static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
        uint32_t ndcr;

        ndcr = nand_readl(info, NDCR);
        nand_writel(info, NDCR, ndcr & ~int_mask);
}

static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
        uint32_t ndcr;

        ndcr = nand_readl(info, NDCR);
        nand_writel(info, NDCR, ndcr | int_mask);
}

/* NOTE: ND_RUN must be set first, and only then may the command buffer
 * be written; otherwise the controller does not start.
 */
static int write_cmd(struct pxa3xx_nand_info *info)
{
        uint32_t ndcr;

        /* clear status bits and run */
        nand_writel(info, NDSR, NDSR_MASK);

        ndcr = info->reg_ndcr;

        ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
        ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
        ndcr |= NDCR_ND_RUN;

        nand_writel(info, NDCR, ndcr);

        if (wait_for_event(info, NDSR_WRCMDREQ)) {
                printk(KERN_ERR "timed out writing command\n");
                return -ETIMEDOUT;
        }

        /* all three command buffer words are written through NDCB0 */
        nand_writel(info, NDCB0, info->ndcb0);
        nand_writel(info, NDCB0, info->ndcb1);
        nand_writel(info, NDCB0, info->ndcb2);
        return 0;
}

static int handle_data_pio(struct pxa3xx_nand_info *info)
{
        int ret, timeout = CHIP_DELAY_TIMEOUT;

        switch (info->state) {
        case STATE_PIO_WRITING:
                /* __raw_writesl()/__raw_readsl() take a count in 32-bit words */
                __raw_writesl(info->mmio_base + NDDB, info->data_buff,
                                DIV_ROUND_UP(info->data_size, 4));

                enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);

                ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
                if (!ret) {
                        printk(KERN_ERR "program command time out\n");
                        return -1;
                }
                break;
        case STATE_PIO_READING:
                __raw_readsl(info->mmio_base + NDDB, info->data_buff,
                                DIV_ROUND_UP(info->data_size, 4));
                break;
        default:
                printk(KERN_ERR "%s: invalid state %d\n", __func__,
                                info->state);
                return -EINVAL;
        }

        info->state = STATE_READY;
        return 0;
}

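/*
 * Set up a single DMA descriptor for the data buffer and start the
 * transfer: buffer -> NDDB when dir_out is set, NDDB -> buffer
 * otherwise.  The length is the FIFO data size rounded up to the
 * 32-byte burst size.
 */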
static void start_data_dma(struct pxa3xx_nand_info *info, int dir_out)
{
        struct pxa_dma_desc *desc = info->data_desc;
        int dma_len = ALIGN(info->data_size, 32);

        desc->ddadr = DDADR_STOP;
        desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;

        if (dir_out) {
                desc->dsadr = info->data_buff_phys;
                desc->dtadr = NDDB_DMA_ADDR;
                desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
        } else {
                desc->dtadr = info->data_buff_phys;
                desc->dsadr = NDDB_DMA_ADDR;
                desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
        }

        DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
        DDADR(info->data_dma_ch) = info->data_desc_addr;
        DCSR(info->data_dma_ch) |= DCSR_RUN;
}

static void pxa3xx_nand_data_dma_irq(int channel, void *data)
{
        struct pxa3xx_nand_info *info = data;
        uint32_t dcsr;

        dcsr = DCSR(channel);
        DCSR(channel) = dcsr;

        if (dcsr & DCSR_BUSERR) {
                info->retcode = ERR_DMABUSERR;
                complete(&info->cmd_complete);
        }

        if (info->state == STATE_DMA_WRITING) {
                info->state = STATE_DMA_DONE;
                enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
        } else {
                info->state = STATE_READY;
                complete(&info->cmd_complete);
        }
}

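/*
 * Controller interrupt handler: on a read/write data request either
 * start DMA or hand over to PIO, and on command-done/bad-block events
 * record the result and complete the pending command.
 */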
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
        struct pxa3xx_nand_info *info = devid;
        unsigned int status;

        status = nand_readl(info, NDSR);

        if (status & (NDSR_RDDREQ | NDSR_DBERR)) {
                if (status & NDSR_DBERR)
                        info->retcode = ERR_DBERR;

                disable_int(info, NDSR_RDDREQ | NDSR_DBERR);

                if (info->use_dma) {
                        info->state = STATE_DMA_READING;
                        start_data_dma(info, 0);
                } else {
                        info->state = STATE_PIO_READING;
                        complete(&info->cmd_complete);
                }
        } else if (status & NDSR_WRDREQ) {
                disable_int(info, NDSR_WRDREQ);
                if (info->use_dma) {
                        info->state = STATE_DMA_WRITING;
                        start_data_dma(info, 1);
                } else {
                        info->state = STATE_PIO_WRITING;
                        complete(&info->cmd_complete);
                }
        } else if (status & (NDSR_CS0_BBD | NDSR_CS0_CMDD)) {
                if (status & NDSR_CS0_BBD)
                        info->retcode = ERR_BBERR;

                disable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
                info->state = STATE_READY;
                complete(&info->cmd_complete);
        }
        nand_writel(info, NDSR, status);
        return IRQ_HANDLED;
}

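/*
 * Issue the prepared command: write the command buffer, unmask the
 * expected completion events and wait for the interrupt handler to
 * signal cmd_complete; data is moved by PIO here when DMA is disabled.
 * On failure, ND_RUN is cleared to stop the controller.
 */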
static int pxa3xx_nand_do_cmd(struct pxa3xx_nand_info *info, uint32_t event)
{
        uint32_t ndcr;
        int ret, timeout = CHIP_DELAY_TIMEOUT;

        if (write_cmd(info)) {
                info->retcode = ERR_SENDCMD;
                goto fail_stop;
        }

        info->state = STATE_CMD_HANDLE;

        enable_int(info, event);

        ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
        if (!ret) {
                printk(KERN_ERR "command execution timed out\n");
                info->retcode = ERR_SENDCMD;
                goto fail_stop;
        }

        if (info->use_dma == 0 && info->data_size > 0)
                if (handle_data_pio(info))
                        goto fail_stop;

        return 0;

fail_stop:
        ndcr = nand_readl(info, NDCR);
        nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
        udelay(10);
        return -ETIMEDOUT;
}

static int pxa3xx_nand_dev_ready(struct mtd_info *mtd)
{
        struct pxa3xx_nand_info *info = mtd->priv;
        return (nand_readl(info, NDSR) & NDSR_RDY) ? 1 : 0;
}

static inline int is_buf_blank(uint8_t *buf, size_t len)
{
        for (; len > 0; len--)
                if (*buf++ != 0xff)
                        return 0;
        return 1;
}

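/*
 * mtd cmdfunc hook: translate the generic NAND command into the
 * controller's command-buffer format, run it via pxa3xx_nand_do_cmd()
 * and leave the result in data_buff/retcode for the read/write hooks.
 */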
static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
                                int column, int page_addr)
{
        struct pxa3xx_nand_info *info = mtd->priv;
        struct pxa3xx_nand_flash *flash_info = info->flash_info;
        struct pxa3xx_nand_cmdset *cmdset = flash_info->cmdset;
        int ret;

        info->use_dma = (use_dma) ? 1 : 0;
        info->use_ecc = 0;
        info->data_size = 0;
        info->state = STATE_READY;

        init_completion(&info->cmd_complete);

        switch (command) {
        case NAND_CMD_READOOB:
                /* disable HW ECC to get all the OOB data */
                info->buf_count = mtd->writesize + mtd->oobsize;
                info->buf_start = mtd->writesize + column;

                if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr))
                        break;

                pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR);

                /* We only want the OOB here, so a data ECC error does not matter */
                if (info->retcode == ERR_DBERR)
                        info->retcode = ERR_NONE;
                break;

        case NAND_CMD_READ0:
                info->use_ecc = 1;
                info->retcode = ERR_NONE;
                info->buf_start = column;
                info->buf_count = mtd->writesize + mtd->oobsize;
                memset(info->data_buff, 0xFF, info->buf_count);

                if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr))
                        break;

                pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR);

                if (info->retcode == ERR_DBERR) {
                        /* for a blank page (all 0xff), the HW calculates its
                         * ECC as 0, which differs from the ECC information
                         * within the OOB; ignore such double bit errors
                         */
                        if (is_buf_blank(info->data_buff, mtd->writesize))
                                info->retcode = ERR_NONE;
                }
                break;
        case NAND_CMD_SEQIN:
                info->buf_start = column;
                info->buf_count = mtd->writesize + mtd->oobsize;
                memset(info->data_buff, 0xff, info->buf_count);

                /* save column/page_addr for next CMD_PAGEPROG */
                info->seqin_column = column;
                info->seqin_page_addr = page_addr;
                break;
        case NAND_CMD_PAGEPROG:
                info->use_ecc = (info->seqin_column >= mtd->writesize) ? 0 : 1;

                if (prepare_read_prog_cmd(info, cmdset->program,
                                info->seqin_column, info->seqin_page_addr))
                        break;

                pxa3xx_nand_do_cmd(info, NDSR_WRDREQ);
                break;
        case NAND_CMD_ERASE1:
                if (prepare_erase_cmd(info, cmdset->erase, page_addr))
                        break;

                pxa3xx_nand_do_cmd(info, NDSR_CS0_BBD | NDSR_CS0_CMDD);
                break;
        case NAND_CMD_ERASE2:
                break;
        case NAND_CMD_READID:
        case NAND_CMD_STATUS:
                info->use_dma = 0;      /* force PIO read */
                info->buf_start = 0;
                info->buf_count = (command == NAND_CMD_READID) ?
                                flash_info->read_id_bytes : 1;

                if (prepare_other_cmd(info, (command == NAND_CMD_READID) ?
                                cmdset->read_id : cmdset->read_status))
                        break;

                pxa3xx_nand_do_cmd(info, NDSR_RDDREQ);
                break;
        case NAND_CMD_RESET:
                if (prepare_other_cmd(info, cmdset->reset))
                        break;

                ret = pxa3xx_nand_do_cmd(info, NDSR_CS0_CMDD);
                if (ret == 0) {
                        int timeout = 2;
                        uint32_t ndcr;

                        while (timeout--) {
                                if (nand_readl(info, NDSR) & NDSR_RDY)
                                        break;
                                msleep(10);
                        }

                        ndcr = nand_readl(info, NDCR);
                        nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
                }
                break;
        default:
                printk(KERN_ERR "unsupported command\n");
                break;
        }

        if (info->retcode == ERR_DBERR) {
                printk(KERN_ERR "double bit error @ page %08x\n", page_addr);
                info->retcode = ERR_NONE;
        }
}

static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
        struct pxa3xx_nand_info *info = mtd->priv;
        char retval = 0xFF;

        if (info->buf_start < info->buf_count)
                /* Has a new command just been sent? */
                retval = info->data_buff[info->buf_start++];

        return retval;
}

static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
        struct pxa3xx_nand_info *info = mtd->priv;
        u16 retval = 0xFFFF;

        if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
                retval = *((u16 *)(info->data_buff+info->buf_start));
                info->buf_start += 2;
        }
        return retval;
}

static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
        struct pxa3xx_nand_info *info = mtd->priv;
        int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

        memcpy(buf, info->data_buff + info->buf_start, real_len);
        info->buf_start += real_len;
}

static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
                const uint8_t *buf, int len)
{
        struct pxa3xx_nand_info *info = mtd->priv;
        int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

        memcpy(info->data_buff + info->buf_start, buf, real_len);
        info->buf_start += real_len;
}

static int pxa3xx_nand_verify_buf(struct mtd_info *mtd,
                const uint8_t *buf, int len)
{
        return 0;
}

static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
        return;
}

static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
        struct pxa3xx_nand_info *info = mtd->priv;

        /* pxa3xx_nand_do_cmd() has already waited for the command to complete */
        if (this->state == FL_WRITING || this->state == FL_ERASING) {
                if (info->retcode == ERR_NONE)
                        return 0;
                else {
                        /*
                         * any error makes this return 0x01, which tells
                         * the caller that the erase or write failed
                         */
                        return 0x01;
                }
        }

        return 0;
}

static void pxa3xx_nand_ecc_hwctl(struct mtd_info *mtd, int mode)
{
        return;
}

static int pxa3xx_nand_ecc_calculate(struct mtd_info *mtd,
                const uint8_t *dat, uint8_t *ecc_code)
{
        return 0;
}

static int pxa3xx_nand_ecc_correct(struct mtd_info *mtd,
                uint8_t *dat, uint8_t *read_ecc, uint8_t *calc_ecc)
{
        struct pxa3xx_nand_info *info = mtd->priv;
        /*
         * Any error (ERR_SENDCMD, ERR_DBERR, ERR_DMABUSERR) is reported
         * here as an ECC error, which tells the caller that the read
         * failed.  The errors are distinguished internally, but
         * nand_read_ecc() only checks this function's return value.
         */
        if (info->retcode != ERR_NONE)
                return -1;

        return 0;
}

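/*
 * Issue READ ID synchronously (no interrupts): send the command, poll
 * for the read-data request and fetch the first two ID bytes from the
 * data buffer.
 */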
static int __readid(struct pxa3xx_nand_info *info, uint32_t *id)
{
        struct pxa3xx_nand_flash *f = info->flash_info;
        struct pxa3xx_nand_cmdset *cmdset = f->cmdset;
        uint32_t ndcr;
        uint8_t  id_buff[8];

        if (prepare_other_cmd(info, cmdset->read_id)) {
                printk(KERN_ERR "failed to prepare command\n");
                return -EINVAL;
        }

        /* Send command */
        if (write_cmd(info))
                goto fail_timeout;

        /* Wait for the read data request (ID bytes ready in the data buffer) */
        if (wait_for_event(info, NDSR_RDDREQ))
                goto fail_timeout;

        __raw_readsl(info->mmio_base + NDDB, id_buff, 2);
        *id = id_buff[0] | (id_buff[1] << 8);
        return 0;

fail_timeout:
        ndcr = nand_readl(info, NDCR);
        nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN);
        udelay(10);
        return -ETIMEDOUT;
}

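/*
 * Derive the controller configuration from the flash geometry: OOB
 * size, number of READ ID bytes, column/row address cycles and the
 * NDCR value (bus widths, page size, pages per block, arbiter), then
 * program the timing registers.
 */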
static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
                                    struct pxa3xx_nand_flash *f)
{
        struct platform_device *pdev = info->pdev;
        struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
        uint32_t ndcr = 0x00000FFF; /* disable all interrupts */

        if (f->page_size != 2048 && f->page_size != 512)
                return -EINVAL;

        if (f->flash_width != 16 && f->flash_width != 8)
                return -EINVAL;

        /* calculate flash information */
        f->oob_size = (f->page_size == 2048) ? 64 : 16;
        f->read_id_bytes = (f->page_size == 2048) ? 4 : 2;

        /* calculate addressing information */
        f->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;

        if (f->num_blocks * f->page_per_block > 65536)
                f->row_addr_cycles = 3;
        else
                f->row_addr_cycles = 2;

        ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
        ndcr |= (f->col_addr_cycles == 2) ? NDCR_RA_START : 0;
        ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
        ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
        ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
        ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;

        ndcr |= NDCR_RD_ID_CNT(f->read_id_bytes);
        ndcr |= NDCR_SPARE_EN; /* enable spare by default */

        info->reg_ndcr = ndcr;

        pxa3xx_nand_set_timing(info, f->timing);
        info->flash_info = f;
        return 0;
}

static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info)
{
        struct pxa3xx_nand_flash *f;
        uint32_t id;
        int i;

        for (i = 0; i < ARRAY_SIZE(builtin_flash_types); i++) {

                f = builtin_flash_types[i];

                if (pxa3xx_nand_config_flash(info, f))
                        continue;

                if (__readid(info, &id))
                        continue;

                if (id == f->chip_id)
                        return 0;
        }

        return -ENODEV;
}

/* the maximum possible buffer size for large page with OOB data
 * is: 2048 + 64 = 2112 bytes, allocate a page here for both the
 * data buffer and the DMA descriptor
 */
#define MAX_BUFF_SIZE   PAGE_SIZE

static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
        struct platform_device *pdev = info->pdev;
        int data_desc_offset = MAX_BUFF_SIZE - sizeof(struct pxa_dma_desc);

        if (use_dma == 0) {
                info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
                if (info->data_buff == NULL)
                        return -ENOMEM;
                return 0;
        }

        info->data_buff = dma_alloc_coherent(&pdev->dev, MAX_BUFF_SIZE,
                                &info->data_buff_phys, GFP_KERNEL);
        if (info->data_buff == NULL) {
                dev_err(&pdev->dev, "failed to allocate dma buffer\n");
                return -ENOMEM;
        }

        info->data_buff_size = MAX_BUFF_SIZE;
        info->data_desc = (void *)info->data_buff + data_desc_offset;
        info->data_desc_addr = info->data_buff_phys + data_desc_offset;

        info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
                                pxa3xx_nand_data_dma_irq, info);
        if (info->data_dma_ch < 0) {
                dev_err(&pdev->dev, "failed to request data dma\n");
                dma_free_coherent(&pdev->dev, info->data_buff_size,
                                info->data_buff, info->data_buff_phys);
                return info->data_dma_ch;
        }

        return 0;
}

static struct nand_ecclayout hw_smallpage_ecclayout = {
        .eccbytes = 6,
        .eccpos = {8, 9, 10, 11, 12, 13 },
        .oobfree = { {2, 6} }
};

static struct nand_ecclayout hw_largepage_ecclayout = {
        .eccbytes = 24,
        .eccpos = {
                40, 41, 42, 43, 44, 45, 46, 47,
                48, 49, 50, 51, 52, 53, 54, 55,
                56, 57, 58, 59, 60, 61, 62, 63},
        .oobfree = { {2, 38} }
};

static void pxa3xx_nand_init_mtd(struct mtd_info *mtd,
                                 struct pxa3xx_nand_info *info)
{
        struct pxa3xx_nand_flash *f = info->flash_info;
        struct nand_chip *this = &info->nand_chip;

        this->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16 : 0;

        this->waitfunc          = pxa3xx_nand_waitfunc;
        this->select_chip       = pxa3xx_nand_select_chip;
        this->dev_ready         = pxa3xx_nand_dev_ready;
        this->cmdfunc           = pxa3xx_nand_cmdfunc;
        this->read_word         = pxa3xx_nand_read_word;
        this->read_byte         = pxa3xx_nand_read_byte;
        this->read_buf          = pxa3xx_nand_read_buf;
        this->write_buf         = pxa3xx_nand_write_buf;
        this->verify_buf        = pxa3xx_nand_verify_buf;

        this->ecc.mode          = NAND_ECC_HW;
        this->ecc.hwctl         = pxa3xx_nand_ecc_hwctl;
        this->ecc.calculate     = pxa3xx_nand_ecc_calculate;
        this->ecc.correct       = pxa3xx_nand_ecc_correct;
        this->ecc.size          = f->page_size;

        if (f->page_size == 2048)
                this->ecc.layout = &hw_largepage_ecclayout;
        else
                this->ecc.layout = &hw_smallpage_ecclayout;

        this->chip_delay = 25;
}

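/*
 * Probe: grab the NAND clock, DMA request lines, IRQ and MMIO region,
 * allocate the data/descriptor buffer, detect the flash from the
 * built-in table, then hand the chip to nand_scan() and register the
 * partitions from platform data.
 */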
static int pxa3xx_nand_probe(struct platform_device *pdev)
{
        struct pxa3xx_nand_platform_data *pdata;
        struct pxa3xx_nand_info *info;
        struct nand_chip *this;
        struct mtd_info *mtd;
        struct resource *r;
        int ret = 0, irq;

        pdata = pdev->dev.platform_data;

        if (!pdata) {
                dev_err(&pdev->dev, "no platform data defined\n");
                return -ENODEV;
        }

        mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info),
                        GFP_KERNEL);
        if (!mtd) {
                dev_err(&pdev->dev, "failed to allocate memory\n");
                return -ENOMEM;
        }

        info = (struct pxa3xx_nand_info *)(&mtd[1]);
        info->pdev = pdev;

        this = &info->nand_chip;
        mtd->priv = info;

        info->clk = clk_get(&pdev->dev, "NANDCLK");
        if (IS_ERR(info->clk)) {
                dev_err(&pdev->dev, "failed to get nand clock\n");
                ret = PTR_ERR(info->clk);
                goto fail_free_mtd;
        }
        clk_enable(info->clk);

        r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
        if (r == NULL) {
                dev_err(&pdev->dev, "no resource defined for data DMA\n");
                ret = -ENXIO;
                goto fail_put_clk;
        }
        info->drcmr_dat = r->start;

        r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
        if (r == NULL) {
                dev_err(&pdev->dev, "no resource defined for command DMA\n");
                ret = -ENXIO;
                goto fail_put_clk;
        }
        info->drcmr_cmd = r->start;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "no IRQ resource defined\n");
                ret = -ENXIO;
                goto fail_put_clk;
        }

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (r == NULL) {
                dev_err(&pdev->dev, "no IO memory resource defined\n");
                ret = -ENODEV;
                goto fail_put_clk;
        }

        r = request_mem_region(r->start, r->end - r->start + 1, pdev->name);
        if (r == NULL) {
                dev_err(&pdev->dev, "failed to request memory resource\n");
                ret = -EBUSY;
                goto fail_put_clk;
        }

        info->mmio_base = ioremap(r->start, r->end - r->start + 1);
        if (info->mmio_base == NULL) {
                dev_err(&pdev->dev, "ioremap() failed\n");
                ret = -ENODEV;
                goto fail_free_res;
        }

        ret = pxa3xx_nand_init_buff(info);
        if (ret)
                goto fail_free_io;

        ret = request_irq(IRQ_NAND, pxa3xx_nand_irq, IRQF_DISABLED,
                                pdev->name, info);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to request IRQ\n");
                goto fail_free_buf;
        }

        ret = pxa3xx_nand_detect_flash(info);
        if (ret) {
                dev_err(&pdev->dev, "failed to detect flash\n");
                ret = -ENODEV;
                goto fail_free_irq;
        }

        pxa3xx_nand_init_mtd(mtd, info);

        platform_set_drvdata(pdev, mtd);

        if (nand_scan(mtd, 1)) {
                dev_err(&pdev->dev, "failed to scan nand\n");
                ret = -ENXIO;
                goto fail_free_irq;
        }

        return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);

fail_free_irq:
        free_irq(IRQ_NAND, info);
fail_free_buf:
        if (use_dma) {
                pxa_free_dma(info->data_dma_ch);
                dma_free_coherent(&pdev->dev, info->data_buff_size,
                        info->data_buff, info->data_buff_phys);
        } else
                kfree(info->data_buff);
fail_free_io:
        iounmap(info->mmio_base);
fail_free_res:
        release_mem_region(r->start, r->end - r->start + 1);
fail_put_clk:
        clk_disable(info->clk);
        clk_put(info->clk);
fail_free_mtd:
        kfree(mtd);
        return ret;
}

static int pxa3xx_nand_remove(struct platform_device *pdev)
{
        struct mtd_info *mtd = platform_get_drvdata(pdev);
        struct pxa3xx_nand_info *info = mtd->priv;

        platform_set_drvdata(pdev, NULL);

        del_mtd_device(mtd);
        del_mtd_partitions(mtd);
        free_irq(IRQ_NAND, info);
        if (use_dma) {
                pxa_free_dma(info->data_dma_ch);
                /* matches dma_alloc_coherent() in pxa3xx_nand_init_buff() */
                dma_free_coherent(&pdev->dev, info->data_buff_size,
                                info->data_buff, info->data_buff_phys);
        } else
                kfree(info->data_buff);
        kfree(mtd);
        return 0;
}

#ifdef CONFIG_PM
static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
{
        struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
        struct pxa3xx_nand_info *info = mtd->priv;

        if (info->state != STATE_READY) {
                dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
                return -EAGAIN;
        }

        return 0;
}

static int pxa3xx_nand_resume(struct platform_device *pdev)
{
        struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
        struct pxa3xx_nand_info *info = mtd->priv;

        clk_enable(info->clk);

        return pxa3xx_nand_config_flash(info, info->flash_info);
}
#else
#define pxa3xx_nand_suspend     NULL
#define pxa3xx_nand_resume      NULL
#endif

static struct platform_driver pxa3xx_nand_driver = {
        .driver = {
                .name   = "pxa3xx-nand",
        },
        .probe          = pxa3xx_nand_probe,
        .remove         = pxa3xx_nand_remove,
        .suspend        = pxa3xx_nand_suspend,
        .resume         = pxa3xx_nand_resume,
};

static int __init pxa3xx_nand_init(void)
{
        return platform_driver_register(&pxa3xx_nand_driver);
}
module_init(pxa3xx_nand_init);

static void __exit pxa3xx_nand_exit(void)
{
        platform_driver_unregister(&pxa3xx_nand_driver);
}
module_exit(pxa3xx_nand_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");