/*
 *  linux/drivers/mmc/host/pxamci.c - PXA MMCI driver
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This hardware is really sick:
 *   - No way to clear interrupts.
 *   - Have to turn off the clock whenever we touch the device.
 *   - Doesn't tell you how many data blocks were transferred.
 *  Yuck!
 *
 *      1 and 3 byte data transfers not supported
 *      max block length up to 1023
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mmc/host.h>

#include <asm/dma.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include <asm/arch/pxa-regs.h>
#include <asm/arch/mmc.h>

#include "pxamci.h"

#define DRIVER_NAME     "pxa2xx-mci"

#define NR_SG   1
#define CLKRT_OFF       (~0)

struct pxamci_host {
        struct mmc_host         *mmc;
        spinlock_t              lock;
        struct resource         *res;
        void __iomem            *base;
        struct clk              *clk;
        unsigned long           clkrate;
        int                     irq;
        int                     dma;
        unsigned int            clkrt;
        unsigned int            cmdat;
        unsigned int            imask;
        unsigned int            power_mode;
        struct pxamci_platform_data *pdata;

        struct mmc_request      *mrq;
        struct mmc_command      *cmd;
        struct mmc_data         *data;

        dma_addr_t              sg_dma;
        struct pxa_dma_desc     *sg_cpu;
        unsigned int            dma_len;

        unsigned int            dma_dir;
        unsigned int            dma_drcmrrx;
        unsigned int            dma_drcmrtx;
};

static void pxamci_stop_clock(struct pxamci_host *host)
{
        if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
                unsigned long timeout = 10000;
                unsigned int v;

                writel(STOP_CLOCK, host->base + MMC_STRPCL);

                do {
                        v = readl(host->base + MMC_STAT);
                        if (!(v & STAT_CLK_EN))
                                break;
                        udelay(1);
                } while (timeout--);

                if (v & STAT_CLK_EN)
                        dev_err(mmc_dev(host->mmc), "unable to stop clock\n");
        }
}

static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask)
{
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        host->imask &= ~mask;
        writel(host->imask, host->base + MMC_I_MASK);
        spin_unlock_irqrestore(&host->lock, flags);
}

static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
{
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        host->imask |= mask;
        writel(host->imask, host->base + MMC_I_MASK);
        spin_unlock_irqrestore(&host->lock, flags);
}

static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
{
        unsigned int nob = data->blocks;
        unsigned long long clks;
        unsigned int timeout;
        u32 dcmd;
        int i;

        host->data = data;

        if (data->flags & MMC_DATA_STREAM)
                nob = 0xffff;

        writel(nob, host->base + MMC_NOB);
        writel(data->blksz, host->base + MMC_BLKLEN);

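        /*
         * Convert the requested data timeout into controller clocks: the
         * nanosecond part is scaled by clkrate, and timeout_clks (given in
         * card clocks) is scaled up by the current divider.  MMC_RDTO is
         * then programmed in units of 256 of those clocks, rounding up.
         */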
        clks = (unsigned long long)data->timeout_ns * host->clkrate;
        do_div(clks, 1000000000UL);
        timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt);
        writel((timeout + 255) / 256, host->base + MMC_RDTO);

        if (data->flags & MMC_DATA_READ) {
                host->dma_dir = DMA_FROM_DEVICE;
                dcmd = DCMD_INCTRGADDR | DCMD_FLOWTRG;
                DRCMR(host->dma_drcmrtx) = 0;
                DRCMR(host->dma_drcmrrx) = host->dma | DRCMR_MAPVLD;
        } else {
                host->dma_dir = DMA_TO_DEVICE;
                dcmd = DCMD_INCSRCADDR | DCMD_FLOWSRC;
                DRCMR(host->dma_drcmrrx) = 0;
                DRCMR(host->dma_drcmrtx) = host->dma | DRCMR_MAPVLD;
        }

        dcmd |= DCMD_BURST32 | DCMD_WIDTH1;

        host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
                                   host->dma_dir);

        for (i = 0; i < host->dma_len; i++) {
                unsigned int length = sg_dma_len(&data->sg[i]);
                host->sg_cpu[i].dcmd = dcmd | length;
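                /*
                 * A write whose length is not a multiple of the 32-byte DMA
                 * burst leaves the transmit FIFO partially filled; request a
                 * descriptor-end interrupt so pxamci_dma_irq() can flush it
                 * with BUF_PART_FULL.
                 */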
                if (length & 31 && !(data->flags & MMC_DATA_READ))
                        host->sg_cpu[i].dcmd |= DCMD_ENDIRQEN;
                if (data->flags & MMC_DATA_READ) {
                        host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO;
                        host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]);
                } else {
                        host->sg_cpu[i].dsadr = sg_dma_address(&data->sg[i]);
                        host->sg_cpu[i].dtadr = host->res->start + MMC_TXFIFO;
                }
                host->sg_cpu[i].ddadr = host->sg_dma + (i + 1) *
                                        sizeof(struct pxa_dma_desc);
        }
        host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP;
        wmb();

        DDADR(host->dma) = host->sg_dma;
        DCSR(host->dma) = DCSR_RUN;
}

static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
{
        WARN_ON(host->cmd != NULL);
        host->cmd = cmd;

        if (cmd->flags & MMC_RSP_BUSY)
                cmdat |= CMDAT_BUSY;

#define RSP_TYPE(x)     ((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
        switch (RSP_TYPE(mmc_resp_type(cmd))) {
        case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */
                cmdat |= CMDAT_RESP_SHORT;
                break;
        case RSP_TYPE(MMC_RSP_R3):
                cmdat |= CMDAT_RESP_R3;
                break;
        case RSP_TYPE(MMC_RSP_R2):
                cmdat |= CMDAT_RESP_R2;
                break;
        default:
                break;
        }

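        /*
         * The 32-bit command argument is split across two 16-bit registers.
         * Command, argument, mode and clock-rate registers are all set up
         * while the clock is stopped; the controller only starts the command
         * once the clock is running again.
         */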
        writel(cmd->opcode, host->base + MMC_CMD);
        writel(cmd->arg >> 16, host->base + MMC_ARGH);
        writel(cmd->arg & 0xffff, host->base + MMC_ARGL);
        writel(cmdat, host->base + MMC_CMDAT);
        writel(host->clkrt, host->base + MMC_CLKRT);

        writel(START_CLOCK, host->base + MMC_STRPCL);

        pxamci_enable_irq(host, END_CMD_RES);
}

static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
{
        host->mrq = NULL;
        host->cmd = NULL;
        host->data = NULL;
        mmc_request_done(host->mmc, mrq);
}

static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
{
        struct mmc_command *cmd = host->cmd;
        int i;
        u32 v;

        if (!cmd)
                return 0;

        host->cmd = NULL;

        /*
         * Did I mention this is Sick.  We always need to
         * discard the upper 8 bits of the first 16-bit word.
         */
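        /*
         * MMC_RES is a FIFO that delivers the response 16 bits per read, so
         * each 32-bit response word below is stitched together from
         * overlapping 16-bit reads, shifted by the discarded byte.
         */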
        v = readl(host->base + MMC_RES) & 0xffff;
        for (i = 0; i < 4; i++) {
                u32 w1 = readl(host->base + MMC_RES) & 0xffff;
                u32 w2 = readl(host->base + MMC_RES) & 0xffff;
                cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8;
                v = w2;
        }

        if (stat & STAT_TIME_OUT_RESPONSE) {
                cmd->error = -ETIMEDOUT;
        } else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
#ifdef CONFIG_PXA27x
                /*
                 * workaround for erratum #42:
                 * Intel PXA27x Family Processor Specification Update Rev 001
                 * A bogus CRC error can appear if the msb of a 136 bit
                 * response is a one.
                 */
                if (cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000) {
                        pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode);
                } else
#endif
                cmd->error = -EILSEQ;
        }

        pxamci_disable_irq(host, END_CMD_RES);
        if (host->data && !cmd->error) {
                pxamci_enable_irq(host, DATA_TRAN_DONE);
        } else {
                pxamci_finish_request(host, host->mrq);
        }

        return 1;
}

static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
{
        struct mmc_data *data = host->data;

        if (!data)
                return 0;

        DCSR(host->dma) = 0;
        dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len,
                     host->dma_dir);

        if (stat & STAT_READ_TIME_OUT)
                data->error = -ETIMEDOUT;
        else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR))
                data->error = -EILSEQ;

        /*
         * There appears to be a hardware design bug here.  There seems to
         * be no way to find out how much data was transferred to the card.
         * This means that if there was an error on any block, we mark all
         * data blocks as being in error.
         */
        if (!data->error)
                data->bytes_xfered = data->blocks * data->blksz;
        else
                data->bytes_xfered = 0;

        pxamci_disable_irq(host, DATA_TRAN_DONE);

        host->data = NULL;
        if (host->mrq->stop) {
                pxamci_stop_clock(host);
                pxamci_start_cmd(host, host->mrq->stop, host->cmdat);
        } else {
                pxamci_finish_request(host, host->mrq);
        }

        return 1;
}

static irqreturn_t pxamci_irq(int irq, void *devid)
{
        struct pxamci_host *host = devid;
        unsigned int ireg;
        int handled = 0;

        ireg = readl(host->base + MMC_I_REG) & ~readl(host->base + MMC_I_MASK);

        if (ireg) {
                unsigned stat = readl(host->base + MMC_STAT);

                pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat);

                if (ireg & END_CMD_RES)
                        handled |= pxamci_cmd_done(host, stat);
                if (ireg & DATA_TRAN_DONE)
                        handled |= pxamci_data_done(host, stat);
                if (ireg & SDIO_INT) {
                        mmc_signal_sdio_irq(host->mmc);
                        handled = 1;
                }
        }

        return IRQ_RETVAL(handled);
}

static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct pxamci_host *host = mmc_priv(mmc);
        unsigned int cmdat;

        WARN_ON(host->mrq != NULL);

        host->mrq = mrq;

        pxamci_stop_clock(host);

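        /*
         * CMDAT_INIT (precede the command with the card initialisation
         * clock stream) is set by pxamci_set_ios() at power-on and is only
         * wanted for the first command afterwards, so consume it here.
         */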
        cmdat = host->cmdat;
        host->cmdat &= ~CMDAT_INIT;

        if (mrq->data) {
                pxamci_setup_data(host, mrq->data);

                cmdat &= ~CMDAT_BUSY;
                cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
                if (mrq->data->flags & MMC_DATA_WRITE)
                        cmdat |= CMDAT_WRITE;

                if (mrq->data->flags & MMC_DATA_STREAM)
                        cmdat |= CMDAT_STREAM;
        }

        pxamci_start_cmd(host, mrq->cmd, cmdat);
}

static int pxamci_get_ro(struct mmc_host *mmc)
{
        struct pxamci_host *host = mmc_priv(mmc);

        if (host->pdata && host->pdata->get_ro)
                return host->pdata->get_ro(mmc_dev(mmc));
        /* Host doesn't support read-only detection, so assume writable */
        return 0;
}

static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct pxamci_host *host = mmc_priv(mmc);

        if (ios->clock) {
                unsigned long rate = host->clkrate;
                unsigned int clk = rate / ios->clock;

                if (host->clkrt == CLKRT_OFF)
                        clk_enable(host->clk);

                if (ios->clock == 26000000) {
                        /* to support 26MHz on pxa300/pxa310 */
                        host->clkrt = 7;
                } else {
                        /* to handle (19.5MHz, 26MHz) */
                        if (!clk)
                                clk = 1;

                        /*
                         * clk might result in a lower divisor than we
                         * desire.  check for that condition and adjust
                         * as appropriate.
                         */
                        if (rate / clk > ios->clock)
                                clk <<= 1;
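                        /*
                         * CLKRT holds a power-of-two divider exponent: the
                         * bus clock is clkrate / 2^CLKRT, so use the position
                         * of the highest set bit of the computed divider.
                         */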
                        host->clkrt = fls(clk) - 1;
                }

                /*
                 * we write clkrt on the next command
                 */
        } else {
                pxamci_stop_clock(host);
                if (host->clkrt != CLKRT_OFF) {
                        host->clkrt = CLKRT_OFF;
                        clk_disable(host->clk);
                }
        }

        if (host->power_mode != ios->power_mode) {
                host->power_mode = ios->power_mode;

                if (host->pdata && host->pdata->setpower)
                        host->pdata->setpower(mmc_dev(mmc), ios->vdd);

                if (ios->power_mode == MMC_POWER_ON)
                        host->cmdat |= CMDAT_INIT;
        }

        if (ios->bus_width == MMC_BUS_WIDTH_4)
                host->cmdat |= CMDAT_SD_4DAT;
        else
                host->cmdat &= ~CMDAT_SD_4DAT;

        pr_debug("PXAMCI: clkrt = %x cmdat = %x\n",
                 host->clkrt, host->cmdat);
}

static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
{
        struct pxamci_host *pxa_host = mmc_priv(host);

        if (enable)
                pxamci_enable_irq(pxa_host, SDIO_INT);
        else
                pxamci_disable_irq(pxa_host, SDIO_INT);
}

static const struct mmc_host_ops pxamci_ops = {
        .request                = pxamci_request,
        .get_ro                 = pxamci_get_ro,
        .set_ios                = pxamci_set_ios,
        .enable_sdio_irq        = pxamci_enable_sdio_irq,
};

static void pxamci_dma_irq(int dma, void *devid)
{
        struct pxamci_host *host = devid;
        int dcsr = DCSR(dma);
        DCSR(dma) = dcsr & ~DCSR_STOPIRQEN;
        if (dcsr & DCSR_ENDINTR) {
                writel(BUF_PART_FULL, host->base + MMC_PRTBUF);
        } else {
                printk(KERN_ERR "%s: DMA error on channel %d (DCSR=%#x)\n",
                       mmc_hostname(host->mmc), dma, dcsr);
                host->data->error = -EIO;
                pxamci_data_done(host, 0);
        }
}

static irqreturn_t pxamci_detect_irq(int irq, void *devid)
{
        struct pxamci_host *host = mmc_priv(devid);

        mmc_detect_change(devid, host->pdata->detect_delay);
        return IRQ_HANDLED;
}

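/*
 * Board support hooks into this driver through struct pxamci_platform_data
 * (see the pdata callbacks used in pxamci_probe(), pxamci_get_ro() and
 * pxamci_set_ios()).  A minimal, purely illustrative sketch of such a board
 * file follows; the "mymachine" names and GPIO number are made up and not
 * part of this driver:
 *
 *      static int mymachine_mci_init(struct device *dev,
 *                                    irq_handler_t detect_irq, void *data)
 *      {
 *              // Register the card-detect GPIO so pxamci_detect_irq()
 *              // runs when a card is inserted or removed.
 *              return request_irq(gpio_to_irq(MYMACHINE_GPIO_MMC_CD),
 *                                 detect_irq,
 *                                 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
 *                                 "MMC card detect", data);
 *      }
 *
 *      static void mymachine_mci_exit(struct device *dev, void *data)
 *      {
 *              free_irq(gpio_to_irq(MYMACHINE_GPIO_MMC_CD), data);
 *      }
 *
 *      static struct pxamci_platform_data mymachine_mci_platform_data = {
 *              .ocr_mask       = MMC_VDD_32_33 | MMC_VDD_33_34,
 *              .detect_delay   = 20,
 *              .init           = mymachine_mci_init,
 *              .exit           = mymachine_mci_exit,
 *      };
 */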
static int pxamci_probe(struct platform_device *pdev)
{
        struct mmc_host *mmc;
        struct pxamci_host *host = NULL;
        struct resource *r, *dmarx, *dmatx;
        int ret, irq;

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_irq(pdev, 0);
        if (!r || irq < 0)
                return -ENXIO;

        r = request_mem_region(r->start, SZ_4K, DRIVER_NAME);
        if (!r)
                return -EBUSY;

        mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev);
        if (!mmc) {
                ret = -ENOMEM;
                goto out;
        }

        mmc->ops = &pxamci_ops;

        /*
         * We can do SG-DMA, but we don't because we never know how much
         * data we successfully wrote to the card.
         */
        mmc->max_phys_segs = NR_SG;

        /*
         * Our hardware DMA can handle a maximum of one page per SG entry.
         */
        mmc->max_seg_size = PAGE_SIZE;

        /*
         * Block length register is only 10 bits before PXA27x.
         */
        mmc->max_blk_size = (cpu_is_pxa21x() || cpu_is_pxa25x()) ? 1023 : 2048;

        /*
         * Block count register is 16 bits.
         */
        mmc->max_blk_count = 65535;

        host = mmc_priv(mmc);
        host->mmc = mmc;
        host->dma = -1;
        host->pdata = pdev->dev.platform_data;
        host->clkrt = CLKRT_OFF;

        host->clk = clk_get(&pdev->dev, "MMCCLK");
        if (IS_ERR(host->clk)) {
                ret = PTR_ERR(host->clk);
                host->clk = NULL;
                goto out;
        }

        host->clkrate = clk_get_rate(host->clk);

        /*
         * Calculate minimum clock rate, rounding up: the largest CLKRT
         * divider is 2^6 = 64.
         */
        mmc->f_min = (host->clkrate + 63) / 64;
        mmc->f_max = (cpu_is_pxa300() || cpu_is_pxa310()) ? 26000000
                                                          : host->clkrate;
535
536         mmc->ocr_avail = host->pdata ?
537                          host->pdata->ocr_mask :
538                          MMC_VDD_32_33|MMC_VDD_33_34;
539         mmc->caps = 0;
540         host->cmdat = 0;
541         if (!cpu_is_pxa21x() && !cpu_is_pxa25x()) {
542                 mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
543                 host->cmdat |= CMDAT_SDIO_INT_EN;
544                 if (cpu_is_pxa300() || cpu_is_pxa310())
545                         mmc->caps |= MMC_CAP_MMC_HIGHSPEED |
546                                      MMC_CAP_SD_HIGHSPEED;
547         }
548
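        /*
         * One page of coherent memory holds the pxa_dma_desc chain built in
         * pxamci_setup_data() (with NR_SG = 1, only a single descriptor is
         * ever used).
         */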
        host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
        if (!host->sg_cpu) {
                ret = -ENOMEM;
                goto out;
        }

        spin_lock_init(&host->lock);
        host->res = r;
        host->irq = irq;
        host->imask = MMC_I_MASK_ALL;

        host->base = ioremap(r->start, SZ_4K);
        if (!host->base) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * Ensure that the host controller is shut down, and setup
         * with our defaults.
         */
        pxamci_stop_clock(host);
        writel(0, host->base + MMC_SPI);
        writel(64, host->base + MMC_RESTO);
        writel(host->imask, host->base + MMC_I_MASK);

        host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW,
                                    pxamci_dma_irq, host);
        if (host->dma < 0) {
                ret = -EBUSY;
                goto out;
        }

        ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
        if (ret)
                goto out;

        platform_set_drvdata(pdev, mmc);

        dmarx = platform_get_resource(pdev, IORESOURCE_DMA, 0);
        if (!dmarx) {
                ret = -ENXIO;
                goto out;
        }
        host->dma_drcmrrx = dmarx->start;

        dmatx = platform_get_resource(pdev, IORESOURCE_DMA, 1);
        if (!dmatx) {
                ret = -ENXIO;
                goto out;
        }
        host->dma_drcmrtx = dmatx->start;

        if (host->pdata && host->pdata->init)
                host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc);

        mmc_add_host(mmc);

        return 0;

 out:
        if (host) {
                if (host->dma >= 0)
                        pxa_free_dma(host->dma);
                if (host->base)
                        iounmap(host->base);
                if (host->sg_cpu)
                        dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
                if (host->clk)
                        clk_put(host->clk);
        }
        if (mmc)
                mmc_free_host(mmc);
        release_resource(r);
        return ret;
}

static int pxamci_remove(struct platform_device *pdev)
{
        struct mmc_host *mmc = platform_get_drvdata(pdev);

        platform_set_drvdata(pdev, NULL);

        if (mmc) {
                struct pxamci_host *host = mmc_priv(mmc);

                if (host->pdata && host->pdata->exit)
                        host->pdata->exit(&pdev->dev, mmc);

                mmc_remove_host(mmc);

                pxamci_stop_clock(host);
                writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
                       END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
                       host->base + MMC_I_MASK);

                DRCMR(host->dma_drcmrrx) = 0;
                DRCMR(host->dma_drcmrtx) = 0;

                free_irq(host->irq, host);
                pxa_free_dma(host->dma);
                iounmap(host->base);
                dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);

                clk_put(host->clk);

                release_resource(host->res);

                mmc_free_host(mmc);
        }
        return 0;
}

#ifdef CONFIG_PM
static int pxamci_suspend(struct platform_device *dev, pm_message_t state)
{
        struct mmc_host *mmc = platform_get_drvdata(dev);
        int ret = 0;

        if (mmc)
                ret = mmc_suspend_host(mmc, state);

        return ret;
}

static int pxamci_resume(struct platform_device *dev)
{
        struct mmc_host *mmc = platform_get_drvdata(dev);
        int ret = 0;

        if (mmc)
                ret = mmc_resume_host(mmc);

        return ret;
}
#else
#define pxamci_suspend  NULL
#define pxamci_resume   NULL
#endif

static struct platform_driver pxamci_driver = {
        .probe          = pxamci_probe,
        .remove         = pxamci_remove,
        .suspend        = pxamci_suspend,
        .resume         = pxamci_resume,
        .driver         = {
                .name   = DRIVER_NAME,
        },
};

static int __init pxamci_init(void)
{
        return platform_driver_register(&pxamci_driver);
}

static void __exit pxamci_exit(void)
{
        platform_driver_unregister(&pxamci_driver);
}

module_init(pxamci_init);
module_exit(pxamci_exit);

MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");
MODULE_LICENSE("GPL");