/*
 * linux/drivers/mmc/au1xmmc.c - AU1XX0 MMC driver
 *
 * Copyright (c) 2005, Advanced Micro Devices, Inc.
 *
 * Developed with help from the 2.4.30 MMC AU1XXX controller including
 * the following copyright notices:
 *     Copyright (c) 2003-2004 Embedded Edge, LLC.
 *     Portions Copyright (C) 2002 Embedix, Inc
 *     Copyright 2002 Hewlett-Packard Company
 *
 * 2.6 version of this driver inspired by:
 *     (drivers/mmc/wbsd.c) Copyright (C) 2004-2005 Pierre Ossman,
 *     All Rights Reserved.
 *     (drivers/mmc/pxa.c) Copyright (C) 2003 Russell King,
 *     All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/* Why is a timer used to detect insert events?
 *
 * From the AU1100 MMC application guide:
 * If the Au1100-based design is intended to support both MultiMediaCards
 * and 1- or 4-data bit SecureDigital cards, then the solution is to
 * connect a weak (560KOhm) pull-up resistor to connector pin 1.
 * In doing so, an MMC card never enters SPI-mode communications,
 * but now the SecureDigital card-detect feature of CD/DAT3 is ineffective
 * (the low to high transition will not occur).
 *
 * So we use the timer to check the status manually.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/host.h>

#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1100_mmc.h>
#include <asm/scatterlist.h>

#define DRIVER_NAME "au1xxx-mmc"
/* Set this to enable special debugging macros */

#ifdef DEBUG
#define DBG(fmt, idx, args...) printk("au1xx(%d): DEBUG: " fmt, idx, ##args)
#else
#define DBG(fmt, idx, args...)
#endif
	u32 tx_devid, rx_devid;
} au1xmmc_card_table[] = {
	{ SD0_BASE, DSCR_CMD0_SDMS_TX0, DSCR_CMD0_SDMS_RX0,
	  BCSR_BOARD_SD0PWR, BCSR_INT_SD0INSERT, BCSR_STATUS_SD0WP },
#ifndef CONFIG_MIPS_DB1200
	{ SD1_BASE, DSCR_CMD0_SDMS_TX1, DSCR_CMD0_SDMS_RX1,
	  BCSR_BOARD_DS1PWR, BCSR_INT_SD1INSERT, BCSR_STATUS_SD1WP }
#endif
};
#define AU1XMMC_CONTROLLER_COUNT \
	(sizeof(au1xmmc_card_table) / sizeof(au1xmmc_card_table[0]))

/* This array stores pointers for the hosts (used by the IRQ handler) */
struct au1xmmc_host *au1xmmc_hosts[AU1XMMC_CONTROLLER_COUNT];

module_param(dma, bool, 0);
MODULE_PARM_DESC(dma, "Use DMA engine for data transfers (0 = disabled)");
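
/* Register access helpers: IRQ_ON()/IRQ_OFF() are meant to unmask and
 * mask interrupt sources in SD_CONFIG, and FLUSH_FIFO() uses the
 * SD_CONFIG2_FF bit to drain the controller's data FIFO.
 */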
static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask)
	u32 val = au_readl(HOST_CONFIG(host));
	au_writel(val, HOST_CONFIG(host));

static inline void FLUSH_FIFO(struct au1xmmc_host *host)
	u32 val = au_readl(HOST_CONFIG2(host));

	au_writel(val | SD_CONFIG2_FF, HOST_CONFIG2(host));

	/* SEND_STOP will turn off clock control - this re-enables it */
	val &= ~SD_CONFIG2_DF;

	au_writel(val, HOST_CONFIG2(host));

static inline void IRQ_OFF(struct au1xmmc_host *host, u32 mask)
	u32 val = au_readl(HOST_CONFIG(host));
	au_writel(val, HOST_CONFIG(host));
static inline void SEND_STOP(struct au1xmmc_host *host)

	/* We know the value of CONFIG2, so avoid a read we don't need */
	u32 mask = SD_CONFIG2_EN;

	WARN_ON(host->status != HOST_S_DATA);
	host->status = HOST_S_STOP;

	au_writel(mask | SD_CONFIG2_DF, HOST_CONFIG2(host));

	/* Send the stop command */
	au_writel(STOP_CMD, HOST_CMD(host));
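
/* Socket power is switched through the board's BCSR block, using the
 * per-socket bit from au1xmmc_card_table[].bcsrpwr.
 */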
static void au1xmmc_set_power(struct au1xmmc_host *host, int state)

	u32 val = au1xmmc_card_table[host->id].bcsrpwr;

	if (state) bcsr->board |= val;

static inline int au1xmmc_card_inserted(struct au1xmmc_host *host)
	return (bcsr->sig_status & au1xmmc_card_table[host->id].bcsrstatus)
		? 1 : 0;

static int au1xmmc_card_readonly(struct mmc_host *mmc)
	struct au1xmmc_host *host = mmc_priv(mmc);
	return (bcsr->status & au1xmmc_card_table[host->id].wpstatus)
		? 1 : 0;
static void au1xmmc_finish_request(struct au1xmmc_host *host)

	struct mmc_request *mrq = host->mrq;

	host->flags &= HOST_F_ACTIVE;

	host->pio.offset = 0;

	host->status = HOST_S_IDLE;

	bcsr->disk_leds |= (1 << 8);

	mmc_request_done(host->mmc, mrq);

static void au1xmmc_tasklet_finish(unsigned long param)
	struct au1xmmc_host *host = (struct au1xmmc_host *) param;
	au1xmmc_finish_request(host);
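
/* Build and issue a command: encode the opcode, the expected response
 * type and the transfer type into the SD_CMD word, write the argument,
 * then set SD_CMD_GO.  When 'wait' is set, busy-wait for the command to
 * leave the controller and for the SD_STATUS_CR (command response) bit.
 */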
static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
				struct mmc_command *cmd, unsigned int flags)

	u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT);

	switch (mmc_resp_type(cmd)) {
		mmccmd |= SD_CMD_RT_1;
		mmccmd |= SD_CMD_RT_1B;
		mmccmd |= SD_CMD_RT_2;
		mmccmd |= SD_CMD_RT_3;
		printk(KERN_INFO "au1xmmc: unhandled response type %02x\n",
		return MMC_ERR_INVALID;

	if (flags & MMC_DATA_READ) {
		if (flags & MMC_DATA_MULTI)
			mmccmd |= SD_CMD_CT_4;
		else
			mmccmd |= SD_CMD_CT_2;
	} else if (flags & MMC_DATA_WRITE) {
		if (flags & MMC_DATA_MULTI)
			mmccmd |= SD_CMD_CT_3;
		else
			mmccmd |= SD_CMD_CT_1;
	}

	au_writel(cmd->arg, HOST_CMDARG(host));

	IRQ_OFF(host, SD_CONFIG_CR);

	au_writel((mmccmd | SD_CMD_GO), HOST_CMD(host));

	/* Wait for the command to go on the line */
	while (au_readl(HOST_CMD(host)) & SD_CMD_GO)
		/* spin */;

	/* Wait for the command to come back */
	u32 status = au_readl(HOST_STATUS(host));

	while (!(status & SD_STATUS_CR))
		status = au_readl(HOST_STATUS(host));

	/* Clear the CR status */
	au_writel(SD_STATUS_CR, HOST_STATUS(host));

	IRQ_ON(host, SD_CONFIG_CR);
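
/* Called when a data transfer has ended: wait for the data-busy bit to
 * clear on transmit, translate the write/read CRC status bits into an
 * error code, record how many bytes were moved (taken from the DBDMA
 * byte count in DMA mode), and then complete the request.
 */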
static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)

	struct mmc_request *mrq = host->mrq;
	struct mmc_data *data;

	WARN_ON(host->status != HOST_S_DATA && host->status != HOST_S_STOP);

	if (host->mrq == NULL)
		return;

	data = mrq->cmd->data;

	status = au_readl(HOST_STATUS(host));

	/* The transaction is really over when the SD_STATUS_DB bit is clear */
	while ((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB))
		status = au_readl(HOST_STATUS(host));

	data->error = MMC_ERR_NONE;
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir);

	/* Process any errors */
	crc = (status & (SD_STATUS_WC | SD_STATUS_RC));
	if (host->flags & HOST_F_XMIT)
		crc |= ((status & 0x07) == 0x02) ? 0 : 1;

	if (crc)
		data->error = MMC_ERR_BADCRC;

	/* Clear the CRC bits */
	au_writel(SD_STATUS_WC | SD_STATUS_RC, HOST_STATUS(host));

	data->bytes_xfered = 0;

	if (data->error == MMC_ERR_NONE) {
		if (host->flags & HOST_F_DMA) {
			u32 chan = DMA_CHANNEL(host);

			chan_tab_t *c = *((chan_tab_t **) chan);
			au1x_dma_chan_t *cp = c->chan_ptr;
			data->bytes_xfered = cp->ddma_bytecnt;
		} else
			data->bytes_xfered =
				(data->blocks * data->blksz) - host->pio.len;

	au1xmmc_finish_request(host);
static void au1xmmc_tasklet_data(unsigned long param)
	struct au1xmmc_host *host = (struct au1xmmc_host *) param;

	u32 status = au_readl(HOST_STATUS(host));
	au1xmmc_data_complete(host, status);

#define AU1XMMC_MAX_TRANSFER 8
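
/* PIO transmit path: copy up to AU1XMMC_MAX_TRANSFER bytes from the
 * current scatterlist entry into the TX FIFO while the controller
 * signals room (SD_STATUS_TH), then update the PIO bookkeeping.
 */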
static void au1xmmc_send_pio(struct au1xmmc_host *host)

	struct mmc_data *data = 0;
	int sg_len, max, count = 0;
	unsigned char *sg_ptr;
	struct scatterlist *sg;

	data = host->mrq->data;

	if (!(host->flags & HOST_F_XMIT))
		return;

	/* This is the pointer to the data buffer */
	sg = &data->sg[host->pio.index];
	sg_ptr = page_address(sg->page) + sg->offset + host->pio.offset;

	/* This is the space left inside the buffer */
	sg_len = data->sg[host->pio.index].length - host->pio.offset;

	/* Check if we need less than the size of the sg_buffer */
	max = (sg_len > host->pio.len) ? host->pio.len : sg_len;
	if (max > AU1XMMC_MAX_TRANSFER) max = AU1XMMC_MAX_TRANSFER;

	for (count = 0; count < max; count++) {
		status = au_readl(HOST_STATUS(host));

		if (!(status & SD_STATUS_TH))
			break;

		au_writel((unsigned long) val, HOST_TXPORT(host));
	}

	host->pio.len -= count;
	host->pio.offset += count;

	if (count == sg_len) {
		host->pio.offset = 0;
	}

	if (host->pio.len == 0) {
		IRQ_OFF(host, SD_CONFIG_TH);

		if (host->flags & HOST_F_STOP)
			SEND_STOP(host);

		tasklet_schedule(&host->data_task);
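
/* PIO receive path: drain the RX FIFO while SD_STATUS_NE (not empty) is
 * set, reporting CRC, overrun and underrun conditions via DBG(), and
 * stop once the current scatterlist entry or the whole transfer is done.
 */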
static void au1xmmc_receive_pio(struct au1xmmc_host *host)

	struct mmc_data *data = 0;
	int sg_len = 0, max = 0, count = 0;
	unsigned char *sg_ptr = 0;
	struct scatterlist *sg;

	data = host->mrq->data;

	if (!(host->flags & HOST_F_RECV))
		return;

	if (host->pio.index < host->dma.len) {
		sg = &data->sg[host->pio.index];
		sg_ptr = page_address(sg->page) + sg->offset + host->pio.offset;

		/* This is the space left inside the buffer */
		sg_len = sg_dma_len(&data->sg[host->pio.index]) - host->pio.offset;

		/* Check if we need less than the size of the sg_buffer */
		if (sg_len < max) max = sg_len;
	}

	if (max > AU1XMMC_MAX_TRANSFER)
		max = AU1XMMC_MAX_TRANSFER;

	for (count = 0; count < max; count++) {
		status = au_readl(HOST_STATUS(host));

		if (!(status & SD_STATUS_NE))
			break;

		if (status & SD_STATUS_RC) {
			DBG("RX CRC Error [%d + %d].\n", host->id,
					host->pio.len, count);
		}

		if (status & SD_STATUS_RO) {
			DBG("RX Overrun [%d + %d]\n", host->id,
					host->pio.len, count);
		}
		else if (status & SD_STATUS_RU) {
			DBG("RX Underrun [%d + %d]\n", host->id,
					host->pio.len, count);
		}

		val = au_readl(HOST_RXPORT(host));

			*sg_ptr++ = (unsigned char) (val & 0xFF);
	}

	host->pio.len -= count;
	host->pio.offset += count;

	if (sg_len && count == sg_len) {
		host->pio.offset = 0;
	}

	if (host->pio.len == 0) {
		//IRQ_OFF(host, SD_CONFIG_RA | SD_CONFIG_RF);
		IRQ_OFF(host, SD_CONFIG_NE);

		if (host->flags & HOST_F_STOP)
			SEND_STOP(host);

		tasklet_schedule(&host->data_task);
/* static void au1xmmc_cmd_complete
   This is called when a command has been completed - grab the response
   and check for errors.  Then start the data transfer if it is indicated.
*/

static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)

	struct mmc_request *mrq = host->mrq;
	struct mmc_command *cmd;

	cmd->error = MMC_ERR_NONE;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {

			r[0] = au_readl(host->iobase + SD_RESP3);
			r[1] = au_readl(host->iobase + SD_RESP2);
			r[2] = au_readl(host->iobase + SD_RESP1);
			r[3] = au_readl(host->iobase + SD_RESP0);

			/* The CRC is omitted from the response, so really
			 * we only got 120 bits, but the engine expects
			 * 128 bits, so we have to shift things up
			 */
			for (i = 0; i < 4; i++) {
				cmd->resp[i] = (r[i] & 0x00FFFFFF) << 8;
					cmd->resp[i] |= (r[i + 1] & 0xFF000000) >> 24;

			/* Technically, we should be getting all 48 bits of
			 * the response (SD_RESP1 + SD_RESP2), but because
			 * our response omits the CRC, our data ends up
			 * being shifted 8 bits to the right.  In this case,
			 * that means that the OSR data starts at bit 31,
			 * so we can just read RESP0 and return that
			 */
			cmd->resp[0] = au_readl(host->iobase + SD_RESP0);

	/* Figure out errors */
	if (status & (SD_STATUS_SC | SD_STATUS_WC | SD_STATUS_RC))
		cmd->error = MMC_ERR_BADCRC;

	trans = host->flags & (HOST_F_XMIT | HOST_F_RECV);

	if (!trans || cmd->error != MMC_ERR_NONE) {
		IRQ_OFF(host, SD_CONFIG_TH | SD_CONFIG_RA | SD_CONFIG_RF);
		tasklet_schedule(&host->finish_task);

	host->status = HOST_S_DATA;

	if (host->flags & HOST_F_DMA) {
		u32 channel = DMA_CHANNEL(host);

		/* Start the DMA as soon as the buffer gets something in it */
		if (host->flags & HOST_F_RECV) {
			u32 mask = SD_STATUS_DB | SD_STATUS_NE;

			while ((status & mask) != mask)
				status = au_readl(HOST_STATUS(host));
		}

		au1xxx_dbdma_start(channel);
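
/* Program the SD clock divider: derive a divisor for the requested rate
 * from the peripheral bus clock (CPU clock scaled by SYS_POWERCTRL) and
 * write it into SD_CONFIG together with the divider enable bit.
 */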
static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate)

	unsigned int pbus = get_au1x00_speed();
	unsigned int divisor;

	/* From the databook:
	 * divisor = ((((cpuclock / sbus_divisor) / 2) / mmcclock) / 2) - 1
	 */
	pbus /= ((au_readl(SYS_POWERCTRL) & 0x3) + 2);

	divisor = ((pbus / rate) / 2) - 1;

	config = au_readl(HOST_CONFIG(host));

	config &= ~(SD_CONFIG_DIV);
	config |= (divisor & SD_CONFIG_DIV) | SD_CONFIG_DE;

	au_writel(config, HOST_CONFIG(host));
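
/* Prepare a data transfer: record the direction flags, map the
 * scatterlist, program the block size, and either load the DBDMA
 * descriptor ring (DMA mode) or prime the PIO counters and unmask the
 * FIFO interrupts.
 */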
au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data)

	int datalen = data->blocks * data->blksz;

		host->flags |= HOST_F_DMA;

	if (data->flags & MMC_DATA_READ)
		host->flags |= HOST_F_RECV;
	else
		host->flags |= HOST_F_XMIT;

		host->flags |= HOST_F_STOP;

	host->dma.dir = DMA_BIDIRECTIONAL;

	host->dma.len = dma_map_sg(mmc_dev(host->mmc), data->sg,
				   data->sg_len, host->dma.dir);

	if (host->dma.len == 0)
		return MMC_ERR_TIMEOUT;

	au_writel(data->blksz - 1, HOST_BLKSIZE(host));

	if (host->flags & HOST_F_DMA) {

		u32 channel = DMA_CHANNEL(host);

		au1xxx_dbdma_stop(channel);

		for (i = 0; i < host->dma.len; i++) {
			u32 ret = 0, flags = DDMA_FLAGS_NOIE;
			struct scatterlist *sg = &data->sg[i];
			int sg_len = sg->length;

			int len = (datalen > sg_len) ? sg_len : datalen;

			if (i == host->dma.len - 1)
				flags = DDMA_FLAGS_IE;

			if (host->flags & HOST_F_XMIT) {
				ret = au1xxx_dbdma_put_source_flags(channel,
					(void *) (page_address(sg->page) +
						  sg->offset),
					len, flags);
			} else {
				ret = au1xxx_dbdma_put_dest_flags(channel,
					(void *) (page_address(sg->page) +
						  sg->offset),
					len, flags);
			}
		}
	}

	host->pio.offset = 0;
	host->pio.len = datalen;

	if (host->flags & HOST_F_XMIT)
		IRQ_ON(host, SD_CONFIG_TH);
	else
		IRQ_ON(host, SD_CONFIG_NE);
		//IRQ_ON(host, SD_CONFIG_RA|SD_CONFIG_RF);

	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir);
	return MMC_ERR_TIMEOUT;
/* static void au1xmmc_request
   This actually starts a command or data transaction
*/

static void au1xmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)

	struct au1xmmc_host *host = mmc_priv(mmc);
	unsigned int flags = 0;
	int ret = MMC_ERR_NONE;

	WARN_ON(irqs_disabled());
	WARN_ON(host->status != HOST_S_IDLE);

	host->status = HOST_S_CMD;

	bcsr->disk_leds &= ~(1 << 8);

		flags = mrq->data->flags;
		ret = au1xmmc_prepare_data(host, mrq->data);

	if (ret == MMC_ERR_NONE)
		ret = au1xmmc_send_command(host, 0, mrq->cmd, flags);

	if (ret != MMC_ERR_NONE) {
		mrq->cmd->error = ret;
		au1xmmc_finish_request(host);
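
/* Bring the controller to a known state: enable the clock, take the
 * block out of reset, clear pending status, program the timeout, flush
 * the FIFO and unmask the interrupts the driver uses.
 */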
static void au1xmmc_reset_controller(struct au1xmmc_host *host)

	/* Apply the clock */
	au_writel(SD_ENABLE_CE, HOST_ENABLE(host));

	au_writel(SD_ENABLE_R | SD_ENABLE_CE, HOST_ENABLE(host));

	au_writel(~0, HOST_STATUS(host));

	au_writel(0, HOST_BLKSIZE(host));
	au_writel(0x001fffff, HOST_TIMEOUT(host));

	au_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));

	au_writel(SD_CONFIG2_EN | SD_CONFIG2_FF, HOST_CONFIG2(host));

	au_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));

	/* Configure interrupts */
	au_writel(AU1XMMC_INTERRUPTS, HOST_CONFIG(host));
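
/* Bus settings from the MMC core; only the power state and the clock
 * rate are acted upon here.
 */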
static void au1xmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)

	struct au1xmmc_host *host = mmc_priv(mmc);

	if (ios->power_mode == MMC_POWER_OFF)
		au1xmmc_set_power(host, 0);
	else if (ios->power_mode == MMC_POWER_ON) {
		au1xmmc_set_power(host, 1);
	}

	if (ios->clock && ios->clock != host->clock) {
		au1xmmc_set_clock(host, ios->clock);
		host->clock = ios->clock;
	}
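
/* DBDMA completion callback: when the descriptor chain finishes, send
 * the stop command if one is pending and let the data tasklet complete
 * the request.
 */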
static void au1xmmc_dma_callback(int irq, void *dev_id)

	struct au1xmmc_host *host = (struct au1xmmc_host *) dev_id;

	/* Avoid spurious interrupts */

	if (host->flags & HOST_F_STOP)
		SEND_STOP(host);

	tasklet_schedule(&host->data_task);

#define STATUS_TIMEOUT (SD_STATUS_RAT | SD_STATUS_DT)
#define STATUS_DATA_IN  (SD_STATUS_NE)
#define STATUS_DATA_OUT (SD_STATUS_TH)
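
/* All SD controllers share one interrupt line, so the handler walks
 * every registered host, reads its status and dispatches to the
 * timeout, data-done, command-done or PIO paths as appropriate.
 */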
static irqreturn_t au1xmmc_irq(int irq, void *dev_id)

	disable_irq(AU1100_SD_IRQ);

	for (i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) {
		struct au1xmmc_host *host = au1xmmc_hosts[i];

		status = au_readl(HOST_STATUS(host));

		if (host->mrq && (status & STATUS_TIMEOUT)) {
			if (status & SD_STATUS_RAT)
				host->mrq->cmd->error = MMC_ERR_TIMEOUT;
			else if (status & SD_STATUS_DT)
				host->mrq->data->error = MMC_ERR_TIMEOUT;

			/* In PIO mode, interrupts might still be enabled */
			IRQ_OFF(host, SD_CONFIG_NE | SD_CONFIG_TH);

			//IRQ_OFF(host, SD_CONFIG_TH|SD_CONFIG_RA|SD_CONFIG_RF);
			tasklet_schedule(&host->finish_task);
		}
		else if (status & SD_STATUS_DD) {

			/* Sometimes we get a DD before a NE in PIO mode */
			if (!(host->flags & HOST_F_DMA) &&
					(status & SD_STATUS_NE))
				au1xmmc_receive_pio(host);
			else {
				au1xmmc_data_complete(host, status);
				//tasklet_schedule(&host->data_task);
			}
		}
		else if (status & (SD_STATUS_CR)) {
			if (host->status == HOST_S_CMD)
				au1xmmc_cmd_complete(host, status);
		}
		else if (!(host->flags & HOST_F_DMA)) {
			if ((host->flags & HOST_F_XMIT) &&
					(status & STATUS_DATA_OUT))
				au1xmmc_send_pio(host);
			else if ((host->flags & HOST_F_RECV) &&
					(status & STATUS_DATA_IN))
				au1xmmc_receive_pio(host);
		}
		else if (status & 0x203FBC70) {
			DBG("Unhandled status %8.8x\n", host->id, status);
		}

		au_writel(status, HOST_STATUS(host));
	}

	enable_irq(AU1100_SD_IRQ);
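
/* Timer-driven card detect (see the comment at the top of the file):
 * compare the BCSR insert status with what the host currently believes
 * and notify the MMC core when a card has been inserted or removed.
 */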
static void au1xmmc_poll_event(unsigned long arg)

	struct au1xmmc_host *host = (struct au1xmmc_host *) arg;

	int card = au1xmmc_card_inserted(host);
	int controller = (host->flags & HOST_F_ACTIVE) ? 1 : 0;

	if (card != controller) {
		host->flags &= ~HOST_F_ACTIVE;
		if (card) host->flags |= HOST_F_ACTIVE;
		mmc_detect_change(host->mmc, 0);
	}

	if (host->mrq != NULL) {
		u32 status = au_readl(HOST_STATUS(host));
		DBG("PENDING - %8.8x\n", host->id, status);
	}

	mod_timer(&host->timer, jiffies + AU1XMMC_DETECT_TIMEOUT);

static dbdev_tab_t au1xmmc_mem_dbdev = {
	DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 8, 0x00000000, 0, 0
};
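
/* Set up the DBDMA resources for one host: a private 8-bit-stride
 * memory device plus a TX and an RX channel, each with a ring of
 * AU1XMMC_DESCRIPTOR_COUNT descriptors.
 */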
static void au1xmmc_init_dma(struct au1xmmc_host *host)

	int txid = au1xmmc_card_table[host->id].tx_devid;
	int rxid = au1xmmc_card_table[host->id].rx_devid;

	/* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride
	 * of 8 bits.  And since devices are shared, we need to create
	 * our own to avoid freaking out other devices
	 */
	int memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev);

	txchan = au1xxx_dbdma_chan_alloc(memid, txid,
			au1xmmc_dma_callback, (void *) host);

	rxchan = au1xxx_dbdma_chan_alloc(rxid, memid,
			au1xmmc_dma_callback, (void *) host);

	au1xxx_dbdma_set_devwidth(txchan, 8);
	au1xxx_dbdma_set_devwidth(rxchan, 8);

	au1xxx_dbdma_ring_alloc(txchan, AU1XMMC_DESCRIPTOR_COUNT);
	au1xxx_dbdma_ring_alloc(rxchan, AU1XMMC_DESCRIPTOR_COUNT);

	host->tx_chan = txchan;
	host->rx_chan = rxchan;
static const struct mmc_host_ops au1xmmc_ops = {
	.request	= au1xmmc_request,
	.set_ios	= au1xmmc_set_ios,
	.get_ro		= au1xmmc_card_readonly,
};
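
/* Probe: grab the shared interrupt, then for each controller allocate
 * an mmc_host, fill in the host limits, set up the tasklets and the
 * card-detect timer, initialize DBDMA, and reset the controller.
 */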
static int __devinit au1xmmc_probe(struct platform_device *pdev)

	/* The interrupt is shared among all controllers */
	ret = request_irq(AU1100_SD_IRQ, au1xmmc_irq, IRQF_DISABLED, "MMC", 0);

		printk(DRIVER_NAME "ERROR: Couldn't get int %d: %d\n",

	disable_irq(AU1100_SD_IRQ);

	for (i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) {
		struct mmc_host *mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev);
		struct au1xmmc_host *host = 0;

			printk(DRIVER_NAME "ERROR: no mem for host %d\n", i);
			au1xmmc_hosts[i] = 0;

		mmc->ops = &au1xmmc_ops;

		mmc->f_max = 24000000;

		mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE;
		mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT;

		mmc->max_blk_size = 2048;
		mmc->max_blk_count = 512;

		mmc->ocr_avail = AU1XMMC_OCR;

		host = mmc_priv(mmc);

		host->iobase = au1xmmc_card_table[host->id].iobase;

		host->power_mode = MMC_POWER_OFF;

		host->flags = au1xmmc_card_inserted(host) ? HOST_F_ACTIVE : 0;
		host->status = HOST_S_IDLE;

		init_timer(&host->timer);

		host->timer.function = au1xmmc_poll_event;
		host->timer.data = (unsigned long) host;
		host->timer.expires = jiffies + AU1XMMC_DETECT_TIMEOUT;

		tasklet_init(&host->data_task, au1xmmc_tasklet_data,
				(unsigned long) host);

		tasklet_init(&host->finish_task, au1xmmc_tasklet_finish,
				(unsigned long) host);

		spin_lock_init(&host->lock);

		au1xmmc_init_dma(host);

		au1xmmc_reset_controller(host);

		au1xmmc_hosts[i] = host;

		add_timer(&host->timer);

		printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X (mode=%s)\n",
				host->id, host->iobase, dma ? "dma" : "pio");
	}

	enable_irq(AU1100_SD_IRQ);
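
/* Tear everything down again: kill the tasklets, stop the detect timer,
 * power the socket down, remove the mmc_host, free the DBDMA channels
 * and disable the controller.
 */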
static int __devexit au1xmmc_remove(struct platform_device *pdev)

	disable_irq(AU1100_SD_IRQ);

	for (i = 0; i < AU1XMMC_CONTROLLER_COUNT; i++) {
		struct au1xmmc_host *host = au1xmmc_hosts[i];

		tasklet_kill(&host->data_task);
		tasklet_kill(&host->finish_task);

		del_timer_sync(&host->timer);
		au1xmmc_set_power(host, 0);

		mmc_remove_host(host->mmc);

		au1xxx_dbdma_chan_free(host->tx_chan);
		au1xxx_dbdma_chan_free(host->rx_chan);

		au_writel(0x0, HOST_ENABLE(host));
	}

	free_irq(AU1100_SD_IRQ, 0);
static struct platform_driver au1xmmc_driver = {
	.probe		= au1xmmc_probe,
	.remove		= au1xmmc_remove,
	.driver		= {
		.name	= DRIVER_NAME,
	},
};
static int __init au1xmmc_init(void)
{
	return platform_driver_register(&au1xmmc_driver);
}

static void __exit au1xmmc_exit(void)
{
	platform_driver_unregister(&au1xmmc_driver);
}

module_init(au1xmmc_init);
module_exit(au1xmmc_exit);

MODULE_AUTHOR("Advanced Micro Devices, Inc");
MODULE_DESCRIPTION("MMC/SD driver for the Alchemy Au1XXX");
MODULE_LICENSE("GPL");