2 * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver
4 * Copyright (C) 2004-2005 Pierre Ossman, All Rights Reserved.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
14 * Changes to the FIFO system should be done with extreme care since
15 * the hardware is full of bugs related to the FIFO. Known issues are:
17 * - FIFO size field in FSR is always zero.
19 * - FIFO interrupts tend not to work as they should. Interrupts are
20 * triggered only for full/empty events, not for threshold values.
22 * - On APIC systems the FIFO empty interrupt is sometimes lost.
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/platform_device.h>
30 #include <linux/interrupt.h>
31 #include <linux/dma-mapping.h>
32 #include <linux/delay.h>
33 #include <linux/pnp.h>
34 #include <linux/highmem.h>
35 #include <linux/mmc/host.h>
36 #include <linux/mmc/protocol.h>
40 #include <asm/scatterlist.h>
44 #define DRIVER_NAME "wbsd"
45 #define DRIVER_VERSION "1.6"
47 #define DBG(x...) \
48 pr_debug(DRIVER_NAME ": " x)
49 #define DBGF(f, x...) \
50 pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
58 static const struct pnp_device_id pnp_dev_table[] = {
64 MODULE_DEVICE_TABLE(pnp, pnp_dev_table);
66 #endif /* CONFIG_PNP */
68 static const int config_ports[] = { 0x2E, 0x4E };
69 static const int unlock_codes[] = { 0x83, 0x87 };
71 static const int valid_ids[] = {
76 static unsigned int nopnp = 0;
78 static const unsigned int nopnp = 1;
80 static unsigned int io = 0x248;
81 static unsigned int irq = 6;
88 static inline void wbsd_unlock_config(struct wbsd_host *host)
90 BUG_ON(host->config == 0);
92 outb(host->unlock_code, host->config);
93 outb(host->unlock_code, host->config);
96 static inline void wbsd_lock_config(struct wbsd_host *host)
98 BUG_ON(host->config == 0);
100 outb(LOCK_CODE, host->config);
103 static inline void wbsd_write_config(struct wbsd_host *host, u8 reg, u8 value)
105 BUG_ON(host->config == 0);
107 outb(reg, host->config);
108 outb(value, host->config + 1);
111 static inline u8 wbsd_read_config(struct wbsd_host *host, u8 reg)
113 BUG_ON(host->config == 0);
115 outb(reg, host->config);
116 return inb(host->config + 1);
119 static inline void wbsd_write_index(struct wbsd_host *host, u8 index, u8 value)
121 outb(index, host->base + WBSD_IDXR);
122 outb(value, host->base + WBSD_DATAR);
125 static inline u8 wbsd_read_index(struct wbsd_host *host, u8 index)
127 outb(index, host->base + WBSD_IDXR);
128 return inb(host->base + WBSD_DATAR);
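/*
 * Illustrative sketch, not part of the original driver: how the helper
 * families above fit together. The Super I/O configuration registers sit
 * behind host->config and must be unlocked (two writes of the unlock code)
 * before use; reading the 16-bit chip ID is the same unlock/read/lock
 * sequence that wbsd_scan() performs further down with raw outb()/inb().
 * The helper name below is made up for this example.
 */
static u16 example_read_chip_id(struct wbsd_host *host)
{
	u16 id;

	wbsd_unlock_config(host);

	/* The ID is split across two 8-bit configuration registers. */
	id = wbsd_read_config(host, WBSD_CONF_ID_HI) << 8;
	id |= wbsd_read_config(host, WBSD_CONF_ID_LO);

	wbsd_lock_config(host);

	return id;
}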
135 static void wbsd_init_device(struct wbsd_host *host)
140 * Reset chip (SD/MMC part) and fifo.
142 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
143 setup |= WBSD_FIFO_RESET | WBSD_SOFT_RESET;
144 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
149 setup &= ~WBSD_DAT3_H;
150 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
151 host->flags &= ~WBSD_FIGNORE_DETECT;
154 * Read back default clock.
156 host->clk = wbsd_read_index(host, WBSD_IDX_CLK);
161 outb(WBSD_POWER_N, host->base + WBSD_CSR);
164 * Set maximum timeout.
166 wbsd_write_index(host, WBSD_IDX_TAAC, 0x7F);
169 * Test for card presence
171 if (inb(host->base + WBSD_CSR) & WBSD_CARDPRESENT)
172 host->flags |= WBSD_FCARD_PRESENT;
174 host->flags &= ~WBSD_FCARD_PRESENT;
177 * Enable interesting interrupts.
180 ier |= WBSD_EINT_CARD;
181 ier |= WBSD_EINT_FIFO_THRE;
182 ier |= WBSD_EINT_CCRC;
183 ier |= WBSD_EINT_TIMEOUT;
184 ier |= WBSD_EINT_CRC;
187 outb(ier, host->base + WBSD_EIR);
192 inb(host->base + WBSD_ISR);
195 static void wbsd_reset(struct wbsd_host *host)
199 printk(KERN_ERR "%s: Resetting chip\n", mmc_hostname(host->mmc));
202 * Soft reset of chip (SD/MMC part).
204 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
205 setup |= WBSD_SOFT_RESET;
206 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
209 static void wbsd_request_end(struct wbsd_host *host, struct mmc_request *mrq)
211 unsigned long dmaflags;
213 DBGF("Ending request, cmd (%x)\n", mrq->cmd->opcode);
215 if (host->dma >= 0) {
217 * Release ISA DMA controller.
219 dmaflags = claim_dma_lock();
220 disable_dma(host->dma);
221 clear_dma_ff(host->dma);
222 release_dma_lock(dmaflags);
225 * Disable DMA on host.
227 wbsd_write_index(host, WBSD_IDX_DMA, 0);
233 * The MMC layer might call back into the driver, so unlock first.
235 spin_unlock(&host->lock);
236 mmc_request_done(host->mmc, mrq);
237 spin_lock(&host->lock);
241 * Scatter/gather functions
244 static inline void wbsd_init_sg(struct wbsd_host *host, struct mmc_data *data)
247 * Get information about the SG list from the data structure.
249 host->cur_sg = data->sg;
250 host->num_sg = data->sg_len;
253 host->remain = host->cur_sg->length;
256 static inline int wbsd_next_sg(struct wbsd_host *host)
259 * Skip to next SG entry.
267 if (host->num_sg > 0) {
269 host->remain = host->cur_sg->length;
275 static inline char *wbsd_kmap_sg(struct wbsd_host *host)
277 host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ) +
278 host->cur_sg->offset;
279 return host->mapped_sg;
282 static inline void wbsd_kunmap_sg(struct wbsd_host *host)
284 kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ);
287 static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
289 unsigned int len, i, size;
290 struct scatterlist *sg;
291 char *dmabuf = host->dma_buffer;
300 * Just loop through all entries. The requested size might
301 * not cover the entire list, so make sure that we do not
302 * transfer too much.
304 for (i = 0; i < len; i++) {
305 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
306 if (size < sg[i].length)
307 memcpy(dmabuf, sgbuf, size);
309 memcpy(dmabuf, sgbuf, sg[i].length);
310 kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
311 dmabuf += sg[i].length;
313 if (size < sg[i].length)
316 size -= sg[i].length;
323 * Check that we didn't get a request to transfer
324 * more data than can fit into the SG list.
332 static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data)
334 unsigned int len, i, size;
335 struct scatterlist *sg;
336 char *dmabuf = host->dma_buffer;
345 * Just loop through all entries. The requested size might
346 * not cover the entire list, so make sure that we do not
347 * transfer too much.
349 for (i = 0; i < len; i++) {
350 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
351 if (size < sg[i].length)
352 memcpy(sgbuf, dmabuf, size);
354 memcpy(sgbuf, dmabuf, sg[i].length);
355 kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
356 dmabuf += sg[i].length;
358 if (size < sg[i].length)
361 size -= sg[i].length;
368 * Check that we didn't get a request to transfer
369 * more data than can fit into the SG list.
381 static inline void wbsd_get_short_reply(struct wbsd_host *host,
382 struct mmc_command *cmd)
385 * Correct response type?
387 if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_SHORT) {
388 cmd->error = MMC_ERR_INVALID;
392 cmd->resp[0] = wbsd_read_index(host, WBSD_IDX_RESP12) << 24;
393 cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP13) << 16;
394 cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP14) << 8;
395 cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP15) << 0;
396 cmd->resp[1] = wbsd_read_index(host, WBSD_IDX_RESP16) << 24;
399 static inline void wbsd_get_long_reply(struct wbsd_host *host,
400 struct mmc_command *cmd)
405 * Correct response type?
407 if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_LONG) {
408 cmd->error = MMC_ERR_INVALID;
412 for (i = 0; i < 4; i++) {
414 wbsd_read_index(host, WBSD_IDX_RESP1 + i * 4) << 24;
416 wbsd_read_index(host, WBSD_IDX_RESP2 + i * 4) << 16;
418 wbsd_read_index(host, WBSD_IDX_RESP3 + i * 4) << 8;
420 wbsd_read_index(host, WBSD_IDX_RESP4 + i * 4) << 0;
424 static void wbsd_send_command(struct wbsd_host *host, struct mmc_command *cmd)
429 DBGF("Sending cmd (%x)\n", cmd->opcode);
432 * Clear accumulated ISR. The interrupt routine
433 * will fill this one with events that occur during the transaction.
439 * Send the command (CRC calculated by host).
441 outb(cmd->opcode, host->base + WBSD_CMDR);
442 for (i = 3; i >= 0; i--)
443 outb((cmd->arg >> (i * 8)) & 0xff, host->base + WBSD_CMDR);
445 cmd->error = MMC_ERR_NONE;
448 * Wait for the request to complete.
451 status = wbsd_read_index(host, WBSD_IDX_STATUS);
452 } while (status & WBSD_CARDTRAFFIC);
455 * Do we expect a reply?
457 if (cmd->flags & MMC_RSP_PRESENT) {
464 if (isr & WBSD_INT_CARD)
465 cmd->error = MMC_ERR_TIMEOUT;
467 else if (isr & WBSD_INT_TIMEOUT)
468 cmd->error = MMC_ERR_TIMEOUT;
470 else if ((cmd->flags & MMC_RSP_CRC) && (isr & WBSD_INT_CRC))
471 cmd->error = MMC_ERR_BADCRC;
474 if (cmd->flags & MMC_RSP_136)
475 wbsd_get_long_reply(host, cmd);
477 wbsd_get_short_reply(host, cmd);
481 DBGF("Sent cmd (%x), res %d\n", cmd->opcode, cmd->error);
488 static void wbsd_empty_fifo(struct wbsd_host *host)
490 struct mmc_data *data = host->mrq->cmd->data;
495 * Handle excessive data.
497 if (data->bytes_xfered == host->size)
500 buffer = wbsd_kmap_sg(host) + host->offset;
503 * Drain the fifo. This has a tendency to loop longer
504 * than the FIFO length (usually one block).
506 while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_EMPTY)) {
508 * The size field in the FSR is broken, so we estimate the fill level from the status flags.
511 if (fsr & WBSD_FIFO_FULL)
513 else if (fsr & WBSD_FIFO_FUTHRE)
518 for (i = 0; i < fifo; i++) {
519 *buffer = inb(host->base + WBSD_DFR);
524 data->bytes_xfered++;
529 if (data->bytes_xfered == host->size) {
530 wbsd_kunmap_sg(host);
535 * End of scatter list entry?
537 if (host->remain == 0) {
538 wbsd_kunmap_sg(host);
541 * Get next entry. Check if last.
543 if (!wbsd_next_sg(host)) {
545 * We should never reach this point.
546 * It means that we're trying to
547 * transfer more blocks than can fit
548 * into the scatter list.
552 host->size = data->bytes_xfered;
557 buffer = wbsd_kmap_sg(host);
562 wbsd_kunmap_sg(host);
565 * This is a very dirty hack to solve a
566 * hardware problem. The chip doesn't trigger
567 * FIFO threshold interrupts properly.
569 if ((host->size - data->bytes_xfered) < 16)
570 tasklet_schedule(&host->fifo_tasklet);
573 static void wbsd_fill_fifo(struct wbsd_host *host)
575 struct mmc_data *data = host->mrq->cmd->data;
580 * Check that we aren't being called after the
581 * entire buffer has been transferred.
583 if (data->bytes_xfered == host->size)
586 buffer = wbsd_kmap_sg(host) + host->offset;
589 * Fill the fifo. This has a tendency to loop longer
590 * than the FIFO length (usually one block).
592 while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_FULL)) {
594 * The size field in the FSR is broken, so we estimate the fill level from the status flags.
597 if (fsr & WBSD_FIFO_EMPTY)
599 else if (fsr & WBSD_FIFO_EMTHRE)
604 for (i = 16; i > fifo; i--) {
605 outb(*buffer, host->base + WBSD_DFR);
610 data->bytes_xfered++;
615 if (data->bytes_xfered == host->size) {
616 wbsd_kunmap_sg(host);
621 * End of scatter list entry?
623 if (host->remain == 0) {
624 wbsd_kunmap_sg(host);
627 * Get next entry. Check if last.
629 if (!wbsd_next_sg(host)) {
631 * We should never reach this point.
632 * It means that we're trying to
633 * transfer more blocks than can fit
634 * into the scatter list.
638 host->size = data->bytes_xfered;
643 buffer = wbsd_kmap_sg(host);
648 wbsd_kunmap_sg(host);
651 * The controller stops sending interrupts for
652 * 'FIFO empty' under certain conditions, so we
653 * need to be a bit more proactive.
655 tasklet_schedule(&host->fifo_tasklet);
658 static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data)
662 unsigned long dmaflags;
664 DBGF("blksz %04x blks %04x flags %08x\n",
665 data->blksz, data->blocks, data->flags);
666 DBGF("tsac %d ms nsac %d clk\n",
667 data->timeout_ns / 1000000, data->timeout_clks);
672 host->size = data->blocks * data->blksz;
675 * Check timeout values for overflow.
676 * (Yes, some cards cause this value to overflow).
678 if (data->timeout_ns > 127000000)
679 wbsd_write_index(host, WBSD_IDX_TAAC, 127);
681 wbsd_write_index(host, WBSD_IDX_TAAC,
682 data->timeout_ns / 1000000);
685 if (data->timeout_clks > 255)
686 wbsd_write_index(host, WBSD_IDX_NSAC, 255);
688 wbsd_write_index(host, WBSD_IDX_NSAC, data->timeout_clks);
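/*
 * Illustrative sketch, not part of the original driver: the clamping above
 * in isolation. TAAC is programmed in milliseconds and the register only
 * holds values up to 127 (wbsd_init_device() writes 0x7F as the maximum),
 * while NSAC is a plain 8-bit clock count. The helper names are made up.
 */
static u8 example_taac_value(unsigned int timeout_ns)
{
	unsigned int ms = timeout_ns / 1000000;

	/* A 300 ms card timeout would be clamped to the 127 ms maximum. */
	return ms > 127 ? 127 : ms;
}

static u8 example_nsac_value(unsigned int timeout_clks)
{
	return timeout_clks > 255 ? 255 : timeout_clks;
}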
691 * Inform the chip of how large blocks will be
692 * sent. It needs this to determine when to calculate the CRC.
695 * Space for CRC must be included in the size.
696 * Two bytes are needed for each data line.
698 if (host->bus_width == MMC_BUS_WIDTH_1) {
699 blksize = data->blksz + 2;
701 wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0);
702 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
703 } else if (host->bus_width == MMC_BUS_WIDTH_4) {
704 blksize = data->blksz + 2 * 4;
706 wbsd_write_index(host, WBSD_IDX_PBSMSB,
707 ((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH);
708 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
710 data->error = MMC_ERR_INVALID;
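/*
 * Illustrative sketch, not part of the original driver: the block size
 * arithmetic above worked through for a 512-byte block. Each active data
 * line carries its own 16-bit CRC, so two bytes per line are added, and
 * the 12-bit result is split between PBSMSB (bits 11:8, placed in the
 * upper nibble) and PBSLSB (bits 7:0). The function name is made up.
 */
static void example_block_size_regs(unsigned int blksz, int four_bit_bus,
				    u8 *msb, u8 *lsb)
{
	/* Two CRC bytes per active data line. */
	unsigned int blksize = blksz + 2 * (four_bit_bus ? 4 : 1);

	*msb = (blksize >> 4) & 0xF0;	/* bits 11:8 of the size */
	*lsb = blksize & 0xFF;		/* bits 7:0 of the size */

	if (four_bit_bus)
		*msb |= WBSD_DATA_WIDTH;

	/*
	 * e.g. blksz = 512 on a 4-bit bus: blksize = 520 = 0x208,
	 * so *msb = 0x20 | WBSD_DATA_WIDTH and *lsb = 0x08.
	 */
}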
715 * Clear the FIFO. This is needed even for DMA
716 * transfers since the chip still uses the FIFO internally.
719 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
720 setup |= WBSD_FIFO_RESET;
721 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
726 if (host->dma >= 0) {
728 * The buffer for DMA is only 64 kB.
730 BUG_ON(host->size > 0x10000);
731 if (host->size > 0x10000) {
732 data->error = MMC_ERR_INVALID;
737 * Transfer data from the SG list to the DMA buffer.
740 if (data->flags & MMC_DATA_WRITE)
741 wbsd_sg_to_dma(host, data);
744 * Initialise the ISA DMA controller.
746 dmaflags = claim_dma_lock();
747 disable_dma(host->dma);
748 clear_dma_ff(host->dma);
749 if (data->flags & MMC_DATA_READ)
750 set_dma_mode(host->dma, DMA_MODE_READ & ~0x40);
752 set_dma_mode(host->dma, DMA_MODE_WRITE & ~0x40);
753 set_dma_addr(host->dma, host->dma_addr);
754 set_dma_count(host->dma, host->size);
756 enable_dma(host->dma);
757 release_dma_lock(dmaflags);
760 * Enable DMA on the host.
762 wbsd_write_index(host, WBSD_IDX_DMA, WBSD_DMA_ENABLE);
765 * This flag is used to keep printk
766 * output to a minimum.
771 * Initialise the SG list.
773 wbsd_init_sg(host, data);
778 wbsd_write_index(host, WBSD_IDX_DMA, 0);
781 * Set up FIFO threshold levels (and fill
782 * buffer if doing a write).
784 if (data->flags & MMC_DATA_READ) {
785 wbsd_write_index(host, WBSD_IDX_FIFOEN,
786 WBSD_FIFOEN_FULL | 8);
788 wbsd_write_index(host, WBSD_IDX_FIFOEN,
789 WBSD_FIFOEN_EMPTY | 8);
790 wbsd_fill_fifo(host);
794 data->error = MMC_ERR_NONE;
797 static void wbsd_finish_data(struct wbsd_host *host, struct mmc_data *data)
799 unsigned long dmaflags;
803 WARN_ON(host->mrq == NULL);
806 * Send a stop command if needed.
809 wbsd_send_command(host, data->stop);
812 * Wait for the controller to leave the data transfer state.
816 status = wbsd_read_index(host, WBSD_IDX_STATUS);
817 } while (status & (WBSD_BLOCK_READ | WBSD_BLOCK_WRITE));
822 if (host->dma >= 0) {
824 * Disable DMA on the host.
826 wbsd_write_index(host, WBSD_IDX_DMA, 0);
829 * Turn off the ISA DMA controller.
831 dmaflags = claim_dma_lock();
832 disable_dma(host->dma);
833 clear_dma_ff(host->dma);
834 count = get_dma_residue(host->dma);
835 release_dma_lock(dmaflags);
841 printk(KERN_ERR "%s: Incomplete DMA transfer. "
843 mmc_hostname(host->mmc), count);
845 data->error = MMC_ERR_FAILED;
848 * Transfer data from the DMA buffer to the SG list.
851 if (data->flags & MMC_DATA_READ)
852 wbsd_dma_to_sg(host, data);
854 data->bytes_xfered = host->size;
858 DBGF("Ending data transfer (%d bytes)\n", data->bytes_xfered);
860 wbsd_request_end(host, host->mrq);
863 /*****************************************************************************\
865 * MMC layer callbacks *
867 \*****************************************************************************/
869 static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
871 struct wbsd_host *host = mmc_priv(mmc);
872 struct mmc_command *cmd;
875 * Disable tasklets to avoid a deadlock.
877 spin_lock_bh(&host->lock);
879 BUG_ON(host->mrq != NULL);
886 * If there is no card in the slot then
887 * time out immediately.
889 if (!(host->flags & WBSD_FCARD_PRESENT)) {
890 cmd->error = MMC_ERR_TIMEOUT;
895 * Does the request include data?
898 wbsd_prepare_data(host, cmd->data);
900 if (cmd->data->error != MMC_ERR_NONE)
904 wbsd_send_command(host, cmd);
907 * If this is a data transfer the request
908 * will be finished after the data has been transferred.
911 if (cmd->data && (cmd->error == MMC_ERR_NONE)) {
913 * Dirty fix for hardware bug.
916 tasklet_schedule(&host->fifo_tasklet);
918 spin_unlock_bh(&host->lock);
924 wbsd_request_end(host, mrq);
926 spin_unlock_bh(&host->lock);
929 static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
931 struct wbsd_host *host = mmc_priv(mmc);
934 spin_lock_bh(&host->lock);
937 * Reset the chip on each power off.
938 * Should clear out any weird states.
940 if (ios->power_mode == MMC_POWER_OFF)
941 wbsd_init_device(host);
943 if (ios->clock >= 24000000)
945 else if (ios->clock >= 16000000)
947 else if (ios->clock >= 12000000)
953 * Only write to the clock register when
954 * there is an actual change.
956 if (clk != host->clk) {
957 wbsd_write_index(host, WBSD_IDX_CLK, clk);
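/*
 * Illustrative sketch, not part of the original driver: the frequency
 * selection above in isolation. The controller only generates a few fixed
 * rates, so the requested clock is rounded down to the nearest supported
 * one; the lowest rate is assumed here to be the 375 kHz that
 * wbsd_alloc_mmc() advertises as mmc->f_min. The divider register values
 * themselves are elided from this listing, so the example just returns
 * the resulting frequency.
 */
static unsigned int example_actual_clock(unsigned int requested_hz)
{
	if (requested_hz >= 24000000)
		return 24000000;
	else if (requested_hz >= 16000000)
		return 16000000;
	else if (requested_hz >= 12000000)
		return 12000000;
	else
		return 375000;
}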
964 if (ios->power_mode != MMC_POWER_OFF) {
965 pwr = inb(host->base + WBSD_CSR);
966 pwr &= ~WBSD_POWER_N;
967 outb(pwr, host->base + WBSD_CSR);
971 * MMC cards need to have pin 1 high during init.
972 * It wreaks havoc with card detection, though, so
973 * detection needs to be disabled while the pin is high.
975 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
976 if (ios->chip_select == MMC_CS_HIGH) {
977 BUG_ON(ios->bus_width != MMC_BUS_WIDTH_1);
978 setup |= WBSD_DAT3_H;
979 host->flags |= WBSD_FIGNORE_DETECT;
981 if (setup & WBSD_DAT3_H) {
982 setup &= ~WBSD_DAT3_H;
985 * We cannot resume card detection immediately
986 * because of capacitance and delays in the chip.
988 mod_timer(&host->ignore_timer, jiffies + HZ / 100);
991 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
994 * Store bus width for later. Will be used when
995 * setting up the data transfer.
997 host->bus_width = ios->bus_width;
999 spin_unlock_bh(&host->lock);
1002 static int wbsd_get_ro(struct mmc_host *mmc)
1004 struct wbsd_host *host = mmc_priv(mmc);
1007 spin_lock_bh(&host->lock);
1009 csr = inb(host->base + WBSD_CSR);
1011 outb(csr, host->base + WBSD_CSR);
1015 csr = inb(host->base + WBSD_CSR);
1017 outb(csr, host->base + WBSD_CSR);
1019 spin_unlock_bh(&host->lock);
1021 return csr & WBSD_WRPT;
1024 static struct mmc_host_ops wbsd_ops = {
1025 .request = wbsd_request,
1026 .set_ios = wbsd_set_ios,
1027 .get_ro = wbsd_get_ro,
1030 /*****************************************************************************\
1032 * Interrupt handling *
1034 \*****************************************************************************/
1037 * Helper function to reset the card detection ignore flag
1040 static void wbsd_reset_ignore(unsigned long data)
1042 struct wbsd_host *host = (struct wbsd_host *)data;
1044 BUG_ON(host == NULL);
1046 DBG("Resetting card detection ignore\n");
1048 spin_lock_bh(&host->lock);
1050 host->flags &= ~WBSD_FIGNORE_DETECT;
1053 * Card status might have changed while detection was ignored.
1056 tasklet_schedule(&host->card_tasklet);
1058 spin_unlock_bh(&host->lock);
1065 static inline struct mmc_data *wbsd_get_data(struct wbsd_host *host)
1067 WARN_ON(!host->mrq);
1071 WARN_ON(!host->mrq->cmd);
1072 if (!host->mrq->cmd)
1075 WARN_ON(!host->mrq->cmd->data);
1076 if (!host->mrq->cmd->data)
1079 return host->mrq->cmd->data;
1082 static void wbsd_tasklet_card(unsigned long param)
1084 struct wbsd_host *host = (struct wbsd_host *)param;
1088 spin_lock(&host->lock);
1090 if (host->flags & WBSD_FIGNORE_DETECT) {
1091 spin_unlock(&host->lock);
1095 csr = inb(host->base + WBSD_CSR);
1096 WARN_ON(csr == 0xff);
1098 if (csr & WBSD_CARDPRESENT) {
1099 if (!(host->flags & WBSD_FCARD_PRESENT)) {
1100 DBG("Card inserted\n");
1101 host->flags |= WBSD_FCARD_PRESENT;
1105 } else if (host->flags & WBSD_FCARD_PRESENT) {
1106 DBG("Card removed\n");
1107 host->flags &= ~WBSD_FCARD_PRESENT;
1110 printk(KERN_ERR "%s: Card removed during transfer!\n",
1111 mmc_hostname(host->mmc));
1114 host->mrq->cmd->error = MMC_ERR_FAILED;
1115 tasklet_schedule(&host->finish_tasklet);
1122 * Unlock first since we might get a call back.
1125 spin_unlock(&host->lock);
1128 mmc_detect_change(host->mmc, msecs_to_jiffies(delay));
1131 static void wbsd_tasklet_fifo(unsigned long param)
1133 struct wbsd_host *host = (struct wbsd_host *)param;
1134 struct mmc_data *data;
1136 spin_lock(&host->lock);
1141 data = wbsd_get_data(host);
1145 if (data->flags & MMC_DATA_WRITE)
1146 wbsd_fill_fifo(host);
1148 wbsd_empty_fifo(host);
1153 if (host->size == data->bytes_xfered) {
1154 wbsd_write_index(host, WBSD_IDX_FIFOEN, 0);
1155 tasklet_schedule(&host->finish_tasklet);
1159 spin_unlock(&host->lock);
1162 static void wbsd_tasklet_crc(unsigned long param)
1164 struct wbsd_host *host = (struct wbsd_host *)param;
1165 struct mmc_data *data;
1167 spin_lock(&host->lock);
1172 data = wbsd_get_data(host);
1176 DBGF("CRC error\n");
1178 data->error = MMC_ERR_BADCRC;
1180 tasklet_schedule(&host->finish_tasklet);
1183 spin_unlock(&host->lock);
1186 static void wbsd_tasklet_timeout(unsigned long param)
1188 struct wbsd_host *host = (struct wbsd_host *)param;
1189 struct mmc_data *data;
1191 spin_lock(&host->lock);
1196 data = wbsd_get_data(host);
1202 data->error = MMC_ERR_TIMEOUT;
1204 tasklet_schedule(&host->finish_tasklet);
1207 spin_unlock(&host->lock);
1210 static void wbsd_tasklet_finish(unsigned long param)
1212 struct wbsd_host *host = (struct wbsd_host *)param;
1213 struct mmc_data *data;
1215 spin_lock(&host->lock);
1217 WARN_ON(!host->mrq);
1221 data = wbsd_get_data(host);
1225 wbsd_finish_data(host, data);
1228 spin_unlock(&host->lock);
1231 static void wbsd_tasklet_block(unsigned long param)
1233 struct wbsd_host *host = (struct wbsd_host *)param;
1234 struct mmc_data *data;
1236 spin_lock(&host->lock);
1238 if ((wbsd_read_index(host, WBSD_IDX_CRCSTATUS) & WBSD_CRC_MASK) !=
1240 data = wbsd_get_data(host);
1244 DBGF("CRC error\n");
1246 data->error = MMC_ERR_BADCRC;
1248 tasklet_schedule(&host->finish_tasklet);
1252 spin_unlock(&host->lock);
1256 * Interrupt handling
1259 static irqreturn_t wbsd_irq(int irq, void *dev_id)
1261 struct wbsd_host *host = dev_id;
1264 isr = inb(host->base + WBSD_ISR);
1267 * Was it actually our hardware that caused the interrupt?
1269 if (isr == 0xff || isr == 0x00)
1275 * Schedule tasklets as needed.
1277 if (isr & WBSD_INT_CARD)
1278 tasklet_schedule(&host->card_tasklet);
1279 if (isr & WBSD_INT_FIFO_THRE)
1280 tasklet_schedule(&host->fifo_tasklet);
1281 if (isr & WBSD_INT_CRC)
1282 tasklet_hi_schedule(&host->crc_tasklet);
1283 if (isr & WBSD_INT_TIMEOUT)
1284 tasklet_hi_schedule(&host->timeout_tasklet);
1285 if (isr & WBSD_INT_BUSYEND)
1286 tasklet_hi_schedule(&host->block_tasklet);
1287 if (isr & WBSD_INT_TC)
1288 tasklet_schedule(&host->finish_tasklet);
1293 /*****************************************************************************\
1295 * Device initialisation and shutdown *
1297 \*****************************************************************************/
1300 * Allocate/free MMC structure.
1303 static int __devinit wbsd_alloc_mmc(struct device *dev)
1305 struct mmc_host *mmc;
1306 struct wbsd_host *host;
1309 * Allocate MMC structure.
1311 mmc = mmc_alloc_host(sizeof(struct wbsd_host), dev);
1315 host = mmc_priv(mmc);
1321 * Set host parameters.
1323 mmc->ops = &wbsd_ops;
1324 mmc->f_min = 375000;
1325 mmc->f_max = 24000000;
1326 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1327 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_BYTEBLOCK;
1329 spin_lock_init(&host->lock);
1334 init_timer(&host->ignore_timer);
1335 host->ignore_timer.data = (unsigned long)host;
1336 host->ignore_timer.function = wbsd_reset_ignore;
1339 * Maximum number of segments. Worst case is one sector per segment
1340 * so this will be 64 kB / 512 = 128.
1342 mmc->max_hw_segs = 128;
1343 mmc->max_phys_segs = 128;
1346 * Maximum number of sectors in one transfer. Also limited by the 64 kB DMA buffer.
1349 mmc->max_sectors = 128;
1352 * Maximum segment size. Could be one segment with the maximum number of sectors.
1355 mmc->max_seg_size = mmc->max_sectors * 512;
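/*
 * Illustrative sketch, not part of the original driver: where the limits
 * above come from. Everything follows from the 64 kB ISA bounce buffer
 * allocated in wbsd_request_dma(); the names below are made up.
 */
static void example_transfer_limits(void)
{
	unsigned int buffer_size = 65536;		/* the 64 kB DMA buffer */
	unsigned int max_sectors = buffer_size / 512;	/* = 128 */
	unsigned int max_segs = max_sectors;		/* worst case: one sector per segment */
	unsigned int max_seg_size = max_sectors * 512;	/* = 65536 bytes */

	(void)max_segs;
	(void)max_seg_size;
}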
1357 dev_set_drvdata(dev, mmc);
1362 static void __devexit wbsd_free_mmc(struct device *dev)
1364 struct mmc_host *mmc;
1365 struct wbsd_host *host;
1367 mmc = dev_get_drvdata(dev);
1371 host = mmc_priv(mmc);
1372 BUG_ON(host == NULL);
1374 del_timer_sync(&host->ignore_timer);
1378 dev_set_drvdata(dev, NULL);
1382 * Scan for known chip IDs
1385 static int __devinit wbsd_scan(struct wbsd_host *host)
1391 * Iterate through all config ports and unlock codes to
1392 * find hardware that is in our known list.
1394 for (i = 0; i < ARRAY_SIZE(config_ports); i++) {
1395 if (!request_region(config_ports[i], 2, DRIVER_NAME))
1398 for (j = 0; j < ARRAY_SIZE(unlock_codes); j++) {
1401 host->config = config_ports[i];
1402 host->unlock_code = unlock_codes[j];
1404 wbsd_unlock_config(host);
1406 outb(WBSD_CONF_ID_HI, config_ports[i]);
1407 id = inb(config_ports[i] + 1) << 8;
1409 outb(WBSD_CONF_ID_LO, config_ports[i]);
1410 id |= inb(config_ports[i] + 1);
1412 wbsd_lock_config(host);
1414 for (k = 0; k < ARRAY_SIZE(valid_ids); k++) {
1415 if (id == valid_ids[k]) {
1423 DBG("Unknown hardware (id %x) found at %x\n",
1424 id, config_ports[i]);
1428 release_region(config_ports[i], 2);
1432 host->unlock_code = 0;
1438 * Allocate/free I/O port ranges
1441 static int __devinit wbsd_request_region(struct wbsd_host *host, int base)
1446 if (!request_region(base, 8, DRIVER_NAME))
1454 static void __devexit wbsd_release_regions(struct wbsd_host *host)
1457 release_region(host->base, 8);
1462 release_region(host->config, 2);
1468 * Allocate/free DMA port and buffer
1471 static void __devinit wbsd_request_dma(struct wbsd_host *host, int dma)
1476 if (request_dma(dma, DRIVER_NAME))
1480 * We need to allocate a special buffer in
1481 * order for ISA to be able to DMA to it.
1483 host->dma_buffer = kmalloc(WBSD_DMA_SIZE,
1484 GFP_NOIO | GFP_DMA | __GFP_REPEAT | __GFP_NOWARN);
1485 if (!host->dma_buffer)
1489 * Translate the address to a physical address.
1491 host->dma_addr = dma_map_single(mmc_dev(host->mmc), host->dma_buffer,
1492 WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
1495 * ISA DMA transfers cannot cross a 64 kB boundary, so the buffer must be 64 kB aligned.
1497 if ((host->dma_addr & 0xffff) != 0)
1500 * ISA cannot access memory above 16 MB.
1502 else if (host->dma_addr >= 0x1000000)
1511 * If we've gotten here then there is some kind of alignment bug
1515 dma_unmap_single(mmc_dev(host->mmc), host->dma_addr,
1516 WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
1517 host->dma_addr = (dma_addr_t)NULL;
1519 kfree(host->dma_buffer);
1520 host->dma_buffer = NULL;
1526 printk(KERN_WARNING DRIVER_NAME ": Unable to allocate DMA %d. "
1527 "Falling back on FIFO.\n", dma);
1530 static void __devexit wbsd_release_dma(struct wbsd_host *host)
1532 if (host->dma_addr) {
1533 dma_unmap_single(mmc_dev(host->mmc), host->dma_addr,
1534 WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
1536 kfree(host->dma_buffer);
1538 free_dma(host->dma);
1541 host->dma_buffer = NULL;
1542 host->dma_addr = (dma_addr_t)NULL;
1546 * Allocate/free IRQ.
1549 static int __devinit wbsd_request_irq(struct wbsd_host *host, int irq)
1554 * Allocate interrupt.
1557 ret = request_irq(irq, wbsd_irq, IRQF_SHARED, DRIVER_NAME, host);
1566 tasklet_init(&host->card_tasklet, wbsd_tasklet_card,
1567 (unsigned long)host);
1568 tasklet_init(&host->fifo_tasklet, wbsd_tasklet_fifo,
1569 (unsigned long)host);
1570 tasklet_init(&host->crc_tasklet, wbsd_tasklet_crc,
1571 (unsigned long)host);
1572 tasklet_init(&host->timeout_tasklet, wbsd_tasklet_timeout,
1573 (unsigned long)host);
1574 tasklet_init(&host->finish_tasklet, wbsd_tasklet_finish,
1575 (unsigned long)host);
1576 tasklet_init(&host->block_tasklet, wbsd_tasklet_block,
1577 (unsigned long)host);
1582 static void __devexit wbsd_release_irq(struct wbsd_host *host)
1587 free_irq(host->irq, host);
1591 tasklet_kill(&host->card_tasklet);
1592 tasklet_kill(&host->fifo_tasklet);
1593 tasklet_kill(&host->crc_tasklet);
1594 tasklet_kill(&host->timeout_tasklet);
1595 tasklet_kill(&host->finish_tasklet);
1596 tasklet_kill(&host->block_tasklet);
1600 * Allocate all resources for the host.
1603 static int __devinit wbsd_request_resources(struct wbsd_host *host,
1604 int base, int irq, int dma)
1609 * Allocate I/O ports.
1611 ret = wbsd_request_region(host, base);
1616 * Allocate interrupt.
1618 ret = wbsd_request_irq(host, irq);
1625 wbsd_request_dma(host, dma);
1631 * Release all resources for the host.
1634 static void __devexit wbsd_release_resources(struct wbsd_host *host)
1636 wbsd_release_dma(host);
1637 wbsd_release_irq(host);
1638 wbsd_release_regions(host);
1642 * Configure the resources the chip should use.
1645 static void wbsd_chip_config(struct wbsd_host *host)
1647 wbsd_unlock_config(host);
1652 wbsd_write_config(host, WBSD_CONF_SWRST, 1);
1653 wbsd_write_config(host, WBSD_CONF_SWRST, 0);
1656 * Select SD/MMC function.
1658 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
1661 * Set up card detection.
1663 wbsd_write_config(host, WBSD_CONF_PINS, WBSD_PINS_DETECT_GP11);
1668 wbsd_write_config(host, WBSD_CONF_PORT_HI, host->base >> 8);
1669 wbsd_write_config(host, WBSD_CONF_PORT_LO, host->base & 0xff);
1671 wbsd_write_config(host, WBSD_CONF_IRQ, host->irq);
1674 wbsd_write_config(host, WBSD_CONF_DRQ, host->dma);
1677 * Enable and power up chip.
1679 wbsd_write_config(host, WBSD_CONF_ENABLE, 1);
1680 wbsd_write_config(host, WBSD_CONF_POWER, 0x20);
1682 wbsd_lock_config(host);
1686 * Check that configured resources are correct.
1689 static int wbsd_chip_validate(struct wbsd_host *host)
1693 wbsd_unlock_config(host);
1696 * Select SD/MMC function.
1698 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
1701 * Read configuration.
1703 base = wbsd_read_config(host, WBSD_CONF_PORT_HI) << 8;
1704 base |= wbsd_read_config(host, WBSD_CONF_PORT_LO);
1706 irq = wbsd_read_config(host, WBSD_CONF_IRQ);
1708 dma = wbsd_read_config(host, WBSD_CONF_DRQ);
1710 wbsd_lock_config(host);
1713 * Validate against given configuration.
1715 if (base != host->base)
1717 if (irq != host->irq)
1719 if ((dma != host->dma) && (host->dma != -1))
1726 * Powers down the SD function
1729 static void wbsd_chip_poweroff(struct wbsd_host *host)
1731 wbsd_unlock_config(host);
1733 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
1734 wbsd_write_config(host, WBSD_CONF_ENABLE, 0);
1736 wbsd_lock_config(host);
1739 /*****************************************************************************\
1741 * Devices setup and shutdown *
1743 \*****************************************************************************/
1745 static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma,
1748 struct wbsd_host *host = NULL;
1749 struct mmc_host *mmc = NULL;
1752 ret = wbsd_alloc_mmc(dev);
1756 mmc = dev_get_drvdata(dev);
1757 host = mmc_priv(mmc);
1760 * Scan for hardware.
1762 ret = wbsd_scan(host);
1764 if (pnp && (ret == -ENODEV)) {
1765 printk(KERN_WARNING DRIVER_NAME
1766 ": Unable to confirm device presence. You may "
1767 "experience lock-ups.\n");
1775 * Request resources.
1777 ret = wbsd_request_resources(host, base, irq, dma);
1779 wbsd_release_resources(host);
1785 * See if chip needs to be configured.
1788 if ((host->config != 0) && !wbsd_chip_validate(host)) {
1789 printk(KERN_WARNING DRIVER_NAME
1790 ": PnP active but chip not configured! "
1791 "You probably have a buggy BIOS. "
1792 "Configuring chip manually.\n");
1793 wbsd_chip_config(host);
1796 wbsd_chip_config(host);
1799 * Power Management stuff. No idea how this works.
1804 wbsd_unlock_config(host);
1805 wbsd_write_config(host, WBSD_CONF_PME, 0xA0);
1806 wbsd_lock_config(host);
1810 * Allow device to initialise itself properly.
1815 * Reset the chip into a known state.
1817 wbsd_init_device(host);
1821 printk(KERN_INFO "%s: W83L51xD", mmc_hostname(mmc));
1822 if (host->chip_id != 0)
1823 printk(" id %x", (int)host->chip_id);
1824 printk(" at 0x%x irq %d", (int)host->base, (int)host->irq);
1826 printk(" dma %d", (int)host->dma);
1836 static void __devexit wbsd_shutdown(struct device *dev, int pnp)
1838 struct mmc_host *mmc = dev_get_drvdata(dev);
1839 struct wbsd_host *host;
1844 host = mmc_priv(mmc);
1846 mmc_remove_host(mmc);
1849 * Power down the SD/MMC function.
1852 wbsd_chip_poweroff(host);
1854 wbsd_release_resources(host);
1863 static int __devinit wbsd_probe(struct platform_device *dev)
1865 /* Use the module parameters for resources */
1866 return wbsd_init(&dev->dev, io, irq, dma, 0);
1869 static int __devexit wbsd_remove(struct platform_device *dev)
1871 wbsd_shutdown(&dev->dev, 0);
1882 static int __devinit
1883 wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id)
1888 * Get resources from PnP layer.
1890 io = pnp_port_start(pnpdev, 0);
1891 irq = pnp_irq(pnpdev, 0);
1892 if (pnp_dma_valid(pnpdev, 0))
1893 dma = pnp_dma(pnpdev, 0);
1897 DBGF("PnP resources: port %3x irq %d dma %d\n", io, irq, dma);
1899 return wbsd_init(&pnpdev->dev, io, irq, dma, 1);
1902 static void __devexit wbsd_pnp_remove(struct pnp_dev *dev)
1904 wbsd_shutdown(&dev->dev, 1);
1907 #endif /* CONFIG_PNP */
1915 static int wbsd_suspend(struct wbsd_host *host, pm_message_t state)
1917 BUG_ON(host == NULL);
1919 return mmc_suspend_host(host->mmc, state);
1922 static int wbsd_resume(struct wbsd_host *host)
1924 BUG_ON(host == NULL);
1926 wbsd_init_device(host);
1928 return mmc_resume_host(host->mmc);
1931 static int wbsd_platform_suspend(struct platform_device *dev,
1934 struct mmc_host *mmc = platform_get_drvdata(dev);
1935 struct wbsd_host *host;
1941 DBGF("Suspending...\n");
1943 host = mmc_priv(mmc);
1945 ret = wbsd_suspend(host, state);
1949 wbsd_chip_poweroff(host);
1954 static int wbsd_platform_resume(struct platform_device *dev)
1956 struct mmc_host *mmc = platform_get_drvdata(dev);
1957 struct wbsd_host *host;
1962 DBGF("Resuming...\n");
1964 host = mmc_priv(mmc);
1966 wbsd_chip_config(host);
1969 * Allow device to initialise itself properly.
1973 return wbsd_resume(host);
1978 static int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state)
1980 struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev);
1981 struct wbsd_host *host;
1986 DBGF("Suspending...\n");
1988 host = mmc_priv(mmc);
1990 return wbsd_suspend(host, state);
1993 static int wbsd_pnp_resume(struct pnp_dev *pnp_dev)
1995 struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev);
1996 struct wbsd_host *host;
2001 DBGF("Resuming...\n");
2003 host = mmc_priv(mmc);
2006 * See if chip needs to be configured.
2008 if (host->config != 0) {
2009 if (!wbsd_chip_validate(host)) {
2010 printk(KERN_WARNING DRIVER_NAME
2011 ": PnP active but chip not configured! "
2012 "You probably have a buggy BIOS. "
2013 "Configuring chip manually.\n");
2014 wbsd_chip_config(host);
2019 * Allow device to initialise itself properly.
2023 return wbsd_resume(host);
2026 #endif /* CONFIG_PNP */
2028 #else /* CONFIG_PM */
2030 #define wbsd_platform_suspend NULL
2031 #define wbsd_platform_resume NULL
2033 #define wbsd_pnp_suspend NULL
2034 #define wbsd_pnp_resume NULL
2036 #endif /* CONFIG_PM */
2038 static struct platform_device *wbsd_device;
2040 static struct platform_driver wbsd_driver = {
2041 .probe = wbsd_probe,
2042 .remove = __devexit_p(wbsd_remove),
2044 .suspend = wbsd_platform_suspend,
2045 .resume = wbsd_platform_resume,
2047 .name = DRIVER_NAME,
2053 static struct pnp_driver wbsd_pnp_driver = {
2054 .name = DRIVER_NAME,
2055 .id_table = pnp_dev_table,
2056 .probe = wbsd_pnp_probe,
2057 .remove = __devexit_p(wbsd_pnp_remove),
2059 .suspend = wbsd_pnp_suspend,
2060 .resume = wbsd_pnp_resume,
2063 #endif /* CONFIG_PNP */
2066 * Module loading/unloading
2069 static int __init wbsd_drv_init(void)
2073 printk(KERN_INFO DRIVER_NAME
2074 ": Winbond W83L51xD SD/MMC card interface driver, "
2075 DRIVER_VERSION "\n");
2076 printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
2081 result = pnp_register_driver(&wbsd_pnp_driver);
2085 #endif /* CONFIG_PNP */
2088 result = platform_driver_register(&wbsd_driver);
2092 wbsd_device = platform_device_alloc(DRIVER_NAME, -1);
2094 platform_driver_unregister(&wbsd_driver);
2098 result = platform_device_add(wbsd_device);
2100 platform_device_put(wbsd_device);
2101 platform_driver_unregister(&wbsd_driver);
2109 static void __exit wbsd_drv_exit(void)
2114 pnp_unregister_driver(&wbsd_pnp_driver);
2116 #endif /* CONFIG_PNP */
2119 platform_device_unregister(wbsd_device);
2121 platform_driver_unregister(&wbsd_driver);
2127 module_init(wbsd_drv_init);
2128 module_exit(wbsd_drv_exit);
2130 module_param(nopnp, uint, 0444);
2132 module_param(io, uint, 0444);
2133 module_param(irq, uint, 0444);
2134 module_param(dma, int, 0444);
2136 MODULE_LICENSE("GPL");
2137 MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>");
2138 MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver");
2139 MODULE_VERSION(DRIVER_VERSION);
2142 MODULE_PARM_DESC(nopnp, "Scan for device instead of relying on PNP. (default 0)");
2144 MODULE_PARM_DESC(io, "I/O base to allocate. Must be 8-byte aligned. (default 0x248)");
2145 MODULE_PARM_DESC(irq, "IRQ to allocate. (default 6)");
2146 MODULE_PARM_DESC(dma, "DMA channel to allocate. -1 for no DMA. (default 2)");
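/*
 * Example usage when PnP detection is unavailable or unwanted (the values
 * shown are the documented defaults, with DMA on channel 2):
 *
 *	modprobe wbsd nopnp=1 io=0x248 irq=6 dma=2
 *
 * Pass dma=-1 to disable DMA and fall back on FIFO transfers.
 */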