 * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver
 *
 * Copyright (C) 2004-2005 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Changes to the FIFO system should be done with extreme care since
 * the hardware is full of bugs related to the FIFO. Known issues are:
 *
 * - FIFO size field in FSR is always zero.
 *
 * - FIFO interrupts tend not to work as they should. Interrupts are
 *   triggered only for full/empty events, not for threshold values.
 *
 * - On APIC systems the FIFO empty interrupt is sometimes lost.
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/pnp.h>
#include <linux/highmem.h>
#include <linux/mmc/host.h>
#include <linux/mmc/protocol.h>
#include <asm/scatterlist.h>

#define DRIVER_NAME "wbsd"
#define DRIVER_VERSION "1.6"

#define DBG(x...) \
	pr_debug(DRIVER_NAME ": " x)
#define DBGF(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__, ##x)

static const struct pnp_device_id pnp_dev_table[] = {
MODULE_DEVICE_TABLE(pnp, pnp_dev_table);
#endif /* CONFIG_PNP */

static const int config_ports[] = { 0x2E, 0x4E };
static const int unlock_codes[] = { 0x83, 0x87 };

static const int valid_ids[] = {
static unsigned int nopnp = 0;
static const unsigned int nopnp = 1;
static unsigned int io = 0x248;
static unsigned int irq = 6;
static inline void wbsd_unlock_config(struct wbsd_host *host)
BUG_ON(host->config == 0);
outb(host->unlock_code, host->config);
outb(host->unlock_code, host->config);

static inline void wbsd_lock_config(struct wbsd_host *host)
BUG_ON(host->config == 0);
outb(LOCK_CODE, host->config);

static inline void wbsd_write_config(struct wbsd_host *host, u8 reg, u8 value)
BUG_ON(host->config == 0);
outb(reg, host->config);
outb(value, host->config + 1);

static inline u8 wbsd_read_config(struct wbsd_host *host, u8 reg)
BUG_ON(host->config == 0);
outb(reg, host->config);
return inb(host->config + 1);
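
/*
 * The config registers use a classic Super I/O index/data pair: the
 * index is written to the config port and the value is read or written
 * at config + 1, bracketed by the unlock/lock sequence. For illustration
 * only, the pattern used by wbsd_chip_config() further down is:
 *
 *   wbsd_unlock_config(host);
 *   wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
 *   wbsd_write_config(host, WBSD_CONF_ENABLE, 1);
 *   wbsd_lock_config(host);
 */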
static inline void wbsd_write_index(struct wbsd_host *host, u8 index, u8 value)
outb(index, host->base + WBSD_IDXR);
outb(value, host->base + WBSD_DATAR);

static inline u8 wbsd_read_index(struct wbsd_host *host, u8 index)
outb(index, host->base + WBSD_IDXR);
return inb(host->base + WBSD_DATAR);
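
/*
 * The SD/MMC registers themselves are banked the same way: writing an
 * index to WBSD_IDXR exposes that register at WBSD_DATAR. As an
 * illustration, reading the current clock setting amounts to:
 *
 *   u8 clk = wbsd_read_index(host, WBSD_IDX_CLK);
 */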
static void wbsd_init_device(struct wbsd_host *host)
 * Reset chip (SD/MMC part) and fifo.
setup = wbsd_read_index(host, WBSD_IDX_SETUP);
setup |= WBSD_FIFO_RESET | WBSD_SOFT_RESET;
wbsd_write_index(host, WBSD_IDX_SETUP, setup);
setup &= ~WBSD_DAT3_H;
wbsd_write_index(host, WBSD_IDX_SETUP, setup);
host->flags &= ~WBSD_FIGNORE_DETECT;
 * Read back default clock.
host->clk = wbsd_read_index(host, WBSD_IDX_CLK);
outb(WBSD_POWER_N, host->base + WBSD_CSR);
 * Set maximum timeout.
wbsd_write_index(host, WBSD_IDX_TAAC, 0x7F);
 * Test for card presence
if (inb(host->base + WBSD_CSR) & WBSD_CARDPRESENT)
host->flags |= WBSD_FCARD_PRESENT;
host->flags &= ~WBSD_FCARD_PRESENT;
 * Enable interesting interrupts.
ier |= WBSD_EINT_CARD;
ier |= WBSD_EINT_FIFO_THRE;
ier |= WBSD_EINT_CCRC;
ier |= WBSD_EINT_TIMEOUT;
ier |= WBSD_EINT_CRC;
outb(ier, host->base + WBSD_EIR);
inb(host->base + WBSD_ISR);

static void wbsd_reset(struct wbsd_host *host)
printk(KERN_ERR "%s: Resetting chip\n", mmc_hostname(host->mmc));
 * Soft reset of chip (SD/MMC part).
setup = wbsd_read_index(host, WBSD_IDX_SETUP);
setup |= WBSD_SOFT_RESET;
wbsd_write_index(host, WBSD_IDX_SETUP, setup);

static void wbsd_request_end(struct wbsd_host *host, struct mmc_request *mrq)
unsigned long dmaflags;
DBGF("Ending request, cmd (%x)\n", mrq->cmd->opcode);
if (host->dma >= 0) {
 * Release ISA DMA controller.
dmaflags = claim_dma_lock();
disable_dma(host->dma);
clear_dma_ff(host->dma);
release_dma_lock(dmaflags);
 * Disable DMA on host.
wbsd_write_index(host, WBSD_IDX_DMA, 0);
 * MMC layer might call back into the driver so first unlock.
spin_unlock(&host->lock);
mmc_request_done(host->mmc, mrq);
spin_lock(&host->lock);
 * Scatter/gather functions

static inline void wbsd_init_sg(struct wbsd_host *host, struct mmc_data *data)
 * Get info about the SG list from the data structure.
host->cur_sg = data->sg;
host->num_sg = data->sg_len;
host->remain = host->cur_sg->length;

static inline int wbsd_next_sg(struct wbsd_host *host)
 * Skip to next SG entry.
if (host->num_sg > 0) {
host->remain = host->cur_sg->length;

static inline char *wbsd_kmap_sg(struct wbsd_host *host)
host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ) +
host->cur_sg->offset;
return host->mapped_sg;

static inline void wbsd_kunmap_sg(struct wbsd_host *host)
kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ);

static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
unsigned int len, i, size;
struct scatterlist *sg;
char *dmabuf = host->dma_buffer;
 * Just loop through all entries. The size might not cover
 * the entire list, so make sure that we do not transfer
 * too much.
for (i = 0; i < len; i++) {
sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
if (size < sg[i].length)
memcpy(dmabuf, sgbuf, size);
memcpy(dmabuf, sgbuf, sg[i].length);
kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
dmabuf += sg[i].length;
if (size < sg[i].length)
size -= sg[i].length;
 * Check that we didn't get a request to transfer
 * more data than can fit into the SG list.

static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data)
unsigned int len, i, size;
struct scatterlist *sg;
char *dmabuf = host->dma_buffer;
 * Just loop through all entries. The size might not cover
 * the entire list, so make sure that we do not transfer
 * too much.
for (i = 0; i < len; i++) {
sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
if (size < sg[i].length)
memcpy(sgbuf, dmabuf, size);
memcpy(sgbuf, dmabuf, sg[i].length);
kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
dmabuf += sg[i].length;
if (size < sg[i].length)
size -= sg[i].length;
 * Check that we didn't get a request to transfer
 * more data than can fit into the SG list.
static inline void wbsd_get_short_reply(struct wbsd_host *host,
struct mmc_command *cmd)
 * Correct response type?
if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_SHORT) {
cmd->error = MMC_ERR_INVALID;
cmd->resp[0] = wbsd_read_index(host, WBSD_IDX_RESP12) << 24;
cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP13) << 16;
cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP14) << 8;
cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP15) << 0;
cmd->resp[1] = wbsd_read_index(host, WBSD_IDX_RESP16) << 24;

static inline void wbsd_get_long_reply(struct wbsd_host *host,
struct mmc_command *cmd)
 * Correct response type?
if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_LONG) {
cmd->error = MMC_ERR_INVALID;
for (i = 0; i < 4; i++) {
wbsd_read_index(host, WBSD_IDX_RESP1 + i * 4) << 24;
wbsd_read_index(host, WBSD_IDX_RESP2 + i * 4) << 16;
wbsd_read_index(host, WBSD_IDX_RESP3 + i * 4) << 8;
wbsd_read_index(host, WBSD_IDX_RESP4 + i * 4) << 0;

static void wbsd_send_command(struct wbsd_host *host, struct mmc_command *cmd)
DBGF("Sending cmd (%x)\n", cmd->opcode);
 * Clear accumulated ISR. The interrupt routine
 * will fill this one with events that occur during
 * Send the command (CRC calculated by host).
outb(cmd->opcode, host->base + WBSD_CMDR);
for (i = 3; i >= 0; i--)
outb((cmd->arg >> (i * 8)) & 0xff, host->base + WBSD_CMDR);
cmd->error = MMC_ERR_NONE;
 * Wait for the request to complete.
status = wbsd_read_index(host, WBSD_IDX_STATUS);
} while (status & WBSD_CARDTRAFFIC);
 * Do we expect a reply?
if (cmd->flags & MMC_RSP_PRESENT) {
if (isr & WBSD_INT_CARD)
cmd->error = MMC_ERR_TIMEOUT;
else if (isr & WBSD_INT_TIMEOUT)
cmd->error = MMC_ERR_TIMEOUT;
else if ((cmd->flags & MMC_RSP_CRC) && (isr & WBSD_INT_CRC))
cmd->error = MMC_ERR_BADCRC;
if (cmd->flags & MMC_RSP_136)
wbsd_get_long_reply(host, cmd);
wbsd_get_short_reply(host, cmd);
DBGF("Sent cmd (%x), res %d\n", cmd->opcode, cmd->error);
static void wbsd_empty_fifo(struct wbsd_host *host)
struct mmc_data *data = host->mrq->cmd->data;
 * Handle excessive data.
if (data->bytes_xfered == host->size)
buffer = wbsd_kmap_sg(host) + host->offset;
 * Drain the fifo. This has a tendency to loop longer
 * than the FIFO length (usually one block).
while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_EMPTY)) {
 * The size field in the FSR is broken so we have to
if (fsr & WBSD_FIFO_FULL)
else if (fsr & WBSD_FIFO_FUTHRE)
for (i = 0; i < fifo; i++) {
*buffer = inb(host->base + WBSD_DFR);
data->bytes_xfered++;
if (data->bytes_xfered == host->size) {
wbsd_kunmap_sg(host);
 * End of scatter list entry?
if (host->remain == 0) {
wbsd_kunmap_sg(host);
 * Get next entry. Check if last.
if (!wbsd_next_sg(host)) {
 * We should never reach this point.
 * It means that we're trying to
 * transfer more blocks than can fit
 * into the scatter list.
host->size = data->bytes_xfered;
buffer = wbsd_kmap_sg(host);
wbsd_kunmap_sg(host);
 * This is a very dirty hack to solve a
 * hardware problem. The chip doesn't trigger
 * FIFO threshold interrupts properly.
if ((host->size - data->bytes_xfered) < 16)
tasklet_schedule(&host->fifo_tasklet);
static void wbsd_fill_fifo(struct wbsd_host *host)
struct mmc_data *data = host->mrq->cmd->data;
 * Check that we aren't being called after the
 * entire buffer has been transferred.
if (data->bytes_xfered == host->size)
buffer = wbsd_kmap_sg(host) + host->offset;
 * Fill the fifo. This has a tendency to loop longer
 * than the FIFO length (usually one block).
while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_FULL)) {
 * The size field in the FSR is broken so we have to
if (fsr & WBSD_FIFO_EMPTY)
else if (fsr & WBSD_FIFO_EMTHRE)
for (i = 16; i > fifo; i--) {
outb(*buffer, host->base + WBSD_DFR);
data->bytes_xfered++;
if (data->bytes_xfered == host->size) {
wbsd_kunmap_sg(host);
 * End of scatter list entry?
if (host->remain == 0) {
wbsd_kunmap_sg(host);
 * Get next entry. Check if last.
if (!wbsd_next_sg(host)) {
 * We should never reach this point.
 * It means that we're trying to
 * transfer more blocks than can fit
 * into the scatter list.
host->size = data->bytes_xfered;
buffer = wbsd_kmap_sg(host);
wbsd_kunmap_sg(host);
 * The controller stops sending interrupts for
 * 'FIFO empty' under certain conditions. So we
 * need to be a bit more proactive.
tasklet_schedule(&host->fifo_tasklet);
static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data)
unsigned long dmaflags;
DBGF("blksz %04x blks %04x flags %08x\n",
data->blksz, data->blocks, data->flags);
DBGF("tsac %d ms nsac %d clk\n",
data->timeout_ns / 1000000, data->timeout_clks);
host->size = data->blocks * data->blksz;
 * Check timeout values for overflow.
 * (Yes, some cards cause this value to overflow).
if (data->timeout_ns > 127000000)
wbsd_write_index(host, WBSD_IDX_TAAC, 127);
wbsd_write_index(host, WBSD_IDX_TAAC,
data->timeout_ns / 1000000);
if (data->timeout_clks > 255)
wbsd_write_index(host, WBSD_IDX_NSAC, 255);
wbsd_write_index(host, WBSD_IDX_NSAC, data->timeout_clks);
 * Inform the chip of how large blocks will be
 * sent. It needs this to determine when to
 * Space for CRC must be included in the size.
 * Two bytes are needed for each data line.
if (host->bus_width == MMC_BUS_WIDTH_1) {
blksize = data->blksz + 2;
wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0);
wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
} else if (host->bus_width == MMC_BUS_WIDTH_4) {
blksize = data->blksz + 2 * 4;
wbsd_write_index(host, WBSD_IDX_PBSMSB,
((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH);
wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
data->error = MMC_ERR_INVALID;
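/*
 * Worked example (assuming 512-byte blocks): on a 1-bit bus the chip is
 * told 512 + 2 = 514 bytes per block; on a 4-bit bus 512 + 2 * 4 = 520
 * bytes (0x208), so PBSMSB receives 0x20 (bits 11:8 of the size in its
 * high nibble, ORed with WBSD_DATA_WIDTH) and PBSLSB receives 0x08.
 */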
 * Clear the FIFO. This is needed even for DMA
 * transfers since the chip still uses the FIFO
setup = wbsd_read_index(host, WBSD_IDX_SETUP);
setup |= WBSD_FIFO_RESET;
wbsd_write_index(host, WBSD_IDX_SETUP, setup);
if (host->dma >= 0) {
 * The buffer for DMA is only 64 kB.
BUG_ON(host->size > 0x10000);
if (host->size > 0x10000) {
data->error = MMC_ERR_INVALID;
 * Transfer data from the SG list to the DMA buffer.
if (data->flags & MMC_DATA_WRITE)
wbsd_sg_to_dma(host, data);
 * Initialise the ISA DMA controller.
dmaflags = claim_dma_lock();
disable_dma(host->dma);
clear_dma_ff(host->dma);
if (data->flags & MMC_DATA_READ)
set_dma_mode(host->dma, DMA_MODE_READ & ~0x40);
set_dma_mode(host->dma, DMA_MODE_WRITE & ~0x40);
set_dma_addr(host->dma, host->dma_addr);
set_dma_count(host->dma, host->size);
enable_dma(host->dma);
release_dma_lock(dmaflags);
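/*
 * This is the standard ISA (8237-style) DMA programming sequence: mask
 * the channel with disable_dma(), reset the byte-pointer flip-flop with
 * clear_dma_ff() so the 16-bit address and count writes land in the
 * right order, program mode, address and count, then unmask the channel
 * again, all while holding the DMA spinlock.
 */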
 * Enable DMA on the host.
wbsd_write_index(host, WBSD_IDX_DMA, WBSD_DMA_ENABLE);
 * This flag is used to keep printk
 * output to a minimum.
 * Initialise the SG list.
wbsd_init_sg(host, data);
wbsd_write_index(host, WBSD_IDX_DMA, 0);
 * Set up FIFO threshold levels (and fill
 * buffer if doing a write).
if (data->flags & MMC_DATA_READ) {
wbsd_write_index(host, WBSD_IDX_FIFOEN,
WBSD_FIFOEN_FULL | 8);
wbsd_write_index(host, WBSD_IDX_FIFOEN,
WBSD_FIFOEN_EMPTY | 8);
wbsd_fill_fifo(host);
data->error = MMC_ERR_NONE;
static void wbsd_finish_data(struct wbsd_host *host, struct mmc_data *data)
unsigned long dmaflags;
WARN_ON(host->mrq == NULL);
 * Send a stop command if needed.
wbsd_send_command(host, data->stop);
 * Wait for the controller to leave the data transfer state.
status = wbsd_read_index(host, WBSD_IDX_STATUS);
} while (status & (WBSD_BLOCK_READ | WBSD_BLOCK_WRITE));
if (host->dma >= 0) {
 * Disable DMA on the host.
wbsd_write_index(host, WBSD_IDX_DMA, 0);
 * Turn off the ISA DMA controller.
dmaflags = claim_dma_lock();
disable_dma(host->dma);
clear_dma_ff(host->dma);
count = get_dma_residue(host->dma);
release_dma_lock(dmaflags);
printk(KERN_ERR "%s: Incomplete DMA transfer. "
mmc_hostname(host->mmc), count);
data->error = MMC_ERR_FAILED;
 * Transfer data from the DMA buffer to the SG list.
if (data->flags & MMC_DATA_READ)
wbsd_dma_to_sg(host, data);
data->bytes_xfered = host->size;
DBGF("Ending data transfer (%d bytes)\n", data->bytes_xfered);
wbsd_request_end(host, host->mrq);
/*****************************************************************************\
 * MMC layer callbacks *
\*****************************************************************************/

static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
struct wbsd_host *host = mmc_priv(mmc);
struct mmc_command *cmd;
 * Disable tasklets to avoid a deadlock.
spin_lock_bh(&host->lock);
BUG_ON(host->mrq != NULL);
 * If there is no card in the slot then
 * time out immediately.
if (!(host->flags & WBSD_FCARD_PRESENT)) {
cmd->error = MMC_ERR_TIMEOUT;
 * Does the request include data?
wbsd_prepare_data(host, cmd->data);
if (cmd->data->error != MMC_ERR_NONE)
wbsd_send_command(host, cmd);
 * If this is a data transfer the request
 * will be finished after the data has been transferred.
if (cmd->data && (cmd->error == MMC_ERR_NONE)) {
 * Dirty fix for hardware bug.
tasklet_schedule(&host->fifo_tasklet);
spin_unlock_bh(&host->lock);
wbsd_request_end(host, mrq);
spin_unlock_bh(&host->lock);
static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct wbsd_host *host = mmc_priv(mmc);
spin_lock_bh(&host->lock);
 * Reset the chip on each power off.
 * Should clear out any weird states.
if (ios->power_mode == MMC_POWER_OFF)
wbsd_init_device(host);
if (ios->clock >= 24000000)
else if (ios->clock >= 16000000)
else if (ios->clock >= 12000000)
 * Only write to the clock register when
 * there is an actual change.
if (clk != host->clk) {
wbsd_write_index(host, WBSD_IDX_CLK, clk);
if (ios->power_mode != MMC_POWER_OFF) {
pwr = inb(host->base + WBSD_CSR);
pwr &= ~WBSD_POWER_N;
outb(pwr, host->base + WBSD_CSR);
 * MMC cards need to have pin 1 high during init.
 * It wreaks havoc with the card detection though so
 * that needs to be disabled.
setup = wbsd_read_index(host, WBSD_IDX_SETUP);
if (ios->chip_select == MMC_CS_HIGH) {
BUG_ON(ios->bus_width != MMC_BUS_WIDTH_1);
setup |= WBSD_DAT3_H;
host->flags |= WBSD_FIGNORE_DETECT;
if (setup & WBSD_DAT3_H) {
setup &= ~WBSD_DAT3_H;
 * We cannot resume card detection immediately
 * because of capacitance and delays in the chip.
mod_timer(&host->ignore_timer, jiffies + HZ / 100);
wbsd_write_index(host, WBSD_IDX_SETUP, setup);
 * Store bus width for later. Will be used when
 * setting up the data transfer.
host->bus_width = ios->bus_width;
spin_unlock_bh(&host->lock);
static int wbsd_get_ro(struct mmc_host *mmc)
struct wbsd_host *host = mmc_priv(mmc);
spin_lock_bh(&host->lock);
csr = inb(host->base + WBSD_CSR);
outb(csr, host->base + WBSD_CSR);
csr = inb(host->base + WBSD_CSR);
outb(csr, host->base + WBSD_CSR);
spin_unlock_bh(&host->lock);
return csr & WBSD_WRPT;

static struct mmc_host_ops wbsd_ops = {
.request = wbsd_request,
.set_ios = wbsd_set_ios,
.get_ro = wbsd_get_ro,

/*****************************************************************************\
 * Interrupt handling *
\*****************************************************************************/
 * Helper function to reset the card detection ignore flag

static void wbsd_reset_ignore(unsigned long data)
struct wbsd_host *host = (struct wbsd_host *)data;
BUG_ON(host == NULL);
DBG("Resetting card detection ignore\n");
spin_lock_bh(&host->lock);
host->flags &= ~WBSD_FIGNORE_DETECT;
 * Card status might have changed during the ignore period.
tasklet_schedule(&host->card_tasklet);
spin_unlock_bh(&host->lock);

static inline struct mmc_data *wbsd_get_data(struct wbsd_host *host)
WARN_ON(!host->mrq);
WARN_ON(!host->mrq->cmd);
if (!host->mrq->cmd)
WARN_ON(!host->mrq->cmd->data);
if (!host->mrq->cmd->data)
return host->mrq->cmd->data;
static void wbsd_tasklet_card(unsigned long param)
struct wbsd_host *host = (struct wbsd_host *)param;
spin_lock(&host->lock);
if (host->flags & WBSD_FIGNORE_DETECT) {
spin_unlock(&host->lock);
csr = inb(host->base + WBSD_CSR);
WARN_ON(csr == 0xff);
if (csr & WBSD_CARDPRESENT) {
if (!(host->flags & WBSD_FCARD_PRESENT)) {
DBG("Card inserted\n");
host->flags |= WBSD_FCARD_PRESENT;
} else if (host->flags & WBSD_FCARD_PRESENT) {
DBG("Card removed\n");
host->flags &= ~WBSD_FCARD_PRESENT;
printk(KERN_ERR "%s: Card removed during transfer!\n",
mmc_hostname(host->mmc));
host->mrq->cmd->error = MMC_ERR_FAILED;
tasklet_schedule(&host->finish_tasklet);
 * Unlock first since we might get a call back.
spin_unlock(&host->lock);
mmc_detect_change(host->mmc, msecs_to_jiffies(delay));
static void wbsd_tasklet_fifo(unsigned long param)
struct wbsd_host *host = (struct wbsd_host *)param;
struct mmc_data *data;
spin_lock(&host->lock);
data = wbsd_get_data(host);
if (data->flags & MMC_DATA_WRITE)
wbsd_fill_fifo(host);
wbsd_empty_fifo(host);
if (host->size == data->bytes_xfered) {
wbsd_write_index(host, WBSD_IDX_FIFOEN, 0);
tasklet_schedule(&host->finish_tasklet);
spin_unlock(&host->lock);

static void wbsd_tasklet_crc(unsigned long param)
struct wbsd_host *host = (struct wbsd_host *)param;
struct mmc_data *data;
spin_lock(&host->lock);
data = wbsd_get_data(host);
DBGF("CRC error\n");
data->error = MMC_ERR_BADCRC;
tasklet_schedule(&host->finish_tasklet);
spin_unlock(&host->lock);

static void wbsd_tasklet_timeout(unsigned long param)
struct wbsd_host *host = (struct wbsd_host *)param;
struct mmc_data *data;
spin_lock(&host->lock);
data = wbsd_get_data(host);
data->error = MMC_ERR_TIMEOUT;
tasklet_schedule(&host->finish_tasklet);
spin_unlock(&host->lock);

static void wbsd_tasklet_finish(unsigned long param)
struct wbsd_host *host = (struct wbsd_host *)param;
struct mmc_data *data;
spin_lock(&host->lock);
WARN_ON(!host->mrq);
data = wbsd_get_data(host);
wbsd_finish_data(host, data);
spin_unlock(&host->lock);

static void wbsd_tasklet_block(unsigned long param)
struct wbsd_host *host = (struct wbsd_host *)param;
struct mmc_data *data;
spin_lock(&host->lock);
if ((wbsd_read_index(host, WBSD_IDX_CRCSTATUS) & WBSD_CRC_MASK) !=
data = wbsd_get_data(host);
DBGF("CRC error\n");
data->error = MMC_ERR_BADCRC;
tasklet_schedule(&host->finish_tasklet);
spin_unlock(&host->lock);
 * Interrupt handling

static irqreturn_t wbsd_irq(int irq, void *dev_id, struct pt_regs *regs)
struct wbsd_host *host = dev_id;
isr = inb(host->base + WBSD_ISR);
 * Was it actually our hardware that caused the interrupt?
if (isr == 0xff || isr == 0x00)
 * Schedule tasklets as needed.
if (isr & WBSD_INT_CARD)
tasklet_schedule(&host->card_tasklet);
if (isr & WBSD_INT_FIFO_THRE)
tasklet_schedule(&host->fifo_tasklet);
if (isr & WBSD_INT_CRC)
tasklet_hi_schedule(&host->crc_tasklet);
if (isr & WBSD_INT_TIMEOUT)
tasklet_hi_schedule(&host->timeout_tasklet);
if (isr & WBSD_INT_BUSYEND)
tasklet_hi_schedule(&host->block_tasklet);
if (isr & WBSD_INT_TC)
tasklet_schedule(&host->finish_tasklet);
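
/*
 * Note that the error-type events (CRC, timeout, busy-end) are queued
 * with tasklet_hi_schedule(), presumably so they run ahead of any
 * pending card-detect, FIFO or transfer-complete work scheduled at
 * normal priority.
 */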
/*****************************************************************************\
 * Device initialisation and shutdown *
\*****************************************************************************/

 * Allocate/free MMC structure.

static int __devinit wbsd_alloc_mmc(struct device *dev)
struct mmc_host *mmc;
struct wbsd_host *host;
 * Allocate MMC structure.
mmc = mmc_alloc_host(sizeof(struct wbsd_host), dev);
host = mmc_priv(mmc);
 * Set host parameters.
mmc->ops = &wbsd_ops;
mmc->f_min = 375000;
mmc->f_max = 24000000;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
mmc->caps = MMC_CAP_4_BIT_DATA;
spin_lock_init(&host->lock);
init_timer(&host->ignore_timer);
host->ignore_timer.data = (unsigned long)host;
host->ignore_timer.function = wbsd_reset_ignore;
 * Maximum number of segments. Worst case is one sector per segment
 * so this will be 64kB/512.
mmc->max_hw_segs = 128;
mmc->max_phys_segs = 128;
 * Maximum number of sectors in one transfer. Also limited by the 64kB buffer.
mmc->max_sectors = 128;
 * Maximum segment size. Could be one segment with the maximum number of sectors.
mmc->max_seg_size = mmc->max_sectors * 512;
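/*
 * These limits line up with the ISA DMA bounce buffer used below:
 * 128 segments/sectors * 512 bytes = 64 kB, which matches the
 * "buffer for DMA is only 64 kB" check in wbsd_prepare_data().
 */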
dev_set_drvdata(dev, mmc);

static void __devexit wbsd_free_mmc(struct device *dev)
struct mmc_host *mmc;
struct wbsd_host *host;
mmc = dev_get_drvdata(dev);
host = mmc_priv(mmc);
BUG_ON(host == NULL);
del_timer_sync(&host->ignore_timer);
dev_set_drvdata(dev, NULL);

 * Scan for known chip IDs

static int __devinit wbsd_scan(struct wbsd_host *host)
 * Iterate through all ports and unlock codes to
 * find hardware that is in our known list.
for (i = 0; i < ARRAY_SIZE(config_ports); i++) {
if (!request_region(config_ports[i], 2, DRIVER_NAME))
for (j = 0; j < ARRAY_SIZE(unlock_codes); j++) {
host->config = config_ports[i];
host->unlock_code = unlock_codes[j];
wbsd_unlock_config(host);
outb(WBSD_CONF_ID_HI, config_ports[i]);
id = inb(config_ports[i] + 1) << 8;
outb(WBSD_CONF_ID_LO, config_ports[i]);
id |= inb(config_ports[i] + 1);
wbsd_lock_config(host);
for (k = 0; k < ARRAY_SIZE(valid_ids); k++) {
if (id == valid_ids[k]) {
DBG("Unknown hardware (id %x) found at %x\n",
id, config_ports[i]);
release_region(config_ports[i], 2);
host->unlock_code = 0;
 * Allocate/free I/O port ranges

static int __devinit wbsd_request_region(struct wbsd_host *host, int base)
if (!request_region(base, 8, DRIVER_NAME))

static void __devexit wbsd_release_regions(struct wbsd_host *host)
release_region(host->base, 8);
release_region(host->config, 2);

 * Allocate/free DMA port and buffer

static void __devinit wbsd_request_dma(struct wbsd_host *host, int dma)
if (request_dma(dma, DRIVER_NAME))
 * We need to allocate a special buffer in
 * order for ISA to be able to DMA to it.
host->dma_buffer = kmalloc(WBSD_DMA_SIZE,
GFP_NOIO | GFP_DMA | __GFP_REPEAT | __GFP_NOWARN);
if (!host->dma_buffer)
 * Translate the address to a physical address.
host->dma_addr = dma_map_single(host->mmc->dev, host->dma_buffer,
WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
 * ISA DMA must be aligned on a 64k basis.
if ((host->dma_addr & 0xffff) != 0)
 * ISA cannot access memory above 16 MB.
else if (host->dma_addr >= 0x1000000)
 * If we've gotten here then there is some kind of alignment bug
dma_unmap_single(host->mmc->dev, host->dma_addr,
WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
host->dma_addr = (dma_addr_t)NULL;
kfree(host->dma_buffer);
host->dma_buffer = NULL;
printk(KERN_WARNING DRIVER_NAME ": Unable to allocate DMA %d. "
"Falling back on FIFO.\n", dma);
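
/*
 * Background on the checks above (numbers are illustrative): an 8237
 * ISA DMA transfer cannot cross a 64 kB physical boundary, so a 64 kB
 * bounce buffer has to start on a 64 kB boundary; a dma_addr of, say,
 * 0x00123400 would fail the (dma_addr & 0xffff) test. The 16 MB limit
 * comes from the controller's 24-bit reach (16 address bits plus an
 * 8-bit page register).
 */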
static void __devexit wbsd_release_dma(struct wbsd_host *host)
if (host->dma_addr) {
dma_unmap_single(host->mmc->dev, host->dma_addr,
WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
kfree(host->dma_buffer);
free_dma(host->dma);
host->dma_buffer = NULL;
host->dma_addr = (dma_addr_t)NULL;

 * Allocate/free IRQ.

static int __devinit wbsd_request_irq(struct wbsd_host *host, int irq)
 * Allocate interrupt.
ret = request_irq(irq, wbsd_irq, IRQF_SHARED, DRIVER_NAME, host);
tasklet_init(&host->card_tasklet, wbsd_tasklet_card,
(unsigned long)host);
tasklet_init(&host->fifo_tasklet, wbsd_tasklet_fifo,
(unsigned long)host);
tasklet_init(&host->crc_tasklet, wbsd_tasklet_crc,
(unsigned long)host);
tasklet_init(&host->timeout_tasklet, wbsd_tasklet_timeout,
(unsigned long)host);
tasklet_init(&host->finish_tasklet, wbsd_tasklet_finish,
(unsigned long)host);
tasklet_init(&host->block_tasklet, wbsd_tasklet_block,
(unsigned long)host);

static void __devexit wbsd_release_irq(struct wbsd_host *host)
free_irq(host->irq, host);
tasklet_kill(&host->card_tasklet);
tasklet_kill(&host->fifo_tasklet);
tasklet_kill(&host->crc_tasklet);
tasklet_kill(&host->timeout_tasklet);
tasklet_kill(&host->finish_tasklet);
tasklet_kill(&host->block_tasklet);
 * Allocate all resources for the host.

static int __devinit wbsd_request_resources(struct wbsd_host *host,
int base, int irq, int dma)
 * Allocate I/O ports.
ret = wbsd_request_region(host, base);
 * Allocate interrupt.
ret = wbsd_request_irq(host, irq);
wbsd_request_dma(host, dma);

 * Release all resources for the host.

static void __devexit wbsd_release_resources(struct wbsd_host *host)
wbsd_release_dma(host);
wbsd_release_irq(host);
wbsd_release_regions(host);

 * Configure the resources the chip should use.

static void wbsd_chip_config(struct wbsd_host *host)
wbsd_unlock_config(host);
wbsd_write_config(host, WBSD_CONF_SWRST, 1);
wbsd_write_config(host, WBSD_CONF_SWRST, 0);
 * Select SD/MMC function.
wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
 * Set up card detection.
wbsd_write_config(host, WBSD_CONF_PINS, WBSD_PINS_DETECT_GP11);
wbsd_write_config(host, WBSD_CONF_PORT_HI, host->base >> 8);
wbsd_write_config(host, WBSD_CONF_PORT_LO, host->base & 0xff);
wbsd_write_config(host, WBSD_CONF_IRQ, host->irq);
wbsd_write_config(host, WBSD_CONF_DRQ, host->dma);
 * Enable and power up chip.
wbsd_write_config(host, WBSD_CONF_ENABLE, 1);
wbsd_write_config(host, WBSD_CONF_POWER, 0x20);
wbsd_lock_config(host);

 * Check that configured resources are correct.

static int wbsd_chip_validate(struct wbsd_host *host)
wbsd_unlock_config(host);
 * Select SD/MMC function.
wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
 * Read configuration.
base = wbsd_read_config(host, WBSD_CONF_PORT_HI) << 8;
base |= wbsd_read_config(host, WBSD_CONF_PORT_LO);
irq = wbsd_read_config(host, WBSD_CONF_IRQ);
dma = wbsd_read_config(host, WBSD_CONF_DRQ);
wbsd_lock_config(host);
 * Validate against given configuration.
if (base != host->base)
if (irq != host->irq)
if ((dma != host->dma) && (host->dma != -1))

 * Powers down the SD function

static void wbsd_chip_poweroff(struct wbsd_host *host)
wbsd_unlock_config(host);
wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
wbsd_write_config(host, WBSD_CONF_ENABLE, 0);
wbsd_lock_config(host);
/*****************************************************************************\
 * Device setup and shutdown *
\*****************************************************************************/

static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma,
struct wbsd_host *host = NULL;
struct mmc_host *mmc = NULL;
ret = wbsd_alloc_mmc(dev);
mmc = dev_get_drvdata(dev);
host = mmc_priv(mmc);
 * Scan for hardware.
ret = wbsd_scan(host);
if (pnp && (ret == -ENODEV)) {
printk(KERN_WARNING DRIVER_NAME
": Unable to confirm device presence. You may "
"experience lock-ups.\n");
 * Request resources.
ret = wbsd_request_resources(host, base, irq, dma);
wbsd_release_resources(host);
 * See if chip needs to be configured.
if ((host->config != 0) && !wbsd_chip_validate(host)) {
printk(KERN_WARNING DRIVER_NAME
": PnP active but chip not configured! "
"You probably have a buggy BIOS. "
"Configuring chip manually.\n");
wbsd_chip_config(host);
wbsd_chip_config(host);
 * Power Management stuff. No idea how this works.
wbsd_unlock_config(host);
wbsd_write_config(host, WBSD_CONF_PME, 0xA0);
wbsd_lock_config(host);
 * Allow device to initialise itself properly.
 * Reset the chip into a known state.
wbsd_init_device(host);
printk(KERN_INFO "%s: W83L51xD", mmc_hostname(mmc));
if (host->chip_id != 0)
printk(" id %x", (int)host->chip_id);
printk(" at 0x%x irq %d", (int)host->base, (int)host->irq);
printk(" dma %d", (int)host->dma);

static void __devexit wbsd_shutdown(struct device *dev, int pnp)
struct mmc_host *mmc = dev_get_drvdata(dev);
struct wbsd_host *host;
host = mmc_priv(mmc);
mmc_remove_host(mmc);
 * Power down the SD/MMC function.
wbsd_chip_poweroff(host);
wbsd_release_resources(host);
static int __devinit wbsd_probe(struct platform_device *dev)
/* Use the module parameters for resources */
return wbsd_init(&dev->dev, io, irq, dma, 0);

static int __devexit wbsd_remove(struct platform_device *dev)
wbsd_shutdown(&dev->dev, 0);

static int __devinit
wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id)
 * Get resources from PnP layer.
io = pnp_port_start(pnpdev, 0);
irq = pnp_irq(pnpdev, 0);
if (pnp_dma_valid(pnpdev, 0))
dma = pnp_dma(pnpdev, 0);
DBGF("PnP resources: port %3x irq %d dma %d\n", io, irq, dma);
return wbsd_init(&pnpdev->dev, io, irq, dma, 1);

static void __devexit wbsd_pnp_remove(struct pnp_dev *dev)
wbsd_shutdown(&dev->dev, 1);

#endif /* CONFIG_PNP */
static int wbsd_suspend(struct wbsd_host *host, pm_message_t state)
BUG_ON(host == NULL);
return mmc_suspend_host(host->mmc, state);

static int wbsd_resume(struct wbsd_host *host)
BUG_ON(host == NULL);
wbsd_init_device(host);
return mmc_resume_host(host->mmc);

static int wbsd_platform_suspend(struct platform_device *dev,
struct mmc_host *mmc = platform_get_drvdata(dev);
struct wbsd_host *host;
DBGF("Suspending...\n");
host = mmc_priv(mmc);
ret = wbsd_suspend(host, state);
wbsd_chip_poweroff(host);

static int wbsd_platform_resume(struct platform_device *dev)
struct mmc_host *mmc = platform_get_drvdata(dev);
struct wbsd_host *host;
DBGF("Resuming...\n");
host = mmc_priv(mmc);
wbsd_chip_config(host);
 * Allow device to initialise itself properly.
return wbsd_resume(host);

static int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state)
struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev);
struct wbsd_host *host;
DBGF("Suspending...\n");
host = mmc_priv(mmc);
return wbsd_suspend(host, state);

static int wbsd_pnp_resume(struct pnp_dev *pnp_dev)
struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev);
struct wbsd_host *host;
DBGF("Resuming...\n");
host = mmc_priv(mmc);
 * See if chip needs to be configured.
if (host->config != 0) {
if (!wbsd_chip_validate(host)) {
printk(KERN_WARNING DRIVER_NAME
": PnP active but chip not configured! "
"You probably have a buggy BIOS. "
"Configuring chip manually.\n");
wbsd_chip_config(host);
 * Allow device to initialise itself properly.
return wbsd_resume(host);

#endif /* CONFIG_PNP */

#else /* CONFIG_PM */

#define wbsd_platform_suspend NULL
#define wbsd_platform_resume NULL
#define wbsd_pnp_suspend NULL
#define wbsd_pnp_resume NULL

#endif /* CONFIG_PM */
static struct platform_device *wbsd_device;

static struct platform_driver wbsd_driver = {
.probe = wbsd_probe,
.remove = __devexit_p(wbsd_remove),
.suspend = wbsd_platform_suspend,
.resume = wbsd_platform_resume,
.name = DRIVER_NAME,

static struct pnp_driver wbsd_pnp_driver = {
.name = DRIVER_NAME,
.id_table = pnp_dev_table,
.probe = wbsd_pnp_probe,
.remove = __devexit_p(wbsd_pnp_remove),
.suspend = wbsd_pnp_suspend,
.resume = wbsd_pnp_resume,

#endif /* CONFIG_PNP */

 * Module loading/unloading

static int __init wbsd_drv_init(void)
printk(KERN_INFO DRIVER_NAME
": Winbond W83L51xD SD/MMC card interface driver, "
DRIVER_VERSION "\n");
printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
result = pnp_register_driver(&wbsd_pnp_driver);
#endif /* CONFIG_PNP */
result = platform_driver_register(&wbsd_driver);
wbsd_device = platform_device_alloc(DRIVER_NAME, -1);
platform_driver_unregister(&wbsd_driver);
result = platform_device_add(wbsd_device);
platform_device_put(wbsd_device);
platform_driver_unregister(&wbsd_driver);

static void __exit wbsd_drv_exit(void)
pnp_unregister_driver(&wbsd_pnp_driver);
#endif /* CONFIG_PNP */
platform_device_unregister(wbsd_device);
platform_driver_unregister(&wbsd_driver);

module_init(wbsd_drv_init);
module_exit(wbsd_drv_exit);

module_param(nopnp, uint, 0444);
module_param(io, uint, 0444);
module_param(irq, uint, 0444);
module_param(dma, int, 0444);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>");
MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver");
MODULE_VERSION(DRIVER_VERSION);

MODULE_PARM_DESC(nopnp, "Scan for device instead of relying on PNP. (default 0)");
MODULE_PARM_DESC(io, "I/O base to allocate. Must be 8 byte aligned. (default 0x248)");
MODULE_PARM_DESC(irq, "IRQ to allocate. (default 6)");
MODULE_PARM_DESC(dma, "DMA channel to allocate. -1 for no DMA. (default 2)");