2 * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver
4 * Copyright (C) 2004-2005 Pierre Ossman, All Rights Reserved.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
13 * Changes to the FIFO system should be done with extreme care since
14 * the hardware is full of bugs related to the FIFO. Known issues are:
16 * - FIFO size field in FSR is always zero.
18 * - FIFO interrupts tend not to work as they should. Interrupts are
19 * triggered only for full/empty events, not for threshold values.
21 * - On APIC systems the FIFO empty interrupt is sometimes lost.
24 #include <linux/config.h>
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/device.h>
30 #include <linux/interrupt.h>
31 #include <linux/dma-mapping.h>
32 #include <linux/delay.h>
33 #include <linux/pnp.h>
34 #include <linux/highmem.h>
35 #include <linux/mmc/host.h>
36 #include <linux/mmc/protocol.h>
40 #include <asm/scatterlist.h>
/* Driver identity strings used in log output and resource names. */
44 #define DRIVER_NAME "wbsd"
45 #define DRIVER_VERSION "1.4"
/*
 * Debug helpers: DBG() prints a plain message, DBGF() prefixes the
 * calling function's name.  Both compile to no-ops when
 * CONFIG_MMC_DEBUG is off.
 * NOTE(review): this listing has dropped lines here (the "#define DBG"
 * line of the debug branch plus the #else/#endif) — recover them from
 * the original wbsd.c before building.
 */
47 #ifdef CONFIG_MMC_DEBUG
49 printk(KERN_DEBUG DRIVER_NAME ": " x)
50 #define DBGF(f, x...) \
51 printk(KERN_DEBUG DRIVER_NAME " [%s()]: " f, __func__ , ##x)
53 #define DBG(x...) do { } while (0)
54 #define DBGF(x...) do { } while (0)
/* PnP IDs this driver binds to (table entries dropped from this listing). */
63 static const struct pnp_device_id pnp_dev_table[] = {
69 MODULE_DEVICE_TABLE(pnp, pnp_dev_table);
71 #endif /* CONFIG_PNP */
/*
 * Super-I/O configuration ports and unlock codes to probe, and the
 * chip IDs we recognise (ID values dropped from this listing).
 */
73 static const int config_ports[] = { 0x2E, 0x4E };
74 static const int unlock_codes[] = { 0x83, 0x87 };
76 static const int valid_ids[] = {
/*
 * Module parameters: "nopnp" forces manual scanning when PnP support
 * is compiled out (and is then const 1); io/irq give the default
 * manual resources.
 */
81 static unsigned int nopnp = 0;
83 static const unsigned int nopnp = 1;
85 static unsigned int io = 0x248;
86 static unsigned int irq = 6;
93 static inline void wbsd_unlock_config(struct wbsd_host* host)
95 BUG_ON(host->config == 0);
97 outb(host->unlock_code, host->config);
98 outb(host->unlock_code, host->config);
101 static inline void wbsd_lock_config(struct wbsd_host* host)
103 BUG_ON(host->config == 0);
105 outb(LOCK_CODE, host->config);
108 static inline void wbsd_write_config(struct wbsd_host* host, u8 reg, u8 value)
110 BUG_ON(host->config == 0);
112 outb(reg, host->config);
113 outb(value, host->config + 1);
116 static inline u8 wbsd_read_config(struct wbsd_host* host, u8 reg)
118 BUG_ON(host->config == 0);
120 outb(reg, host->config);
121 return inb(host->config + 1);
124 static inline void wbsd_write_index(struct wbsd_host* host, u8 index, u8 value)
126 outb(index, host->base + WBSD_IDXR);
127 outb(value, host->base + WBSD_DATAR);
130 static inline u8 wbsd_read_index(struct wbsd_host* host, u8 index)
132 outb(index, host->base + WBSD_IDXR);
133 return inb(host->base + WBSD_DATAR);
/*
 * wbsd_init_device - put the SD/MMC function into a known state:
 * reset chip + FIFO, release DAT3, read the default clock, power
 * down the socket, set max timeout, sample card presence and enable
 * the interrupts the driver cares about.
 * NOTE(review): this listing dropped lines here (the local decls of
 * "setup"/"ier", the "ier = 0" initialiser, braces) — recover from the
 * original wbsd.c.
 */
140 static void wbsd_init_device(struct wbsd_host* host)
145 * Reset chip (SD/MMC part) and fifo.
147 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
148 setup |= WBSD_FIFO_RESET | WBSD_SOFT_RESET;
149 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
/* Release DAT3 and stop ignoring card detection. */
154 setup &= ~WBSD_DAT3_H;
155 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
156 host->flags &= ~WBSD_FIGNORE_DETECT;
159 * Read back default clock.
161 host->clk = wbsd_read_index(host, WBSD_IDX_CLK);
/* Power down the card socket for now. */
166 outb(WBSD_POWER_N, host->base + WBSD_CSR);
169 * Set maximum timeout.
171 wbsd_write_index(host, WBSD_IDX_TAAC, 0x7F);
174 * Test for card presence
176 if (inb(host->base + WBSD_CSR) & WBSD_CARDPRESENT)
177 host->flags |= WBSD_FCARD_PRESENT;
179 host->flags &= ~WBSD_FCARD_PRESENT;
182 * Enable interesting interrupts.
185 ier |= WBSD_EINT_CARD;
186 ier |= WBSD_EINT_FIFO_THRE;
187 ier |= WBSD_EINT_CCRC;
188 ier |= WBSD_EINT_TIMEOUT;
189 ier |= WBSD_EINT_CRC;
192 outb(ier, host->base + WBSD_EIR);
/* Reading the ISR clears any interrupt status left over from before. */
197 inb(host->base + WBSD_ISR);
200 static void wbsd_reset(struct wbsd_host* host)
204 printk(KERN_ERR DRIVER_NAME ": Resetting chip\n");
207 * Soft reset of chip (SD/MMC part).
209 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
210 setup |= WBSD_SOFT_RESET;
211 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
/*
 * wbsd_request_end - finish an MMC request: shut the ISA DMA channel
 * down, disable DMA on the chip and hand the request back to the MMC
 * core (with the host lock dropped around the callback, since the
 * core may re-enter the driver).
 * NOTE(review): this listing dropped the "if (host->dma >= 0)" guard
 * around the DMA teardown and the "host->mrq = NULL" line — recover
 * from the original wbsd.c.
 */
214 static void wbsd_request_end(struct wbsd_host* host, struct mmc_request* mrq)
216 unsigned long dmaflags;
218 DBGF("Ending request, cmd (%x)\n", mrq->cmd->opcode);
223 * Release ISA DMA controller.
225 dmaflags = claim_dma_lock();
226 disable_dma(host->dma);
227 clear_dma_ff(host->dma);
228 release_dma_lock(dmaflags);
231 * Disable DMA on host.
233 wbsd_write_index(host, WBSD_IDX_DMA, 0);
239 * MMC layer might call back into the driver so first unlock.
241 spin_unlock(&host->lock);
242 mmc_request_done(host->mmc, mrq);
243 spin_lock(&host->lock);
247 * Scatter/gather functions
/*
 * wbsd_init_sg - start PIO at the head of the request's scatter list:
 * remember the current entry, the entry count and the bytes remaining
 * in the current entry.
 * NOTE(review): this listing dropped at least the "host->offset = 0"
 * line — recover from the original wbsd.c.
 */
250 static inline void wbsd_init_sg(struct wbsd_host* host, struct mmc_data* data)
253 * Get info. about SG list from data structure.
255 host->cur_sg = data->sg;
256 host->num_sg = data->sg_len;
259 host->remain = host->cur_sg->length;
/*
 * wbsd_next_sg - advance to the next scatter list entry; the visible
 * code resets the per-entry byte counter when entries remain.
 * NOTE(review): this listing dropped the advance/decrement statements
 * and the return value expression — recover from the original wbsd.c.
 */
262 static inline int wbsd_next_sg(struct wbsd_host* host)
265 * Skip to next SG entry.
273 if (host->num_sg > 0)
276 host->remain = host->cur_sg->length;
282 static inline char* wbsd_kmap_sg(struct wbsd_host* host)
284 host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ) +
285 host->cur_sg->offset;
286 return host->mapped_sg;
289 static inline void wbsd_kunmap_sg(struct wbsd_host* host)
291 kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ);
/*
 * wbsd_sg_to_dma - copy the request's scatter list into the bounce
 * buffer used for ISA DMA, clamping so no more than the transfer
 * size is copied.
 * NOTE(review): this listing dropped lines (the "sgbuf" declaration,
 * the len/sg/size initialisers, an "else", a "break" and the final
 * size sanity check) — recover from the original wbsd.c.
 */
294 static inline void wbsd_sg_to_dma(struct wbsd_host* host, struct mmc_data* data)
296 unsigned int len, i, size;
297 struct scatterlist* sg;
298 char* dmabuf = host->dma_buffer;
307 * Just loop through all entries. Size might not
308 * be the entire list though so make sure that
309 * we do not transfer too much.
311 for (i = 0;i < len;i++)
313 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
/* Copy at most "size" bytes from the last (partial) entry. */
314 if (size < sg[i].length)
315 memcpy(dmabuf, sgbuf, size);
317 memcpy(dmabuf, sgbuf, sg[i].length);
318 kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
319 dmabuf += sg[i].length;
321 if (size < sg[i].length)
324 size -= sg[i].length;
331 * Check that we didn't get a request to transfer
332 * more data than can fit into the SG list.
/*
 * wbsd_dma_to_sg - mirror of wbsd_sg_to_dma(): copy data back from
 * the ISA DMA bounce buffer into the request's scatter list after a
 * read, clamped to the transfer size.
 * NOTE(review): same dropped-lines caveat as wbsd_sg_to_dma() —
 * recover declarations/initialisers from the original wbsd.c.
 */
340 static inline void wbsd_dma_to_sg(struct wbsd_host* host, struct mmc_data* data)
342 unsigned int len, i, size;
343 struct scatterlist* sg;
344 char* dmabuf = host->dma_buffer;
353 * Just loop through all entries. Size might not
354 * be the entire list though so make sure that
355 * we do not transfer too much.
357 for (i = 0;i < len;i++)
359 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
/* Copy at most "size" bytes into the last (partial) entry. */
360 if (size < sg[i].length)
361 memcpy(sgbuf, dmabuf, size);
363 memcpy(sgbuf, dmabuf, sg[i].length);
364 kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
365 dmabuf += sg[i].length;
367 if (size < sg[i].length)
370 size -= sg[i].length;
377 * Check that we didn't get a request to transfer
378 * more data than can fit into the SG list.
/*
 * wbsd_get_short_reply - read a 32-bit (short) command response out
 * of the RESP12..RESP16 index registers into cmd->resp, flagging
 * MMC_ERR_INVALID if the chip reports a different response length.
 * NOTE(review): this listing dropped the "cmd->resp[n] =" left-hand
 * sides and the early "return" after the error — recover from the
 * original wbsd.c.
 */
390 static inline void wbsd_get_short_reply(struct wbsd_host* host,
391 struct mmc_command* cmd)
394 * Correct response type?
396 if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_SHORT)
398 cmd->error = MMC_ERR_INVALID;
403 wbsd_read_index(host, WBSD_IDX_RESP12) << 24;
405 wbsd_read_index(host, WBSD_IDX_RESP13) << 16;
407 wbsd_read_index(host, WBSD_IDX_RESP14) << 8;
409 wbsd_read_index(host, WBSD_IDX_RESP15) << 0;
411 wbsd_read_index(host, WBSD_IDX_RESP16) << 24;
/*
 * wbsd_get_long_reply - read a 128-bit (long) command response out of
 * the RESP1..RESP4 index register banks into cmd->resp[0..3],
 * flagging MMC_ERR_INVALID on a response-length mismatch.
 * NOTE(review): this listing dropped the "cmd->resp[i] =" left-hand
 * sides, the "int i" declaration and the early return — recover from
 * the original wbsd.c.
 */
414 static inline void wbsd_get_long_reply(struct wbsd_host* host,
415 struct mmc_command* cmd)
420 * Correct response type?
422 if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_LONG)
424 cmd->error = MMC_ERR_INVALID;
428 for (i = 0;i < 4;i++)
431 wbsd_read_index(host, WBSD_IDX_RESP1 + i * 4) << 24;
433 wbsd_read_index(host, WBSD_IDX_RESP2 + i * 4) << 16;
435 wbsd_read_index(host, WBSD_IDX_RESP3 + i * 4) << 8;
437 wbsd_read_index(host, WBSD_IDX_RESP4 + i * 4) << 0;
/*
 * wbsd_send_command - write the opcode and the four argument bytes
 * (MSB first) to the command register, busy-wait until the chip
 * reports the card traffic done, then translate the accumulated ISR
 * bits into an MMC error code and fetch the response if one is
 * expected.
 * NOTE(review): this listing dropped lines (local decls, the
 * "host->isr = 0" clear, the do{ of the wait loop, the isr snapshot)
 * — recover from the original wbsd.c.
 */
441 static void wbsd_send_command(struct wbsd_host* host, struct mmc_command* cmd)
446 DBGF("Sending cmd (%x)\n", cmd->opcode);
449 * Clear accumulated ISR. The interrupt routine
450 * will fill this one with events that occur during
456 * Send the command (CRC calculated by host).
458 outb(cmd->opcode, host->base + WBSD_CMDR);
459 for (i = 3;i >= 0;i--)
460 outb((cmd->arg >> (i * 8)) & 0xff, host->base + WBSD_CMDR);
462 cmd->error = MMC_ERR_NONE;
465 * Wait for the request to complete.
468 status = wbsd_read_index(host, WBSD_IDX_STATUS);
469 } while (status & WBSD_CARDTRAFFIC);
472 * Do we expect a reply?
474 if ((cmd->flags & MMC_RSP_MASK) != MMC_RSP_NONE)
/* Card-removal, timeout and CRC events all abort the command. */
482 if (isr & WBSD_INT_CARD)
483 cmd->error = MMC_ERR_TIMEOUT;
485 else if (isr & WBSD_INT_TIMEOUT)
486 cmd->error = MMC_ERR_TIMEOUT;
488 else if ((cmd->flags & MMC_RSP_CRC) && (isr & WBSD_INT_CRC))
489 cmd->error = MMC_ERR_BADCRC;
493 if ((cmd->flags & MMC_RSP_MASK) == MMC_RSP_SHORT)
494 wbsd_get_short_reply(host, cmd);
496 wbsd_get_long_reply(host, cmd);
500 DBGF("Sent cmd (%x), res %d\n", cmd->opcode, cmd->error);
/*
 * wbsd_empty_fifo - drain the data FIFO into the current scatter list
 * entry during a read.  The FSR size field is broken, so the amount
 * to read is inferred from the FULL/threshold flags; the scatter list
 * is advanced as entries fill up, and the tasklet is rescheduled near
 * the end of the transfer to work around lost FIFO interrupts.
 * NOTE(review): this listing dropped lines (local decls, the fifo
 * count assignments after each condition, buffer/remain bookkeeping,
 * returns and braces) — recover from the original wbsd.c.
 */
507 static void wbsd_empty_fifo(struct wbsd_host* host)
509 struct mmc_data* data = host->mrq->cmd->data;
514 * Handle excessive data.
516 if (data->bytes_xfered == host->size)
519 buffer = wbsd_kmap_sg(host) + host->offset;
522 * Drain the fifo. This has a tendency to loop longer
523 * than the FIFO length (usually one block).
525 while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_EMPTY))
528 * The size field in the FSR is broken so we have to
531 if (fsr & WBSD_FIFO_FULL)
533 else if (fsr & WBSD_FIFO_FUTHRE)
538 for (i = 0;i < fifo;i++)
540 *buffer = inb(host->base + WBSD_DFR);
545 data->bytes_xfered++;
/* Transfer complete? Unmap and bail out. */
550 if (data->bytes_xfered == host->size)
552 wbsd_kunmap_sg(host);
557 * End of scatter list entry?
559 if (host->remain == 0)
561 wbsd_kunmap_sg(host);
564 * Get next entry. Check if last.
566 if (!wbsd_next_sg(host))
569 * We should never reach this point.
570 * It means that we're trying to
571 * transfer more blocks than can fit
572 * into the scatter list.
576 host->size = data->bytes_xfered;
581 buffer = wbsd_kmap_sg(host);
586 wbsd_kunmap_sg(host);
589 * This is a very dirty hack to solve a
590 * hardware problem. The chip doesn't trigger
591 * FIFO threshold interrupts properly.
593 if ((host->size - data->bytes_xfered) < 16)
594 tasklet_schedule(&host->fifo_tasklet);
/*
 * wbsd_fill_fifo - mirror of wbsd_empty_fifo() for writes: push bytes
 * from the current scatter list entry into the data FIFO until it is
 * full, advancing through the scatter list, and proactively
 * reschedule the tasklet since the chip can stop raising FIFO-empty
 * interrupts.
 * NOTE(review): same dropped-lines caveat as wbsd_empty_fifo() —
 * recover local decls, fifo count assignments and bookkeeping from
 * the original wbsd.c.
 */
597 static void wbsd_fill_fifo(struct wbsd_host* host)
599 struct mmc_data* data = host->mrq->cmd->data;
604 * Check that we aren't being called after the
605 * entire buffer has been transfered.
607 if (data->bytes_xfered == host->size)
610 buffer = wbsd_kmap_sg(host) + host->offset;
613 * Fill the fifo. This has a tendency to loop longer
614 * than the FIFO length (usually one block).
616 while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_FULL))
619 * The size field in the FSR is broken so we have to
622 if (fsr & WBSD_FIFO_EMPTY)
624 else if (fsr & WBSD_FIFO_EMTHRE)
/* The 16-byte FIFO is topped up from the current fill level. */
629 for (i = 16;i > fifo;i--)
631 outb(*buffer, host->base + WBSD_DFR);
636 data->bytes_xfered++;
641 if (data->bytes_xfered == host->size)
643 wbsd_kunmap_sg(host);
648 * End of scatter list entry?
650 if (host->remain == 0)
652 wbsd_kunmap_sg(host);
655 * Get next entry. Check if last.
657 if (!wbsd_next_sg(host))
660 * We should never reach this point.
661 * It means that we're trying to
662 * transfer more blocks than can fit
663 * into the scatter list.
667 host->size = data->bytes_xfered;
672 buffer = wbsd_kmap_sg(host);
677 wbsd_kunmap_sg(host);
680 * The controller stops sending interrupts for
681 * 'FIFO empty' under certain conditions. So we
682 * need to be a bit more pro-active.
684 tasklet_schedule(&host->fifo_tasklet);
/*
 * wbsd_prepare_data - program the chip for a data transfer: timeout
 * registers (clamped against overflow), block size including CRC
 * bytes (2 per data line, so +2 for 1-bit and +8 for 4-bit bus),
 * FIFO reset, and then either the ISA DMA path (bounce buffer,
 * 64 kB max) or the PIO path (FIFO thresholds, pre-fill on write).
 * NOTE(review): this listing dropped many lines (local decls, the
 * DMA-vs-PIO branch headers, "return"s, braces, the MSB write split
 * across lines 736-737) — recover from the original wbsd.c.
 */
687 static void wbsd_prepare_data(struct wbsd_host* host, struct mmc_data* data)
691 unsigned long dmaflags;
693 DBGF("blksz %04x blks %04x flags %08x\n",
694 1 << data->blksz_bits, data->blocks, data->flags);
695 DBGF("tsac %d ms nsac %d clk\n",
696 data->timeout_ns / 1000000, data->timeout_clks);
/* Total transfer size in bytes. */
701 host->size = data->blocks << data->blksz_bits;
704 * Check timeout values for overflow.
705 * (Yes, some cards cause this value to overflow).
707 if (data->timeout_ns > 127000000)
708 wbsd_write_index(host, WBSD_IDX_TAAC, 127);
710 wbsd_write_index(host, WBSD_IDX_TAAC, data->timeout_ns/1000000);
712 if (data->timeout_clks > 255)
713 wbsd_write_index(host, WBSD_IDX_NSAC, 255);
715 wbsd_write_index(host, WBSD_IDX_NSAC, data->timeout_clks);
718 * Inform the chip of how large blocks will be
719 * sent. It needs this to determine when to
722 * Space for CRC must be included in the size.
723 * Two bytes are needed for each data line.
725 if (host->bus_width == MMC_BUS_WIDTH_1)
727 blksize = (1 << data->blksz_bits) + 2;
729 wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0);
730 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
732 else if (host->bus_width == MMC_BUS_WIDTH_4)
734 blksize = (1 << data->blksz_bits) + 2 * 4;
736 wbsd_write_index(host, WBSD_IDX_PBSMSB, ((blksize >> 4) & 0xF0)
738 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
/* Unknown bus width: refuse the request. */
742 data->error = MMC_ERR_INVALID;
747 * Clear the FIFO. This is needed even for DMA
748 * transfers since the chip still uses the FIFO
751 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
752 setup |= WBSD_FIFO_RESET;
753 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
761 * The buffer for DMA is only 64 kB.
763 BUG_ON(host->size > 0x10000);
764 if (host->size > 0x10000)
766 data->error = MMC_ERR_INVALID;
771 * Transfer data from the SG list to
774 if (data->flags & MMC_DATA_WRITE)
775 wbsd_sg_to_dma(host, data);
778 * Initialise the ISA DMA controller.
780 dmaflags = claim_dma_lock();
781 disable_dma(host->dma);
782 clear_dma_ff(host->dma);
/* Clearing bit 0x40 selects single-transfer (demand) mode. */
783 if (data->flags & MMC_DATA_READ)
784 set_dma_mode(host->dma, DMA_MODE_READ & ~0x40);
786 set_dma_mode(host->dma, DMA_MODE_WRITE & ~0x40);
787 set_dma_addr(host->dma, host->dma_addr);
788 set_dma_count(host->dma, host->size);
790 enable_dma(host->dma);
791 release_dma_lock(dmaflags);
794 * Enable DMA on the host.
796 wbsd_write_index(host, WBSD_IDX_DMA, WBSD_DMA_ENABLE);
801 * This flag is used to keep printk
802 * output to a minimum.
807 * Initialise the SG list.
809 wbsd_init_sg(host, data);
/* PIO path: make sure DMA is off on the chip. */
814 wbsd_write_index(host, WBSD_IDX_DMA, 0);
817 * Set up FIFO threshold levels (and fill
818 * buffer if doing a write).
820 if (data->flags & MMC_DATA_READ)
822 wbsd_write_index(host, WBSD_IDX_FIFOEN,
823 WBSD_FIFOEN_FULL | 8);
827 wbsd_write_index(host, WBSD_IDX_FIFOEN,
828 WBSD_FIFOEN_EMPTY | 8);
829 wbsd_fill_fifo(host);
833 data->error = MMC_ERR_NONE;
/*
 * wbsd_finish_data - complete a data transfer: issue the stop command
 * if the request has one, wait for the chip to leave the data state,
 * then (on the DMA path) shut the ISA DMA channel down, check the
 * residue for an incomplete transfer and copy read data back from the
 * bounce buffer.  Ends the whole request.
 * NOTE(review): this listing dropped lines (local decls, the
 * "if (data->stop)" and DMA-path guards, braces) — recover from the
 * original wbsd.c.
 */
836 static void wbsd_finish_data(struct wbsd_host* host, struct mmc_data* data)
838 unsigned long dmaflags;
842 WARN_ON(host->mrq == NULL);
845 * Send a stop command if needed.
848 wbsd_send_command(host, data->stop);
851 * Wait for the controller to leave data
856 status = wbsd_read_index(host, WBSD_IDX_STATUS);
857 } while (status & (WBSD_BLOCK_READ | WBSD_BLOCK_WRITE));
865 * Disable DMA on the host.
867 wbsd_write_index(host, WBSD_IDX_DMA, 0);
870 * Turn of ISA DMA controller.
872 dmaflags = claim_dma_lock();
873 disable_dma(host->dma);
874 clear_dma_ff(host->dma);
875 count = get_dma_residue(host->dma);
876 release_dma_lock(dmaflags);
/* A non-zero residue means the DMA transfer did not complete. */
883 printk(KERN_ERR DRIVER_NAME ": Incomplete DMA "
884 "transfer. %d bytes left.\n", count);
886 data->error = MMC_ERR_FAILED;
891 * Transfer data from DMA buffer to
894 if (data->flags & MMC_DATA_READ)
895 wbsd_dma_to_sg(host, data);
897 data->bytes_xfered = host->size;
901 DBGF("Ending data transfer (%d bytes)\n", data->bytes_xfered);
903 wbsd_request_end(host, host->mrq);
906 /*****************************************************************************\
908 * MMC layer callbacks *
910 \*****************************************************************************/
/*
 * wbsd_request - MMC core entry point for starting a request.  Takes
 * the host lock with bottom halves disabled (tasklets share the
 * lock), times out immediately when no card is present, prepares any
 * data phase, sends the command and either returns (data requests
 * complete from the FIFO tasklet) or ends the request.
 * NOTE(review): this listing dropped lines ("host->mrq = mrq",
 * "cmd = mrq->cmd", the "done:" label, braces) — recover from the
 * original wbsd.c.
 */
912 static void wbsd_request(struct mmc_host* mmc, struct mmc_request* mrq)
914 struct wbsd_host* host = mmc_priv(mmc);
915 struct mmc_command* cmd;
918 * Disable tasklets to avoid a deadlock.
920 spin_lock_bh(&host->lock);
922 BUG_ON(host->mrq != NULL);
929 * If there is no card in the slot then
930 * timeout immediatly.
932 if (!(host->flags & WBSD_FCARD_PRESENT))
934 cmd->error = MMC_ERR_TIMEOUT;
939 * Does the request include data?
943 wbsd_prepare_data(host, cmd->data);
945 if (cmd->data->error != MMC_ERR_NONE)
949 wbsd_send_command(host, cmd);
952 * If this is a data transfer the request
953 * will be finished after the data has
956 if (cmd->data && (cmd->error == MMC_ERR_NONE))
959 * Dirty fix for hardware bug.
962 tasklet_schedule(&host->fifo_tasklet);
964 spin_unlock_bh(&host->lock);
/* Error/no-data path: finish the request immediately. */
970 wbsd_request_end(host, mrq);
972 spin_unlock_bh(&host->lock);
/*
 * wbsd_set_ios - MMC core callback for bus settings: re-init on power
 * off, map the requested clock to the closest chip divider (only
 * written on change), drive the power enable, handle the DAT3-high
 * chip-select trick used during MMC init (which forces card detection
 * to be ignored for a short while), and remember the bus width.
 * NOTE(review): this listing dropped lines (local decls "clk", "pwr",
 * "setup", the 24/16/12 MHz clk assignments, braces) — recover from
 * the original wbsd.c.
 */
975 static void wbsd_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
977 struct wbsd_host* host = mmc_priv(mmc);
980 DBGF("clock %uHz busmode %u powermode %u cs %u Vdd %u width %u\n",
981 ios->clock, ios->bus_mode, ios->power_mode, ios->chip_select,
982 ios->vdd, ios->bus_width);
984 spin_lock_bh(&host->lock);
987 * Reset the chip on each power off.
988 * Should clear out any weird states.
990 if (ios->power_mode == MMC_POWER_OFF)
991 wbsd_init_device(host);
993 if (ios->clock >= 24000000)
995 else if (ios->clock >= 16000000)
997 else if (ios->clock >= 12000000)
1000 clk = WBSD_CLK_375K;
1003 * Only write to the clock register when
1004 * there is an actual change.
1006 if (clk != host->clk)
1008 wbsd_write_index(host, WBSD_IDX_CLK, clk);
/* Power the socket whenever the core asks for anything but OFF. */
1015 if (ios->power_mode != MMC_POWER_OFF)
1017 pwr = inb(host->base + WBSD_CSR);
1018 pwr &= ~WBSD_POWER_N;
1019 outb(pwr, host->base + WBSD_CSR);
1023 * MMC cards need to have pin 1 high during init.
1024 * It wreaks havoc with the card detection though so
1025 * that needs to be disabled.
1027 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
1028 if (ios->chip_select == MMC_CS_HIGH)
1030 BUG_ON(ios->bus_width != MMC_BUS_WIDTH_1);
1031 setup |= WBSD_DAT3_H;
1032 host->flags |= WBSD_FIGNORE_DETECT;
1036 setup &= ~WBSD_DAT3_H;
1039 * We cannot resume card detection immediatly
1040 * because of capacitance and delays in the chip.
1042 mod_timer(&host->ignore_timer, jiffies + HZ/100);
1044 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
1047 * Store bus width for later. Will be used when
1048 * setting up the data transfer.
1050 host->bus_width = ios->bus_width;
1052 spin_unlock_bh(&host->lock);
/*
 * wbsd_get_ro - MMC core callback: sample the write-protect bit from
 * the CSR.  The read is done twice with a CSR write in between; the
 * dropped lines presumably toggle the MS/LED bit around the sample —
 * TODO(review): confirm against the original wbsd.c.
 */
1055 static int wbsd_get_ro(struct mmc_host* mmc)
1057 struct wbsd_host* host = mmc_priv(mmc);
1060 spin_lock_bh(&host->lock);
1062 csr = inb(host->base + WBSD_CSR);
1064 outb(csr, host->base + WBSD_CSR);
1068 csr = inb(host->base + WBSD_CSR);
1070 outb(csr, host->base + WBSD_CSR);
1072 spin_unlock_bh(&host->lock);
/* Non-zero return means the card is write protected. */
1074 return csr & WBSD_WRPT;
1077 static struct mmc_host_ops wbsd_ops = {
1078 .request = wbsd_request,
1079 .set_ios = wbsd_set_ios,
1080 .get_ro = wbsd_get_ro,
1083 /*****************************************************************************\
1085 * Interrupt handling *
1087 \*****************************************************************************/
1090 * Helper function to reset detection ignore
1093 static void wbsd_reset_ignore(unsigned long data)
1095 struct wbsd_host *host = (struct wbsd_host*)data;
1097 BUG_ON(host == NULL);
1099 DBG("Resetting card detection ignore\n");
1101 spin_lock_bh(&host->lock);
1103 host->flags &= ~WBSD_FIGNORE_DETECT;
1106 * Card status might have changed during the
1109 tasklet_schedule(&host->card_tasklet);
1111 spin_unlock_bh(&host->lock);
/*
 * wbsd_get_data - tasklet helper: fetch the data structure of the
 * current request, warning on (and, in the original, returning NULL
 * for) any missing link in mrq -> cmd -> data.
 * NOTE(review): this listing dropped the "return NULL;" bodies of the
 * guard ifs and the first "if (!host->mrq)" — recover from the
 * original wbsd.c.
 */
1118 static inline struct mmc_data* wbsd_get_data(struct wbsd_host* host)
1120 WARN_ON(!host->mrq);
1124 WARN_ON(!host->mrq->cmd);
1125 if (!host->mrq->cmd)
1128 WARN_ON(!host->mrq->cmd->data);
1129 if (!host->mrq->cmd->data)
1132 return host->mrq->cmd->data;
/*
 * wbsd_tasklet_card - card-detect tasklet: bail out while detection
 * is being ignored, sample the CSR, update WBSD_FCARD_PRESENT and, on
 * removal during a transfer, reset the chip and fail the request.
 * Notifies the MMC core (lock dropped first, since it may call back).
 * NOTE(review): this listing dropped lines (the "delay" variable and
 * its assignments, the in-transfer guard, braces) — recover from the
 * original wbsd.c.
 */
1135 static void wbsd_tasklet_card(unsigned long param)
1137 struct wbsd_host* host = (struct wbsd_host*)param;
1141 spin_lock(&host->lock);
1143 if (host->flags & WBSD_FIGNORE_DETECT)
1145 spin_unlock(&host->lock);
1149 csr = inb(host->base + WBSD_CSR);
/* 0xff usually means the chip has gone away entirely. */
1150 WARN_ON(csr == 0xff);
1152 if (csr & WBSD_CARDPRESENT)
1154 if (!(host->flags & WBSD_FCARD_PRESENT))
1156 DBG("Card inserted\n");
1157 host->flags |= WBSD_FCARD_PRESENT;
1162 else if (host->flags & WBSD_FCARD_PRESENT)
1164 DBG("Card removed\n");
1165 host->flags &= ~WBSD_FCARD_PRESENT;
1169 printk(KERN_ERR DRIVER_NAME
1170 ": Card removed during transfer!\n");
1173 host->mrq->cmd->error = MMC_ERR_FAILED;
1174 tasklet_schedule(&host->finish_tasklet);
1181 * Unlock first since we might get a call back.
1184 spin_unlock(&host->lock);
1187 mmc_detect_change(host->mmc, msecs_to_jiffies(delay));
/*
 * wbsd_tasklet_fifo - FIFO tasklet: fill or drain the FIFO depending
 * on transfer direction, and schedule the finish tasklet once the
 * whole transfer has been moved.
 * NOTE(review): this listing dropped the NULL-data guard and the
 * "end:" label — recover from the original wbsd.c.
 */
1190 static void wbsd_tasklet_fifo(unsigned long param)
1192 struct wbsd_host* host = (struct wbsd_host*)param;
1193 struct mmc_data* data;
1195 spin_lock(&host->lock);
1200 data = wbsd_get_data(host);
1204 if (data->flags & MMC_DATA_WRITE)
1205 wbsd_fill_fifo(host);
1207 wbsd_empty_fifo(host);
/* Done? Mask the FIFO interrupt and complete the request. */
1212 if (host->size == data->bytes_xfered)
1214 wbsd_write_index(host, WBSD_IDX_FIFOEN, 0);
1215 tasklet_schedule(&host->finish_tasklet);
1219 spin_unlock(&host->lock);
/*
 * wbsd_tasklet_crc - CRC-error tasklet: mark the current data
 * transfer as failed with MMC_ERR_BADCRC and finish the request.
 * NOTE(review): this listing dropped the NULL-data guard and the
 * "end:" label — recover from the original wbsd.c.
 */
1222 static void wbsd_tasklet_crc(unsigned long param)
1224 struct wbsd_host* host = (struct wbsd_host*)param;
1225 struct mmc_data* data;
1227 spin_lock(&host->lock);
1232 data = wbsd_get_data(host);
1236 DBGF("CRC error\n");
1238 data->error = MMC_ERR_BADCRC;
1240 tasklet_schedule(&host->finish_tasklet);
1243 spin_unlock(&host->lock);
/*
 * wbsd_tasklet_timeout - data-timeout tasklet: mark the current data
 * transfer as timed out and finish the request.
 * NOTE(review): this listing dropped the NULL-data guard and the
 * "end:" label — recover from the original wbsd.c.
 */
1246 static void wbsd_tasklet_timeout(unsigned long param)
1248 struct wbsd_host* host = (struct wbsd_host*)param;
1249 struct mmc_data* data;
1251 spin_lock(&host->lock);
1256 data = wbsd_get_data(host);
1262 data->error = MMC_ERR_TIMEOUT;
1264 tasklet_schedule(&host->finish_tasklet);
1267 spin_unlock(&host->lock);
/*
 * wbsd_tasklet_finish - completion tasklet: fetch the current data
 * transfer and run the common finish path (stop command, DMA
 * teardown, request completion).
 * NOTE(review): this listing dropped the NULL-data guard and the
 * "end:" label — recover from the original wbsd.c.
 */
1270 static void wbsd_tasklet_finish(unsigned long param)
1272 struct wbsd_host* host = (struct wbsd_host*)param;
1273 struct mmc_data* data;
1275 spin_lock(&host->lock);
1277 WARN_ON(!host->mrq);
1281 data = wbsd_get_data(host);
1285 wbsd_finish_data(host, data);
1288 spin_unlock(&host->lock);
/*
 * wbsd_tasklet_block - per-block tasklet (BUSYEND): check the CRC
 * status register after each block and fail the transfer with
 * MMC_ERR_BADCRC on mismatch.
 * NOTE(review): this listing dropped the comparison's right-hand
 * constant (the expected WBSD_CRC_* value), the NULL-data guard and
 * the "end:" label — recover from the original wbsd.c.
 */
1291 static void wbsd_tasklet_block(unsigned long param)
1293 struct wbsd_host* host = (struct wbsd_host*)param;
1294 struct mmc_data* data;
1296 spin_lock(&host->lock);
1298 if ((wbsd_read_index(host, WBSD_IDX_CRCSTATUS) & WBSD_CRC_MASK) !=
1301 data = wbsd_get_data(host);
1305 DBGF("CRC error\n");
1307 data->error = MMC_ERR_BADCRC;
1309 tasklet_schedule(&host->finish_tasklet);
1313 spin_unlock(&host->lock);
1317 * Interrupt handling
/*
 * wbsd_irq - interrupt handler.  Reads the ISR, returns IRQ_NONE for
 * 0x00/0xff (shared line, not ours / chip gone), and defers all real
 * work to the per-event tasklets.  CRC/timeout/block events are
 * scheduled with high priority so errors are seen before completion.
 * NOTE(review): this listing dropped lines (the "host->isr |= isr"
 * accumulation used by wbsd_send_command, the return statements,
 * braces) — recover from the original wbsd.c.
 */
1320 static irqreturn_t wbsd_irq(int irq, void *dev_id, struct pt_regs *regs)
1322 struct wbsd_host* host = dev_id;
1325 isr = inb(host->base + WBSD_ISR);
1328 * Was it actually our hardware that caused the interrupt?
1330 if (isr == 0xff || isr == 0x00)
1336 * Schedule tasklets as needed.
1338 if (isr & WBSD_INT_CARD)
1339 tasklet_schedule(&host->card_tasklet);
1340 if (isr & WBSD_INT_FIFO_THRE)
1341 tasklet_schedule(&host->fifo_tasklet);
1342 if (isr & WBSD_INT_CRC)
1343 tasklet_hi_schedule(&host->crc_tasklet);
1344 if (isr & WBSD_INT_TIMEOUT)
1345 tasklet_hi_schedule(&host->timeout_tasklet);
1346 if (isr & WBSD_INT_BUSYEND)
1347 tasklet_hi_schedule(&host->block_tasklet);
1348 if (isr & WBSD_INT_TC)
1349 tasklet_schedule(&host->finish_tasklet);
1354 /*****************************************************************************\
1356 * Device initialisation and shutdown *
1358 \*****************************************************************************/
1361 * Allocate/free MMC structure.
/*
 * wbsd_alloc_mmc - allocate the mmc_host plus driver-private state,
 * fill in host operations, frequency range (375 kHz - 24 MHz), OCR
 * mask and 4-bit capability, init the lock and the ignore timer, and
 * set the transfer limits implied by the 64 kB DMA bounce buffer.
 * Stashes the mmc_host in the device's drvdata.
 * NOTE(review): this listing dropped lines (the NULL check after
 * mmc_alloc_host, "host->mmc = mmc", "host->dma = -1", "return 0",
 * braces) — recover from the original wbsd.c.
 */
1364 static int __devinit wbsd_alloc_mmc(struct device* dev)
1366 struct mmc_host* mmc;
1367 struct wbsd_host* host;
1370 * Allocate MMC structure.
1372 mmc = mmc_alloc_host(sizeof(struct wbsd_host), dev);
1376 host = mmc_priv(mmc);
1382 * Set host parameters.
1384 mmc->ops = &wbsd_ops;
1385 mmc->f_min = 375000;
1386 mmc->f_max = 24000000;
1387 mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
1388 mmc->caps = MMC_CAP_4_BIT_DATA;
1390 spin_lock_init(&host->lock);
/* Timer that re-enables card detection after the DAT3-high trick. */
1395 init_timer(&host->ignore_timer);
1396 host->ignore_timer.data = (unsigned long)host;
1397 host->ignore_timer.function = wbsd_reset_ignore;
1400 * Maximum number of segments. Worst case is one sector per segment
1401 * so this will be 64kB/512.
1403 mmc->max_hw_segs = 128;
1404 mmc->max_phys_segs = 128;
1407 * Maximum number of sectors in one transfer. Also limited by 64kB
1410 mmc->max_sectors = 128;
1413 * Maximum segment size. Could be one segment with the maximum number
1416 mmc->max_seg_size = mmc->max_sectors * 512;
1418 dev_set_drvdata(dev, mmc);
/*
 * wbsd_free_mmc - undo wbsd_alloc_mmc(): stop the ignore timer, free
 * the mmc_host and clear the device's drvdata.
 * NOTE(review): this listing dropped lines (the "if (!mmc) return"
 * guard and the mmc_free_host() call) — recover from the original
 * wbsd.c.
 */
1423 static void __devexit wbsd_free_mmc(struct device* dev)
1425 struct mmc_host* mmc;
1426 struct wbsd_host* host;
1428 mmc = dev_get_drvdata(dev);
1432 host = mmc_priv(mmc);
1433 BUG_ON(host == NULL);
/* Make sure the timer callback cannot fire after the host is freed. */
1435 del_timer_sync(&host->ignore_timer);
1439 dev_set_drvdata(dev, NULL);
1443 * Scan for known chip id:s
/*
 * wbsd_scan - probe the known Super-I/O config ports with each unlock
 * code, read the 16-bit chip ID and match it against valid_ids.  On
 * success records the config port and unlock code in the host; on
 * mismatch logs the unknown ID, re-locks and releases the region.
 * NOTE(review): this listing dropped lines (loop braces, the
 * host->chip_id store and "return 0" on a match, the final
 * "return -ENODEV").  Also suspicious in the surviving text: line
 * 1479 uses unlock_codes[i] where the loop variable for unlock codes
 * is j — verify against the original wbsd.c (which stores
 * unlock_codes[j]).
 */
1446 static int __devinit wbsd_scan(struct wbsd_host* host)
1452 * Iterate through all ports, all codes to
1453 * find hardware that is in our known list.
1455 for (i = 0;i < sizeof(config_ports)/sizeof(int);i++)
1457 if (!request_region(config_ports[i], 2, DRIVER_NAME))
1460 for (j = 0;j < sizeof(unlock_codes)/sizeof(int);j++)
/* Unlock the config space (code written twice) and read the ID. */
1464 outb(unlock_codes[j], config_ports[i]);
1465 outb(unlock_codes[j], config_ports[i]);
1467 outb(WBSD_CONF_ID_HI, config_ports[i]);
1468 id = inb(config_ports[i] + 1) << 8;
1470 outb(WBSD_CONF_ID_LO, config_ports[i]);
1471 id |= inb(config_ports[i] + 1);
1473 for (k = 0;k < sizeof(valid_ids)/sizeof(int);k++)
1475 if (id == valid_ids[k])
1478 host->config = config_ports[i];
1479 host->unlock_code = unlock_codes[i];
1487 DBG("Unknown hardware (id %x) found at %x\n",
1488 id, config_ports[i]);
/* Not ours: re-lock the config space and move on. */
1491 outb(LOCK_CODE, config_ports[i]);
1494 release_region(config_ports[i], 2);
1501 * Allocate/free io port ranges
/*
 * wbsd_request_region - claim the 8-byte I/O window at "base".
 * NOTE(review): this listing dropped the body after the request
 * (the alignment check, "host->base = base" and the returns) —
 * recover from the original wbsd.c.
 */
1504 static int __devinit wbsd_request_region(struct wbsd_host* host, int base)
1509 if (!request_region(base, 8, DRIVER_NAME))
/*
 * wbsd_release_regions - release the 8-byte I/O window and the 2-byte
 * config-port region.
 * NOTE(review): this listing dropped the "if (host->base)" /
 * "if (host->config)" guards and the field resets — recover from the
 * original wbsd.c.
 */
1517 static void __devexit wbsd_release_regions(struct wbsd_host* host)
1520 release_region(host->base, 8);
1525 release_region(host->config, 2);
1531 * Allocate/free DMA port and buffer
/*
 * wbsd_request_dma - best-effort ISA DMA setup: claim the channel,
 * allocate a DMA-able bounce buffer, map it and verify the two ISA
 * constraints (64 kB alignment, below 16 MB).  Any failure falls
 * back to FIFO/PIO mode with a warning; the function never fails the
 * probe.
 * NOTE(review): this listing dropped lines (the "goto" targets such
 * as "err:"/"free:", "host->dma = dma", braces) — recover from the
 * original wbsd.c.
 */
1534 static void __devinit wbsd_request_dma(struct wbsd_host* host, int dma)
1539 if (request_dma(dma, DRIVER_NAME))
1543 * We need to allocate a special buffer in
1544 * order for ISA to be able to DMA to it.
1546 host->dma_buffer = kmalloc(WBSD_DMA_SIZE,
1547 GFP_NOIO | GFP_DMA | __GFP_REPEAT | __GFP_NOWARN);
1548 if (!host->dma_buffer)
1552 * Translate the address to a physical address.
1554 host->dma_addr = dma_map_single(host->mmc->dev, host->dma_buffer,
1555 WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
1558 * ISA DMA must be aligned on a 64k basis.
1560 if ((host->dma_addr & 0xffff) != 0)
1563 * ISA cannot access memory above 16 MB.
1565 else if (host->dma_addr >= 0x1000000)
1574 * If we've gotten here then there is some kind of alignment bug
/* Error path: unmap, drop the buffer and fall back to PIO. */
1578 dma_unmap_single(host->mmc->dev, host->dma_addr, WBSD_DMA_SIZE,
1580 host->dma_addr = (dma_addr_t)NULL;
1582 kfree(host->dma_buffer);
1583 host->dma_buffer = NULL;
1589 printk(KERN_WARNING DRIVER_NAME ": Unable to allocate DMA %d. "
1590 "Falling back on FIFO.\n", dma);
/*
 * wbsd_release_dma - undo wbsd_request_dma(): unmap and free the
 * bounce buffer, free the channel and reset the bookkeeping fields.
 * NOTE(review): this listing dropped the guards ("if (host->dma_addr)",
 * "if (host->dma >= 0)") and the "host->dma = -1" reset — recover
 * from the original wbsd.c.  The "if (host->dma_buffer) kfree(...)"
 * guard is redundant anyway: kfree(NULL) is a no-op.
 */
1593 static void __devexit wbsd_release_dma(struct wbsd_host* host)
1596 dma_unmap_single(host->mmc->dev, host->dma_addr, WBSD_DMA_SIZE,
1598 if (host->dma_buffer)
1599 kfree(host->dma_buffer);
1601 free_dma(host->dma);
1604 host->dma_buffer = NULL;
1605 host->dma_addr = (dma_addr_t)NULL;
1609 * Allocate/free IRQ.
/*
 * wbsd_request_irq - claim the (shared) interrupt line and set up the
 * six tasklets the IRQ handler schedules.
 * NOTE(review): this listing dropped lines (the error return after
 * request_irq, "host->irq = irq" and "return 0") — recover from the
 * original wbsd.c.
 */
1612 static int __devinit wbsd_request_irq(struct wbsd_host* host, int irq)
1617 * Allocate interrupt.
1620 ret = request_irq(irq, wbsd_irq, SA_SHIRQ, DRIVER_NAME, host);
1629 tasklet_init(&host->card_tasklet, wbsd_tasklet_card, (unsigned long)host);
1630 tasklet_init(&host->fifo_tasklet, wbsd_tasklet_fifo, (unsigned long)host);
1631 tasklet_init(&host->crc_tasklet, wbsd_tasklet_crc, (unsigned long)host);
1632 tasklet_init(&host->timeout_tasklet, wbsd_tasklet_timeout, (unsigned long)host);
1633 tasklet_init(&host->finish_tasklet, wbsd_tasklet_finish, (unsigned long)host);
1634 tasklet_init(&host->block_tasklet, wbsd_tasklet_block, (unsigned long)host);
/*
 * wbsd_release_irq - free the interrupt line and kill all tasklets so
 * none can run after teardown.
 * NOTE(review): this listing dropped the "if (!host->irq) return"
 * guard and the "host->irq = 0" reset — recover from the original
 * wbsd.c.
 */
1639 static void __devexit wbsd_release_irq(struct wbsd_host* host)
1644 free_irq(host->irq, host);
1648 tasklet_kill(&host->card_tasklet);
1649 tasklet_kill(&host->fifo_tasklet);
1650 tasklet_kill(&host->crc_tasklet);
1651 tasklet_kill(&host->timeout_tasklet);
1652 tasklet_kill(&host->finish_tasklet);
1653 tasklet_kill(&host->block_tasklet);
1657 * Allocate all resources for the host.
/*
 * wbsd_request_resources - acquire everything the host needs: I/O
 * region, IRQ (both mandatory) and DMA (best effort, see
 * wbsd_request_dma()).
 * NOTE(review): this listing dropped the error-return checks after
 * each step and the final "return 0" — recover from the original
 * wbsd.c.
 */
1660 static int __devinit wbsd_request_resources(struct wbsd_host* host,
1661 int base, int irq, int dma)
1666 * Allocate I/O ports.
1668 ret = wbsd_request_region(host, base);
1673 * Allocate interrupt.
1675 ret = wbsd_request_irq(host, irq);
1682 wbsd_request_dma(host, dma);
1688 * Release all resources for the host.
1691 static void __devexit wbsd_release_resources(struct wbsd_host* host)
1693 wbsd_release_dma(host);
1694 wbsd_release_irq(host);
1695 wbsd_release_regions(host);
1699 * Configure the resources the chip should use.
/*
 * wbsd_chip_config - program the Super-I/O configuration registers:
 * software reset, select the SD/MMC logical device, route card detect
 * to GP11, set the I/O base, IRQ and (if in use) DMA channel, then
 * enable and power the function.  Caller must hold the config space
 * unlocked.
 * NOTE(review): this listing dropped the unlock/lock bracketing and
 * the "if (host->dma >= 0)" guard before the DRQ write — recover from
 * the original wbsd.c.
 */
1702 static void __devinit wbsd_chip_config(struct wbsd_host* host)
1707 wbsd_write_config(host, WBSD_CONF_SWRST, 1);
1708 wbsd_write_config(host, WBSD_CONF_SWRST, 0);
1711 * Select SD/MMC function.
1713 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
1716 * Set up card detection.
1718 wbsd_write_config(host, WBSD_CONF_PINS, WBSD_PINS_DETECT_GP11);
1723 wbsd_write_config(host, WBSD_CONF_PORT_HI, host->base >> 8);
1724 wbsd_write_config(host, WBSD_CONF_PORT_LO, host->base & 0xff);
1726 wbsd_write_config(host, WBSD_CONF_IRQ, host->irq);
1729 wbsd_write_config(host, WBSD_CONF_DRQ, host->dma);
1732 * Enable and power up chip.
1734 wbsd_write_config(host, WBSD_CONF_ENABLE, 1);
1735 wbsd_write_config(host, WBSD_CONF_POWER, 0x20);
1739 * Check that configured resources are correct.
/*
 * wbsd_chip_validate - read back the I/O base, IRQ and DRQ the BIOS
 * programmed into the config registers and compare them against the
 * resources we actually allocated.  DMA mismatch is ignored when the
 * driver runs without DMA (host->dma == -1).
 * NOTE(review): this listing dropped the local decls, the "return 0"
 * bodies of the mismatch branches and the final "return 1" — recover
 * from the original wbsd.c.
 */
1742 static int __devinit wbsd_chip_validate(struct wbsd_host* host)
1747 * Select SD/MMC function.
1749 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
1752 * Read configuration.
1754 base = wbsd_read_config(host, WBSD_CONF_PORT_HI) << 8;
1755 base |= wbsd_read_config(host, WBSD_CONF_PORT_LO);
1757 irq = wbsd_read_config(host, WBSD_CONF_IRQ);
1759 dma = wbsd_read_config(host, WBSD_CONF_DRQ);
1762 * Validate against given configuration.
1764 if (base != host->base)
1766 if (irq != host->irq)
1768 if ((dma != host->dma) && (host->dma != -1))
1774 /*****************************************************************************\
1776 * Devices setup and shutdown *
1778 \*****************************************************************************/
/*
 * wbsd_init - common probe path for both the platform and PnP
 * drivers: allocate the mmc_host, scan for the chip (tolerating a
 * failed scan under PnP with a warning), grab resources, configure or
 * validate the chip's resource setup, enable PME, reset the device
 * and announce it.
 * NOTE(review): this listing dropped many lines (error unwinding
 * after each step, the wbsd_unlock_config/lock_config bracketing
 * around register writes, mmc_add_host(), the trailing "return 0",
 * braces) — recover from the original wbsd.c.
 */
1780 static int __devinit wbsd_init(struct device* dev, int base, int irq, int dma,
1783 struct wbsd_host* host = NULL;
1784 struct mmc_host* mmc = NULL;
1787 ret = wbsd_alloc_mmc(dev);
1791 mmc = dev_get_drvdata(dev);
1792 host = mmc_priv(mmc);
1795 * Scan for hardware.
1797 ret = wbsd_scan(host);
/* Under PnP, a failed scan is survivable — warn and continue. */
1800 if (pnp && (ret == -ENODEV))
1802 printk(KERN_WARNING DRIVER_NAME
1803 ": Unable to confirm device presence. You may "
1804 "experience lock-ups.\n");
1814 * Request resources.
1816 ret = wbsd_request_resources(host, io, irq, dma);
1819 wbsd_release_resources(host);
1825 * See if chip needs to be configured.
1827 if (pnp && (host->config != 0))
1829 if (!wbsd_chip_validate(host))
1831 printk(KERN_WARNING DRIVER_NAME
1832 ": PnP active but chip not configured! "
1833 "You probably have a buggy BIOS. "
1834 "Configuring chip manually.\n");
1835 wbsd_chip_config(host);
1839 wbsd_chip_config(host);
1842 * Power Management stuff. No idea how this works.
1847 wbsd_write_config(host, WBSD_CONF_PME, 0xA0);
1850 * Allow device to initialise itself properly.
1855 * Reset the chip into a known state.
1857 wbsd_init_device(host);
/* Announce the device: chip id, resources, DMA if available. */
1861 printk(KERN_INFO "%s: W83L51xD", mmc_hostname(mmc));
1862 if (host->chip_id != 0)
1863 printk(" id %x", (int)host->chip_id);
1864 printk(" at 0x%x irq %d", (int)host->base, (int)host->irq);
1866 printk(" dma %d", (int)host->dma);
/*
 * wbsd_shutdown - common remove path: unregister from the MMC core,
 * power down and disable the SD/MMC function through the (unlocked)
 * config registers, release all resources and free the mmc_host.
 * NOTE(review): this listing dropped lines (the "if (!mmc) return"
 * guard, the "if (!pnp)" / config-port guard around the power-down
 * writes, the wbsd_free_mmc() call) — recover from the original
 * wbsd.c.
 */
1876 static void __devexit wbsd_shutdown(struct device* dev, int pnp)
1878 struct mmc_host* mmc = dev_get_drvdata(dev);
1879 struct wbsd_host* host;
1884 host = mmc_priv(mmc);
1886 mmc_remove_host(mmc);
1891 * Power down the SD/MMC function.
1893 wbsd_unlock_config(host);
1894 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
1895 wbsd_write_config(host, WBSD_CONF_ENABLE, 0);
1896 wbsd_lock_config(host);
1899 wbsd_release_resources(host);
1908 static int __devinit wbsd_probe(struct device* dev)
1910 return wbsd_init(dev, io, irq, dma, 0);
1913 static int __devexit wbsd_remove(struct device* dev)
1915 wbsd_shutdown(dev, 0);
/*
 * wbsd_pnp_probe - PnP probe: pull the I/O port, IRQ and (optional)
 * DMA channel out of the PnP resource tables and hand off to the
 * common init path with pnp=1.
 * NOTE(review): this listing dropped the local "io/irq/dma" decls and
 * the "else dma = -1" branch — recover from the original wbsd.c.
 */
1926 static int __devinit
1927 wbsd_pnp_probe(struct pnp_dev * pnpdev, const struct pnp_device_id *dev_id)
1932 * Get resources from PnP layer.
1934 io = pnp_port_start(pnpdev, 0);
1935 irq = pnp_irq(pnpdev, 0);
1936 if (pnp_dma_valid(pnpdev, 0))
1937 dma = pnp_dma(pnpdev, 0);
1941 DBGF("PnP resources: port %3x irq %d dma %d\n", io, irq, dma);
1943 return wbsd_init(&pnpdev->dev, io, irq, dma, 1);
1946 static void __devexit wbsd_pnp_remove(struct pnp_dev * dev)
1948 wbsd_shutdown(&dev->dev, 1);
1951 #endif /* CONFIG_PNP */
1958 static int wbsd_suspend(struct device *dev, pm_message_t state)
1960 DBGF("Not yet supported\n");
/*
 * Power management resume hook.  Not implemented yet; report
 * success so the core can proceed.
 */
static int wbsd_resume(struct device *dev)
{
	DBGF("Not yet supported\n");

	/* Non-void: the surviving text was missing this return. */
	return 0;
}
/* Without CONFIG_PM the PM hooks are simply absent. */
1972 #define wbsd_suspend NULL
1973 #define wbsd_resume NULL
/* The dummy platform device the non-PnP driver binds to. */
1976 static struct platform_device *wbsd_device;
/*
 * Platform (manual/ISA) driver.
 * NOTE(review): this listing dropped the closing "};" of this struct
 * — recover from the original wbsd.c.
 */
1978 static struct device_driver wbsd_driver = {
1979 .name = DRIVER_NAME,
1980 .bus = &platform_bus_type,
1981 .probe = wbsd_probe,
1982 .remove = wbsd_remove,
1984 .suspend = wbsd_suspend,
1985 .resume = wbsd_resume,
/* PnP driver (compiled only with CONFIG_PNP). */
1990 static struct pnp_driver wbsd_pnp_driver = {
1991 .name = DRIVER_NAME,
1992 .id_table = pnp_dev_table,
1993 .probe = wbsd_pnp_probe,
1994 .remove = wbsd_pnp_remove,
1997 #endif /* CONFIG_PNP */
2000 * Module loading/unloading
/*
 * wbsd_drv_init - module init: print the banner, register the PnP
 * driver (when enabled and not overridden by "nopnp"), and otherwise
 * register the platform driver plus a dummy platform device to bind
 * against.
 * NOTE(review): this listing dropped lines (the "if (!nopnp)" guards,
 * error returns after each registration, the "return 0") — recover
 * from the original wbsd.c.
 */
2003 static int __init wbsd_drv_init(void)
2007 printk(KERN_INFO DRIVER_NAME
2008 ": Winbond W83L51xD SD/MMC card interface driver, "
2009 DRIVER_VERSION "\n");
2010 printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
2016 result = pnp_register_driver(&wbsd_pnp_driver);
2021 #endif /* CONFIG_PNP */
2025 result = driver_register(&wbsd_driver);
2029 wbsd_device = platform_device_register_simple(DRIVER_NAME, -1,
2031 if (IS_ERR(wbsd_device))
2032 return PTR_ERR(wbsd_device);
/*
 * wbsd_drv_exit - module exit: unregister the PnP driver (when it was
 * registered) and the platform device/driver pair, mirroring
 * wbsd_drv_init().
 * NOTE(review): this listing dropped the "if (!nopnp)" guard and the
 * surrounding #ifdef — recover from the original wbsd.c.
 */
2038 static void __exit wbsd_drv_exit(void)
2043 pnp_unregister_driver(&wbsd_pnp_driver);
2045 #endif /* CONFIG_PNP */
2049 platform_device_unregister(wbsd_device);
2051 driver_unregister(&wbsd_driver);
/* Module entry/exit points. */
2057 module_init(wbsd_drv_init);
2058 module_exit(wbsd_drv_exit);
/* All parameters are read-only after load (mode 0444). */
2060 module_param(nopnp, uint, 0444);
2062 module_param(io, uint, 0444);
2063 module_param(irq, uint, 0444);
2064 module_param(dma, int, 0444);
2066 MODULE_LICENSE("GPL");
2067 MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver");
2068 MODULE_VERSION(DRIVER_VERSION);
2071 MODULE_PARM_DESC(nopnp, "Scan for device instead of relying on PNP. (default 0)");
2073 MODULE_PARM_DESC(io, "I/O base to allocate. Must be 8 byte aligned. (default 0x248)");
2074 MODULE_PARM_DESC(irq, "IRQ to allocate. (default 6)");
2075 MODULE_PARM_DESC(dma, "DMA channel to allocate. -1 for no DMA. (default 2)");