2 * drivers/spi/spi_imx.c
4 * Copyright (C) 2006 SWAPP
5 * Andrea Paterniani <a.paterniani@swapp-eng.it>
7 * Initial version inspired by:
8 * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/device.h>
24 #include <linux/ioport.h>
25 #include <linux/errno.h>
26 #include <linux/interrupt.h>
27 #include <linux/platform_device.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/spi/spi.h>
30 #include <linux/workqueue.h>
31 #include <linux/delay.h>
32 #include <linux/clk.h>
36 #include <asm/delay.h>
38 #include <mach/hardware.h>
39 #include <mach/imx-dma.h>
40 #include <mach/spi_imx.h>
42 /*-------------------------------------------------------------------------*/
43 /* SPI Registers offsets from peripheral base address */
44 #define SPI_RXDATA (0x00)
45 #define SPI_TXDATA (0x04)
46 #define SPI_CONTROL (0x08)
47 #define SPI_INT_STATUS (0x0C)
48 #define SPI_TEST (0x10)
49 #define SPI_PERIOD (0x14)
50 #define SPI_DMA (0x18)
51 #define SPI_RESET (0x1C)
53 /* SPI Control Register Bit Fields & Masks */
54 #define SPI_CONTROL_BITCOUNT_MASK (0xF) /* Bit Count Mask */
55 #define SPI_CONTROL_BITCOUNT(n) (((n) - 1) & SPI_CONTROL_BITCOUNT_MASK)
56 #define SPI_CONTROL_POL (0x1 << 4) /* Clock Polarity Mask */
57 #define SPI_CONTROL_POL_ACT_HIGH (0x0 << 4) /* Active high pol. (0=idle) */
58 #define SPI_CONTROL_POL_ACT_LOW (0x1 << 4) /* Active low pol. (1=idle) */
59 #define SPI_CONTROL_PHA (0x1 << 5) /* Clock Phase Mask */
60 #define SPI_CONTROL_PHA_0 (0x0 << 5) /* Clock Phase 0 */
61 #define SPI_CONTROL_PHA_1 (0x1 << 5) /* Clock Phase 1 */
62 #define SPI_CONTROL_SSCTL (0x1 << 6) /* /SS Waveform Select Mask */
63 #define SPI_CONTROL_SSCTL_0 (0x0 << 6) /* Master: /SS stays low between SPI burst
64 Slave: RXFIFO advanced by BIT_COUNT */
65 #define SPI_CONTROL_SSCTL_1 (0x1 << 6) /* Master: /SS insert pulse between SPI burst
66 Slave: RXFIFO advanced by /SS rising edge */
67 #define SPI_CONTROL_SSPOL (0x1 << 7) /* /SS Polarity Select Mask */
68 #define SPI_CONTROL_SSPOL_ACT_LOW (0x0 << 7) /* /SS Active low */
69 #define SPI_CONTROL_SSPOL_ACT_HIGH (0x1 << 7) /* /SS Active high */
70 #define SPI_CONTROL_XCH (0x1 << 8) /* Exchange */
71 #define SPI_CONTROL_SPIEN (0x1 << 9) /* SPI Module Enable */
72 #define SPI_CONTROL_MODE (0x1 << 10) /* SPI Mode Select Mask */
73 #define SPI_CONTROL_MODE_SLAVE (0x0 << 10) /* SPI Mode Slave */
74 #define SPI_CONTROL_MODE_MASTER (0x1 << 10) /* SPI Mode Master */
75 #define SPI_CONTROL_DRCTL (0x3 << 11) /* /SPI_RDY Control Mask */
76 #define SPI_CONTROL_DRCTL_0 (0x0 << 11) /* Ignore /SPI_RDY */
77 #define SPI_CONTROL_DRCTL_1 (0x1 << 11) /* /SPI_RDY falling edge triggers input */
78 #define SPI_CONTROL_DRCTL_2 (0x2 << 11) /* /SPI_RDY active low level triggers input */
79 #define SPI_CONTROL_DATARATE (0x7 << 13) /* Data Rate Mask */
80 #define SPI_PERCLK2_DIV_MIN (0) /* PERCLK2:4 */
81 #define SPI_PERCLK2_DIV_MAX (7) /* PERCLK2:512 */
82 #define SPI_CONTROL_DATARATE_MIN (SPI_PERCLK2_DIV_MAX << 13)
83 #define SPI_CONTROL_DATARATE_MAX (SPI_PERCLK2_DIV_MIN << 13)
84 #define SPI_CONTROL_DATARATE_BAD (SPI_CONTROL_DATARATE_MIN + 1)
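/*
 * Worked example of the data rate divider (illustrative only; the 48 MHz
 * PERCLK2 figure is an assumption, not taken from this file):
 *
 *	spi_clk = PERCLK2 / (4 << div)
 *	div = SPI_PERCLK2_DIV_MIN (0) -> 48 MHz / 4   = 12 MHz
 *	div = SPI_PERCLK2_DIV_MAX (7) -> 48 MHz / 512 = 93.75 kHz
 *
 * Hence SPI_CONTROL_DATARATE_MAX encodes the fastest rate (smallest
 * divider) and SPI_CONTROL_DATARATE_MIN the slowest one.
 */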
86 /* SPI Interrupt/Status Register Bit Fields & Masks */
87 #define SPI_STATUS_TE (0x1 << 0) /* TXFIFO Empty Status */
88 #define SPI_STATUS_TH (0x1 << 1) /* TXFIFO Half Status */
89 #define SPI_STATUS_TF (0x1 << 2) /* TXFIFO Full Status */
90 #define SPI_STATUS_RR (0x1 << 3) /* RXFIFO Data Ready Status */
91 #define SPI_STATUS_RH (0x1 << 4) /* RXFIFO Half Status */
92 #define SPI_STATUS_RF (0x1 << 5) /* RXFIFO Full Status */
93 #define SPI_STATUS_RO (0x1 << 6) /* RXFIFO Overflow */
94 #define SPI_STATUS_BO (0x1 << 7) /* Bit Count Overflow */
95 #define SPI_STATUS (0xFF) /* SPI Status Mask */
96 #define SPI_INTEN_TE (0x1 << 8) /* TXFIFO Empty Interrupt Enable */
97 #define SPI_INTEN_TH (0x1 << 9) /* TXFIFO Half Interrupt Enable */
98 #define SPI_INTEN_TF (0x1 << 10) /* TXFIFO Full Interrupt Enable */
99 #define SPI_INTEN_RE (0x1 << 11) /* RXFIFO Data Ready Interrupt Enable */
100 #define SPI_INTEN_RH (0x1 << 12) /* RXFIFO Half Interrupt Enable */
101 #define SPI_INTEN_RF (0x1 << 13) /* RXFIFO Full Interrupt Enable */
102 #define SPI_INTEN_RO (0x1 << 14) /* RXFIFO Overflow Interrupt Enable */
103 #define SPI_INTEN_BO (0x1 << 15) /* Bit Count Overflow Interrupt Enable */
104 #define SPI_INTEN (0xFF << 8) /* SPI Interrupt Enable Mask */
106 /* SPI Test Register Bit Fields & Masks */
107 #define SPI_TEST_TXCNT (0xF << 0) /* TXFIFO Counter */
108 #define SPI_TEST_RXCNT_LSB (4) /* RXFIFO Counter LSB */
109 #define SPI_TEST_RXCNT (0xF << 4) /* RXFIFO Counter */
110 #define SPI_TEST_SSTATUS (0xF << 8) /* State Machine Status */
111 #define SPI_TEST_LBC (0x1 << 14) /* Loop Back Control */
113 /* SPI Period Register Bit Fields & Masks */
114 #define SPI_PERIOD_WAIT (0x7FFF << 0) /* Wait Between Transactions */
115 #define SPI_PERIOD_MAX_WAIT (0x7FFF) /* Max Wait Between Transactions */
117 #define SPI_PERIOD_CSRC (0x1 << 15) /* Period Clock Source Mask */
118 #define SPI_PERIOD_CSRC_BCLK (0x0 << 15) /* Period Clock Source is Bit Clock */
120 #define SPI_PERIOD_CSRC_32768 (0x1 << 15) /* Period Clock Source is 32.768 KHz Clock */
123 /* SPI DMA Register Bit Fields & Masks */
124 #define SPI_DMA_RHDMA (0x1 << 4) /* RXFIFO Half Status */
125 #define SPI_DMA_RFDMA (0x1 << 5) /* RXFIFO Full Status */
126 #define SPI_DMA_TEDMA (0x1 << 6) /* TXFIFO Empty Status */
127 #define SPI_DMA_THDMA (0x1 << 7) /* TXFIFO Half Status */
128 #define SPI_DMA_RHDEN (0x1 << 12) /* RXFIFO Half DMA Request Enable */
129 #define SPI_DMA_RFDEN (0x1 << 13) /* RXFIFO Full DMA Request Enable */
130 #define SPI_DMA_TEDEN (0x1 << 14) /* TXFIFO Empty DMA Request Enable */
131 #define SPI_DMA_THDEN (0x1 << 15) /* TXFIFO Half DMA Request Enable */
133 /* SPI Soft Reset Register Bit Fields & Masks */
134 #define SPI_RESET_START (0x1) /* Start */
136 /* Default SPI configuration values */
137 #define SPI_DEFAULT_CONTROL \
139 SPI_CONTROL_BITCOUNT(16) | \
140 SPI_CONTROL_POL_ACT_HIGH | \
141 SPI_CONTROL_PHA_0 | \
142 SPI_CONTROL_SPIEN | \
143 SPI_CONTROL_SSCTL_1 | \
144 SPI_CONTROL_MODE_MASTER | \
145 SPI_CONTROL_DRCTL_0 | \
146 SPI_CONTROL_DATARATE_MIN \
148 #define SPI_DEFAULT_ENABLE_LOOPBACK (0)
149 #define SPI_DEFAULT_ENABLE_DMA (0)
150 #define SPI_DEFAULT_PERIOD_WAIT (8)
151 /*-------------------------------------------------------------------------*/
154 /*-------------------------------------------------------------------------*/
155 /* TX/RX SPI FIFO size */
156 #define SPI_FIFO_DEPTH (8)
157 #define SPI_FIFO_BYTE_WIDTH (2)
158 #define SPI_FIFO_OVERFLOW_MARGIN (2)
160 /* DMA burst length for half full/empty request trigger */
161 #define SPI_DMA_BLR (SPI_FIFO_DEPTH * SPI_FIFO_BYTE_WIDTH / 2)
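/*
 * With the values above the burst length works out to
 * 8 entries * 2 bytes / 2 = 8 bytes, i.e. one half-FIFO of data moved
 * per DMA request, matching the half-full/half-empty triggers used below.
 */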
163 /* Dummy char output to achieve reads.
164 Choosing something different from all zeroes may help pattern recognition
165 for oscilloscope analysis, but may break some drivers. */
166 #define SPI_DUMMY_u8 0
167 #define SPI_DUMMY_u16 ((SPI_DUMMY_u8 << 8) | SPI_DUMMY_u8)
168 #define SPI_DUMMY_u32 ((SPI_DUMMY_u16 << 16) | SPI_DUMMY_u16)
171 * Macro to change a u32 field:
172 * @r : register to edit
174 * @v : new value for the field correctly bit-aligned
176 #define u32_EDIT(r, m, v) r = (r & ~(m)) | (v)
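/*
 * Usage sketch (illustrative only): change one field of a control word
 * while leaving all other bits untouched, e.g.
 *
 *	u32_EDIT(control, SPI_CONTROL_DATARATE, SPI_CONTROL_DATARATE_MIN);
 *
 * first clears the DATARATE field of 'control', then ORs in the new value.
 */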
179 #define START_STATE ((void*)0)
180 #define RUNNING_STATE ((void*)1)
181 #define DONE_STATE ((void*)2)
182 #define ERROR_STATE ((void*)-1)
185 #define QUEUE_RUNNING (0)
186 #define QUEUE_STOPPED (1)
188 #define IS_DMA_ALIGNED(x) (((u32)(x) & 0x03) == 0)
189 #define DMA_ALIGNMENT 4
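/*
 * Example (addresses made up for illustration): a buffer at 0x1000 passes
 * IS_DMA_ALIGNED() because its two low bits are clear, while one at 0x1002
 * fails and makes map_dma_buffers() bail out, so the transfer falls back
 * to PIO.
 */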
190 /*-------------------------------------------------------------------------*/
193 /*-------------------------------------------------------------------------*/
194 /* Driver data structs */
198 /* Driver model hookup */
199 struct platform_device *pdev;
201 /* SPI framework hookup */
202 struct spi_master *master;
205 struct spi_imx_master *master_info;
207 /* Memory resources and SPI regs virtual address */
208 struct resource *ioarea;
211 /* SPI RX_DATA physical address */
212 dma_addr_t rd_data_phys;
214 /* Driver message queue */
215 struct workqueue_struct *workqueue;
216 struct work_struct work;
218 struct list_head queue;
222 /* Message Transfer pump */
223 struct tasklet_struct pump_transfers;
225 /* Current message, transfer and state */
226 struct spi_message *cur_msg;
227 struct spi_transfer *cur_transfer;
228 struct chip_data *cur_chip;
230 /* Rd / Wr buffers pointers */
241 /* Function pointers */
242 irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
243 void (*cs_control)(u32 command);
250 int rx_dma_needs_unmap;
251 int tx_dma_needs_unmap;
253 u32 dummy_dma_buf ____cacheline_aligned;
269 void (*cs_control)(u32 command);
271 /*-------------------------------------------------------------------------*/
274 static void pump_messages(struct work_struct *work);
276 static void flush(struct driver_data *drv_data)
278 void __iomem *regs = drv_data->regs;
281 dev_dbg(&drv_data->pdev->dev, "flush\n");
283 /* Wait for end of transaction */
285 control = readl(regs + SPI_CONTROL);
286 } while (control & SPI_CONTROL_XCH);
288 /* Release chip select if requested, transfer delays are
289 handled in pump_transfers */
290 if (drv_data->cs_change)
291 drv_data->cs_control(SPI_CS_DEASSERT);
293 /* Disable SPI to flush FIFOs */
294 writel(control & ~SPI_CONTROL_SPIEN, regs + SPI_CONTROL);
295 writel(control, regs + SPI_CONTROL);
298 static void restore_state(struct driver_data *drv_data)
300 void __iomem *regs = drv_data->regs;
301 struct chip_data *chip = drv_data->cur_chip;
303 /* Load chip registers */
304 dev_dbg(&drv_data->pdev->dev,
307 " control = 0x%08X\n",
310 writel(chip->test, regs + SPI_TEST);
311 writel(chip->period, regs + SPI_PERIOD);
312 writel(0, regs + SPI_INT_STATUS);
313 writel(chip->control, regs + SPI_CONTROL);
316 static void null_cs_control(u32 command)
320 static inline u32 data_to_write(struct driver_data *drv_data)
322 return ((u32)(drv_data->tx_end - drv_data->tx)) / drv_data->n_bytes;
325 static inline u32 data_to_read(struct driver_data *drv_data)
327 return ((u32)(drv_data->rx_end - drv_data->rx)) / drv_data->n_bytes;
330 static int write(struct driver_data *drv_data)
332 void __iomem *regs = drv_data->regs;
333 void *tx = drv_data->tx;
334 void *tx_end = drv_data->tx_end;
335 u8 n_bytes = drv_data->n_bytes;
336 u32 remaining_writes;
337 u32 fifo_avail_space;
341 /* Compute how many fifo writes to do */
342 remaining_writes = (u32)(tx_end - tx) / n_bytes;
343 fifo_avail_space = SPI_FIFO_DEPTH -
344 (readl(regs + SPI_TEST) & SPI_TEST_TXCNT);
345 if (drv_data->rx && (fifo_avail_space > SPI_FIFO_OVERFLOW_MARGIN))
346 /* Fix misunderstood receive overflow */
347 fifo_avail_space -= SPI_FIFO_OVERFLOW_MARGIN;
348 n = min(remaining_writes, fifo_avail_space);
350 dev_dbg(&drv_data->pdev->dev,
352 " remaining writes = %d\n"
353 " fifo avail space = %d\n"
354 " fifo writes = %d\n",
355 (n_bytes == 1) ? "u8" : "u16",
361 /* Fill SPI TXFIFO */
362 if (drv_data->rd_only) {
365 writel(SPI_DUMMY_u16, regs + SPI_TXDATA);
370 writel(d, regs + SPI_TXDATA);
376 writel(d, regs + SPI_TXDATA);
382 /* Trigger transfer */
383 writel(readl(regs + SPI_CONTROL) | SPI_CONTROL_XCH,
386 /* Update tx pointer */
390 return (tx >= tx_end);
393 static int read(struct driver_data *drv_data)
395 void __iomem *regs = drv_data->regs;
396 void *rx = drv_data->rx;
397 void *rx_end = drv_data->rx_end;
398 u8 n_bytes = drv_data->n_bytes;
404 /* Compute how many fifo reads to do */
405 remaining_reads = (u32)(rx_end - rx) / n_bytes;
406 fifo_rxcnt = (readl(regs + SPI_TEST) & SPI_TEST_RXCNT) >>
408 n = min(remaining_reads, fifo_rxcnt);
410 dev_dbg(&drv_data->pdev->dev,
412 " remaining reads = %d\n"
413 " fifo rx count = %d\n"
414 " fifo reads = %d\n",
415 (n_bytes == 1) ? "u8" : "u16",
421 /* Read SPI RXFIFO */
424 d = readl(regs + SPI_RXDATA);
430 d = readl(regs + SPI_RXDATA);
436 /* Update rx pointer */
440 return (rx >= rx_end);
443 static void *next_transfer(struct driver_data *drv_data)
445 struct spi_message *msg = drv_data->cur_msg;
446 struct spi_transfer *trans = drv_data->cur_transfer;
448 /* Move to next transfer */
449 if (trans->transfer_list.next != &msg->transfers) {
450 drv_data->cur_transfer =
451 list_entry(trans->transfer_list.next,
454 return RUNNING_STATE;
460 static int map_dma_buffers(struct driver_data *drv_data)
462 struct spi_message *msg;
466 drv_data->rx_dma_needs_unmap = 0;
467 drv_data->tx_dma_needs_unmap = 0;
469 if (!drv_data->master_info->enable_dma ||
470 !drv_data->cur_chip->enable_dma)
473 msg = drv_data->cur_msg;
474 dev = &msg->spi->dev;
475 if (msg->is_dma_mapped) {
476 if (drv_data->tx_dma)
477 /* The caller provided at least dma and cpu virtual
478 address for write; pump_transfers() will consider the
479 transfer as write only if cpu rx virtual address is NULL */
483 if (drv_data->rx_dma) {
484 /* The caller provided dma and cpu virtual address to
485 perform a read-only transfer -->
486 use drv_data->dummy_dma_buf for dummy writes to achieve reads */
488 buf = &drv_data->dummy_dma_buf;
489 drv_data->tx_map_len = sizeof(drv_data->dummy_dma_buf);
490 drv_data->tx_dma = dma_map_single(dev,
492 drv_data->tx_map_len,
494 if (dma_mapping_error(dev, drv_data->tx_dma))
497 drv_data->tx_dma_needs_unmap = 1;
499 /* Flag the transfer as rd_only for pump_transfers() DMA
500 regs programming (should be redundant) */
507 if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
510 if (drv_data->tx == NULL) {
511 /* Read only message --> use drv_data->dummy_dma_buf for dummy
512 writes to achieve reads */
513 buf = &drv_data->dummy_dma_buf;
514 drv_data->tx_map_len = sizeof(drv_data->dummy_dma_buf);
517 drv_data->tx_map_len = drv_data->len;
519 drv_data->tx_dma = dma_map_single(dev,
521 drv_data->tx_map_len,
523 if (dma_mapping_error(dev, drv_data->tx_dma))
525 drv_data->tx_dma_needs_unmap = 1;
527 /* NULL rx means write-only transfer and no map needed
528 * since rx DMA will not be used */
531 drv_data->rx_dma = dma_map_single(dev,
535 if (dma_mapping_error(dev, drv_data->rx_dma)) {
536 if (drv_data->tx_dma) {
537 dma_unmap_single(dev,
539 drv_data->tx_map_len,
541 drv_data->tx_dma_needs_unmap = 0;
545 drv_data->rx_dma_needs_unmap = 1;
551 static void unmap_dma_buffers(struct driver_data *drv_data)
553 struct spi_message *msg = drv_data->cur_msg;
554 struct device *dev = &msg->spi->dev;
556 if (drv_data->rx_dma_needs_unmap) {
557 dma_unmap_single(dev,
561 drv_data->rx_dma_needs_unmap = 0;
563 if (drv_data->tx_dma_needs_unmap) {
564 dma_unmap_single(dev,
566 drv_data->tx_map_len,
568 drv_data->tx_dma_needs_unmap = 0;
572 /* Caller already set message->status (dma is already blocked) */
573 static void giveback(struct spi_message *message, struct driver_data *drv_data)
575 void __iomem *regs = drv_data->regs;
577 /* Bring SPI to sleep; restore_state() and pump_transfer() will do new setup */
579 writel(0, regs + SPI_INT_STATUS);
580 writel(0, regs + SPI_DMA);
582 /* Unconditional deselect */
583 drv_data->cs_control(SPI_CS_DEASSERT);
585 message->state = NULL;
586 if (message->complete)
587 message->complete(message->context);
589 drv_data->cur_msg = NULL;
590 drv_data->cur_transfer = NULL;
591 drv_data->cur_chip = NULL;
592 queue_work(drv_data->workqueue, &drv_data->work);
595 static void dma_err_handler(int channel, void *data, int errcode)
597 struct driver_data *drv_data = data;
598 struct spi_message *msg = drv_data->cur_msg;
600 dev_dbg(&drv_data->pdev->dev, "dma_err_handler\n");
602 /* Disable both rx and tx dma channels */
603 imx_dma_disable(drv_data->rx_channel);
604 imx_dma_disable(drv_data->tx_channel);
605 unmap_dma_buffers(drv_data);
609 msg->state = ERROR_STATE;
610 tasklet_schedule(&drv_data->pump_transfers);
613 static void dma_tx_handler(int channel, void *data)
615 struct driver_data *drv_data = data;
617 dev_dbg(&drv_data->pdev->dev, "dma_tx_handler\n");
619 imx_dma_disable(channel);
621 /* Now wait until the TX FIFO is empty */
622 writel(SPI_INTEN_TE, drv_data->regs + SPI_INT_STATUS);
625 static irqreturn_t dma_transfer(struct driver_data *drv_data)
628 struct spi_message *msg = drv_data->cur_msg;
629 void __iomem *regs = drv_data->regs;
631 status = readl(regs + SPI_INT_STATUS);
633 if ((status & (SPI_INTEN_RO | SPI_STATUS_RO))
634 == (SPI_INTEN_RO | SPI_STATUS_RO)) {
635 writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);
637 imx_dma_disable(drv_data->tx_channel);
638 imx_dma_disable(drv_data->rx_channel);
639 unmap_dma_buffers(drv_data);
643 dev_warn(&drv_data->pdev->dev,
644 "dma_transfer - fifo overun\n");
646 msg->state = ERROR_STATE;
647 tasklet_schedule(&drv_data->pump_transfers);
652 if (status & SPI_STATUS_TE) {
653 writel(status & ~SPI_INTEN_TE, regs + SPI_INT_STATUS);
656 /* Wait for end of transfer before reading trailing data */
657 while (readl(regs + SPI_CONTROL) & SPI_CONTROL_XCH)
660 imx_dma_disable(drv_data->rx_channel);
661 unmap_dma_buffers(drv_data);
663 /* Release chip select if requested, transfer delays are
664 handled in pump_transfers() */
665 if (drv_data->cs_change)
666 drv_data->cs_control(SPI_CS_DEASSERT);
668 /* Calculate the number of trailing words and read them */
669 dev_dbg(&drv_data->pdev->dev,
670 "dma_transfer - test = 0x%08X\n",
671 readl(regs + SPI_TEST));
672 drv_data->rx = drv_data->rx_end -
673 ((readl(regs + SPI_TEST) &
675 SPI_TEST_RXCNT_LSB)*drv_data->n_bytes;
678 /* Write only transfer */
679 unmap_dma_buffers(drv_data);
684 /* End of transfer, update total bytes transferred */
685 msg->actual_length += drv_data->len;
687 /* Move to next transfer */
688 msg->state = next_transfer(drv_data);
690 /* Schedule transfer tasklet */
691 tasklet_schedule(&drv_data->pump_transfers);
696 /* Oops, problem detected */
700 static irqreturn_t interrupt_wronly_transfer(struct driver_data *drv_data)
702 struct spi_message *msg = drv_data->cur_msg;
703 void __iomem *regs = drv_data->regs;
705 irqreturn_t handled = IRQ_NONE;
707 status = readl(regs + SPI_INT_STATUS);
709 if (status & SPI_INTEN_TE) {
710 /* TXFIFO Empty Interrupt on the last transferred word */
711 writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);
712 dev_dbg(&drv_data->pdev->dev,
713 "interrupt_wronly_transfer - end of tx\n");
717 /* Update total bytes transferred */
718 msg->actual_length += drv_data->len;
720 /* Move to next transfer */
721 msg->state = next_transfer(drv_data);
723 /* Schedule transfer tasklet */
724 tasklet_schedule(&drv_data->pump_transfers);
728 while (status & SPI_STATUS_TH) {
729 dev_dbg(&drv_data->pdev->dev,
730 "interrupt_wronly_transfer - status = 0x%08X\n",
734 if (write(drv_data)) {
735 /* End of TXFIFO writes,
736 now wait until TXFIFO is empty */
737 writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
741 status = readl(regs + SPI_INT_STATUS);
743 /* We did something */
744 handled = IRQ_HANDLED;
751 static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
753 struct spi_message *msg = drv_data->cur_msg;
754 void __iomem *regs = drv_data->regs;
756 irqreturn_t handled = IRQ_NONE;
759 status = readl(regs + SPI_INT_STATUS);
761 if (status & SPI_INTEN_TE) {
762 /* TXFIFO Empty Interrupt on the last transferred word */
763 writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);
764 dev_dbg(&drv_data->pdev->dev,
765 "interrupt_transfer - end of tx\n");
767 if (msg->state == ERROR_STATE) {
768 /* RXFIFO overrun was detected and message aborted */
771 /* Wait for end of transaction */
773 control = readl(regs + SPI_CONTROL);
774 } while (control & SPI_CONTROL_XCH);
776 /* Release chip select if requested, transfer delays are
777 handled in pump_transfers */
778 if (drv_data->cs_change)
779 drv_data->cs_control(SPI_CS_DEASSERT);
781 /* Read trailing bytes */
782 limit = loops_per_jiffy << 1;
783 while ((read(drv_data) == 0) && --limit)
787 dev_err(&drv_data->pdev->dev,
788 "interrupt_transfer - "
789 "trailing byte read failed\n");
791 dev_dbg(&drv_data->pdev->dev,
792 "interrupt_transfer - end of rx\n");
794 /* Update total bytes transferred */
795 msg->actual_length += drv_data->len;
797 /* Move to next transfer */
798 msg->state = next_transfer(drv_data);
801 /* Schedule transfer tasklet */
802 tasklet_schedule(&drv_data->pump_transfers);
806 while (status & (SPI_STATUS_TH | SPI_STATUS_RO)) {
807 dev_dbg(&drv_data->pdev->dev,
808 "interrupt_transfer - status = 0x%08X\n",
811 if (status & SPI_STATUS_RO) {
812 /* RXFIFO overrun, abort message and wait
813 until TXFIFO is empty */
814 writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
816 dev_warn(&drv_data->pdev->dev,
817 "interrupt_transfer - fifo overun\n"
818 " data not yet written = %d\n"
819 " data not yet read = %d\n",
820 data_to_write(drv_data),
821 data_to_read(drv_data));
823 msg->state = ERROR_STATE;
830 if (write(drv_data)) {
831 /* End of TXFIFO writes,
832 now wait until TXFIFO is empty */
833 writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
837 status = readl(regs + SPI_INT_STATUS);
839 /* We did something */
840 handled = IRQ_HANDLED;
847 static irqreturn_t spi_int(int irq, void *dev_id)
849 struct driver_data *drv_data = (struct driver_data *)dev_id;
851 if (!drv_data->cur_msg) {
852 dev_err(&drv_data->pdev->dev,
853 "spi_int - bad message state\n");
858 return drv_data->transfer_handler(drv_data);
861 static inline u32 spi_speed_hz(struct driver_data *drv_data, u32 data_rate)
863 return clk_get_rate(drv_data->clk) / (4 << ((data_rate) >> 13));
866 static u32 spi_data_rate(struct driver_data *drv_data, u32 speed_hz)
869 u32 quantized_hz = clk_get_rate(drv_data->clk) >> 2;
871 for (div = SPI_PERCLK2_DIV_MIN;
872 div <= SPI_PERCLK2_DIV_MAX;
873 div++, quantized_hz >>= 1) {
874 if (quantized_hz <= speed_hz)
875 /* Max available speed LEQ required speed */
878 return SPI_CONTROL_DATARATE_BAD;
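/*
 * Illustrative round trip (assuming a 48 MHz PERCLK2, not a value taken
 * from this file): spi_data_rate(drv_data, 1000000) walks the quantized
 * rates 12 MHz, 6 MHz, 3 MHz, 1.5 MHz, 750 kHz and settles on div = 4;
 * spi_speed_hz() then reports the rounded 750 kHz rate actually used.
 */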
881 static void pump_transfers(unsigned long data)
883 struct driver_data *drv_data = (struct driver_data *)data;
884 struct spi_message *message;
885 struct spi_transfer *transfer, *previous;
886 struct chip_data *chip;
890 dev_dbg(&drv_data->pdev->dev, "pump_transfer\n");
892 message = drv_data->cur_msg;
894 /* Handle abort */
895 if (message->state == ERROR_STATE) {
896 message->status = -EIO;
897 giveback(message, drv_data);
901 /* Handle end of message */
902 if (message->state == DONE_STATE) {
904 giveback(message, drv_data);
908 chip = drv_data->cur_chip;
910 /* Delay if requested at end of transfer */
911 transfer = drv_data->cur_transfer;
912 if (message->state == RUNNING_STATE) {
913 previous = list_entry(transfer->transfer_list.prev,
916 if (previous->delay_usecs)
917 udelay(previous->delay_usecs);
920 message->state = RUNNING_STATE;
921 drv_data->cs_control = chip->cs_control;
924 transfer = drv_data->cur_transfer;
925 drv_data->tx = (void *)transfer->tx_buf;
926 drv_data->tx_end = drv_data->tx + transfer->len;
927 drv_data->rx = transfer->rx_buf;
928 drv_data->rx_end = drv_data->rx + transfer->len;
929 drv_data->rx_dma = transfer->rx_dma;
930 drv_data->tx_dma = transfer->tx_dma;
931 drv_data->len = transfer->len;
932 drv_data->cs_change = transfer->cs_change;
933 drv_data->rd_only = (drv_data->tx == NULL);
935 regs = drv_data->regs;
936 control = readl(regs + SPI_CONTROL);
938 /* Bits per word setup */
939 tmp = transfer->bits_per_word;
941 /* Use device setup */
942 tmp = chip->bits_per_word;
943 drv_data->n_bytes = chip->n_bytes;
945 /* Use per-transfer setup */
946 drv_data->n_bytes = (tmp <= 8) ? 1 : 2;
947 u32_EDIT(control, SPI_CONTROL_BITCOUNT_MASK, tmp - 1);
949 /* Speed setup (surely valid because already checked) */
950 tmp = transfer->speed_hz;
952 tmp = chip->max_speed_hz;
953 tmp = spi_data_rate(drv_data, tmp);
954 u32_EDIT(control, SPI_CONTROL_DATARATE, tmp);
956 writel(control, regs + SPI_CONTROL);
958 /* Assert device chip-select */
959 drv_data->cs_control(SPI_CS_ASSERT);
961 /* DMA cannot read/write SPI FIFOs other than 16 bits at a time; hence
962 if bits_per_word is 8 or less, PIO transfers are performed.
963 Moreover DMA is convenient for transfer lengths bigger than the FIFO depth */
965 if ((drv_data->n_bytes == 2) &&
966 (drv_data->len > SPI_FIFO_DEPTH*SPI_FIFO_BYTE_WIDTH) &&
967 (map_dma_buffers(drv_data) == 0)) {
968 dev_dbg(&drv_data->pdev->dev,
969 "pump dma transfer\n"
976 (unsigned int)drv_data->tx_dma,
978 (unsigned int)drv_data->rx_dma,
981 /* Ensure we have the correct interrupt handler */
982 drv_data->transfer_handler = dma_transfer;
984 /* Trigger transfer */
985 writel(readl(regs + SPI_CONTROL) | SPI_CONTROL_XCH,
990 /* Linear source address */
991 CCR(drv_data->tx_channel) =
994 CCR_SSIZ_32 | CCR_DSIZ_16 |
997 /* Read only transfer -> fixed source address for
998 dummy writes to achieve reads */
999 CCR(drv_data->tx_channel) =
1002 CCR_SSIZ_32 | CCR_DSIZ_16 |
1005 imx_dma_setup_single(
1006 drv_data->tx_channel,
1009 drv_data->rd_data_phys + 4,
1013 /* Setup rx DMA for linear destination address */
1014 CCR(drv_data->rx_channel) =
1017 CCR_DSIZ_32 | CCR_SSIZ_16 |
1019 imx_dma_setup_single(
1020 drv_data->rx_channel,
1023 drv_data->rd_data_phys,
1025 imx_dma_enable(drv_data->rx_channel);
1027 /* Enable SPI interrupt */
1028 writel(SPI_INTEN_RO, regs + SPI_INT_STATUS);
1030 /* Set SPI to request DMA service on both
1031 Rx and Tx half fifo watermark */
1032 writel(SPI_DMA_RHDEN | SPI_DMA_THDEN, regs + SPI_DMA);
1034 /* Write only access -> set SPI to request DMA
1035 service on Tx half fifo watermark */
1036 writel(SPI_DMA_THDEN, regs + SPI_DMA);
1038 imx_dma_enable(drv_data->tx_channel);
1040 dev_dbg(&drv_data->pdev->dev,
1041 "pump pio transfer\n"
1049 /* Ensure we have the correct interrupt handler */
1051 drv_data->transfer_handler = interrupt_transfer;
1053 drv_data->transfer_handler = interrupt_wronly_transfer;
1055 /* Enable SPI interrupt */
1057 writel(SPI_INTEN_TH | SPI_INTEN_RO,
1058 regs + SPI_INT_STATUS);
1060 writel(SPI_INTEN_TH, regs + SPI_INT_STATUS);
1064 static void pump_messages(struct work_struct *work)
1066 struct driver_data *drv_data =
1067 container_of(work, struct driver_data, work);
1068 unsigned long flags;
1070 /* Lock queue and check for queue work */
1071 spin_lock_irqsave(&drv_data->lock, flags);
1072 if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
1074 spin_unlock_irqrestore(&drv_data->lock, flags);
1078 /* Make sure we are not already running a message */
1079 if (drv_data->cur_msg) {
1080 spin_unlock_irqrestore(&drv_data->lock, flags);
1084 /* Extract head of queue */
1085 drv_data->cur_msg = list_entry(drv_data->queue.next,
1086 struct spi_message, queue);
1087 list_del_init(&drv_data->cur_msg->queue);
1089 spin_unlock_irqrestore(&drv_data->lock, flags);
1091 /* Initial message state */
1092 drv_data->cur_msg->state = START_STATE;
1093 drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
1094 struct spi_transfer,
1097 /* Setup the SPI using the per chip configuration */
1098 drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
1099 restore_state(drv_data);
1101 /* Mark as busy and launch transfers */
1102 tasklet_schedule(&drv_data->pump_transfers);
1105 static int transfer(struct spi_device *spi, struct spi_message *msg)
1107 struct driver_data *drv_data = spi_master_get_devdata(spi->master);
1108 u32 min_speed_hz, max_speed_hz, tmp;
1109 struct spi_transfer *trans;
1110 unsigned long flags;
1112 msg->actual_length = 0;
1114 /* Per transfer setup check */
1115 min_speed_hz = spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN);
1116 max_speed_hz = spi->max_speed_hz;
1117 list_for_each_entry(trans, &msg->transfers, transfer_list) {
1118 tmp = trans->bits_per_word;
1120 dev_err(&drv_data->pdev->dev,
1121 "message rejected : "
1122 "invalid transfer bits_per_word (%d bits)\n",
1126 tmp = trans->speed_hz;
1128 if (tmp < min_speed_hz) {
1129 dev_err(&drv_data->pdev->dev,
1130 "message rejected : "
1131 "device min speed (%d Hz) exceeds "
1132 "required transfer speed (%d Hz)\n",
1136 } else if (tmp > max_speed_hz) {
1137 dev_err(&drv_data->pdev->dev,
1138 "message rejected : "
1139 "transfer speed (%d Hz) exceeds "
1140 "device max speed (%d Hz)\n",
1148 /* Message accepted */
1149 msg->status = -EINPROGRESS;
1150 msg->state = START_STATE;
1152 spin_lock_irqsave(&drv_data->lock, flags);
1153 if (drv_data->run == QUEUE_STOPPED) {
1154 spin_unlock_irqrestore(&drv_data->lock, flags);
1158 list_add_tail(&msg->queue, &drv_data->queue);
1159 if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
1160 queue_work(drv_data->workqueue, &drv_data->work);
1162 spin_unlock_irqrestore(&drv_data->lock, flags);
1166 /* Message rejected and not queued */
1167 msg->status = -EINVAL;
1168 msg->state = ERROR_STATE;
1170 msg->complete(msg->context);
1174 /* the spi->mode bits understood by this driver: */
1175 #define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
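/*
 * Board-file sketch (hypothetical values, only to illustrate which mode
 * bits this driver accepts):
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "spidev",
 *			.max_speed_hz	= 1000000,
 *			.bus_num	= 0,
 *			.chip_select	= 0,
 *			.mode		= SPI_MODE_3 | SPI_CS_HIGH,
 *		},
 *	};
 *
 * Any mode bit outside MODEBITS makes setup() below reject the device.
 */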
1177 /* On first setup, bad values must free the chip_data memory since they will
1178 cause spi_new_device to fail. Bad values set up later by the protocol driver
1179 are simply not applied and the calling driver is notified. */
1180 static int setup(struct spi_device *spi)
1182 struct driver_data *drv_data = spi_master_get_devdata(spi->master);
1183 struct spi_imx_chip *chip_info;
1184 struct chip_data *chip;
1185 int first_setup = 0;
1189 if (spi->mode & ~MODEBITS) {
1190 dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
1191 spi->mode & ~MODEBITS);
1195 /* Get controller data */
1196 chip_info = spi->controller_data;
1198 /* Get controller_state */
1199 chip = spi_get_ctldata(spi);
1203 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
1206 "setup - cannot allocate controller state\n");
1209 chip->control = SPI_DEFAULT_CONTROL;
1211 if (chip_info == NULL) {
1212 /* spi_board_info.controller_data is not supplied */
1213 chip_info = kzalloc(sizeof(struct spi_imx_chip),
1218 "cannot allocate controller data\n");
1220 goto err_first_setup;
1222 /* Set controller data default value */
1223 chip_info->enable_loopback =
1224 SPI_DEFAULT_ENABLE_LOOPBACK;
1225 chip_info->enable_dma = SPI_DEFAULT_ENABLE_DMA;
1226 chip_info->ins_ss_pulse = 1;
1227 chip_info->bclk_wait = SPI_DEFAULT_PERIOD_WAIT;
1228 chip_info->cs_control = null_cs_control;
1232 /* Now set controller state based on controller data */
1236 if (chip_info->enable_loopback)
1237 chip->test = SPI_TEST_LBC;
1241 /* SPI dma driven */
1242 chip->enable_dma = chip_info->enable_dma;
1244 /* SPI /SS pulse between spi burst */
1245 if (chip_info->ins_ss_pulse)
1246 u32_EDIT(chip->control,
1247 SPI_CONTROL_SSCTL, SPI_CONTROL_SSCTL_1);
1249 u32_EDIT(chip->control,
1250 SPI_CONTROL_SSCTL, SPI_CONTROL_SSCTL_0);
1252 /* SPI bclk waits between each bits_per_word spi burst */
1253 if (chip_info->bclk_wait > SPI_PERIOD_MAX_WAIT) {
1256 "bclk_wait exceeds max allowed (%d)\n",
1257 SPI_PERIOD_MAX_WAIT);
1258 goto err_first_setup;
1260 chip->period = SPI_PERIOD_CSRC_BCLK |
1261 (chip_info->bclk_wait & SPI_PERIOD_WAIT);
1266 if (tmp & SPI_CS_HIGH) {
1267 u32_EDIT(chip->control,
1268 SPI_CONTROL_SSPOL, SPI_CONTROL_SSPOL_ACT_HIGH);
1270 switch (tmp & SPI_MODE_3) {
1275 tmp = SPI_CONTROL_PHA_1;
1278 tmp = SPI_CONTROL_POL_ACT_LOW;
1282 tmp = SPI_CONTROL_PHA_1 | SPI_CONTROL_POL_ACT_LOW;
1285 u32_EDIT(chip->control, SPI_CONTROL_POL | SPI_CONTROL_PHA, tmp);
1287 /* SPI word width */
1288 tmp = spi->bits_per_word;
1291 spi->bits_per_word = 8;
1292 } else if (tmp > 16) {
1296 "invalid bits_per_word (%d)\n",
1299 goto err_first_setup;
1301 /* Undo setup using chip as backup copy */
1302 tmp = chip->bits_per_word;
1303 spi->bits_per_word = tmp;
1306 chip->bits_per_word = tmp;
1307 u32_EDIT(chip->control, SPI_CONTROL_BITCOUNT_MASK, tmp - 1);
1308 chip->n_bytes = (tmp <= 8) ? 1 : 2;
1311 tmp = spi_data_rate(drv_data, spi->max_speed_hz);
1312 if (tmp == SPI_CONTROL_DATARATE_BAD) {
1316 "HW min speed (%d Hz) exceeds required "
1317 "max speed (%d Hz)\n",
1318 spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN),
1321 goto err_first_setup;
1323 /* Undo setup using chip as backup copy */
1324 spi->max_speed_hz = chip->max_speed_hz;
1326 u32_EDIT(chip->control, SPI_CONTROL_DATARATE, tmp);
1327 /* Actual rounded max_speed_hz */
1328 tmp = spi_speed_hz(drv_data, tmp);
1329 spi->max_speed_hz = tmp;
1330 chip->max_speed_hz = tmp;
1333 /* SPI chip-select management */
1334 if (chip_info->cs_control)
1335 chip->cs_control = chip_info->cs_control;
1337 chip->cs_control = null_cs_control;
1339 /* Save controller_state */
1340 spi_set_ctldata(spi, chip);
1345 " loopback enable = %s\n"
1346 " dma enable = %s\n"
1347 " insert /ss pulse = %s\n"
1348 " period wait = %d\n"
1350 " bits per word = %d\n"
1351 " min speed = %d Hz\n"
1352 " rounded max speed = %d Hz\n",
1353 chip->test & SPI_TEST_LBC ? "Yes" : "No",
1354 chip->enable_dma ? "Yes" : "No",
1355 chip->control & SPI_CONTROL_SSCTL ? "Yes" : "No",
1356 chip->period & SPI_PERIOD_WAIT,
1359 spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN),
1368 static void cleanup(struct spi_device *spi)
1370 kfree(spi_get_ctldata(spi));
1373 static int __init init_queue(struct driver_data *drv_data)
1375 INIT_LIST_HEAD(&drv_data->queue);
1376 spin_lock_init(&drv_data->lock);
1378 drv_data->run = QUEUE_STOPPED;
1381 tasklet_init(&drv_data->pump_transfers,
1382 pump_transfers, (unsigned long)drv_data);
1384 INIT_WORK(&drv_data->work, pump_messages);
1385 drv_data->workqueue = create_singlethread_workqueue(
1386 dev_name(drv_data->master->dev.parent));
1387 if (drv_data->workqueue == NULL)
1393 static int start_queue(struct driver_data *drv_data)
1395 unsigned long flags;
1397 spin_lock_irqsave(&drv_data->lock, flags);
1399 if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
1400 spin_unlock_irqrestore(&drv_data->lock, flags);
1404 drv_data->run = QUEUE_RUNNING;
1405 drv_data->cur_msg = NULL;
1406 drv_data->cur_transfer = NULL;
1407 drv_data->cur_chip = NULL;
1408 spin_unlock_irqrestore(&drv_data->lock, flags);
1410 queue_work(drv_data->workqueue, &drv_data->work);
1415 static int stop_queue(struct driver_data *drv_data)
1417 unsigned long flags;
1418 unsigned limit = 500;
1421 spin_lock_irqsave(&drv_data->lock, flags);
1423 /* This is a bit lame, but is optimized for the common execution path.
1424 * A wait_queue on the drv_data->busy could be used, but then the common
1425 * execution path (pump_messages) would be required to call wake_up or
1426 * friends on every SPI message. Do this instead */
1427 drv_data->run = QUEUE_STOPPED;
1428 while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
1429 spin_unlock_irqrestore(&drv_data->lock, flags);
1431 spin_lock_irqsave(&drv_data->lock, flags);
1434 if (!list_empty(&drv_data->queue) || drv_data->busy)
1437 spin_unlock_irqrestore(&drv_data->lock, flags);
1442 static int destroy_queue(struct driver_data *drv_data)
1446 status = stop_queue(drv_data);
1450 if (drv_data->workqueue)
1451 destroy_workqueue(drv_data->workqueue);
1456 static int __init spi_imx_probe(struct platform_device *pdev)
1458 struct device *dev = &pdev->dev;
1459 struct spi_imx_master *platform_info;
1460 struct spi_master *master;
1461 struct driver_data *drv_data;
1462 struct resource *res;
1463 int irq, status = 0;
1465 platform_info = dev->platform_data;
1466 if (platform_info == NULL) {
1467 dev_err(&pdev->dev, "probe - no platform data supplied\n");
1472 /* Allocate master with space for drv_data */
1473 master = spi_alloc_master(dev, sizeof(struct driver_data));
1475 dev_err(&pdev->dev, "probe - cannot alloc spi_master\n");
1479 drv_data = spi_master_get_devdata(master);
1480 drv_data->master = master;
1481 drv_data->master_info = platform_info;
1482 drv_data->pdev = pdev;
1484 master->bus_num = pdev->id;
1485 master->num_chipselect = platform_info->num_chipselect;
1486 master->dma_alignment = DMA_ALIGNMENT;
1487 master->cleanup = cleanup;
1488 master->setup = setup;
1489 master->transfer = transfer;
1491 drv_data->dummy_dma_buf = SPI_DUMMY_u32;
1493 drv_data->clk = clk_get(&pdev->dev, "perclk2");
1494 if (IS_ERR(drv_data->clk)) {
1495 dev_err(&pdev->dev, "probe - cannot get clock\n");
1496 status = PTR_ERR(drv_data->clk);
1499 clk_enable(drv_data->clk);
1501 /* Find and map resources */
1502 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1504 dev_err(&pdev->dev, "probe - MEM resources not defined\n");
1508 drv_data->ioarea = request_mem_region(res->start,
1509 res->end - res->start + 1,
1511 if (drv_data->ioarea == NULL) {
1512 dev_err(&pdev->dev, "probe - cannot reserve region\n");
1516 drv_data->regs = ioremap(res->start, res->end - res->start + 1);
1517 if (drv_data->regs == NULL) {
1518 dev_err(&pdev->dev, "probe - cannot map IO\n");
1522 drv_data->rd_data_phys = (dma_addr_t)res->start;
1525 irq = platform_get_irq(pdev, 0);
1527 dev_err(&pdev->dev, "probe - IRQ resource not defined\n");
1531 status = request_irq(irq, spi_int, IRQF_DISABLED,
1532 dev_name(dev), drv_data);
1534 dev_err(&pdev->dev, "probe - cannot get IRQ (%d)\n", status);
1538 /* Setup DMA if requested */
1539 drv_data->tx_channel = -1;
1540 drv_data->rx_channel = -1;
1541 if (platform_info->enable_dma) {
1542 /* Get rx DMA channel */
1543 drv_data->rx_channel = imx_dma_request_by_prio("spi_imx_rx",
1545 if (drv_data->rx_channel < 0) {
1547 "probe - problem (%d) requesting rx channel\n",
1548 drv_data->rx_channel);
1551 imx_dma_setup_handlers(drv_data->rx_channel, NULL,
1552 dma_err_handler, drv_data);
1554 /* Get tx DMA channel */
1555 drv_data->tx_channel = imx_dma_request_by_prio("spi_imx_tx",
1557 if (drv_data->tx_channel < 0) {
1559 "probe - problem (%d) requesting tx channel\n",
1560 drv_data->tx_channel);
1561 imx_dma_free(drv_data->rx_channel);
1564 imx_dma_setup_handlers(drv_data->tx_channel,
1565 dma_tx_handler, dma_err_handler,
1568 /* Set request source and burst length for allocated channels */
1569 switch (drv_data->pdev->id) {
1572 RSSR(drv_data->rx_channel) = DMA_REQ_SPI1_R;
1573 RSSR(drv_data->tx_channel) = DMA_REQ_SPI1_T;
1577 RSSR(drv_data->rx_channel) = DMA_REQ_SPI2_R;
1578 RSSR(drv_data->tx_channel) = DMA_REQ_SPI2_T;
1581 dev_err(dev, "probe - bad SPI Id\n");
1582 imx_dma_free(drv_data->rx_channel);
1583 imx_dma_free(drv_data->tx_channel);
1587 BLR(drv_data->rx_channel) = SPI_DMA_BLR;
1588 BLR(drv_data->tx_channel) = SPI_DMA_BLR;
1591 /* Load default SPI configuration */
1592 writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
1593 writel(0, drv_data->regs + SPI_RESET);
1594 writel(SPI_DEFAULT_CONTROL, drv_data->regs + SPI_CONTROL);
1596 /* Initialize and start the queue */
1597 status = init_queue(drv_data);
1599 dev_err(&pdev->dev, "probe - problem initializing queue\n");
1600 goto err_init_queue;
1602 status = start_queue(drv_data);
1604 dev_err(&pdev->dev, "probe - problem starting queue\n");
1605 goto err_start_queue;
1608 /* Register with the SPI framework */
1609 platform_set_drvdata(pdev, drv_data);
1610 status = spi_register_master(master);
1612 dev_err(&pdev->dev, "probe - problem registering spi master\n");
1613 goto err_spi_register;
1616 dev_dbg(dev, "probe succeded\n");
1622 destroy_queue(drv_data);
1627 free_irq(irq, drv_data);
1630 iounmap(drv_data->regs);
1633 release_resource(drv_data->ioarea);
1634 kfree(drv_data->ioarea);
1637 clk_disable(drv_data->clk);
1638 clk_put(drv_data->clk);
1641 spi_master_put(master);
1648 static int __exit spi_imx_remove(struct platform_device *pdev)
1650 struct driver_data *drv_data = platform_get_drvdata(pdev);
1657 tasklet_kill(&drv_data->pump_transfers);
1659 /* Remove the queue */
1660 status = destroy_queue(drv_data);
1662 dev_err(&pdev->dev, "queue remove failed (%d)\n", status);
1667 writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
1668 writel(0, drv_data->regs + SPI_RESET);
1671 if (drv_data->master_info->enable_dma) {
1672 RSSR(drv_data->rx_channel) = 0;
1673 RSSR(drv_data->tx_channel) = 0;
1674 imx_dma_free(drv_data->tx_channel);
1675 imx_dma_free(drv_data->rx_channel);
1679 irq = platform_get_irq(pdev, 0);
1681 free_irq(irq, drv_data);
1683 clk_disable(drv_data->clk);
1684 clk_put(drv_data->clk);
1686 /* Release map resources */
1687 iounmap(drv_data->regs);
1688 release_resource(drv_data->ioarea);
1689 kfree(drv_data->ioarea);
1691 /* Disconnect from the SPI framework */
1692 spi_unregister_master(drv_data->master);
1693 spi_master_put(drv_data->master);
1695 /* Prevent double remove */
1696 platform_set_drvdata(pdev, NULL);
1698 dev_dbg(&pdev->dev, "remove succeded\n");
1703 static void spi_imx_shutdown(struct platform_device *pdev)
1705 struct driver_data *drv_data = platform_get_drvdata(pdev);
1708 writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
1709 writel(0, drv_data->regs + SPI_RESET);
1711 dev_dbg(&pdev->dev, "shutdown succeded\n");
1716 static int spi_imx_suspend(struct platform_device *pdev, pm_message_t state)
1718 struct driver_data *drv_data = platform_get_drvdata(pdev);
1721 status = stop_queue(drv_data);
1723 dev_warn(&pdev->dev, "suspend cannot stop queue\n");
1727 dev_dbg(&pdev->dev, "suspended\n");
1732 static int spi_imx_resume(struct platform_device *pdev)
1734 struct driver_data *drv_data = platform_get_drvdata(pdev);
1737 /* Start the queue running */
1738 status = start_queue(drv_data);
1740 dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
1742 dev_dbg(&pdev->dev, "resumed\n");
1747 #define spi_imx_suspend NULL
1748 #define spi_imx_resume NULL
1749 #endif /* CONFIG_PM */
1751 /* work with hotplug and coldplug */
1752 MODULE_ALIAS("platform:spi_imx");
1754 static struct platform_driver driver = {
1757 .owner = THIS_MODULE,
1759 .remove = __exit_p(spi_imx_remove),
1760 .shutdown = spi_imx_shutdown,
1761 .suspend = spi_imx_suspend,
1762 .resume = spi_imx_resume,
1765 static int __init spi_imx_init(void)
1767 return platform_driver_probe(&driver, spi_imx_probe);
1769 module_init(spi_imx_init);
1771 static void __exit spi_imx_exit(void)
1773 platform_driver_unregister(&driver);
1775 module_exit(spi_imx_exit);
1777 MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
1778 MODULE_DESCRIPTION("iMX SPI Controller Driver");
1779 MODULE_LICENSE("GPL");