2 * drivers/spi/spi_imx.c
4 * Copyright (C) 2006 SWAPP
5 * Andrea Paterniani <a.paterniani@swapp-eng.it>
7 * Initial version inspired by:
8 * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/device.h>
24 #include <linux/ioport.h>
25 #include <linux/errno.h>
26 #include <linux/interrupt.h>
27 #include <linux/platform_device.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/spi/spi.h>
30 #include <linux/workqueue.h>
31 #include <linux/delay.h>
32 #include <linux/clk.h>
36 #include <asm/delay.h>
38 #include <mach/hardware.h>
39 #include <mach/imx-dma.h>
40 #include <mach/spi_imx.h>
42 /*-------------------------------------------------------------------------*/
43 /* SPI Registers offsets from peripheral base address */
44 #define SPI_RXDATA (0x00)
45 #define SPI_TXDATA (0x04)
46 #define SPI_CONTROL (0x08)
47 #define SPI_INT_STATUS (0x0C)
48 #define SPI_TEST (0x10)
49 #define SPI_PERIOD (0x14)
50 #define SPI_DMA (0x18)
51 #define SPI_RESET (0x1C)
53 /* SPI Control Register Bit Fields & Masks */
54 #define SPI_CONTROL_BITCOUNT_MASK (0xF) /* Bit Count Mask */
55 #define SPI_CONTROL_BITCOUNT(n) (((n) - 1) & SPI_CONTROL_BITCOUNT_MASK)
56 #define SPI_CONTROL_POL (0x1 << 4) /* Clock Polarity Mask */
57 #define SPI_CONTROL_POL_ACT_HIGH (0x0 << 4) /* Active high pol. (0=idle) */
58 #define SPI_CONTROL_POL_ACT_LOW (0x1 << 4) /* Active low pol. (1=idle) */
59 #define SPI_CONTROL_PHA (0x1 << 5) /* Clock Phase Mask */
60 #define SPI_CONTROL_PHA_0 (0x0 << 5) /* Clock Phase 0 */
61 #define SPI_CONTROL_PHA_1 (0x1 << 5) /* Clock Phase 1 */
62 #define SPI_CONTROL_SSCTL (0x1 << 6) /* /SS Waveform Select Mask */
63 #define SPI_CONTROL_SSCTL_0 (0x0 << 6) /* Master: /SS stays low between SPI bursts
64 Slave: RXFIFO advanced by BIT_COUNT */
65 #define SPI_CONTROL_SSCTL_1 (0x1 << 6) /* Master: /SS inserts a pulse between SPI bursts
66 Slave: RXFIFO advanced by /SS rising edge */
67 #define SPI_CONTROL_SSPOL (0x1 << 7) /* /SS Polarity Select Mask */
68 #define SPI_CONTROL_SSPOL_ACT_LOW (0x0 << 7) /* /SS Active low */
69 #define SPI_CONTROL_SSPOL_ACT_HIGH (0x1 << 7) /* /SS Active high */
70 #define SPI_CONTROL_XCH (0x1 << 8) /* Exchange */
71 #define SPI_CONTROL_SPIEN (0x1 << 9) /* SPI Module Enable */
72 #define SPI_CONTROL_MODE (0x1 << 10) /* SPI Mode Select Mask */
73 #define SPI_CONTROL_MODE_SLAVE (0x0 << 10) /* SPI Mode Slave */
74 #define SPI_CONTROL_MODE_MASTER (0x1 << 10) /* SPI Mode Master */
75 #define SPI_CONTROL_DRCTL (0x3 << 11) /* /SPI_RDY Control Mask */
76 #define SPI_CONTROL_DRCTL_0 (0x0 << 11) /* Ignore /SPI_RDY */
77 #define SPI_CONTROL_DRCTL_1 (0x1 << 11) /* /SPI_RDY falling edge triggers input */
78 #define SPI_CONTROL_DRCTL_2 (0x2 << 11) /* /SPI_RDY active low level triggers input */
79 #define SPI_CONTROL_DATARATE (0x7 << 13) /* Data Rate Mask */
80 #define SPI_PERCLK2_DIV_MIN (0) /* PERCLK2:4 */
81 #define SPI_PERCLK2_DIV_MAX (7) /* PERCLK2:512 */
82 #define SPI_CONTROL_DATARATE_MIN (SPI_PERCLK2_DIV_MAX << 13)
83 #define SPI_CONTROL_DATARATE_MAX (SPI_PERCLK2_DIV_MIN << 13)
84 #define SPI_CONTROL_DATARATE_BAD (SPI_CONTROL_DATARATE_MIN + 1)
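/* Illustrative note (not part of the original source): the 3-bit DATARATE
   field selects a power-of-two divider of PERCLK2, so the resulting SPI bit
   clock is PERCLK2 / (4 << field).  Field 0 gives PERCLK2/4 (fastest) and
   field 7 gives PERCLK2/512 (slowest), which is why DATARATE_MAX is built
   from the MIN divider and vice versa; see spi_speed_hz() below. */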
86 /* SPI Interrupt/Status Register Bit Fields & Masks */
87 #define SPI_STATUS_TE (0x1 << 0) /* TXFIFO Empty Status */
88 #define SPI_STATUS_TH (0x1 << 1) /* TXFIFO Half Status */
89 #define SPI_STATUS_TF (0x1 << 2) /* TXFIFO Full Status */
90 #define SPI_STATUS_RR (0x1 << 3) /* RXFIFO Data Ready Status */
91 #define SPI_STATUS_RH (0x1 << 4) /* RXFIFO Half Status */
92 #define SPI_STATUS_RF (0x1 << 5) /* RXFIFO Full Status */
93 #define SPI_STATUS_RO (0x1 << 6) /* RXFIFO Overflow */
94 #define SPI_STATUS_BO (0x1 << 7) /* Bit Count Overflow */
95 #define SPI_STATUS (0xFF) /* SPI Status Mask */
96 #define SPI_INTEN_TE (0x1 << 8) /* TXFIFO Empty Interrupt Enable */
97 #define SPI_INTEN_TH (0x1 << 9) /* TXFIFO Half Interrupt Enable */
98 #define SPI_INTEN_TF (0x1 << 10) /* TXFIFO Full Interrupt Enable */
99 #define SPI_INTEN_RE (0x1 << 11) /* RXFIFO Data Ready Interrupt Enable */
100 #define SPI_INTEN_RH (0x1 << 12) /* RXFIFO Half Interrupt Enable */
101 #define SPI_INTEN_RF (0x1 << 13) /* RXFIFO Full Interrupt Enable */
102 #define SPI_INTEN_RO (0x1 << 14) /* RXFIFO Overflow Interrupt Enable */
103 #define SPI_INTEN_BO (0x1 << 15) /* Bit Count Overflow Interrupt Enable */
104 #define SPI_INTEN (0xFF << 8) /* SPI Interrupt Enable Mask */
106 /* SPI Test Register Bit Fields & Masks */
107 #define SPI_TEST_TXCNT (0xF << 0) /* TXFIFO Counter */
108 #define SPI_TEST_RXCNT_LSB (4) /* RXFIFO Counter LSB */
109 #define SPI_TEST_RXCNT (0xF << 4) /* RXFIFO Counter */
110 #define SPI_TEST_SSTATUS (0xF << 8) /* State Machine Status */
111 #define SPI_TEST_LBC (0x1 << 14) /* Loop Back Control */
113 /* SPI Period Register Bit Fields & Masks */
114 #define SPI_PERIOD_WAIT (0x7FFF << 0) /* Wait Between Transactions */
115 #define SPI_PERIOD_MAX_WAIT (0x7FFF) /* Max Wait Between Transactions */
117 #define SPI_PERIOD_CSRC (0x1 << 15) /* Period Clock Source Mask */
118 #define SPI_PERIOD_CSRC_BCLK (0x0 << 15) /* Period Clock Source is Bit Clock */
120 #define SPI_PERIOD_CSRC_32768 (0x1 << 15) /* Period Clock Source is 32.768 kHz Clock */
123 /* SPI DMA Register Bit Fields & Masks */
124 #define SPI_DMA_RHDMA (0x1 << 4) /* RXFIFO Half Status */
125 #define SPI_DMA_RFDMA (0x1 << 5) /* RXFIFO Full Status */
126 #define SPI_DMA_TEDMA (0x1 << 6) /* TXFIFO Empty Status */
127 #define SPI_DMA_THDMA (0x1 << 7) /* TXFIFO Half Status */
128 #define SPI_DMA_RHDEN (0x1 << 12) /* RXFIFO Half DMA Request Enable */
129 #define SPI_DMA_RFDEN (0x1 << 13) /* RXFIFO Full DMA Request Enable */
130 #define SPI_DMA_TEDEN (0x1 << 14) /* TXFIFO Empty DMA Request Enable */
131 #define SPI_DMA_THDEN (0x1 << 15) /* TXFIFO Half DMA Request Enable */
133 /* SPI Soft Reset Register Bit Fields & Masks */
134 #define SPI_RESET_START (0x1) /* Start */
136 /* Default SPI configuration values */
137 #define SPI_DEFAULT_CONTROL \
139 SPI_CONTROL_BITCOUNT(16) | \
140 SPI_CONTROL_POL_ACT_HIGH | \
141 SPI_CONTROL_PHA_0 | \
142 SPI_CONTROL_SPIEN | \
143 SPI_CONTROL_SSCTL_1 | \
144 SPI_CONTROL_MODE_MASTER | \
145 SPI_CONTROL_DRCTL_0 | \
146 SPI_CONTROL_DATARATE_MIN \
148 #define SPI_DEFAULT_ENABLE_LOOPBACK (0)
149 #define SPI_DEFAULT_ENABLE_DMA (0)
150 #define SPI_DEFAULT_PERIOD_WAIT (8)
151 /*-------------------------------------------------------------------------*/
154 /*-------------------------------------------------------------------------*/
155 /* TX/RX SPI FIFO size */
156 #define SPI_FIFO_DEPTH (8)
157 #define SPI_FIFO_BYTE_WIDTH (2)
158 #define SPI_FIFO_OVERFLOW_MARGIN (2)
160 /* DMA burst length for half full/empty request trigger */
161 #define SPI_DMA_BLR (SPI_FIFO_DEPTH * SPI_FIFO_BYTE_WIDTH / 2)
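/* With the 8-entry, 16-bit wide FIFO above this works out to
   8 * 2 / 2 = 8 bytes per DMA burst, i.e. one burst per half-FIFO
   watermark request. */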
163 /* Dummy char output to achieve reads.
164 Choosing something different from all zeroes may help pattern recognition
165 for oscilloscope analysis, but may break some drivers. */
166 #define SPI_DUMMY_u8 0
167 #define SPI_DUMMY_u16 ((SPI_DUMMY_u8 << 8) | SPI_DUMMY_u8)
168 #define SPI_DUMMY_u32 ((SPI_DUMMY_u16 << 16) | SPI_DUMMY_u16)
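/* For example (illustrative only): redefining SPI_DUMMY_u8 as 0xA5 would put
   0xA5A5 / 0xA5A5A5A5 on the wire during read-only transfers, which is easier
   to spot on a scope than all zeroes but, as noted above, may confuse some
   slave drivers. */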
171 * Macro to change a u32 field:
172 * @r : register to edit
174 * @v : new value for the field, correctly bit-aligned
176 #define u32_EDIT(r, m, v) r = (r & ~(m)) | (v)
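/* Usage sketch (mirrors the calls made later in this driver):

	u32_EDIT(control, SPI_CONTROL_DATARATE, SPI_CONTROL_DATARATE_MIN);

   clears the bits selected by the mask and ORs in the new, already
   shifted value. */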
179 #define START_STATE ((void*)0)
180 #define RUNNING_STATE ((void*)1)
181 #define DONE_STATE ((void*)2)
182 #define ERROR_STATE ((void*)-1)
185 #define QUEUE_RUNNING (0)
186 #define QUEUE_STOPPED (1)
188 #define IS_DMA_ALIGNED(x) (((u32)(x) & 0x03) == 0)
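/* Example (illustrative): buffer addresses ending in 0x0, 0x4, 0x8 or 0xC
   pass IS_DMA_ALIGNED(); anything else makes map_dma_buffers() fail and
   forces the PIO path in pump_transfers(). */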
189 /*-------------------------------------------------------------------------*/
192 /*-------------------------------------------------------------------------*/
193 /* Driver data structs */
197 /* Driver model hookup */
198 struct platform_device *pdev;
200 /* SPI framework hookup */
201 struct spi_master *master;
204 struct spi_imx_master *master_info;
206 /* Memory resources and SPI regs virtual address */
207 struct resource *ioarea;
210 /* SPI RX_DATA physical address */
211 dma_addr_t rd_data_phys;
213 /* Driver message queue */
214 struct workqueue_struct *workqueue;
215 struct work_struct work;
217 struct list_head queue;
221 /* Message Transfer pump */
222 struct tasklet_struct pump_transfers;
224 /* Current message, transfer and state */
225 struct spi_message *cur_msg;
226 struct spi_transfer *cur_transfer;
227 struct chip_data *cur_chip;
229 /* Rd / Wr buffer pointers */
240 /* Function pointers */
241 irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
242 void (*cs_control)(u32 command);
249 int rx_dma_needs_unmap;
250 int tx_dma_needs_unmap;
252 u32 dummy_dma_buf ____cacheline_aligned;
268 void (*cs_control)(u32 command);
270 /*-------------------------------------------------------------------------*/
273 static void pump_messages(struct work_struct *work);
275 static void flush(struct driver_data *drv_data)
277 void __iomem *regs = drv_data->regs;
280 dev_dbg(&drv_data->pdev->dev, "flush\n");
282 /* Wait for end of transaction */
284 control = readl(regs + SPI_CONTROL);
285 } while (control & SPI_CONTROL_XCH);
287 /* Release chip select if requested, transfer delays are
288 handled in pump_transfers */
289 if (drv_data->cs_change)
290 drv_data->cs_control(SPI_CS_DEASSERT);
292 /* Disable SPI to flush FIFOs */
293 writel(control & ~SPI_CONTROL_SPIEN, regs + SPI_CONTROL);
294 writel(control, regs + SPI_CONTROL);
297 static void restore_state(struct driver_data *drv_data)
299 void __iomem *regs = drv_data->regs;
300 struct chip_data *chip = drv_data->cur_chip;
302 /* Load chip registers */
303 dev_dbg(&drv_data->pdev->dev,
306 " control = 0x%08X\n",
309 writel(chip->test, regs + SPI_TEST);
310 writel(chip->period, regs + SPI_PERIOD);
311 writel(0, regs + SPI_INT_STATUS);
312 writel(chip->control, regs + SPI_CONTROL);
315 static void null_cs_control(u32 command)
319 static inline u32 data_to_write(struct driver_data *drv_data)
321 return ((u32)(drv_data->tx_end - drv_data->tx)) / drv_data->n_bytes;
324 static inline u32 data_to_read(struct driver_data *drv_data)
326 return ((u32)(drv_data->rx_end - drv_data->rx)) / drv_data->n_bytes;
329 static int write(struct driver_data *drv_data)
331 void __iomem *regs = drv_data->regs;
332 void *tx = drv_data->tx;
333 void *tx_end = drv_data->tx_end;
334 u8 n_bytes = drv_data->n_bytes;
335 u32 remaining_writes;
336 u32 fifo_avail_space;
340 /* Compute how many fifo writes to do */
341 remaining_writes = (u32)(tx_end - tx) / n_bytes;
342 fifo_avail_space = SPI_FIFO_DEPTH -
343 (readl(regs + SPI_TEST) & SPI_TEST_TXCNT);
344 if (drv_data->rx && (fifo_avail_space > SPI_FIFO_OVERFLOW_MARGIN))
345 /* Fix misunderstood receive overflow */
346 fifo_avail_space -= SPI_FIFO_OVERFLOW_MARGIN;
347 n = min(remaining_writes, fifo_avail_space);
349 dev_dbg(&drv_data->pdev->dev,
351 " remaining writes = %d\n"
352 " fifo avail space = %d\n"
353 " fifo writes = %d\n",
354 (n_bytes == 1) ? "u8" : "u16",
360 /* Fill SPI TXFIFO */
361 if (drv_data->rd_only) {
364 writel(SPI_DUMMY_u16, regs + SPI_TXDATA);
369 writel(d, regs + SPI_TXDATA);
375 writel(d, regs + SPI_TXDATA);
381 /* Trigger transfer */
382 writel(readl(regs + SPI_CONTROL) | SPI_CONTROL_XCH,
385 /* Update tx pointer */
389 return (tx >= tx_end);
392 static int read(struct driver_data *drv_data)
394 void __iomem *regs = drv_data->regs;
395 void *rx = drv_data->rx;
396 void *rx_end = drv_data->rx_end;
397 u8 n_bytes = drv_data->n_bytes;
403 /* Compute how many fifo reads to do */
404 remaining_reads = (u32)(rx_end - rx) / n_bytes;
405 fifo_rxcnt = (readl(regs + SPI_TEST) & SPI_TEST_RXCNT) >>
407 n = min(remaining_reads, fifo_rxcnt);
409 dev_dbg(&drv_data->pdev->dev,
411 " remaining reads = %d\n"
412 " fifo rx count = %d\n"
413 " fifo reads = %d\n",
414 (n_bytes == 1) ? "u8" : "u16",
420 /* Read SPI RXFIFO */
423 d = readl(regs + SPI_RXDATA);
429 d = readl(regs + SPI_RXDATA);
435 /* Update rx pointer */
439 return (rx >= rx_end);
442 static void *next_transfer(struct driver_data *drv_data)
444 struct spi_message *msg = drv_data->cur_msg;
445 struct spi_transfer *trans = drv_data->cur_transfer;
447 /* Move to next transfer */
448 if (trans->transfer_list.next != &msg->transfers) {
449 drv_data->cur_transfer =
450 list_entry(trans->transfer_list.next,
453 return RUNNING_STATE;
459 static int map_dma_buffers(struct driver_data *drv_data)
461 struct spi_message *msg;
465 drv_data->rx_dma_needs_unmap = 0;
466 drv_data->tx_dma_needs_unmap = 0;
468 if (!drv_data->master_info->enable_dma ||
469 !drv_data->cur_chip->enable_dma)
472 msg = drv_data->cur_msg;
473 dev = &msg->spi->dev;
474 if (msg->is_dma_mapped) {
475 if (drv_data->tx_dma)
476 /* The caller provided at least dma and cpu virtual
477 address for write; pump_transfers() will consider the
478 transfer as write only if cpu rx virtual address is
482 if (drv_data->rx_dma) {
483 /* The caller provided dma and cpu virtual address to
484 perform a read-only transfer -->
485 use drv_data->dummy_dma_buf for dummy writes to
487 buf = &drv_data->dummy_dma_buf;
488 drv_data->tx_map_len = sizeof(drv_data->dummy_dma_buf);
489 drv_data->tx_dma = dma_map_single(dev,
491 drv_data->tx_map_len,
493 if (dma_mapping_error(dev, drv_data->tx_dma))
496 drv_data->tx_dma_needs_unmap = 1;
498 /* Flags transfer as rd_only for pump_transfers() DMA
499 regs programming (should be redundant) */
506 if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
509 /* NULL rx means write-only transfer and no map needed
510 since rx DMA will not be used */
513 drv_data->rx_dma = dma_map_single(
518 if (dma_mapping_error(dev, drv_data->rx_dma))
520 drv_data->rx_dma_needs_unmap = 1;
523 if (drv_data->tx == NULL) {
524 /* Read only message --> use drv_data->dummy_dma_buf for dummy
525 writes to achieve reads */
526 buf = &drv_data->dummy_dma_buf;
527 drv_data->tx_map_len = sizeof(drv_data->dummy_dma_buf);
530 drv_data->tx_map_len = drv_data->len;
532 drv_data->tx_dma = dma_map_single(dev,
534 drv_data->tx_map_len,
536 if (dma_mapping_error(dev, drv_data->tx_dma)) {
537 if (drv_data->rx_dma) {
538 dma_unmap_single(dev,
542 drv_data->rx_dma_needs_unmap = 0;
546 drv_data->tx_dma_needs_unmap = 1;
551 static void unmap_dma_buffers(struct driver_data *drv_data)
553 struct spi_message *msg = drv_data->cur_msg;
554 struct device *dev = &msg->spi->dev;
556 if (drv_data->rx_dma_needs_unmap) {
557 dma_unmap_single(dev,
561 drv_data->rx_dma_needs_unmap = 0;
563 if (drv_data->tx_dma_needs_unmap) {
564 dma_unmap_single(dev,
566 drv_data->tx_map_len,
568 drv_data->tx_dma_needs_unmap = 0;
572 /* Caller already set message->status (dma is already blocked) */
573 static void giveback(struct spi_message *message, struct driver_data *drv_data)
575 void __iomem *regs = drv_data->regs;
577 /* Bring SPI to sleep; restore_state() and pump_transfer() will do new setup */
579 writel(0, regs + SPI_INT_STATUS);
580 writel(0, regs + SPI_DMA);
582 /* Unconditional deselect */
583 drv_data->cs_control(SPI_CS_DEASSERT);
585 message->state = NULL;
586 if (message->complete)
587 message->complete(message->context);
589 drv_data->cur_msg = NULL;
590 drv_data->cur_transfer = NULL;
591 drv_data->cur_chip = NULL;
592 queue_work(drv_data->workqueue, &drv_data->work);
595 static void dma_err_handler(int channel, void *data, int errcode)
597 struct driver_data *drv_data = data;
598 struct spi_message *msg = drv_data->cur_msg;
600 dev_dbg(&drv_data->pdev->dev, "dma_err_handler\n");
602 /* Disable both rx and tx dma channels */
603 imx_dma_disable(drv_data->rx_channel);
604 imx_dma_disable(drv_data->tx_channel);
605 unmap_dma_buffers(drv_data);
609 msg->state = ERROR_STATE;
610 tasklet_schedule(&drv_data->pump_transfers);
613 static void dma_tx_handler(int channel, void *data)
615 struct driver_data *drv_data = data;
617 dev_dbg(&drv_data->pdev->dev, "dma_tx_handler\n");
619 imx_dma_disable(channel);
621 /* Now wait until the TXFIFO is empty */
622 writel(SPI_INTEN_TE, drv_data->regs + SPI_INT_STATUS);
625 static irqreturn_t dma_transfer(struct driver_data *drv_data)
628 struct spi_message *msg = drv_data->cur_msg;
629 void __iomem *regs = drv_data->regs;
631 status = readl(regs + SPI_INT_STATUS);
633 if ((status & (SPI_INTEN_RO | SPI_STATUS_RO))
634 == (SPI_INTEN_RO | SPI_STATUS_RO)) {
635 writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);
637 imx_dma_disable(drv_data->tx_channel);
638 imx_dma_disable(drv_data->rx_channel);
639 unmap_dma_buffers(drv_data);
643 dev_warn(&drv_data->pdev->dev,
644 "dma_transfer - fifo overun\n");
646 msg->state = ERROR_STATE;
647 tasklet_schedule(&drv_data->pump_transfers);
652 if (status & SPI_STATUS_TE) {
653 writel(status & ~SPI_INTEN_TE, regs + SPI_INT_STATUS);
656 /* Wait for end of transfer before reading trailing data */
657 while (readl(regs + SPI_CONTROL) & SPI_CONTROL_XCH)
660 imx_dma_disable(drv_data->rx_channel);
661 unmap_dma_buffers(drv_data);
663 /* Release chip select if requested, transfer delays are
664 handled in pump_transfers() */
665 if (drv_data->cs_change)
666 drv_data->cs_control(SPI_CS_DEASSERT);
668 /* Calculate the number of trailing words and read them */
669 dev_dbg(&drv_data->pdev->dev,
670 "dma_transfer - test = 0x%08X\n",
671 readl(regs + SPI_TEST));
672 drv_data->rx = drv_data->rx_end -
673 ((readl(regs + SPI_TEST) &
675 SPI_TEST_RXCNT_LSB)*drv_data->n_bytes;
678 /* Write only transfer */
679 unmap_dma_buffers(drv_data);
684 /* End of transfer, update total bytes transferred */
685 msg->actual_length += drv_data->len;
687 /* Move to next transfer */
688 msg->state = next_transfer(drv_data);
690 /* Schedule transfer tasklet */
691 tasklet_schedule(&drv_data->pump_transfers);
696 /* Oops, problem detected */
700 static irqreturn_t interrupt_wronly_transfer(struct driver_data *drv_data)
702 struct spi_message *msg = drv_data->cur_msg;
703 void __iomem *regs = drv_data->regs;
705 irqreturn_t handled = IRQ_NONE;
707 status = readl(regs + SPI_INT_STATUS);
709 if (status & SPI_INTEN_TE) {
710 /* TXFIFO Empty Interrupt on the last transferred word */
711 writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);
712 dev_dbg(&drv_data->pdev->dev,
713 "interrupt_wronly_transfer - end of tx\n");
717 /* Update total bytes transferred */
718 msg->actual_length += drv_data->len;
720 /* Move to next transfer */
721 msg->state = next_transfer(drv_data);
723 /* Schedule transfer tasklet */
724 tasklet_schedule(&drv_data->pump_transfers);
728 while (status & SPI_STATUS_TH) {
729 dev_dbg(&drv_data->pdev->dev,
730 "interrupt_wronly_transfer - status = 0x%08X\n",
734 if (write(drv_data)) {
735 /* End of TXFIFO writes,
736 now wait until TXFIFO is empty */
737 writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
741 status = readl(regs + SPI_INT_STATUS);
743 /* We did something */
744 handled = IRQ_HANDLED;
751 static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
753 struct spi_message *msg = drv_data->cur_msg;
754 void __iomem *regs = drv_data->regs;
756 irqreturn_t handled = IRQ_NONE;
759 status = readl(regs + SPI_INT_STATUS);
761 if (status & SPI_INTEN_TE) {
762 /* TXFIFO Empty Interrupt on the last transferred word */
763 writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);
764 dev_dbg(&drv_data->pdev->dev,
765 "interrupt_transfer - end of tx\n");
767 if (msg->state == ERROR_STATE) {
768 /* RXFIFO overrun was detected and message aborted */
771 /* Wait for end of transaction */
773 control = readl(regs + SPI_CONTROL);
774 } while (control & SPI_CONTROL_XCH);
776 /* Release chip select if requested, transfer delays are
777 handled in pump_transfers */
778 if (drv_data->cs_change)
779 drv_data->cs_control(SPI_CS_DEASSERT);
781 /* Read trailing bytes */
782 limit = loops_per_jiffy << 1;
783 while ((read(drv_data) == 0) && limit--);
786 dev_err(&drv_data->pdev->dev,
787 "interrupt_transfer - "
788 "trailing byte read failed\n");
790 dev_dbg(&drv_data->pdev->dev,
791 "interrupt_transfer - end of rx\n");
793 /* Update total bytes transferred */
794 msg->actual_length += drv_data->len;
796 /* Move to next transfer */
797 msg->state = next_transfer(drv_data);
800 /* Schedule transfer tasklet */
801 tasklet_schedule(&drv_data->pump_transfers);
805 while (status & (SPI_STATUS_TH | SPI_STATUS_RO)) {
806 dev_dbg(&drv_data->pdev->dev,
807 "interrupt_transfer - status = 0x%08X\n",
810 if (status & SPI_STATUS_RO) {
811 /* RXFIFO overrun, abort message and wait
812 until TXFIFO is empty */
813 writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
815 dev_warn(&drv_data->pdev->dev,
816 "interrupt_transfer - fifo overun\n"
817 " data not yet written = %d\n"
818 " data not yet read = %d\n",
819 data_to_write(drv_data),
820 data_to_read(drv_data));
822 msg->state = ERROR_STATE;
829 if (write(drv_data)) {
830 /* End of TXFIFO writes,
831 now wait until TXFIFO is empty */
832 writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
836 status = readl(regs + SPI_INT_STATUS);
838 /* We did something */
839 handled = IRQ_HANDLED;
846 static irqreturn_t spi_int(int irq, void *dev_id)
848 struct driver_data *drv_data = (struct driver_data *)dev_id;
850 if (!drv_data->cur_msg) {
851 dev_err(&drv_data->pdev->dev,
852 "spi_int - bad message state\n");
857 return drv_data->transfer_handler(drv_data);
860 static inline u32 spi_speed_hz(struct driver_data *drv_data, u32 data_rate)
862 return clk_get_rate(drv_data->clk) / (4 << ((data_rate) >> 13));
865 static u32 spi_data_rate(struct driver_data *drv_data, u32 speed_hz)
868 u32 quantized_hz = clk_get_rate(drv_data->clk) >> 2;
870 for (div = SPI_PERCLK2_DIV_MIN;
871 div <= SPI_PERCLK2_DIV_MAX;
872 div++, quantized_hz >>= 1) {
873 if (quantized_hz <= speed_hz)
874 /* Highest available speed not exceeding the requested speed */
877 return SPI_CONTROL_DATARATE_BAD;
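/* Worked example (illustrative, assuming a 96 MHz PERCLK2): for a requested
   speed of 1 MHz the loop starts at 96/4 = 24 MHz and halves each pass
   (24, 12, 6, 3, 1.5, 0.75 MHz); the first rate not above the request is
   750 kHz at div = 5, i.e. PERCLK2/128. */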
880 static void pump_transfers(unsigned long data)
882 struct driver_data *drv_data = (struct driver_data *)data;
883 struct spi_message *message;
884 struct spi_transfer *transfer, *previous;
885 struct chip_data *chip;
889 dev_dbg(&drv_data->pdev->dev, "pump_transfer\n");
891 message = drv_data->cur_msg;
893 /* Handle for abort */
894 if (message->state == ERROR_STATE) {
895 message->status = -EIO;
896 giveback(message, drv_data);
900 /* Handle end of message */
901 if (message->state == DONE_STATE) {
903 giveback(message, drv_data);
907 chip = drv_data->cur_chip;
909 /* Delay if requested at end of transfer */
910 transfer = drv_data->cur_transfer;
911 if (message->state == RUNNING_STATE) {
912 previous = list_entry(transfer->transfer_list.prev,
915 if (previous->delay_usecs)
916 udelay(previous->delay_usecs);
919 message->state = RUNNING_STATE;
920 drv_data->cs_control = chip->cs_control;
923 transfer = drv_data->cur_transfer;
924 drv_data->tx = (void *)transfer->tx_buf;
925 drv_data->tx_end = drv_data->tx + transfer->len;
926 drv_data->rx = transfer->rx_buf;
927 drv_data->rx_end = drv_data->rx + transfer->len;
928 drv_data->rx_dma = transfer->rx_dma;
929 drv_data->tx_dma = transfer->tx_dma;
930 drv_data->len = transfer->len;
931 drv_data->cs_change = transfer->cs_change;
932 drv_data->rd_only = (drv_data->tx == NULL);
934 regs = drv_data->regs;
935 control = readl(regs + SPI_CONTROL);
937 /* Bits per word setup */
938 tmp = transfer->bits_per_word;
940 /* Use device setup */
941 tmp = chip->bits_per_word;
942 drv_data->n_bytes = chip->n_bytes;
944 /* Use per-transfer setup */
945 drv_data->n_bytes = (tmp <= 8) ? 1 : 2;
946 u32_EDIT(control, SPI_CONTROL_BITCOUNT_MASK, tmp - 1);
948 /* Speed setup (surely valid because already checked) */
949 tmp = transfer->speed_hz;
951 tmp = chip->max_speed_hz;
952 tmp = spi_data_rate(drv_data, tmp);
953 u32_EDIT(control, SPI_CONTROL_DATARATE, tmp);
955 writel(control, regs + SPI_CONTROL);
957 /* Assert device chip-select */
958 drv_data->cs_control(SPI_CS_ASSERT);
960 /* DMA cannot read/write SPI FIFOs other than 16 bits at a time; hence
961 if bits_per_word is less than or equal to 8, PIO transfers are performed.
962 Moreover DMA is convenient for transfer lengths bigger than the FIFOs
964 if ((drv_data->n_bytes == 2) &&
965 (drv_data->len > SPI_FIFO_DEPTH*SPI_FIFO_BYTE_WIDTH) &&
966 (map_dma_buffers(drv_data) == 0)) {
967 dev_dbg(&drv_data->pdev->dev,
968 "pump dma transfer\n"
975 (unsigned int)drv_data->tx_dma,
977 (unsigned int)drv_data->rx_dma,
980 /* Ensure we have the correct interrupt handler */
981 drv_data->transfer_handler = dma_transfer;
983 /* Trigger transfer */
984 writel(readl(regs + SPI_CONTROL) | SPI_CONTROL_XCH,
989 /* Linear source address */
990 CCR(drv_data->tx_channel) =
993 CCR_SSIZ_32 | CCR_DSIZ_16 |
996 /* Read only transfer -> fixed source address for
997 dummy write to achieve read */
998 CCR(drv_data->tx_channel) =
1001 CCR_SSIZ_32 | CCR_DSIZ_16 |
1004 imx_dma_setup_single(
1005 drv_data->tx_channel,
1008 drv_data->rd_data_phys + 4,
1012 /* Setup rx DMA for linear destination address */
1013 CCR(drv_data->rx_channel) =
1016 CCR_DSIZ_32 | CCR_SSIZ_16 |
1018 imx_dma_setup_single(
1019 drv_data->rx_channel,
1022 drv_data->rd_data_phys,
1024 imx_dma_enable(drv_data->rx_channel);
1026 /* Enable SPI interrupt */
1027 writel(SPI_INTEN_RO, regs + SPI_INT_STATUS);
1029 /* Set SPI to request DMA service on both
1030 Rx and Tx half fifo watermark */
1031 writel(SPI_DMA_RHDEN | SPI_DMA_THDEN, regs + SPI_DMA);
1033 /* Write only access -> set SPI to request DMA
1034 service on Tx half fifo watermark */
1035 writel(SPI_DMA_THDEN, regs + SPI_DMA);
1037 imx_dma_enable(drv_data->tx_channel);
1039 dev_dbg(&drv_data->pdev->dev,
1040 "pump pio transfer\n"
1048 /* Ensure we have the correct interrupt handler */
1050 drv_data->transfer_handler = interrupt_transfer;
1052 drv_data->transfer_handler = interrupt_wronly_transfer;
1054 /* Enable SPI interrupt */
1056 writel(SPI_INTEN_TH | SPI_INTEN_RO,
1057 regs + SPI_INT_STATUS);
1059 writel(SPI_INTEN_TH, regs + SPI_INT_STATUS);
1063 static void pump_messages(struct work_struct *work)
1065 struct driver_data *drv_data =
1066 container_of(work, struct driver_data, work);
1067 unsigned long flags;
1069 /* Lock queue and check for queue work */
1070 spin_lock_irqsave(&drv_data->lock, flags);
1071 if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
1073 spin_unlock_irqrestore(&drv_data->lock, flags);
1077 /* Make sure we are not already running a message */
1078 if (drv_data->cur_msg) {
1079 spin_unlock_irqrestore(&drv_data->lock, flags);
1083 /* Extract head of queue */
1084 drv_data->cur_msg = list_entry(drv_data->queue.next,
1085 struct spi_message, queue);
1086 list_del_init(&drv_data->cur_msg->queue);
1088 spin_unlock_irqrestore(&drv_data->lock, flags);
1090 /* Initial message state */
1091 drv_data->cur_msg->state = START_STATE;
1092 drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
1093 struct spi_transfer,
1096 /* Setup the SPI using the per chip configuration */
1097 drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
1098 restore_state(drv_data);
1100 /* Mark as busy and launch transfers */
1101 tasklet_schedule(&drv_data->pump_transfers);
1104 static int transfer(struct spi_device *spi, struct spi_message *msg)
1106 struct driver_data *drv_data = spi_master_get_devdata(spi->master);
1107 u32 min_speed_hz, max_speed_hz, tmp;
1108 struct spi_transfer *trans;
1109 unsigned long flags;
1111 msg->actual_length = 0;
1113 /* Per transfer setup check */
1114 min_speed_hz = spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN);
1115 max_speed_hz = spi->max_speed_hz;
1116 list_for_each_entry(trans, &msg->transfers, transfer_list) {
1117 tmp = trans->bits_per_word;
1119 dev_err(&drv_data->pdev->dev,
1120 "message rejected : "
1121 "invalid transfer bits_per_word (%d bits)\n",
1125 tmp = trans->speed_hz;
1127 if (tmp < min_speed_hz) {
1128 dev_err(&drv_data->pdev->dev,
1129 "message rejected : "
1130 "device min speed (%d Hz) exceeds "
1131 "required transfer speed (%d Hz)\n",
1135 } else if (tmp > max_speed_hz) {
1136 dev_err(&drv_data->pdev->dev,
1137 "message rejected : "
1138 "transfer speed (%d Hz) exceeds "
1139 "device max speed (%d Hz)\n",
1147 /* Message accepted */
1148 msg->status = -EINPROGRESS;
1149 msg->state = START_STATE;
1151 spin_lock_irqsave(&drv_data->lock, flags);
1152 if (drv_data->run == QUEUE_STOPPED) {
1153 spin_unlock_irqrestore(&drv_data->lock, flags);
1157 list_add_tail(&msg->queue, &drv_data->queue);
1158 if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
1159 queue_work(drv_data->workqueue, &drv_data->work);
1161 spin_unlock_irqrestore(&drv_data->lock, flags);
1165 /* Message rejected and not queued */
1166 msg->status = -EINVAL;
1167 msg->state = ERROR_STATE;
1169 msg->complete(msg->context);
1173 /* the spi->mode bits understood by this driver: */
1174 #define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
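/* For instance (illustrative), a protocol driver wanting SPI mode 3 with an
   active-high chip select would set spi->mode = SPI_MODE_3 | SPI_CS_HIGH;
   any bit outside MODEBITS is rejected by setup() below. */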
1176 /* On first setup, bad values must free the chip_data memory since they will
1177 cause spi_new_device() to fail. Bad values set up later by the protocol
1178 driver are simply not applied and are reported to the calling driver. */
1179 static int setup(struct spi_device *spi)
1181 struct driver_data *drv_data = spi_master_get_devdata(spi->master);
1182 struct spi_imx_chip *chip_info;
1183 struct chip_data *chip;
1184 int first_setup = 0;
1188 if (spi->mode & ~MODEBITS) {
1189 dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
1190 spi->mode & ~MODEBITS);
1194 /* Get controller data */
1195 chip_info = spi->controller_data;
1197 /* Get controller_state */
1198 chip = spi_get_ctldata(spi);
1202 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
1205 "setup - cannot allocate controller state\n");
1208 chip->control = SPI_DEFAULT_CONTROL;
1210 if (chip_info == NULL) {
1211 /* spi_board_info.controller_data is not supplied */
1212 chip_info = kzalloc(sizeof(struct spi_imx_chip),
1217 "cannot allocate controller data\n");
1219 goto err_first_setup;
1221 /* Set controller data default value */
1222 chip_info->enable_loopback =
1223 SPI_DEFAULT_ENABLE_LOOPBACK;
1224 chip_info->enable_dma = SPI_DEFAULT_ENABLE_DMA;
1225 chip_info->ins_ss_pulse = 1;
1226 chip_info->bclk_wait = SPI_DEFAULT_PERIOD_WAIT;
1227 chip_info->cs_control = null_cs_control;
1231 /* Now set controller state based on controller data */
1235 if (chip_info->enable_loopback)
1236 chip->test = SPI_TEST_LBC;
1240 /* SPI dma driven */
1241 chip->enable_dma = chip_info->enable_dma;
1243 /* SPI /SS pulse between SPI bursts */
1244 if (chip_info->ins_ss_pulse)
1245 u32_EDIT(chip->control,
1246 SPI_CONTROL_SSCTL, SPI_CONTROL_SSCTL_1);
1248 u32_EDIT(chip->control,
1249 SPI_CONTROL_SSCTL, SPI_CONTROL_SSCTL_0);
1251 /* SPI bclk waits between each bits_per_word SPI burst */
1252 if (chip_info->bclk_wait > SPI_PERIOD_MAX_WAIT) {
1255 "bclk_wait exceeds max allowed (%d)\n",
1256 SPI_PERIOD_MAX_WAIT);
1257 goto err_first_setup;
1259 chip->period = SPI_PERIOD_CSRC_BCLK |
1260 (chip_info->bclk_wait & SPI_PERIOD_WAIT);
1265 if (tmp & SPI_CS_HIGH) {
1266 u32_EDIT(chip->control,
1267 SPI_CONTROL_SSPOL, SPI_CONTROL_SSPOL_ACT_HIGH);
1269 switch (tmp & SPI_MODE_3) {
1274 tmp = SPI_CONTROL_PHA_1;
1277 tmp = SPI_CONTROL_POL_ACT_LOW;
1281 tmp = SPI_CONTROL_PHA_1 | SPI_CONTROL_POL_ACT_LOW;
1284 u32_EDIT(chip->control, SPI_CONTROL_POL | SPI_CONTROL_PHA, tmp);
1286 /* SPI word width */
1287 tmp = spi->bits_per_word;
1290 spi->bits_per_word = 8;
1291 } else if (tmp > 16) {
1295 "invalid bits_per_word (%d)\n",
1298 goto err_first_setup;
1300 /* Undo setup using chip as backup copy */
1301 tmp = chip->bits_per_word;
1302 spi->bits_per_word = tmp;
1305 chip->bits_per_word = tmp;
1306 u32_EDIT(chip->control, SPI_CONTROL_BITCOUNT_MASK, tmp - 1);
1307 chip->n_bytes = (tmp <= 8) ? 1 : 2;
1310 tmp = spi_data_rate(drv_data, spi->max_speed_hz);
1311 if (tmp == SPI_CONTROL_DATARATE_BAD) {
1315 "HW min speed (%d Hz) exceeds required "
1316 "max speed (%d Hz)\n",
1317 spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN),
1320 goto err_first_setup;
1322 /* Undo setup using chip as backup copy */
1323 spi->max_speed_hz = chip->max_speed_hz;
1325 u32_EDIT(chip->control, SPI_CONTROL_DATARATE, tmp);
1326 /* Actual rounded max_speed_hz */
1327 tmp = spi_speed_hz(drv_data, tmp);
1328 spi->max_speed_hz = tmp;
1329 chip->max_speed_hz = tmp;
1332 /* SPI chip-select management */
1333 if (chip_info->cs_control)
1334 chip->cs_control = chip_info->cs_control;
1336 chip->cs_control = null_cs_control;
1338 /* Save controller_state */
1339 spi_set_ctldata(spi, chip);
1344 " loopback enable = %s\n"
1345 " dma enable = %s\n"
1346 " insert /ss pulse = %s\n"
1347 " period wait = %d\n"
1349 " bits per word = %d\n"
1350 " min speed = %d Hz\n"
1351 " rounded max speed = %d Hz\n",
1352 chip->test & SPI_TEST_LBC ? "Yes" : "No",
1353 chip->enable_dma ? "Yes" : "No",
1354 chip->control & SPI_CONTROL_SSCTL ? "Yes" : "No",
1355 chip->period & SPI_PERIOD_WAIT,
1358 spi_speed_hz(drv_data, SPI_CONTROL_DATARATE_MIN),
1367 static void cleanup(struct spi_device *spi)
1369 kfree(spi_get_ctldata(spi));
1372 static int __init init_queue(struct driver_data *drv_data)
1374 INIT_LIST_HEAD(&drv_data->queue);
1375 spin_lock_init(&drv_data->lock);
1377 drv_data->run = QUEUE_STOPPED;
1380 tasklet_init(&drv_data->pump_transfers,
1381 pump_transfers, (unsigned long)drv_data);
1383 INIT_WORK(&drv_data->work, pump_messages);
1384 drv_data->workqueue = create_singlethread_workqueue(
1385 drv_data->master->dev.parent->bus_id);
1386 if (drv_data->workqueue == NULL)
1392 static int start_queue(struct driver_data *drv_data)
1394 unsigned long flags;
1396 spin_lock_irqsave(&drv_data->lock, flags);
1398 if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
1399 spin_unlock_irqrestore(&drv_data->lock, flags);
1403 drv_data->run = QUEUE_RUNNING;
1404 drv_data->cur_msg = NULL;
1405 drv_data->cur_transfer = NULL;
1406 drv_data->cur_chip = NULL;
1407 spin_unlock_irqrestore(&drv_data->lock, flags);
1409 queue_work(drv_data->workqueue, &drv_data->work);
1414 static int stop_queue(struct driver_data *drv_data)
1416 unsigned long flags;
1417 unsigned limit = 500;
1420 spin_lock_irqsave(&drv_data->lock, flags);
1422 /* This is a bit lame, but is optimized for the common execution path.
1423 * A wait_queue on the drv_data->busy could be used, but then the common
1424 * execution path (pump_messages) would be required to call wake_up or
1425 * friends on every SPI message. Do this instead */
1426 drv_data->run = QUEUE_STOPPED;
1427 while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
1428 spin_unlock_irqrestore(&drv_data->lock, flags);
1430 spin_lock_irqsave(&drv_data->lock, flags);
1433 if (!list_empty(&drv_data->queue) || drv_data->busy)
1436 spin_unlock_irqrestore(&drv_data->lock, flags);
1441 static int destroy_queue(struct driver_data *drv_data)
1445 status = stop_queue(drv_data);
1449 if (drv_data->workqueue)
1450 destroy_workqueue(drv_data->workqueue);
1455 static int __init spi_imx_probe(struct platform_device *pdev)
1457 struct device *dev = &pdev->dev;
1458 struct spi_imx_master *platform_info;
1459 struct spi_master *master;
1460 struct driver_data *drv_data = NULL;
1461 struct resource *res;
1462 int irq, status = 0;
1464 platform_info = dev->platform_data;
1465 if (platform_info == NULL) {
1466 dev_err(&pdev->dev, "probe - no platform data supplied\n");
1471 drv_data->clk = clk_get(&pdev->dev, "perclk2");
1472 if (IS_ERR(drv_data->clk)) {
1473 dev_err(&pdev->dev, "probe - cannot get get\n");
1474 status = PTR_ERR(drv_data->clk);
1477 clk_enable(drv_data->clk);
1479 /* Allocate master with space for drv_data */
1480 master = spi_alloc_master(dev, sizeof(struct driver_data));
1482 dev_err(&pdev->dev, "probe - cannot alloc spi_master\n");
1486 drv_data = spi_master_get_devdata(master);
1487 drv_data->master = master;
1488 drv_data->master_info = platform_info;
1489 drv_data->pdev = pdev;
1491 master->bus_num = pdev->id;
1492 master->num_chipselect = platform_info->num_chipselect;
1493 master->cleanup = cleanup;
1494 master->setup = setup;
1495 master->transfer = transfer;
1497 drv_data->dummy_dma_buf = SPI_DUMMY_u32;
1499 /* Find and map resources */
1500 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1502 dev_err(&pdev->dev, "probe - MEM resources not defined\n");
1506 drv_data->ioarea = request_mem_region(res->start,
1507 res->end - res->start + 1,
1509 if (drv_data->ioarea == NULL) {
1510 dev_err(&pdev->dev, "probe - cannot reserve region\n");
1514 drv_data->regs = ioremap(res->start, res->end - res->start + 1);
1515 if (drv_data->regs == NULL) {
1516 dev_err(&pdev->dev, "probe - cannot map IO\n");
1520 drv_data->rd_data_phys = (dma_addr_t)res->start;
1523 irq = platform_get_irq(pdev, 0);
1525 dev_err(&pdev->dev, "probe - IRQ resource not defined\n");
1529 status = request_irq(irq, spi_int, IRQF_DISABLED, dev->bus_id, drv_data);
1531 dev_err(&pdev->dev, "probe - cannot get IRQ (%d)\n", status);
1535 /* Setup DMA if requested */
1536 drv_data->tx_channel = -1;
1537 drv_data->rx_channel = -1;
1538 if (platform_info->enable_dma) {
1539 /* Get rx DMA channel */
1540 drv_data->rx_channel = imx_dma_request_by_prio("spi_imx_rx",
1542 if (drv_data->rx_channel < 0) {
1544 "probe - problem (%d) requesting rx channel\n",
1545 drv_data->rx_channel);
1548 imx_dma_setup_handlers(drv_data->rx_channel, NULL,
1549 dma_err_handler, drv_data);
1551 /* Get tx DMA channel */
1552 drv_data->tx_channel = imx_dma_request_by_prio("spi_imx_tx",
1554 if (drv_data->tx_channel < 0) {
1556 "probe - problem (%d) requesting tx channel\n",
1557 drv_data->tx_channel);
1558 imx_dma_free(drv_data->rx_channel);
1561 imx_dma_setup_handlers(drv_data->tx_channel,
1562 dma_tx_handler, dma_err_handler,
1565 /* Set request source and burst length for allocated channels */
1566 switch (drv_data->pdev->id) {
1569 RSSR(drv_data->rx_channel) = DMA_REQ_SPI1_R;
1570 RSSR(drv_data->tx_channel) = DMA_REQ_SPI1_T;
1574 RSSR(drv_data->rx_channel) = DMA_REQ_SPI2_R;
1575 RSSR(drv_data->tx_channel) = DMA_REQ_SPI2_T;
1578 dev_err(dev, "probe - bad SPI Id\n");
1579 imx_dma_free(drv_data->rx_channel);
1580 imx_dma_free(drv_data->tx_channel);
1584 BLR(drv_data->rx_channel) = SPI_DMA_BLR;
1585 BLR(drv_data->tx_channel) = SPI_DMA_BLR;
1588 /* Load default SPI configuration */
1589 writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
1590 writel(0, drv_data->regs + SPI_RESET);
1591 writel(SPI_DEFAULT_CONTROL, drv_data->regs + SPI_CONTROL);
1593 /* Initialize and start queue */
1594 status = init_queue(drv_data);
1596 dev_err(&pdev->dev, "probe - problem initializing queue\n");
1597 goto err_init_queue;
1599 status = start_queue(drv_data);
1601 dev_err(&pdev->dev, "probe - problem starting queue\n");
1602 goto err_start_queue;
1605 /* Register with the SPI framework */
1606 platform_set_drvdata(pdev, drv_data);
1607 status = spi_register_master(master);
1609 dev_err(&pdev->dev, "probe - problem registering spi master\n");
1610 goto err_spi_register;
1613 dev_dbg(dev, "probe succeded\n");
1619 destroy_queue(drv_data);
1624 free_irq(irq, drv_data);
1627 iounmap(drv_data->regs);
1630 release_resource(drv_data->ioarea);
1631 kfree(drv_data->ioarea);
1634 spi_master_put(master);
1637 clk_disable(drv_data->clk);
1638 clk_put(drv_data->clk);
1644 static int __exit spi_imx_remove(struct platform_device *pdev)
1646 struct driver_data *drv_data = platform_get_drvdata(pdev);
1653 tasklet_kill(&drv_data->pump_transfers);
1655 /* Remove the queue */
1656 status = destroy_queue(drv_data);
1658 dev_err(&pdev->dev, "queue remove failed (%d)\n", status);
1663 writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
1664 writel(0, drv_data->regs + SPI_RESET);
1667 if (drv_data->master_info->enable_dma) {
1668 RSSR(drv_data->rx_channel) = 0;
1669 RSSR(drv_data->tx_channel) = 0;
1670 imx_dma_free(drv_data->tx_channel);
1671 imx_dma_free(drv_data->rx_channel);
1675 irq = platform_get_irq(pdev, 0);
1677 free_irq(irq, drv_data);
1679 clk_disable(drv_data->clk);
1680 clk_put(drv_data->clk);
1682 /* Release map resources */
1683 iounmap(drv_data->regs);
1684 release_resource(drv_data->ioarea);
1685 kfree(drv_data->ioarea);
1687 /* Disconnect from the SPI framework */
1688 spi_unregister_master(drv_data->master);
1689 spi_master_put(drv_data->master);
1691 /* Prevent double remove */
1692 platform_set_drvdata(pdev, NULL);
1694 dev_dbg(&pdev->dev, "remove succeded\n");
1699 static void spi_imx_shutdown(struct platform_device *pdev)
1701 struct driver_data *drv_data = platform_get_drvdata(pdev);
1704 writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
1705 writel(0, drv_data->regs + SPI_RESET);
1707 dev_dbg(&pdev->dev, "shutdown succeded\n");
1712 static int spi_imx_suspend(struct platform_device *pdev, pm_message_t state)
1714 struct driver_data *drv_data = platform_get_drvdata(pdev);
1717 status = stop_queue(drv_data);
1719 dev_warn(&pdev->dev, "suspend cannot stop queue\n");
1723 dev_dbg(&pdev->dev, "suspended\n");
1728 static int spi_imx_resume(struct platform_device *pdev)
1730 struct driver_data *drv_data = platform_get_drvdata(pdev);
1733 /* Start the queue running */
1734 status = start_queue(drv_data);
1736 dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
1738 dev_dbg(&pdev->dev, "resumed\n");
1743 #define spi_imx_suspend NULL
1744 #define spi_imx_resume NULL
1745 #endif /* CONFIG_PM */
1747 /* work with hotplug and coldplug */
1748 MODULE_ALIAS("platform:spi_imx");
1750 static struct platform_driver driver = {
1753 .owner = THIS_MODULE,
1755 .remove = __exit_p(spi_imx_remove),
1756 .shutdown = spi_imx_shutdown,
1757 .suspend = spi_imx_suspend,
1758 .resume = spi_imx_resume,
1761 static int __init spi_imx_init(void)
1763 return platform_driver_probe(&driver, spi_imx_probe);
1765 module_init(spi_imx_init);
1767 static void __exit spi_imx_exit(void)
1769 platform_driver_unregister(&driver);
1771 module_exit(spi_imx_exit);
1773 MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
1774 MODULE_DESCRIPTION("iMX SPI Controller Driver");
1775 MODULE_LICENSE("GPL");