/*
 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 * AVR32 systems.)
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */

/* NOTE: DMS+SMS is system-specific. We should get this information
 * from the platform code somehow.
 */
#define DWC_DEFAULT_CTLLO	(DWC_CTLL_DST_MSIZE(0)	\
		| DWC_CTLL_SRC_MSIZE(0)			\
		| DWC_CTLL_DMS(0)			\
		| DWC_CTLL_SMS(1)			\
		| DWC_CTLL_LLP_D_EN			\
		| DWC_CTLL_LLP_S_EN)

/*
 * Max. number of data items that fit into one descriptor (i.e. into the
 * hardware block transfer size field).
 *
 * This is configuration-dependent and usually a funny size like 4095.
 * Let's round it down to the nearest power of two.
 *
 * Note that this is a transfer count, i.e. if we transfer 32-bit
 * words, we can do 8192 bytes per descriptor.
 *
 * This parameter is also system-specific.
 */
#define DWC_MAX_COUNT	2048U
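
/*
 * Worked example (illustrative only; this helper is not used by the
 * driver): with the 32-bit transfer width encoding (2), one descriptor
 * covers DWC_MAX_COUNT << 2 = 8192 bytes, which is where the figure in
 * the comment above comes from.
 */
static inline size_t dwc_max_bytes_per_desc(unsigned int width)
{
	/* width is the log2 of the transfer width in bytes: 0, 1 or 2 */
	return (size_t)DWC_MAX_COUNT << width;
}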

/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64

/*----------------------------------------------------------------------*/

/*
 * Because we're not relying on writeback from the controller (it may not
 * even be configured into the core!) we don't need to use dma_pool. These
 * descriptors -- and associated data -- are cacheable. We do need to make
 * sure their dcache entries are written back before handing them off to
 * the controller, though.
 */

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}
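
/*
 * Note that chan2parent() resolves to the DMA controller's own struct
 * device; that is the device used for the dma_map_*()/dma_sync_*() calls
 * below, while chan2dev() is only used for diagnostics.
 */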

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}

static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->queue.next, struct dw_desc, desc_node);
}

static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;

	spin_lock_bh(&dwc->lock);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_bh(&dwc->lock);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}
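
/*
 * Descriptors handed out by dwc_desc_get() go back on the free list via
 * dwc_desc_put() once the transfer completes; entries whose txd has not
 * been ACKed by the client yet are skipped above so they cannot be
 * recycled while the client may still be looking at them.
 */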

static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_desc *child;

	list_for_each_entry(child, &desc->txd.tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
				child->txd.phys, sizeof(child->lli),
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
			desc->txd.phys, sizeof(desc->lli),
			DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	if (desc) {
		struct dw_desc *child;

		dwc_sync_desc_for_cpu(dwc, desc);

		spin_lock_bh(&dwc->lock);
		list_for_each_entry(child, &desc->txd.tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->txd.tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_bh(&dwc->lock);
	}
}

/* Called with dwc->lock held and bh disabled */
static dma_cookie_t
dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dma_cookie_t cookie = dwc->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	dwc->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

/*----------------------------------------------------------------------*/

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}
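
/*
 * Note that dwc_dostart() only programs the physical address of the first
 * lli into LLP; the controller then follows the lli.llp chain built by the
 * prep functions below, and a block-complete interrupt is raised only for
 * descriptors whose ctllo has DWC_CTLL_INT_EN set.
 */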

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dma_async_tx_callback callback;
	void *param;
	struct dma_async_tx_descriptor *txd = &desc->txd;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	dwc->completed = txd->cookie;
	callback = txd->callback;
	param = txd->callback_param;

	dwc_sync_desc_for_cpu(dwc, desc);
	list_splice_init(&txd->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	/*
	 * We use dma_unmap_page() regardless of how the buffers were
	 * mapped before they were submitted...
	 */
	if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP))
		dma_unmap_page(chan2parent(&dwc->chan), desc->lli.dar,
				desc->len, DMA_FROM_DEVICE);
	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
		dma_unmap_page(chan2parent(&dwc->chan), desc->lli.sar,
				desc->len, DMA_TO_DEVICE);

	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here.
	 */
	if (callback)
		callback(param);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);

	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&dwc->queue))
		dwc_dostart(dwc, dwc_first_queued(dwc));
	list_splice_init(&dwc->active_list, &list);
	list_splice_init(&dwc->queue, &dwc->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc);
}

static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;

	/*
	 * Clear block interrupt flag before scanning so that we don't
	 * miss any, and read LLP before RAW_XFER to ensure it is
	 * valid if we decide to scan the list.
	 */
	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);
		dwc_complete_all(dw, dwc);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		if (desc->lli.llp == llp)
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->txd.tx_list, desc_node)
			if (child->lli.llp == llp)
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		dwc_descriptor_complete(dwc, desc);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	if (!list_empty(&dwc->queue)) {
		dwc_dostart(dwc, dwc_first_queued(dwc));
		list_splice_init(&dwc->queue, &dwc->active_list);
	}
}
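
/*
 * The scan above works by comparing the channel's LLP register with the
 * physical address of each lli on the active list: descriptors seen before
 * the first match have already been fetched by the controller and can be
 * completed, while the matching one is still in flight.
 */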

static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
			lli->sar, lli->dar, lli->llp,
			lli->ctlhi, lli->ctllo);
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;

	dwc_scan_descriptors(dw, dwc);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_splice_init(&dwc->queue, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"Bad descriptor submitted for DMA!\n");
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc);
}

static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_block;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_block = dma_readl(dw, RAW.BLOCK);
	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n",
			status_block, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		spin_lock(&dwc->lock);
		if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if ((status_block | status_xfer) & (1 << i))
			dwc_scan_descriptors(dw, dwc);
		spin_unlock(&dwc->lock);
	}

	/*
	 * Re-enable interrupts. Block Complete interrupts are only
	 * enabled if the INT_EN bit in the descriptor is set. This
	 * will trigger a scan before the whole list is done.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}
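
/*
 * Interrupt handling is split in two: the hard irq handler below only
 * masks the per-channel interrupt sources and schedules the tasklet above,
 * which does the actual list walking and then unmasks them again.
 */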

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
			dma_readl(dw, STATUS_INT));

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * tasklet.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n", status);
		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);
	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc *desc = txd_to_dw_desc(tx);
	struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&dwc->lock);
	cookie = dwc_assign_cookie(dwc, desc);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		dwc_dostart(dwc, desc);
		list_add_tail(&desc->desc_node, &dwc->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_bh(&dwc->lock);

	return cookie;
}
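
/*
 * The cookie returned above is what clients later pass to
 * device_is_tx_complete(); dwc_is_tx_complete() compares it against
 * dwc->completed, which is advanced by dwc_descriptor_complete().
 */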

static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_desc *desc, *first, *prev;
	size_t xfer_count, offset;
	unsigned int src_width, dst_width;
	u32 ctllo;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 3))
		src_width = dst_width = 2;
	else if (!((src | dest | len) & 1))
		src_width = dst_width = 1;
	else
		src_width = dst_width = 0;

	ctllo = DWC_DEFAULT_CTLLO
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				DWC_MAX_COUNT);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					prev->txd.phys, sizeof(prev->lli),
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->txd.tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
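
/*
 * Transfers longer than DWC_MAX_COUNT items are split across several linked
 * descriptors above; e.g. a 102400-byte copy done with 32-bit transfers is
 * 25600 items and therefore needs DIV_ROUND_UP(25600, 2048) = 13 descriptors.
 *
 * A minimal memcpy client sketch (illustrative only; "my_done" and "my_ctx"
 * are made up, and the channel is assumed to have been obtained from the
 * dmaengine core):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *			DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	tx->callback = my_done;
 *	tx->callback_param = my_ctx;
 *	cookie = tx->tx_submit(tx);
 *	chan->device->device_issue_pending(chan);
 *
 * tx_submit() lands in dwc_tx_submit() above, which either starts the
 * transfer immediately or puts it on dwc->queue.
 */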

static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = chan->private;
	struct dw_desc *prev = NULL, *first = NULL;
	struct scatterlist *sg;
	unsigned int reg_width, mem_width, i;
	dma_addr_t reg;
	u32 ctllo;
	size_t total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	if (unlikely(!dws || !sg_len))
		return NULL;

	reg_width = dws->reg_width;
	sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction);

	switch (direction) {
	case DMA_TO_DEVICE:
		ctllo = (DWC_DEFAULT_CTLLO | DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX | DWC_CTLL_SRC_INC
				| DWC_CTLL_FC_M2P);
		reg = dws->tx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, mem;

			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			desc->lli.ctlhi = len >> mem_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys, sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->txd.tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	case DMA_FROM_DEVICE:
		ctllo = (DWC_DEFAULT_CTLLO | DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC | DWC_CTLL_SRC_FIX
				| DWC_CTLL_FC_P2M);
		reg = dws->rx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, mem;

			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			mem = sg_phys(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			desc->lli.ctlhi = len >> reg_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys, sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->txd.tx_list);
			}
			prev = desc;
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli), DMA_TO_DEVICE);

	first->len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
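
/*
 * Slave transfers rely on the struct dw_dma_slave that the client hangs off
 * chan->private before using the channel: it supplies reg_width and the
 * peripheral register address used above. A minimal client sketch
 * (illustrative only; "buf" and "buflen" are made-up locals, and submission
 * then proceeds exactly as in the memcpy sketch above):
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, buflen);
 *	tx = chan->device->device_prep_slave_sg(chan, &sg, 1,
 *			DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
 */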

static void dwc_terminate_all(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);

	/*
	 * This is only called when something went wrong elsewhere, so
	 * we don't really care about the data. Just disable the
	 * channel. We still have to poll the channel enable bit due
	 * to AHB/HSB limitations.
	 */
	spin_lock_bh(&dwc->lock);

	channel_clear_bit(dw, CH_EN, dwc->mask);

	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	/* active_list entries will end up before queued entries */
	list_splice_init(&dwc->queue, &list);
	list_splice_init(&dwc->active_list, &list);

	spin_unlock_bh(&dwc->lock);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc);
}

static enum dma_status
dwc_is_tx_complete(struct dma_chan *chan,
		dma_cookie_t cookie,
		dma_cookie_t *done, dma_cookie_t *used)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	int ret;

	last_complete = dwc->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

		last_complete = dwc->completed;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	spin_lock_bh(&dwc->lock);
	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
	spin_unlock_bh(&dwc->lock);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc;
	struct dw_dma_slave *dws;
	int i;
	u32 cfghi;
	u32 cfglo;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dwc->completed = chan->cookie = 1;

	cfghi = DWC_CFGH_FIFO_MODE;
	cfglo = 0;

	dws = chan->private;
	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo = dws->cfg_lo;
	}
	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	spin_lock_bh(&dwc->lock);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_bh(&dwc->lock);

		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_bh(&dwc->lock);
			break;
		}

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		INIT_LIST_HEAD(&desc->txd.tx_list);
		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
				sizeof(desc->lli), DMA_TO_DEVICE);
		dwc_desc_put(dwc, desc);

		spin_lock_bh(&dwc->lock);
		i = ++dwc->descs_allocated;
	}

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_bh(&dwc->lock);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}
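
/*
 * Each descriptor's lli is streaming-mapped exactly once above (with
 * DMA_TO_DEVICE); after that the prep functions only dma_sync_*() the
 * existing mapping whenever an lli has been rewritten, before handing it
 * back to the controller.
 */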

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
			dwc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_bh(&dwc->lock);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_bh(&dwc->lock);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				sizeof(desc->lli), DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();
}

static int __init dw_probe(struct platform_device *pdev)
{
	struct dw_dma_platform_data *pdata;
	struct resource *io;
	struct dw_dma *dw;
	size_t size;
	int irq, err, i;

	pdata = pdev->dev.platform_data;
	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct dw_dma);
	size += pdata->nr_channels * sizeof(struct dw_dma_chan);
	dw = kzalloc(size, GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	memset(dw, 0, sizeof *dw);

	dw->regs = ioremap(io->start, DW_REGLEN);
	if (!dw->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	dw->clk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(dw->clk)) {
		err = PTR_ERR(dw->clk);
		goto err_clk;
	}
	clk_enable(dw->clk);

	/* force dma off, just in case */
	dw_dma_off(dw);

	err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, dw);

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) {
		struct dw_dma_chan *dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dwc->chan.cookie = dwc->completed = 1;
		dwc->chan.chan_id = i;
		list_add_tail(&dwc->chan.device_node, &dw->dma.channels);

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	/* Clear/disable all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	dw->dma.dev = &pdev->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_terminate_all = dwc_terminate_all;

	dw->dma.device_is_tx_complete = dwc_is_tx_complete;
	dw->dma.device_issue_pending = dwc_issue_pending;

	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
			pdev->dev.bus_id, dw->dma.chancnt);

	dma_async_device_register(&dw->dma);

	return 0;

err_irq:
	clk_disable(dw->clk);
	clk_put(dw->clk);
err_clk:
	iounmap(dw->regs);
	dw->regs = NULL;
err_release_r:
	release_resource(io);
err_kfree:
	kfree(dw);
	return err;
}

static int __exit dw_remove(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);
	struct dw_dma_chan *dwc, *_dwc;
	struct resource *io;

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(platform_get_irq(pdev, 0), dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	clk_disable(dw->clk);
	clk_put(dw->clk);
	iounmap(dw->regs);
	dw->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, DW_REGLEN);
	kfree(dw);

	return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(dw);
	clk_disable(dw->clk);
}

static int dw_suspend_late(struct platform_device *pdev, pm_message_t mesg)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(dw);
	clk_disable(dw->clk);
	return 0;
}

static int dw_resume_early(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);

	clk_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
	return 0;
}

static struct platform_driver dw_driver = {
	.remove		= __exit_p(dw_remove),
	.shutdown	= dw_shutdown,
	.suspend_late	= dw_suspend_late,
	.resume_early	= dw_resume_early,
	.driver = {
		.name	= "dw_dmac",
	},
};

static int __init dw_init(void)
{
	return platform_driver_probe(&dw_driver, dw_probe);
}
module_init(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>");