1 /* linux/arch/arm/mach-bast/dma.c
3 * (c) 2003-2005 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
8 * http://www.simtec.co.uk/products/EB2410ITX/
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
15 * 27-Feb-2005 BJD Added kmem cache for dma descriptors
16 * 18-Nov-2004 BJD Removed error for loading onto stopped channel
17 * 10-Nov-2004 BJD Ensure all external symbols exported for modules
18 * 10-Nov-2004 BJD Use sys_device and sysdev_class for power management
19 * 08-Aug-2004 BJD Apply rmk's suggestions
20 * 21-Jul-2004 BJD Ported to linux 2.6
21 * 12-Jul-2004 BJD Finished re-write and change of API
22 * 06-Jul-2004 BJD Rewrote dma code to try and cope with various problems
23 * 23-May-2003 BJD Created file
24 * 19-Aug-2003 BJD Cleanup, header fix, added URL
26 * This file is based on the Sangwook Lee/Samsung patches, re-written due
27 * to various omissions from the code (such as flexible dma configuration)
28 * for use with the BAST system board.
30 * The re-write is pretty much complete, and should be good enough for any
31 * possible DMA function
35 #ifdef CONFIG_S3C2410_DMA_DEBUG
39 #include <linux/module.h>
40 #include <linux/init.h>
41 #include <linux/sched.h>
42 #include <linux/spinlock.h>
43 #include <linux/interrupt.h>
44 #include <linux/sysdev.h>
45 #include <linux/slab.h>
46 #include <linux/errno.h>
47 #include <linux/delay.h>
49 #include <asm/system.h>
51 #include <asm/hardware.h>
55 #include <asm/mach/dma.h>
56 #include <asm/arch/map.h>
59 static void __iomem *dma_base;
60 static kmem_cache_t *dma_kmem;
62 /* dma channel state information */
63 struct s3c2410_dma_chan s3c2410_chans[S3C2410_DMA_CHANNELS];
65 /* debugging functions */
67 #define BUF_MAGIC (0xcafebabe)
69 #define dmawarn(fmt...) printk(KERN_DEBUG fmt)
71 #define dma_regaddr(chan, reg) ((chan)->regs + (reg))
74 #define dma_wrreg(chan, reg, val) writel((val), (chan)->regs + (reg))
/* debug build of dma_wrreg(): trace the value and register offset, then
 * perform the MMIO write via dma_regaddr().
 * NOTE(review): the storage-class/return-type line of this function is
 * missing from this extract. */
77 dma_wrreg(struct s3c2410_dma_chan *chan, int reg, unsigned long val)
79 pr_debug("writing %08x to register %08x\n",(unsigned int)val,reg);
80 writel(val, dma_regaddr(chan, reg));
85 #define dma_rdreg(chan, reg) readl((chan)->regs + (reg))
87 /* captured register state for debug */
89 struct s3c2410_dma_regstate {
/* NOTE(review): only dmsktrig is visible here; the other members read
 * by dmadbg_capture() (dcsrc, disrc, dstat, dcon) appear to be missing
 * from this extract. */
94 unsigned long dmsktrig;
97 #ifdef CONFIG_S3C2410_DMA_DEBUG
101 * simple debug routine to print the current state of the dma registers
/* snapshot the channel's DMA controller registers into *regs so they can
 * be printed later without further MMIO reads */
105 dmadbg_capture(struct s3c2410_dma_chan *chan, struct s3c2410_dma_regstate *regs)
107 regs->dcsrc = dma_rdreg(chan, S3C2410_DMA_DCSRC);
108 regs->disrc = dma_rdreg(chan, S3C2410_DMA_DISRC);
109 regs->dstat = dma_rdreg(chan, S3C2410_DMA_DSTAT);
110 regs->dcon = dma_rdreg(chan, S3C2410_DMA_DCON);
111 regs->dmsktrig = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
/* print a previously captured register snapshot, tagged with the calling
 * function name and line for traceability */
115 dmadbg_dumpregs(const char *fname, int line, struct s3c2410_dma_chan *chan,
116 struct s3c2410_dma_regstate *regs)
118 printk(KERN_DEBUG "dma%d: %s:%d: DCSRC=%08lx, DISRC=%08lx, DSTAT=%08lx DMT=%02lx, DCON=%08lx\n",
119 chan->number, fname, line,
120 regs->dcsrc, regs->disrc, regs->dstat, regs->dmsktrig,
/* NOTE(review): the final format argument (presumably regs->dcon, to
 * match the DCON=%08lx specifier) is missing from this extract. */
/* dump the channel's software state (load_state and the curr/next/end
 * buffer chain pointers), then its hardware register state */
125 dmadbg_showchan(const char *fname, int line, struct s3c2410_dma_chan *chan)
127 struct s3c2410_dma_regstate state;
129 dmadbg_capture(chan, &state);
131 printk(KERN_DEBUG "dma%d: %s:%d: ls=%d, cur=%p, %p %p\n",
132 chan->number, fname, line, chan->load_state,
133 chan->curr, chan->next, chan->end);
135 dmadbg_dumpregs(fname, line, chan, &state);
/* capture and immediately dump the channel's register state (registers
 * only, no software-state line — cf. dmadbg_showchan) */
139 dmadbg_showregs(const char *fname, int line, struct s3c2410_dma_chan *chan)
141 struct s3c2410_dma_regstate state;
143 dmadbg_capture(chan, &state);
144 dmadbg_dumpregs(fname, line, chan, &state);
147 #define dbg_showregs(chan) dmadbg_showregs(__FUNCTION__, __LINE__, (chan))
148 #define dbg_showchan(chan) dmadbg_showchan(__FUNCTION__, __LINE__, (chan))
150 #define dbg_showregs(chan) do { } while(0)
151 #define dbg_showchan(chan) do { } while(0)
152 #endif /* CONFIG_S3C2410_DMA_DEBUG */
154 #define check_channel(chan) \
155 do { if ((chan) >= S3C2410_DMA_CHANNELS) { \
156 printk(KERN_ERR "%s: invalid channel %d\n", __FUNCTION__, (chan)); \
161 /* s3c2410_dma_stats_timeout
163 * Update DMA stats from timeout info
167 s3c2410_dma_stats_timeout(struct s3c2410_dma_stats *stats, int val)
/* track the shortest/longest observed load-wait, and accumulate the
 * total (timeout_avg appears to be a running sum, not yet an average) */
172 if (val > stats->timeout_longest)
173 stats->timeout_longest = val;
174 if (val < stats->timeout_shortest)
175 stats->timeout_shortest = val;
177 stats->timeout_avg += val;
180 /* s3c2410_dma_waitforload
182 * wait for the DMA engine to load a buffer, and update the state accordingly
186 s3c2410_dma_waitforload(struct s3c2410_dma_chan *chan, int line)
188 int timeout = chan->load_timeout;
/* must only be called when exactly one buffer is queued but not yet
 * taken up by the engine */
191 if (chan->load_state != S3C2410_DMALOAD_1LOADED) {
192 printk(KERN_ERR "dma%d: s3c2410_dma_waitforload() called in loadstate %d from line %d\n", chan->number, chan->load_state, line);
196 if (chan->stats != NULL)
197 chan->stats->loads++;
/* busy-poll DSTAT: the <<(32-20) shift discards everything above the
 * low 20 bits (the current transfer-count field), so the condition is
 * true once the engine has actually started counting the transfer */
199 while (--timeout > 0) {
200 if ((dma_rdreg(chan, S3C2410_DMA_DSTAT) << (32-20)) != 0) {
201 took = chan->load_timeout - timeout;
203 s3c2410_dma_stats_timeout(chan->stats, took);
205 switch (chan->load_state) {
206 case S3C2410_DMALOAD_1LOADED:
207 chan->load_state = S3C2410_DMALOAD_1RUNNING;
211 printk(KERN_ERR "dma%d: unknown load_state in s3c2410_dma_waitforload() %d\n", chan->number, chan->load_state);
227 /* s3c2410_dma_loadbuffer
229 * load a buffer, and update the channel state
233 s3c2410_dma_loadbuffer(struct s3c2410_dma_chan *chan,
234 struct s3c2410_dma_buf *buf)
236 unsigned long reload;
238 pr_debug("s3c2410_chan_loadbuffer: loading buff %p (0x%08lx,0x%06x)\n",
239 buf, (unsigned long)buf->data, buf->size);
242 dmawarn("buffer is NULL\n");
246 /* check the state of the channel before we do anything */
248 if (chan->load_state == S3C2410_DMALOAD_1LOADED) {
249 dmawarn("load_state is S3C2410_DMALOAD_1LOADED\n");
252 if (chan->load_state == S3C2410_DMALOAD_1LOADED_1RUNNING) {
253 dmawarn("state is S3C2410_DMALOAD_1LOADED_1RUNNING\n");
256 /* it would seem sensible if we are the last buffer to not bother
257 * with the auto-reload bit, so that the DMA engine will not try
258 * and load another transfer after this one has finished...
260 if (chan->load_state == S3C2410_DMALOAD_NONE) {
261 pr_debug("load_state is none, checking for noreload (next=%p)\n",
263 reload = (buf->next == NULL) ? S3C2410_DCON_NORELOAD : 0;
265 //pr_debug("load_state is %d => autoreload\n", chan->load_state);
266 reload = S3C2410_DCON_AUTORELOAD;
/* sanity check on the buffer's physical address; presumably SDRAM
 * lives in the 0x3xxxxxxx bank on this board — TODO confirm against
 * the platform memory map */
269 if ((buf->data & 0xf0000000) != 0x30000000) {
270 dmawarn("dmaload: buffer is %p\n", (void *)buf->data);
/* program the source-or-destination address register (chan->addr_reg
 * was chosen by s3c2410_dma_devconfig) and DCON with the reload mode
 * and transfer count in xfer_unit-sized units */
273 writel(buf->data, chan->addr_reg);
275 dma_wrreg(chan, S3C2410_DMA_DCON,
276 chan->dcon | reload | (buf->size/chan->xfer_unit));
278 chan->next = buf->next;
280 /* update the state of the channel */
282 switch (chan->load_state) {
283 case S3C2410_DMALOAD_NONE:
284 chan->load_state = S3C2410_DMALOAD_1LOADED;
287 case S3C2410_DMALOAD_1RUNNING:
288 chan->load_state = S3C2410_DMALOAD_1LOADED_1RUNNING;
292 dmawarn("dmaload: unknown state %d in loadbuffer\n",
300 /* s3c2410_dma_call_op
302 * small routine to call the op routine with the given op if it has been
307 s3c2410_dma_call_op(struct s3c2410_dma_chan *chan, enum s3c2410_chan_op op)
/* op_fn is optional; silently skip when the client registered none */
309 if (chan->op_fn != NULL) {
310 (chan->op_fn)(chan, op);
314 /* s3c2410_dma_buffdone
316 * small wrapper to check if callback routine needs to be called, and
321 s3c2410_dma_buffdone(struct s3c2410_dma_chan *chan, struct s3c2410_dma_buf *buf,
322 enum s3c2410_dma_buffresult result)
324 pr_debug("callback_fn=%p, buf=%p, id=%p, size=%d, result=%d\n",
325 chan->callback_fn, buf, buf->id, buf->size, result);
/* the client's completion callback is optional */
327 if (chan->callback_fn != NULL) {
328 (chan->callback_fn)(chan, buf->id, buf->size, result);
334 * start a dma channel going
337 static int s3c2410_dma_start(struct s3c2410_dma_chan *chan)
342 pr_debug("s3c2410_start_dma: channel=%d\n", chan->number);
344 local_irq_save(flags);
/* starting an already-running channel is a no-op */
346 if (chan->state == S3C2410_DMA_RUNNING) {
347 pr_debug("s3c2410_start_dma: already running (%d)\n", chan->state);
348 local_irq_restore(flags);
352 chan->state = S3C2410_DMA_RUNNING;
354 /* check whether there is anything to load, and if not, see
355 * if we can find anything to load
358 if (chan->load_state == S3C2410_DMALOAD_NONE) {
359 if (chan->next == NULL) {
360 printk(KERN_ERR "dma%d: channel has nothing loaded\n",
361 chan->state = S3C2410_DMA_IDLE;
363 local_irq_restore(flags);
367 s3c2410_dma_loadbuffer(chan, chan->next);
372 /* enable the channel */
374 if (!chan->irq_enabled) {
375 enable_irq(chan->irq);
376 chan->irq_enabled = 1;
379 /* start the channel going */
381 tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
382 tmp &= ~S3C2410_DMASKTRIG_STOP;
383 tmp |= S3C2410_DMASKTRIG_ON;
384 dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp);
386 pr_debug("dma%d: %08lx to DMASKTRIG\n", chan->number, tmp);
389 /* the dma buffer loads should take care of clearing the AUTO
390 * reloading feature */
391 tmp = dma_rdreg(chan, S3C2410_DMA_DCON);
392 tmp &= ~S3C2410_DCON_NORELOAD;
393 dma_wrreg(chan, S3C2410_DMA_DCON, tmp);
396 s3c2410_dma_call_op(chan, S3C2410_DMAOP_START);
400 /* if we've only loaded one buffer onto the channel, then check
401 * to see if we have another, and if so, try and load it so when
402 * the first buffer is finished, the new one will be loaded onto
405 if (chan->next != NULL) {
406 if (chan->load_state == S3C2410_DMALOAD_1LOADED) {
408 if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
409 pr_debug("%s: buff not yet loaded, no more todo\n",
412 chan->load_state = S3C2410_DMALOAD_1RUNNING;
413 s3c2410_dma_loadbuffer(chan, chan->next);
416 } else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) {
417 s3c2410_dma_loadbuffer(chan, chan->next);
422 local_irq_restore(flags);
427 /* s3c2410_dma_canload
429 * work out if we can queue another buffer into the DMA engine
433 s3c2410_dma_canload(struct s3c2410_dma_chan *chan)
/* a buffer can be queued when nothing is loaded, or when the single
 * loaded buffer is already running (leaving the reload slot free) */
435 if (chan->load_state == S3C2410_DMALOAD_NONE ||
436 chan->load_state == S3C2410_DMALOAD_1RUNNING)
443 /* s3c2410_dma_enqueue
445 * queue a given buffer for dma transfer.
447 * id the device driver's id information for this buffer
448 * data the physical address of the buffer data
449 * size the size of the buffer in bytes
451 * If the channel is not running, then the flag S3C2410_DMAF_AUTOSTART
452 * is checked, and if set, the channel is started. If this flag isn't set,
453 * then an error will be returned.
455 * It is possible to queue more than one DMA buffer onto a channel at
456 * once, and the code will deal with the re-loading of the next buffer
460 int s3c2410_dma_enqueue(unsigned int channel, void *id,
461 dma_addr_t data, int size)
463 struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
464 struct s3c2410_dma_buf *buf;
467 check_channel(channel);
469 pr_debug("%s: id=%p, data=%08x, size=%d\n",
470 __FUNCTION__, id, (unsigned int)data, size);
/* GFP_ATOMIC: this may be called from interrupt/atomic context */
472 buf = kmem_cache_alloc(dma_kmem, GFP_ATOMIC);
474 pr_debug("%s: out of memory (%ld alloc)\n",
475 __FUNCTION__, (long)sizeof(*buf));
479 //pr_debug("%s: new buffer %p\n", __FUNCTION__, buf);
480 //dbg_showchan(chan);
483 buf->data = buf->ptr = data;
486 buf->magic = BUF_MAGIC;
488 local_irq_save(flags);
/* link the buffer onto the channel's chain: either it becomes the
 * current buffer of an empty channel, or it is appended at the end */
490 if (chan->curr == NULL) {
491 /* we've got nothing loaded... */
492 pr_debug("%s: buffer %p queued onto empty channel\n",
499 pr_debug("dma%d: %s: buffer %p queued onto non-empty channel\n",
500 chan->number, __FUNCTION__, buf);
502 if (chan->end == NULL)
503 pr_debug("dma%d: %s: %p not empty, and chan->end==NULL?\n",
504 chan->number, __FUNCTION__, chan);
506 chan->end->next = buf;
510 /* if necessary, update the next buffer field */
511 if (chan->next == NULL)
514 /* check to see if we can load a buffer */
515 if (chan->state == S3C2410_DMA_RUNNING) {
/* NOTE(review): the "&& 1" looks like a leftover debug condition */
516 if (chan->load_state == S3C2410_DMALOAD_1LOADED && 1) {
517 if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
/* NOTE(review): string concatenation yields "loadbuffer:timeout"
 * with no space between the fragments */
518 printk(KERN_ERR "dma%d: loadbuffer:"
519 "timeout loading buffer\n",
522 local_irq_restore(flags);
/* push as many queued buffers into the engine as it will take */
527 while (s3c2410_dma_canload(chan) && chan->next != NULL) {
528 s3c2410_dma_loadbuffer(chan, chan->next);
530 } else if (chan->state == S3C2410_DMA_IDLE) {
531 if (chan->flags & S3C2410_DMAF_AUTOSTART) {
532 s3c2410_dma_ctrl(chan->number, S3C2410_DMAOP_START);
536 local_irq_restore(flags);
540 EXPORT_SYMBOL(s3c2410_dma_enqueue);
/* return a buffer descriptor to the dma_kmem cache, refusing (and
 * logging) buffers whose magic value has been corrupted */
543 s3c2410_dma_freebuf(struct s3c2410_dma_buf *buf)
545 int magicok = (buf->magic == BUF_MAGIC);
550 kmem_cache_free(dma_kmem, buf);
552 printk("s3c2410_dma_freebuf: buff %p with bad magic\n", buf);
556 /* s3c2410_dma_lastxfer
558 * called when the system is out of buffers, to ensure that the channel
559 * is prepared for shutdown.
563 s3c2410_dma_lastxfer(struct s3c2410_dma_chan *chan)
565 pr_debug("dma%d: s3c2410_dma_lastxfer: load_state %d\n",
566 chan->number, chan->load_state);
568 switch (chan->load_state) {
569 case S3C2410_DMALOAD_NONE:
572 case S3C2410_DMALOAD_1LOADED:
/* the last buffer is queued but not yet taken up; wait for the
 * engine to pick it up before disabling reload */
573 if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
575 printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n",
576 chan->number, __FUNCTION__);
581 case S3C2410_DMALOAD_1LOADED_1RUNNING:
582 /* I believe in this case we do not have anything to do
583 * until the next buffer comes along, and we turn off the
588 pr_debug("dma%d: lastxfer: unhandled load_state %d with no next\n",
589 chan->number, chan->load_state);
594 /* hopefully this'll shut the damned thing up after the transfer... */
595 dma_wrreg(chan, S3C2410_DMA_DCON, chan->dcon | S3C2410_DCON_NORELOAD);
599 #define dmadbg2(x...)
/* per-channel DMA completion interrupt handler: advances the channel's
 * load_state, completes and frees the finished buffer, then either
 * reloads the next buffer or winds the channel down.
 * (pre-2.6.19 handler signature with struct pt_regs; devpw is the
 * channel pointer passed to request_irq) */
602 s3c2410_dma_irq(int irq, void *devpw, struct pt_regs *regs)
604 struct s3c2410_dma_chan *chan = (struct s3c2410_dma_chan *)devpw;
605 struct s3c2410_dma_buf *buf;
611 /* modify the channel state */
613 switch (chan->load_state) {
614 case S3C2410_DMALOAD_1RUNNING:
615 /* TODO - if we are running only one buffer, we probably
616 * want to reload here, and then worry about the buffer
619 chan->load_state = S3C2410_DMALOAD_NONE;
622 case S3C2410_DMALOAD_1LOADED:
623 /* iirc, we should go back to NONE loaded here, we
624 * had a buffer, and it was never verified as being
628 chan->load_state = S3C2410_DMALOAD_NONE;
631 case S3C2410_DMALOAD_1LOADED_1RUNNING:
632 /* we'll worry about checking to see if another buffer is
633 * ready after we've called back the owner. This should
634 * ensure we do not wait around too long for the DMA
635 * engine to start the next transfer
638 chan->load_state = S3C2410_DMALOAD_1LOADED;
641 case S3C2410_DMALOAD_NONE:
642 printk(KERN_ERR "dma%d: IRQ with no loaded buffer?\n",
647 printk(KERN_ERR "dma%d: IRQ in invalid load_state %d\n",
648 chan->number, chan->load_state);
653 /* update the chain to make sure that if we load any more
654 * buffers when we call the callback function, things should
657 chan->curr = buf->next;
/* validate the finished buffer before completing/freeing it */
660 if (buf->magic != BUF_MAGIC) {
661 printk(KERN_ERR "dma%d: %s: buf %p incorrect magic\n",
662 chan->number, __FUNCTION__, buf);
666 s3c2410_dma_buffdone(chan, buf, S3C2410_RES_OK);
669 s3c2410_dma_freebuf(buf);
673 /* only reload if the channel is still running... our buffer done
674 * routine may have altered the state by requesting the dma channel
675 * to stop or shutdown... */
677 /* todo: check that when the channel is shut-down from inside this
678 * function, we cope with unsetting reload, etc */
680 if (chan->next != NULL && chan->state != S3C2410_DMA_IDLE) {
683 switch (chan->load_state) {
684 case S3C2410_DMALOAD_1RUNNING:
685 /* don't need to do anything for this state */
688 case S3C2410_DMALOAD_NONE:
689 /* can load buffer immediately */
692 case S3C2410_DMALOAD_1LOADED:
693 if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
695 printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n",
696 chan->number, __FUNCTION__);
702 case S3C2410_DMALOAD_1LOADED_1RUNNING:
706 printk(KERN_ERR "dma%d: unknown load_state in irq, %d\n",
707 chan->number, chan->load_state);
711 local_irq_save(flags);
712 s3c2410_dma_loadbuffer(chan, chan->next);
713 local_irq_restore(flags);
/* no more buffers queued: prepare the channel for shutdown */
715 s3c2410_dma_lastxfer(chan);
717 /* see if we can stop this channel.. */
718 if (chan->load_state == S3C2410_DMALOAD_NONE) {
719 pr_debug("dma%d: end of transfer, stopping channel (%ld)\n",
720 chan->number, jiffies);
721 s3c2410_dma_ctrl(chan->number, S3C2410_DMAOP_STOP);
729 /* s3c2410_request_dma
731 * get control of a dma channel
734 int s3c2410_dma_request(unsigned int channel, struct s3c2410_dma_client *client,
737 struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
741 pr_debug("dma%d: s3c2410_request_dma: client=%s, dev=%p\n",
742 channel, client->name, dev);
744 check_channel(channel);
746 local_irq_save(flags);
/* reject the request if the channel already belongs to someone else */
751 if (client != chan->client) {
752 printk(KERN_ERR "dma%d: already in use\n", channel);
753 local_irq_restore(flags);
756 printk(KERN_ERR "dma%d: client already has channel\n", channel);
760 chan->client = client;
/* claim the channel's interrupt line the first time it is requested;
 * irqs are re-enabled around request_irq() since it may sleep */
763 if (!chan->irq_claimed) {
764 pr_debug("dma%d: %s : requesting irq %d\n",
765 channel, __FUNCTION__, chan->irq);
767 chan->irq_claimed = 1;
768 local_irq_restore(flags);
770 err = request_irq(chan->irq, s3c2410_dma_irq, IRQF_DISABLED,
771 client->name, (void *)chan);
773 local_irq_save(flags);
777 chan->irq_claimed = 0;
778 local_irq_restore(flags);
780 printk(KERN_ERR "%s: cannot get IRQ %d for DMA %d\n",
781 client->name, chan->irq, chan->number);
785 chan->irq_enabled = 1;
788 local_irq_restore(flags);
792 pr_debug("%s: channel initialised, %p\n", __FUNCTION__, chan);
797 EXPORT_SYMBOL(s3c2410_dma_request);
801 * release the given channel back to the system, will stop and flush
802 * any outstanding transfers, and ensure the channel is ready for the
805 * Note, although a warning is currently printed if the freeing client
806 * info is not the same as the registrant's client info, the free is still
807 * allowed to go through.
810 int s3c2410_dma_free(dmach_t channel, struct s3c2410_dma_client *client)
812 struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
815 check_channel(channel);
817 local_irq_save(flags);
820 if (chan->client != client) {
821 printk(KERN_WARNING "dma%d: possible free from different client (channel %p, passed %p)\n",
822 channel, chan->client, client);
825 /* sort out stopping and freeing the channel */
827 if (chan->state != S3C2410_DMA_IDLE) {
828 pr_debug("%s: need to stop dma channel %p\n",
831 /* possibly flush the channel */
832 s3c2410_dma_ctrl(channel, S3C2410_DMAOP_STOP);
/* release the interrupt line if we claimed it in dma_request */
838 if (chan->irq_claimed)
839 free_irq(chan->irq, (void *)chan);
840 chan->irq_claimed = 0;
842 local_irq_restore(flags);
847 EXPORT_SYMBOL(s3c2410_dma_free);
/* stop the channel's hardware: assert STOP in DMASKTRIG, disable
 * auto-reload via DCON, and reset the software state to idle */
849 static int s3c2410_dma_dostop(struct s3c2410_dma_chan *chan)
854 pr_debug("%s:\n", __FUNCTION__);
858 local_irq_save(flags);
860 s3c2410_dma_call_op(chan, S3C2410_DMAOP_STOP);
862 tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
863 tmp |= S3C2410_DMASKTRIG_STOP;
864 //tmp &= ~S3C2410_DMASKTRIG_ON;
865 dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp);
868 /* should also clear interrupts, according to WinCE BSP */
869 tmp = dma_rdreg(chan, S3C2410_DMA_DCON);
870 tmp |= S3C2410_DCON_NORELOAD;
871 dma_wrreg(chan, S3C2410_DMA_DCON, tmp);
874 /* should stop do this, or should we wait for flush? */
875 chan->state = S3C2410_DMA_IDLE;
876 chan->load_state = S3C2410_DMALOAD_NONE;
878 local_irq_restore(flags);
/* busy-poll DMASKTRIG until the channel's ON bit drops, i.e. the engine
 * has actually stopped; gives up after a bounded number of reads */
883 void s3c2410_dma_waitforstop(struct s3c2410_dma_chan *chan)
886 unsigned int timeout = 0x10000;
888 while (timeout-- > 0) {
889 tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
891 if (!(tmp & S3C2410_DMASKTRIG_ON))
895 pr_debug("dma%d: failed to stop?\n", chan->number);
901 * stop the channel, and remove all current and pending transfers
904 static int s3c2410_dma_flush(struct s3c2410_dma_chan *chan)
906 struct s3c2410_dma_buf *buf, *next;
909 pr_debug("%s: chan %p (%d)\n", __FUNCTION__, chan, chan->number);
913 local_irq_save(flags);
915 if (chan->state != S3C2410_DMA_IDLE) {
916 pr_debug("%s: stopping channel...\n", __FUNCTION__ );
917 s3c2410_dma_ctrl(chan->number, S3C2410_DMAOP_STOP);
/* detach the whole buffer chain, then walk it completing each buffer
 * with S3C2410_RES_ABORT and returning it to the cache */
924 chan->curr = chan->next = chan->end = NULL;
927 for ( ; buf != NULL; buf = next) {
930 pr_debug("%s: free buffer %p, next %p\n",
931 __FUNCTION__, buf, buf->next);
933 s3c2410_dma_buffdone(chan, buf, S3C2410_RES_ABORT);
934 s3c2410_dma_freebuf(buf);
940 s3c2410_dma_waitforstop(chan);
943 /* should also clear interrupts, according to WinCE BSP */
947 tmp = dma_rdreg(chan, S3C2410_DMA_DCON);
948 tmp |= S3C2410_DCON_NORELOAD;
949 dma_wrreg(chan, S3C2410_DMA_DCON, tmp);
955 local_irq_restore(flags);
/* notification that the client has started the channel externally:
 * try to queue the next pending buffer into the engine (mirrors the
 * tail of s3c2410_dma_start) */
961 s3c2410_dma_started(struct s3c2410_dma_chan *chan)
965 local_irq_save(flags);
969 /* if we've only loaded one buffer onto the channel, then check
970 * to see if we have another, and if so, try and load it so when
971 * the first buffer is finished, the new one will be loaded onto
974 if (chan->next != NULL) {
975 if (chan->load_state == S3C2410_DMALOAD_1LOADED) {
977 if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
978 pr_debug("%s: buff not yet loaded, no more todo\n",
981 chan->load_state = S3C2410_DMALOAD_1RUNNING;
982 s3c2410_dma_loadbuffer(chan, chan->next);
985 } else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) {
986 s3c2410_dma_loadbuffer(chan, chan->next);
991 local_irq_restore(flags);
/* dispatch a channel-control operation to the matching handler;
 * unknown ops fall through to -ENOENT */
998 s3c2410_dma_ctrl(dmach_t channel, enum s3c2410_chan_op op)
1000 struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
1002 check_channel(channel);
1005 case S3C2410_DMAOP_START:
1006 return s3c2410_dma_start(chan);
1008 case S3C2410_DMAOP_STOP:
1009 return s3c2410_dma_dostop(chan);
1011 case S3C2410_DMAOP_PAUSE:
1012 case S3C2410_DMAOP_RESUME:
1015 case S3C2410_DMAOP_FLUSH:
1016 return s3c2410_dma_flush(chan);
1018 case S3C2410_DMAOP_STARTED:
1019 return s3c2410_dma_started(chan);
1021 case S3C2410_DMAOP_TIMEOUT:
1026 return -ENOENT; /* unknown, don't bother */
1029 EXPORT_SYMBOL(s3c2410_dma_ctrl);
1031 /* DMA configuration for each channel
1033 * DISRCC -> source of the DMA (AHB,APB)
1034 * DISRC -> source address of the DMA
1035 * DIDSTC -> destination of the DMA (AHB,APD)
1036 * DIDST -> destination address of the DMA
1039 /* s3c2410_dma_config
1041 * xfersize: size of unit in bytes (1,2,4)
1042 * dcon: base value of the DCONx register
1045 int s3c2410_dma_config(dmach_t channel,
1049 struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
1051 pr_debug("%s: chan=%d, xfer_unit=%d, dcon=%08x\n",
1052 __FUNCTION__, channel, xferunit, dcon);
1054 check_channel(channel);
/* fold the transfer-unit width into the DCON base value */
1058 dcon |= S3C2410_DCON_BYTE;
1062 dcon |= S3C2410_DCON_HALFWORD;
1066 dcon |= S3C2410_DCON_WORD;
1070 pr_debug("%s: bad transfer size %d\n", __FUNCTION__, xferunit);
/* always hardware-triggered with interrupt-on-completion */
1074 dcon |= S3C2410_DCON_HWTRIG;
1075 dcon |= S3C2410_DCON_INTREQ;
1077 pr_debug("%s: dcon now %08x\n", __FUNCTION__, dcon);
1080 chan->xfer_unit = xferunit;
1085 EXPORT_SYMBOL(s3c2410_dma_config);
/* set the channel's behaviour flags (e.g. S3C2410_DMAF_AUTOSTART,
 * consulted by s3c2410_dma_enqueue) */
1087 int s3c2410_dma_setflags(dmach_t channel, unsigned int flags)
1089 struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
1091 check_channel(channel);
1093 pr_debug("%s: chan=%p, flags=%08x\n", __FUNCTION__, chan, flags);
1095 chan->flags = flags;
1100 EXPORT_SYMBOL(s3c2410_dma_setflags);
1103 /* do we need to protect the settings of the fields from
/* register the client's operation callback, invoked by
 * s3c2410_dma_call_op() on start/stop/etc. events.
 * NOTE(review): the assignment line (presumably chan->op_fn = rtn)
 * is missing from this extract. */
1107 int s3c2410_dma_set_opfn(dmach_t channel, s3c2410_dma_opfn_t rtn)
1109 struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
1111 check_channel(channel);
1113 pr_debug("%s: chan=%p, op rtn=%p\n", __FUNCTION__, chan, rtn);
1120 EXPORT_SYMBOL(s3c2410_dma_set_opfn);
/* register the client's buffer-completion callback, invoked from
 * s3c2410_dma_buffdone() when a transfer finishes or is aborted */
1122 int s3c2410_dma_set_buffdone_fn(dmach_t channel, s3c2410_dma_cbfn_t rtn)
1124 struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
1126 check_channel(channel);
1128 pr_debug("%s: chan=%p, callback rtn=%p\n", __FUNCTION__, chan, rtn);
1130 chan->callback_fn = rtn;
1135 EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
1137 /* s3c2410_dma_devconfig
1139 * configure the dma source/destination hardware type and address
1141 * source: S3C2410_DMASRC_HW: source is hardware
1142 * S3C2410_DMASRC_MEM: source is memory
1144 * hwcfg: the value for xxxSTCn register,
1145 * bit 0: 0=increment pointer, 1=leave pointer
1146 * bit 1: 0=source is AHB, 1=source is APB
1148 * devaddr: physical address of the source
1151 int s3c2410_dma_devconfig(int channel,
1152 enum s3c2410_dmasrc source,
1154 unsigned long devaddr)
1156 struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
1158 check_channel(channel);
1160 pr_debug("%s: source=%d, hwcfg=%08x, devaddr=%08lx\n",
1161 __FUNCTION__, (int)source, hwcfg, devaddr);
1163 chan->source = source;
1164 chan->dev_addr = devaddr;
1167 case S3C2410_DMASRC_HW:
1168 /* source is hardware */
1169 pr_debug("%s: hw source, devaddr=%08lx, hwcfg=%d\n",
1170 __FUNCTION__, devaddr, hwcfg);
/* fixed peripheral source; per-buffer destination addresses are
 * later written through chan->addr_reg (DIDST) */
1171 dma_wrreg(chan, S3C2410_DMA_DISRCC, hwcfg & 3);
1172 dma_wrreg(chan, S3C2410_DMA_DISRC, devaddr);
1173 dma_wrreg(chan, S3C2410_DMA_DIDSTC, (0<<1) | (0<<0));
1175 chan->addr_reg = dma_regaddr(chan, S3C2410_DMA_DIDST);
1178 case S3C2410_DMASRC_MEM:
1179 /* source is memory */
1180 pr_debug( "%s: mem source, devaddr=%08lx, hwcfg=%d\n",
1181 __FUNCTION__, devaddr, hwcfg);
/* fixed peripheral destination; per-buffer source addresses are
 * later written through chan->addr_reg (DISRC) */
1182 dma_wrreg(chan, S3C2410_DMA_DISRCC, (0<<1) | (0<<0));
1183 dma_wrreg(chan, S3C2410_DMA_DIDST, devaddr);
1184 dma_wrreg(chan, S3C2410_DMA_DIDSTC, hwcfg & 3);
1186 chan->addr_reg = dma_regaddr(chan, S3C2410_DMA_DISRC);
1190 printk(KERN_ERR "dma%d: invalid source type (%d)\n", channel, source);
1194 EXPORT_SYMBOL(s3c2410_dma_devconfig);
1196 /* s3c2410_dma_getposition
1198 * returns the current transfer points for the dma source and destination
1201 int s3c2410_dma_getposition(dmach_t channel, dma_addr_t *src, dma_addr_t *dst)
1203 struct s3c2410_dma_chan *chan = &s3c2410_chans[channel];
1205 check_channel(channel);
/* read the engine's current source/destination counters; presumably
 * each pointer is optional (NULL-checked on the missing lines) —
 * TODO confirm against the full source */
1208 *src = dma_rdreg(chan, S3C2410_DMA_DCSRC);
1211 *dst = dma_rdreg(chan, S3C2410_DMA_DCDST);
1216 EXPORT_SYMBOL(s3c2410_dma_getposition);
1219 /* system device class */
/* sysdev suspend hook (pre-2.6.26 driver model): if the channel is
 * still active at suspend time, force-stop it rather than suspend with
 * DMA in flight */
1223 static int s3c2410_dma_suspend(struct sys_device *dev, pm_message_t state)
1225 struct s3c2410_dma_chan *cp = container_of(dev, struct s3c2410_dma_chan, dev);
1227 printk(KERN_DEBUG "suspending dma channel %d\n", cp->number);
1229 if (dma_rdreg(cp, S3C2410_DMA_DMASKTRIG) & S3C2410_DMASKTRIG_ON) {
1230 /* the dma channel is still working, which is probably
1231 * a bad thing to do over suspend/resume. We stop the
1232 * channel and assume that the client is either going to
1233 * retry after resume, or that it is broken.
1236 printk(KERN_INFO "dma: stopping channel %d due to suspend\n",
1239 s3c2410_dma_dostop(cp);
1245 static int s3c2410_dma_resume(struct sys_device *dev)
1251 #define s3c2410_dma_suspend NULL
1252 #define s3c2410_dma_resume NULL
1253 #endif /* CONFIG_PM */
1255 static struct sysdev_class dma_sysclass = {
1256 set_kset_name("s3c24xx-dma"),
1257 .suspend = s3c2410_dma_suspend,
1258 .resume = s3c2410_dma_resume,
1261 /* kmem cache implementation */
/* slab constructor for dma_kmem: zero each new buffer descriptor
 * (old three-argument kmem_cache ctor signature) */
1263 static void s3c2410_dma_cache_ctor(void *p, kmem_cache_t *c, unsigned long f)
1265 memset(p, 0, sizeof(struct s3c2410_dma_buf));
1269 /* initialisation code */
/* driver init: map the DMA controller registers, register the sysdev
 * class and the per-descriptor slab cache, then initialise and register
 * each channel (register block, irq number, stats, sysdev) */
1271 static int __init s3c2410_init_dma(void)
1273 struct s3c2410_dma_chan *cp;
1277 printk("S3C2410 DMA Driver, (c) 2003-2004 Simtec Electronics\n");
1279 dma_base = ioremap(S3C24XX_PA_DMA, 0x200);
1280 if (dma_base == NULL) {
1281 printk(KERN_ERR "dma failed to remap register block\n");
1285 ret = sysdev_class_register(&dma_sysclass);
1287 printk(KERN_ERR "dma sysclass registration failed\n");
1291 dma_kmem = kmem_cache_create("dma_desc", sizeof(struct s3c2410_dma_buf), 0,
1293 s3c2410_dma_cache_ctor, NULL);
1295 if (dma_kmem == NULL) {
1296 printk(KERN_ERR "dma failed to make kmem cache\n");
1301 for (channel = 0; channel < S3C2410_DMA_CHANNELS; channel++) {
1302 cp = &s3c2410_chans[channel];
1304 memset(cp, 0, sizeof(struct s3c2410_dma_chan));
1306 /* dma channel irqs are in order.. */
1307 cp->number = channel;
1308 cp->irq = channel + IRQ_DMA0;
1309 cp->regs = dma_base + (channel*0x40);
1311 /* point current stats somewhere */
1312 cp->stats = &cp->stats_store;
/* seed shortest with a maximal value so the first sample wins */
1313 cp->stats_store.timeout_shortest = LONG_MAX;
1315 /* basic channel configuration */
1317 cp->load_timeout = 1<<18;
1319 /* register system device */
1321 cp->dev.cls = &dma_sysclass;
1322 cp->dev.id = channel;
1323 ret = sysdev_register(&cp->dev);
1325 printk("DMA channel %d at %p, irq %d\n",
1326 cp->number, cp->regs, cp->irq);
/* error-path cleanup: drop the slab cache.
 * NOTE(review): the error labels and the iounmap/class-unregister
 * cleanup lines appear to be missing from this extract. */
1332 kmem_cache_destroy(dma_kmem);
1338 __initcall(s3c2410_init_dma);