/*
    User DMA

    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "ivtv-driver.h"
#include "ivtv-udma.h"
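/*
 * ivtv_udma_get_page_info(): describe how a user buffer of 'size' bytes
 * starting at virtual address 'first' maps onto pages: the page-aligned
 * start (uaddr), the offset into the first page, the number of bytes
 * occupied in the last page (tail), and the first/last page indices.
 *
 * A worked example, assuming 4 KiB pages: first = 0x12345678 and
 * size = 0x2000 span pages 0x12345..0x12347, so uaddr = 0x12345000,
 * offset = 0x678, page_count = 3 and tail = 0x678 (the last page holds
 * bytes 0x000..0x677).
 */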
void ivtv_udma_get_page_info(struct ivtv_dma_page_info *dma_page, unsigned long first, unsigned long size)
{
	dma_page->uaddr = first & PAGE_MASK;
	dma_page->offset = first & ~PAGE_MASK;
	dma_page->tail = 1 + ((first + size - 1) & ~PAGE_MASK);
	dma_page->first = (first & PAGE_MASK) >> PAGE_SHIFT;
	dma_page->last = ((first + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	dma_page->page_count = dma_page->last - dma_page->first + 1;
	/* a single-page buffer starts at 'offset', so its tail shrinks by that much */
	if (dma_page->page_count == 1) dma_page->tail -= dma_page->offset;
}
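/*
 * ivtv_udma_fill_sg_list(): populate the scatter-gather list from the
 * pinned user pages, starting at SG index 'map_offset'.  Pages in high
 * memory have no permanent kernel mapping, so their contents are copied
 * into low-memory bounce pages (allocated on demand and kept in
 * 'bouncemap') and the SG entry points at the bounce page instead.
 * Returns the index one past the last entry filled, or -1 if a bounce
 * page could not be allocated.
 */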
int ivtv_udma_fill_sg_list(struct ivtv_user_dma *dma, struct ivtv_dma_page_info *dma_page, int map_offset)
{
	int i, offset;
	unsigned long flags;

	offset = dma_page->offset;

	/* Fill SG Array with new values */
	for (i = 0; i < dma_page->page_count; i++) {
		unsigned int len = (i == dma_page->page_count - 1) ?
			dma_page->tail : PAGE_SIZE - offset;

		if (PageHighMem(dma->map[map_offset])) {
			void *src;

			if (dma->bouncemap[map_offset] == NULL)
				dma->bouncemap[map_offset] = alloc_page(GFP_KERNEL);
			if (dma->bouncemap[map_offset] == NULL)
				return -1;
			local_irq_save(flags);
			src = kmap_atomic(dma->map[map_offset], KM_BOUNCE_READ) + offset;
			memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
			kunmap_atomic(src, KM_BOUNCE_READ);
			local_irq_restore(flags);
			sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset], len, offset);
		}
		else {
			sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset], len, offset);
		}
		/* only the first page starts mid-page */
		offset = 0;
		map_offset++;
	}
	return map_offset;
}
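/*
 * ivtv_udma_fill_sg_array(): convert the mapped scatterlist into the
 * card's little-endian SG descriptor array.  'split' lets one transfer
 * be redirected to a second destination: once 'split' bytes have been
 * consumed, the destination jumps to buffer_offset_2.  Passing (u32)-1,
 * as ivtv_udma_setup() does, effectively disables the split since the
 * counter never reaches zero.
 */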
void ivtv_udma_fill_sg_array(struct ivtv_user_dma *dma, u32 buffer_offset, u32 buffer_offset_2, u32 split)
{
	int i;
	struct scatterlist *sg;

	for (i = 0, sg = dma->SGlist; i < dma->SG_length; i++, sg++) {
		dma->SGarray[i].size = cpu_to_le32(sg_dma_len(sg));
		dma->SGarray[i].src = cpu_to_le32(sg_dma_address(sg));
		dma->SGarray[i].dst = cpu_to_le32(buffer_offset);
		buffer_offset += sg_dma_len(sg);

		split -= sg_dma_len(sg);
		if (split == 0)
			buffer_offset = buffer_offset_2;
	}
}
/* User DMA Buffers */
void ivtv_udma_alloc(struct ivtv *itv)
{
	if (itv->udma.SG_handle == 0) {
		/* Map DMA Page Array Buffer */
		itv->udma.SG_handle = pci_map_single(itv->dev, itv->udma.SGarray,
			sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
		ivtv_udma_sync_for_cpu(itv);
	}
}
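/*
 * ivtv_udma_setup(): pin the user buffer, build the scatterlist and the
 * card's SG descriptor array, and tag the final descriptor so the card
 * raises an interrupt when the transfer completes.  Returns the number
 * of pinned pages, or a negative errno.
 *
 * A sketch of the typical call sequence; the exact call sites are
 * elsewhere in the driver and this ordering is inferred from the code
 * in this file:
 *
 *	ivtv_udma_alloc(itv);                        once, at init
 *	ivtv_udma_setup(itv, dst, userbuf, size);    pin pages, build SG
 *	ivtv_udma_prepare(itv);                      start or queue the DMA
 *	    ...DMA-complete interrupt...
 *	ivtv_udma_unmap(itv);                        unmap and unpin
 *	ivtv_udma_free(itv);                         once, at teardown
 */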
int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
		void __user *userbuf, int size_in_bytes)
{
	struct ivtv_dma_page_info user_dma;
	struct ivtv_user_dma *dma = &itv->udma;
	int i, err;

	IVTV_DEBUG_DMA("ivtv_udma_setup, dst: 0x%08x\n", (unsigned int)ivtv_dest_addr);

	/* Still in use? The previous transfer has not been cleaned up yet. */
	if (dma->SG_length || dma->page_count) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: SG_length %d page_count %d still full?\n",
				dma->SG_length, dma->page_count);
		return -EBUSY;
	}

	ivtv_udma_get_page_info(&user_dma, (unsigned long)userbuf, size_in_bytes);

	if (user_dma.page_count <= 0) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: Error %d page_count from %d bytes %d offset\n",
				user_dma.page_count, size_in_bytes, user_dma.offset);
		return -EINVAL;
	}

	/* Get user pages for DMA Xfer */
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm,
			user_dma.uaddr, user_dma.page_count, 0, 1, dma->map, NULL);
	up_read(&current->mm->mmap_sem);

	if (user_dma.page_count != err) {
		IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
				err, user_dma.page_count);
		return -EINVAL;
	}

	dma->page_count = user_dma.page_count;

	/* Fill SG List with new values */
	if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) {
		for (i = 0; i < dma->page_count; i++) {
			put_page(dma->map[i]);
		}
		dma->page_count = 0;
		return -ENOMEM;
	}

	/* Map SG List */
	dma->SG_length = pci_map_sg(itv->dev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);

	/* Fill SG Array with new values */
	ivtv_udma_fill_sg_array(dma, ivtv_dest_addr, 0, -1);

	/* Tag SG Array with Interrupt Bit */
	dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);

	ivtv_udma_sync_for_device(itv);
	return dma->page_count;
}
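/*
 * ivtv_udma_unmap(): undo ivtv_udma_setup(): unmap the scatterlist,
 * sync the descriptor array back to the CPU, and release the pinned
 * user pages.  A no-op when no transfer is outstanding.
 */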
void ivtv_udma_unmap(struct ivtv *itv)
{
	struct ivtv_user_dma *dma = &itv->udma;
	int i;

	IVTV_DEBUG_INFO("ivtv_unmap_user_dma\n");

	/* Nothing to free */
	if (dma->page_count == 0)
		return;

	/* Unmap Scatterlist */
	if (dma->SG_length) {
		pci_unmap_sg(itv->dev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);
		dma->SG_length = 0;
	}
	/* sync DMA */
	ivtv_udma_sync_for_cpu(itv);

	/* Release User Pages */
	for (i = 0; i < dma->page_count; i++) {
		put_page(dma->map[i]);
	}
	dma->page_count = 0;
}
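/*
 * ivtv_udma_free(): driver-teardown counterpart of ivtv_udma_alloc():
 * unmap the SG descriptor array and any leftover scatterlist, then free
 * all bounce pages that were allocated for highmem user buffers.
 */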
void ivtv_udma_free(struct ivtv *itv)
{
	int i;

	/* Unmap SG Array */
	if (itv->udma.SG_handle) {
		pci_unmap_single(itv->dev, itv->udma.SG_handle,
			sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
	}

	/* Unmap Scatterlist */
	if (itv->udma.SG_length) {
		pci_unmap_sg(itv->dev, itv->udma.SGlist, itv->udma.page_count, PCI_DMA_TODEVICE);
	}

	for (i = 0; i < IVTV_DMA_SG_OSD_ENT; i++) {
		if (itv->udma.bouncemap[i])
			__free_page(itv->udma.bouncemap[i]);
	}
}
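/*
 * ivtv_udma_start(): point the card's decoder-DMA address register at
 * the SG descriptor array, set the transfer-start bit, and flag the
 * transfer as in progress.  Called with dma_reg_lock held, as in
 * ivtv_udma_prepare() below.
 */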
void ivtv_udma_start(struct ivtv *itv)
{
	IVTV_DEBUG_DMA("start UDMA\n");
	write_reg(itv->udma.SG_handle, IVTV_REG_DECDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	set_bit(IVTV_F_I_UDMA, &itv->i_flags);
}
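/*
 * ivtv_udma_prepare(): start the transfer immediately if no DMA is in
 * progress; otherwise set IVTV_F_I_UDMA_PENDING so the transfer can be
 * started later, once the engine goes idle.
 */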
void ivtv_udma_prepare(struct ivtv *itv)
{
	unsigned long flags;

	spin_lock_irqsave(&itv->dma_reg_lock, flags);
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		ivtv_udma_start(itv);
	else
		set_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags);
	spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}