/*
 * helper functions for SG DMA video4linux capture buffers
 *
 * The functions expect the hardware to be able to scatter-gather
 * (i.e. the buffers are not linear in physical memory, but fragmented
 * into PAGE_SIZE chunks).  They also assume the driver does not need
 * to touch the video data.
 *
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
 *
 * Highly based on video-buf written originally by:
 * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
 * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
 * (c) 2006 Ted Walther and John Sokol
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>

#include <asm/pgtable.h>

#include <media/videobuf-dma-sg.h>
#define MAGIC_DMABUF 0x19721112
#define MAGIC_SG_MEM 0x17890714

#define MAGIC_CHECK(is, should) if (unlikely((is) != (should))) \
	{ printk(KERN_ERR "magic mismatch: %x (expected %x)\n", is, should); BUG(); }
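
/*
 * Every private structure handled here is stamped with one of the magic
 * cookies above; MAGIC_CHECK() catches callers that hand us the wrong
 * kind of object and BUG()s out with a diagnostic.
 */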
static int debug;
module_param(debug, int, 0644);

MODULE_DESCRIPTION("helper module to manage video4linux dma sg buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");

#define dprintk(level, fmt, arg...) if (debug >= level) \
	printk(KERN_DEBUG "vbuf-sg: " fmt , ## arg)
/* --------------------------------------------------------------------- */
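
/*
 * Scatterlist builders.  videobuf_vmalloc_to_sg() describes a vmalloc'ed
 * kernel buffer one PAGE_SIZE entry at a time via vmalloc_to_page();
 * videobuf_pages_to_sg() wraps an array of already-pinned user pages,
 * where only the first entry may start at a non-zero offset in its page.
 */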
struct scatterlist *videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages)
{
	struct scatterlist *sglist;
	struct page *pg;
	int i;

	sglist = vmalloc(nr_pages * sizeof(*sglist));
	if (NULL == sglist)
		return NULL;
	sg_init_table(sglist, nr_pages);	/* also zeroes the array */
	for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
		pg = vmalloc_to_page(virt);
		BUG_ON(NULL == pg || PageHighMem(pg));
		sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
	}
	return sglist;
}
struct scatterlist *videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
{
	struct scatterlist *sglist;
	int i;

	sglist = vmalloc(nr_pages * sizeof(*sglist));
	if (NULL == sglist)
		return NULL;
	sg_init_table(sglist, nr_pages);

	if (PageHighMem(pages[0]))
		/* DMA to highmem pages might not work */
		goto highmem;
	sg_set_page(&sglist[0], pages[0], PAGE_SIZE - offset, offset);
	for (i = 1; i < nr_pages; i++) {
		if (NULL == pages[i])
			goto nopage;
		if (PageHighMem(pages[i]))
			goto highmem;
		sg_set_page(&sglist[i], pages[i], PAGE_SIZE, 0);
	}
	return sglist;

nopage:
	dprintk(2,"sgl: oops - no page\n");
	vfree(sglist);
	return NULL;
highmem:
	dprintk(2,"sgl: oops - highmem page\n");
	vfree(sglist);
	return NULL;
}
/* --------------------------------------------------------------------- */
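
/*
 * Rough life cycle of a videobuf_dmabuf, as used by SG-capable capture
 * drivers (a sketch only; error handling and locking are omitted, and
 * "q" and "buf" stand for the driver's queue and buffer):
 *
 *	struct videobuf_dmabuf *dma = videobuf_to_dma(buf);
 *
 *	videobuf_dma_init(dma);                         zero the state once
 *	videobuf_dma_init_user(dma, DMA_FROM_DEVICE,
 *			       buf->baddr, buf->bsize); pin the user pages
 *	videobuf_dma_map(q, dma);                       build + map the sglist
 *	  ... program the DMA engine from dma->sglist / dma->sglen ...
 *	videobuf_dma_sync(q, dma);                      before the CPU reads
 *	videobuf_dma_unmap(q, dma);
 *	videobuf_dma_free(dma);                         unpin the pages
 */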
struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf)
{
	struct videobuf_dma_sg_memory *mem = buf->priv;
	MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
	return &mem->dma;
}
void videobuf_dma_init(struct videobuf_dmabuf *dma)
{
	memset(dma, 0, sizeof(*dma));
	dma->magic = MAGIC_DMABUF;
}
static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
			int direction, unsigned long data, unsigned long size)
{
	unsigned long first, last;
	int err, rw = 0;

	dma->direction = direction;
	switch (dma->direction) {
	case DMA_FROM_DEVICE:
		rw = READ;
		break;
	case DMA_TO_DEVICE:
		rw = WRITE;
		break;
	default:
		BUG();
	}

	first = (data & PAGE_MASK) >> PAGE_SHIFT;
	last  = ((data+size-1) & PAGE_MASK) >> PAGE_SHIFT;
	dma->offset   = data & ~PAGE_MASK;
	dma->nr_pages = last-first+1;
	dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (NULL == dma->pages)
		return -ENOMEM;
	dprintk(1,"init user [0x%lx+0x%lx => %d pages]\n",
		data,size,dma->nr_pages);

	err = get_user_pages(current,current->mm,
			     data & PAGE_MASK, dma->nr_pages,
			     rw == READ, 1, /* force */
			     dma->pages, NULL);

	if (err != dma->nr_pages) {
		dma->nr_pages = (err >= 0) ? err : 0;
		dprintk(1,"get_user_pages: err=%d [%d]\n",err,dma->nr_pages);
		return err < 0 ? err : -EINVAL;
	}
	return 0;
}
int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction,
			   unsigned long data, unsigned long size)
{
	int ret;

	down_read(&current->mm->mmap_sem);
	ret = videobuf_dma_init_user_locked(dma, direction, data, size);
	up_read(&current->mm->mmap_sem);
	return ret;
}
int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction,
			     int nr_pages)
{
	dprintk(1,"init kernel [%d pages]\n",nr_pages);
	dma->direction = direction;
	dma->vmalloc = vmalloc_32(nr_pages << PAGE_SHIFT);
	if (NULL == dma->vmalloc) {
		dprintk(1,"vmalloc_32(%d pages) failed\n",nr_pages);
		return -ENOMEM;
	}

	dprintk(1,"vmalloc is at addr 0x%08lx, size=%d\n",
		(unsigned long)dma->vmalloc,
		nr_pages << PAGE_SHIFT);
	memset(dma->vmalloc,0,nr_pages << PAGE_SHIFT);
	dma->nr_pages = nr_pages;
	return 0;
}
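
/*
 * Overlay buffers live at a fixed, physically contiguous bus address
 * (typically video memory), so there is no page array to pin here; the
 * bus_addr is simply recorded and videobuf_dma_map() later builds a
 * single-entry scatterlist for it by hand.
 */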
int videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction,
			      dma_addr_t addr, int nr_pages)
{
	dprintk(1,"init overlay [%d pages @ bus 0x%lx]\n",
		nr_pages,(unsigned long)addr);
	dma->direction = direction;
	dma->bus_addr = addr;
	dma->nr_pages = nr_pages;
	return 0;
}
int videobuf_dma_map(struct videobuf_queue *q, struct videobuf_dmabuf *dma)
{
	MAGIC_CHECK(dma->magic,MAGIC_DMABUF);
	BUG_ON(0 == dma->nr_pages);

	if (dma->pages)
		dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages,
						   dma->offset);
	if (dma->vmalloc)
		dma->sglist = videobuf_vmalloc_to_sg(dma->vmalloc,
						     dma->nr_pages);
	if (dma->bus_addr) {
		dma->sglist = vmalloc(sizeof(*dma->sglist));
		if (NULL != dma->sglist) {
			dma->sglen = 1;
			sg_dma_address(&dma->sglist[0]) = dma->bus_addr & PAGE_MASK;
			dma->sglist[0].offset = dma->bus_addr & ~PAGE_MASK;
			sg_dma_len(&dma->sglist[0]) = dma->nr_pages * PAGE_SIZE;
		}
	}
	if (NULL == dma->sglist) {
		dprintk(1,"scatterlist is NULL\n");
		return -ENOMEM;
	}
	if (!dma->bus_addr) {
		dma->sglen = dma_map_sg(q->dev, dma->sglist,
					dma->nr_pages, dma->direction);
		if (0 == dma->sglen) {
			printk(KERN_WARNING
			       "%s: videobuf_map_sg failed\n",__func__);
			vfree(dma->sglist);
			dma->sglist = NULL;
			dma->sglen = 0;
			return -EIO;
		}
	}
	return 0;
}
int videobuf_dma_sync(struct videobuf_queue *q, struct videobuf_dmabuf *dma)
{
	MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
	dma_sync_sg_for_cpu(q->dev, dma->sglist, dma->nr_pages, dma->direction);
	return 0;
}
int videobuf_dma_unmap(struct videobuf_queue *q, struct videobuf_dmabuf *dma)
{
	MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
	dma_unmap_sg(q->dev, dma->sglist, dma->nr_pages, dma->direction);
	return 0;
}
int videobuf_dma_free(struct videobuf_dmabuf *dma)
{
	int i;

	MAGIC_CHECK(dma->magic,MAGIC_DMABUF);
	if (dma->pages) {
		for (i = 0; i < dma->nr_pages; i++)
			page_cache_release(dma->pages[i]);
		kfree(dma->pages);
		dma->pages = NULL;
	}
	dma->direction = DMA_NONE;
	return 0;
}
/* --------------------------------------------------------------------- */
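
/*
 * Convenience wrappers: build a throw-away struct videobuf_queue just to
 * carry the struct device into videobuf_dma_map()/videobuf_dma_unmap(),
 * for callers that have a device but no queue at hand.
 */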
int videobuf_sg_dma_map(struct device *dev, struct videobuf_dmabuf *dma)
{
	struct videobuf_queue q;
	q.dev = dev;
	return videobuf_dma_map(&q, dma);
}

int videobuf_sg_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma)
{
	struct videobuf_queue q;
	q.dev = dev;
	return videobuf_dma_unmap(&q, dma);
}
/* --------------------------------------------------------------------- */
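
/*
 * mmap support.  The vma is backed by ordinary anonymous pages that are
 * faulted in on first access (videobuf_vm_fault) and later pinned for DMA
 * by __videobuf_iolock(); the mapping is reference counted and the last
 * munmap() releases the associated buffers via q->ops->buf_release().
 */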
static void videobuf_vm_open(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;

	dprintk(2,"vm_open %p [count=%d,vma=%08lx-%08lx]\n",map,
		map->count,vma->vm_start,vma->vm_end);
	map->count++;
}
static void videobuf_vm_close(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;
	struct videobuf_queue *q = map->q;
	struct videobuf_dma_sg_memory *mem;
	int i;

	dprintk(2,"vm_close %p [count=%d,vma=%08lx-%08lx]\n",map,
		map->count,vma->vm_start,vma->vm_end);

	map->count--;
	if (0 == map->count) {
		dprintk(1,"munmap %p q=%p\n",map,q);
		mutex_lock(&q->vb_lock);
		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
			if (NULL == q->bufs[i])
				continue;
			mem = q->bufs[i]->priv;
			if (!mem)
				continue;
			MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);
			if (q->bufs[i]->map != map)
				continue;
			q->bufs[i]->map   = NULL;
			q->bufs[i]->baddr = 0;
			q->ops->buf_release(q,q->bufs[i]);
		}
		mutex_unlock(&q->vb_lock);
		kfree(map);
	}
}
/*
 * Get an anonymous page for the mapping.  Make sure we can DMA to that
 * memory location with 32-bit PCI devices (i.e. don't use highmem for
 * now ...).  Bounce buffers don't work very well for the data rates
 * video capture has.
 */
static int videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;

	dprintk(3,"fault: fault @ %08lx [vma %08lx-%08lx]\n",
		(unsigned long)vmf->virtual_address,vma->vm_start,vma->vm_end);
	page = alloc_page(GFP_USER | __GFP_DMA32);
	if (!page)
		return VM_FAULT_OOM;
	clear_user_highpage(page, (unsigned long)vmf->virtual_address);
	vmf->page = page;
	return 0;
}
static struct vm_operations_struct videobuf_vm_ops =
{
	.open     = videobuf_vm_open,
	.close    = videobuf_vm_close,
	.fault    = videobuf_vm_fault,
};
/* ---------------------------------------------------------------------
 * SG handlers for the generic methods
 */

/* Allocated area consists of 3 parts:
	struct videobuf_buffer   (the generic part, at the start)
	struct <driver>_buffer   (cx88_buffer, saa7134_buf, ...)
	struct videobuf_dma_sg_memory
 */
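
/*
 * __videobuf_alloc() below carves all three out of one kzalloc()ed block:
 * the driver's buffer struct (which embeds struct videobuf_buffer as its
 * first member) comes first, and vb->priv is pointed just past it, at the
 * struct videobuf_dma_sg_memory tail.
 */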
static void *__videobuf_alloc(size_t size)
{
	struct videobuf_dma_sg_memory *mem;
	struct videobuf_buffer *vb;

	vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
	if (!vb)
		return vb;

	mem = vb->priv = ((char *)vb) + size;
	mem->magic = MAGIC_SG_MEM;

	videobuf_dma_init(&mem->dma);

	dprintk(1,"%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
		__func__,vb,(long)sizeof(*vb),(long)size-sizeof(*vb),
		mem,(long)sizeof(*mem));

	return vb;
}
static void *__videobuf_to_vmalloc(struct videobuf_buffer *buf)
{
	struct videobuf_dma_sg_memory *mem = buf->priv;

	MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
	return mem->dma.vmalloc;
}
static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	struct videobuf_dma_sg_memory *mem = vb->priv;
	int err, pages;
	dma_addr_t bus;

	MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
	case V4L2_MEMORY_USERPTR:
		if (0 == vb->baddr) {
			/* no userspace addr -- kernel bounce buffer */
			pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
			err = videobuf_dma_init_kernel(&mem->dma,
						       DMA_FROM_DEVICE, pages);
			if (0 != err)
				return err;
		} else if (vb->memory == V4L2_MEMORY_USERPTR) {
			/* dma directly to userspace */
			err = videobuf_dma_init_user(&mem->dma,
						     DMA_FROM_DEVICE,
						     vb->baddr, vb->bsize);
			if (0 != err)
				return err;
		} else {
			/* NOTE: HACK: videobuf_iolock on V4L2_MEMORY_MMAP
			   buffers can only be called from videobuf_qbuf;
			   we take current->mm->mmap_sem there, to prevent
			   a locking inversion, so don't take it here */
			err = videobuf_dma_init_user_locked(&mem->dma,
							    DMA_FROM_DEVICE,
							    vb->baddr, vb->bsize);
			if (0 != err)
				return err;
		}
		break;
	case V4L2_MEMORY_OVERLAY:
		if (NULL == fbuf)
			return -EINVAL;
		/* FIXME: need sanity checks for vb->boff */
		/*
		 * Using a double cast to avoid compiler warnings when
		 * building for PAE.  The compiler doesn't like directly
		 * casting a 32 bit ptr to a 64 bit integer.
		 */
		bus = (dma_addr_t)(unsigned long)fbuf->base + vb->boff;
		pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
		err = videobuf_dma_init_overlay(&mem->dma, DMA_FROM_DEVICE,
						bus, pages);
		if (0 != err)
			return err;
		break;
	default:
		BUG();
	}
	err = videobuf_dma_map(q, &mem->dma);
	if (0 != err)
		return err;
	return 0;
}
static int __videobuf_sync(struct videobuf_queue *q,
			   struct videobuf_buffer *buf)
{
	struct videobuf_dma_sg_memory *mem = buf->priv;

	MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);
	return videobuf_dma_sync(q,&mem->dma);
}
static int __videobuf_mmap_free(struct videobuf_queue *q)
{
	int i;

	for (i = 0; i < VIDEO_MAX_FRAME; i++)
		if (q->bufs[i] && q->bufs[i]->map)
			return -EBUSY;
	return 0;
}
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
				  struct vm_area_struct *vma)
{
	struct videobuf_dma_sg_memory *mem;
	struct videobuf_mapping *map;
	unsigned int first, last, size, i;

	if (!(vma->vm_flags & VM_WRITE)) {
		dprintk(1,"mmap app bug: PROT_WRITE please\n");
		return -EINVAL;
	}
	if (!(vma->vm_flags & VM_SHARED)) {
		dprintk(1,"mmap app bug: MAP_SHARED please\n");
		return -EINVAL;
	}
	/* This function maintains backwards compatibility with V4L1: if the
	 * vma length equals the combined size of several consecutive buffers,
	 * they are all mapped together with a single mmap() call.  See
	 * VIDIOCGMBUF in the v4l spec; a worked example follows below.
	 *
	 * TODO: Allow drivers to specify if they support this mode
	 */
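
	/*
	 * Worked example (hypothetical numbers): with four buffers of
	 * bsize 0x40000 at boff 0x0, 0x40000, 0x80000 and 0xc0000, an
	 * mmap() of length 0x100000 at offset 0 matches first=0 and
	 * last=3 below, so all four are mapped in one go and buffer i
	 * gets baddr = vma->vm_start + i * 0x40000.
	 */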
	/* look for first buffer to map */
	for (first = 0; first < VIDEO_MAX_FRAME; first++) {
		if (NULL == q->bufs[first])
			continue;
		mem = q->bufs[first]->priv;
		MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);
		if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
			continue;
		if (q->bufs[first]->boff == (vma->vm_pgoff << PAGE_SHIFT))
			break;
	}
	if (VIDEO_MAX_FRAME == first) {
		dprintk(1,"mmap app bug: offset invalid [offset=0x%lx]\n",
			(vma->vm_pgoff << PAGE_SHIFT));
		return -EINVAL;
	}
	/* look for last buffer to map */
	for (size = 0, last = first; last < VIDEO_MAX_FRAME; last++) {
		if (NULL == q->bufs[last])
			continue;
		if (V4L2_MEMORY_MMAP != q->bufs[last]->memory)
			continue;
		if (q->bufs[last]->map)
			return -EBUSY;	/* already mapped */
		size += q->bufs[last]->bsize;
		if (size == (vma->vm_end - vma->vm_start))
			break;
	}
	if (VIDEO_MAX_FRAME == last) {
		dprintk(1,"mmap app bug: size invalid [size=0x%lx]\n",
			(vma->vm_end - vma->vm_start));
		return -EINVAL;
	}
	/* create mapping + update buffer list */
	map = kmalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
	if (NULL == map)
		return -ENOMEM;

	size = 0;
	for (i = first; i <= last; i++) {
		if (NULL == q->bufs[i])
			continue;
		q->bufs[i]->map   = map;
		q->bufs[i]->baddr = vma->vm_start + size;
		size += q->bufs[i]->bsize;
	}
	map->count    = 1;
	map->start    = vma->vm_start;
	map->end      = vma->vm_end;
	map->q        = q;
	vma->vm_ops   = &videobuf_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
	vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
	vma->vm_private_data = map;
	dprintk(1,"mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
		map,q,vma->vm_start,vma->vm_end,vma->vm_pgoff,first,last);
	return 0;
}
static int __videobuf_copy_to_user(struct videobuf_queue *q,
				   char __user *data, size_t count,
				   int nonblocking)
{
	struct videobuf_dma_sg_memory *mem = q->read_buf->priv;

	MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);

	/* copy to userspace */
	if (count > q->read_buf->size - q->read_off)
		count = q->read_buf->size - q->read_off;
	if (copy_to_user(data, mem->dma.vmalloc+q->read_off, count))
		return -EFAULT;
	return count;
}
static int __videobuf_copy_stream(struct videobuf_queue *q,
				  char __user *data, size_t count, size_t pos,
				  int vbihack, int nonblocking)
{
	unsigned int *fc;
	struct videobuf_dma_sg_memory *mem = q->read_buf->priv;

	MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);

	if (vbihack) {
		/* dirty, undocumented hack -- pass the frame counter
		 * within the last four bytes of each vbi data block.
		 * We need that one to maintain backward compatibility
		 * with all the vbi decoding software out there ... */
		fc  = (unsigned int *)mem->dma.vmalloc;
		fc += (q->read_buf->size >> 2) - 1;
		*fc = q->read_buf->field_count >> 1;
		dprintk(1,"vbihack: %d\n",*fc);
	}

	/* copy stuff using the common method */
	count = __videobuf_copy_to_user(q,data,count,nonblocking);
	if ((count == -EFAULT) && (0 == pos))
		return -EFAULT;
	return count;
}
static struct videobuf_qtype_ops sg_ops = {
	.magic        = MAGIC_QTYPE_OPS,

	.alloc        = __videobuf_alloc,
	.iolock       = __videobuf_iolock,
	.sync         = __videobuf_sync,
	.mmap_free    = __videobuf_mmap_free,
	.mmap_mapper  = __videobuf_mmap_mapper,
	.video_copy_to_user = __videobuf_copy_to_user,
	.copy_stream  = __videobuf_copy_stream,
	.vmalloc      = __videobuf_to_vmalloc,
};
void *videobuf_sg_alloc(size_t size)
{
	struct videobuf_queue q;

	/* Required to make the generic handler call __videobuf_alloc */
	q.int_ops = &sg_ops;
	q.msize = size;
	return videobuf_alloc(&q);
}
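
/*
 * videobuf_queue_sg_init() is the entry point SG-capable drivers use to
 * set up a queue; it just forwards to videobuf_queue_core_init() with
 * &sg_ops plugged in.  A driver's open() path would call it roughly like
 * this (a sketch only; "my_video_qops", "fh", "dev" and "struct my_buffer"
 * are placeholder driver names, not part of this API):
 *
 *	videobuf_queue_sg_init(&fh->vidq, &my_video_qops,
 *			       &dev->pci->dev, &dev->slock,
 *			       V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *			       V4L2_FIELD_INTERLACED,
 *			       sizeof(struct my_buffer), fh);
 */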
void videobuf_queue_sg_init(struct videobuf_queue *q,
			    struct videobuf_queue_ops *ops,
			    struct device *dev, spinlock_t *irqlock,
			    enum v4l2_buf_type type, enum v4l2_field field,
			    unsigned int msize, void *priv)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &sg_ops);
}
/* --------------------------------------------------------------------- */
EXPORT_SYMBOL_GPL(videobuf_vmalloc_to_sg);

EXPORT_SYMBOL_GPL(videobuf_to_dma);
EXPORT_SYMBOL_GPL(videobuf_dma_init);
EXPORT_SYMBOL_GPL(videobuf_dma_init_user);
EXPORT_SYMBOL_GPL(videobuf_dma_init_kernel);
EXPORT_SYMBOL_GPL(videobuf_dma_init_overlay);
EXPORT_SYMBOL_GPL(videobuf_dma_map);
EXPORT_SYMBOL_GPL(videobuf_dma_sync);
EXPORT_SYMBOL_GPL(videobuf_dma_unmap);
EXPORT_SYMBOL_GPL(videobuf_dma_free);

EXPORT_SYMBOL_GPL(videobuf_sg_dma_map);
EXPORT_SYMBOL_GPL(videobuf_sg_dma_unmap);
EXPORT_SYMBOL_GPL(videobuf_sg_alloc);

EXPORT_SYMBOL_GPL(videobuf_queue_sg_init);