drivers/media/video/videobuf-dma-contig.c
/*
 * helper functions for physically contiguous capture buffers
 *
 * The functions support hardware lacking scatter gather support
 * (i.e. the buffers must be linear in physical memory)
 *
 * Copyright (c) 2008 Magnus Damm
 *
 * Based on videobuf-vmalloc.c,
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <media/videobuf-dma-contig.h>

struct videobuf_dma_contig_memory {
        u32 magic;
        void *vaddr;
        dma_addr_t dma_handle;
        unsigned long size;
        int is_userptr;
};

#define MAGIC_DC_MEM 0x0733ac61
#define MAGIC_CHECK(is, should)                                             \
        if (unlikely((is) != (should))) {                                   \
                pr_err("magic mismatch: %x expected %x\n", (is), (should)); \
                BUG();                                                      \
        }

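/* vm_ops open handler: called whenever a new reference to the mapping
   is created (mmap, fork, VMA split); just bumps the reference count. */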
static void
videobuf_vm_open(struct vm_area_struct *vma)
{
        struct videobuf_mapping *map = vma->vm_private_data;

        dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
                map, map->count, vma->vm_start, vma->vm_end);

        map->count++;
}

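/* vm_ops close handler: drop the mapping reference count.  When the last
   reference goes away, cancel streaming if needed and free the coherent
   DMA memory of all buffers that belong to this mapping. */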
static void videobuf_vm_close(struct vm_area_struct *vma)
{
        struct videobuf_mapping *map = vma->vm_private_data;
        struct videobuf_queue *q = map->q;
        int i;

        dev_dbg(map->q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
                map, map->count, vma->vm_start, vma->vm_end);

        map->count--;
        if (0 == map->count) {
                struct videobuf_dma_contig_memory *mem;

                dev_dbg(map->q->dev, "munmap %p q=%p\n", map, q);
                mutex_lock(&q->vb_lock);

                /* We need to cancel any active streams first, before unmapping */
                if (q->streaming)
                        videobuf_queue_cancel(q);

                for (i = 0; i < VIDEO_MAX_FRAME; i++) {
                        if (NULL == q->bufs[i])
                                continue;

                        if (q->bufs[i]->map != map)
                                continue;

                        mem = q->bufs[i]->priv;
                        if (mem) {
                                /* This callback is called only if the kernel
                                   has allocated memory and that memory has
                                   been mmapped.  In that case the memory must
                                   be freed here to undo the mapping.
                                 */

                                MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

                                /* dma_free_coherent() is not atomic and
                                   must not be called with IRQs disabled
                                 */
                                dev_dbg(map->q->dev, "buf[%d] freeing %p\n",
                                        i, mem->vaddr);

                                dma_free_coherent(q->dev, mem->size,
                                                  mem->vaddr, mem->dma_handle);
                                mem->vaddr = NULL;
                        }

                        q->bufs[i]->map   = NULL;
                        q->bufs[i]->baddr = 0;
                }

                kfree(map);

                mutex_unlock(&q->vb_lock);
        }
}


static struct vm_operations_struct videobuf_vm_ops = {
        .open     = videobuf_vm_open,
        .close    = videobuf_vm_close,
};

/**
 * videobuf_dma_contig_user_put() - reset pointer to user space buffer
 * @mem: per-buffer private videobuf-dma-contig data
 *
 * This function resets the user space pointer
 */
static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
{
        mem->is_userptr = 0;
        mem->dma_handle = 0;
        mem->size = 0;
}

/**
 * videobuf_dma_contig_user_get() - setup user space memory pointer
 * @mem: per-buffer private videobuf-dma-contig data
 * @vb: video buffer to map
 *
 * This function validates and sets up a pointer to user space memory.
 * Only physically contiguous pfn-mapped memory is accepted.
 *
 * Returns 0 if successful.
 */
static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
                                        struct videobuf_buffer *vb)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long prev_pfn, this_pfn;
        unsigned long pages_done, user_address;
        int ret;

        mem->size = PAGE_ALIGN(vb->size);
        mem->is_userptr = 0;
        ret = -EINVAL;

        down_read(&mm->mmap_sem);

        vma = find_vma(mm, vb->baddr);
        if (!vma)
                goto out_up;

        if ((vb->baddr + mem->size) > vma->vm_end)
                goto out_up;

        pages_done = 0;
        prev_pfn = 0; /* kill warning */
        user_address = vb->baddr;

        while (pages_done < (mem->size >> PAGE_SHIFT)) {
                ret = follow_pfn(vma, user_address, &this_pfn);
                if (ret)
                        break;

                if (pages_done == 0)
                        mem->dma_handle = this_pfn << PAGE_SHIFT;
                else if (this_pfn != (prev_pfn + 1))
                        ret = -EFAULT;

                if (ret)
                        break;

                prev_pfn = this_pfn;
                user_address += PAGE_SIZE;
                pages_done++;
        }

        if (!ret)
                mem->is_userptr = 1;

 out_up:
        up_read(&current->mm->mmap_sem);

        return ret;
}

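/* Allocate a videobuf_buffer plus the videobuf-dma-contig private data
   in one go; 'size' is the size of the driver's buffer structure and the
   private data is placed directly behind it. */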
static void *__videobuf_alloc(size_t size)
{
        struct videobuf_dma_contig_memory *mem;
        struct videobuf_buffer *vb;

        vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
        if (vb) {
                mem = vb->priv = ((char *)vb) + size;
                mem->magic = MAGIC_DC_MEM;
        }

        return vb;
}

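/* Return the kernel virtual address of the buffer memory (NULL until
   the buffer has been allocated). */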
static void *__videobuf_to_vmalloc(struct videobuf_buffer *buf)
{
        struct videobuf_dma_contig_memory *mem = buf->priv;

        BUG_ON(!mem);
        MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

        return mem->vaddr;
}

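/* Prepare buffer memory for I/O.  MMAP buffers must already have been
   allocated by __videobuf_mmap_mapper(); USERPTR buffers either get their
   user pointer validated or, when no pointer is passed (read() method),
   coherent memory is allocated here. */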
static int __videobuf_iolock(struct videobuf_queue *q,
                             struct videobuf_buffer *vb,
                             struct v4l2_framebuffer *fbuf)
{
        struct videobuf_dma_contig_memory *mem = vb->priv;

        BUG_ON(!mem);
        MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

        switch (vb->memory) {
        case V4L2_MEMORY_MMAP:
                dev_dbg(q->dev, "%s memory method MMAP\n", __func__);

                /* All handling should be done by __videobuf_mmap_mapper() */
                if (!mem->vaddr) {
                        dev_err(q->dev, "memory is not allocated/mmapped.\n");
                        return -EINVAL;
                }
                break;
        case V4L2_MEMORY_USERPTR:
                dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);

                /* handle pointer from user space */
                if (vb->baddr)
                        return videobuf_dma_contig_user_get(mem, vb);

                /* allocate memory for the read() method */
                mem->size = PAGE_ALIGN(vb->size);
                mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
                                                &mem->dma_handle, GFP_KERNEL);
                if (!mem->vaddr) {
                        dev_err(q->dev, "dma_alloc_coherent %ld failed\n",
                                mem->size);
                        return -ENOMEM;
                }

                dev_dbg(q->dev, "dma_alloc_coherent data is at %p (%ld)\n",
                        mem->vaddr, mem->size);
                break;
        case V4L2_MEMORY_OVERLAY:
        default:
                dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n",
                        __func__);
                return -EINVAL;
        }

        return 0;
}

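/* Check whether the buffers can be freed: return -EBUSY as long as any
   buffer is still mapped into user space. */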
static int __videobuf_mmap_free(struct videobuf_queue *q)
{
        unsigned int i;

        dev_dbg(q->dev, "%s\n", __func__);
        for (i = 0; i < VIDEO_MAX_FRAME; i++) {
                if (q->bufs[i] && q->bufs[i]->map)
                        return -EBUSY;
        }

        return 0;
}

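/* mmap() handler: find the MMAP buffer matching vma->vm_pgoff, allocate
   coherent DMA memory for it and remap that memory into the calling
   process' address space. */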
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
                                  struct vm_area_struct *vma)
{
        struct videobuf_dma_contig_memory *mem;
        struct videobuf_mapping *map;
        unsigned int first;
        int retval;
        unsigned long size, offset = vma->vm_pgoff << PAGE_SHIFT;

        dev_dbg(q->dev, "%s\n", __func__);
        if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        /* look for first buffer to map */
        for (first = 0; first < VIDEO_MAX_FRAME; first++) {
                if (!q->bufs[first])
                        continue;

                if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
                        continue;
                if (q->bufs[first]->boff == offset)
                        break;
        }
        if (VIDEO_MAX_FRAME == first) {
                dev_dbg(q->dev, "invalid user space offset [offset=0x%lx]\n",
                        offset);
                return -EINVAL;
        }

        /* create mapping + update buffer list */
        map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
        if (!map)
                return -ENOMEM;

        q->bufs[first]->map = map;
        map->start = vma->vm_start;
        map->end = vma->vm_end;
        map->q = q;

        q->bufs[first]->baddr = vma->vm_start;

        mem = q->bufs[first]->priv;
        BUG_ON(!mem);
        MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

        mem->size = PAGE_ALIGN(q->bufs[first]->bsize);
        mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
                                        &mem->dma_handle, GFP_KERNEL);
        if (!mem->vaddr) {
                dev_err(q->dev, "dma_alloc_coherent size %ld failed\n",
                        mem->size);
                goto error;
        }
        dev_dbg(q->dev, "dma_alloc_coherent data is at addr %p (size %ld)\n",
                mem->vaddr, mem->size);

        /* Try to remap memory */

        size = vma->vm_end - vma->vm_start;
        size = (size < mem->size) ? size : mem->size;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        retval = remap_pfn_range(vma, vma->vm_start,
                                 mem->dma_handle >> PAGE_SHIFT,
                                 size, vma->vm_page_prot);
        if (retval) {
                dev_err(q->dev, "mmap: remap failed with error %d\n", retval);
                dma_free_coherent(q->dev, mem->size,
                                  mem->vaddr, mem->dma_handle);
                goto error;
        }

        vma->vm_ops          = &videobuf_vm_ops;
        vma->vm_flags       |= VM_DONTEXPAND;
        vma->vm_private_data = map;

        dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
                map, q, vma->vm_start, vma->vm_end,
                (long int) q->bufs[first]->bsize,
                vma->vm_pgoff, first);

        videobuf_vm_open(vma);

        return 0;

error:
        kfree(map);
        return -ENOMEM;
}

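/* Copy up to 'count' bytes from the current read buffer to user space,
   starting at the current read offset. */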
static int __videobuf_copy_to_user(struct videobuf_queue *q,
                                   char __user *data, size_t count,
                                   int nonblocking)
{
        struct videobuf_dma_contig_memory *mem = q->read_buf->priv;
        void *vaddr;

        BUG_ON(!mem);
        MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
        BUG_ON(!mem->vaddr);

        /* copy to userspace */
        if (count > q->read_buf->size - q->read_off)
                count = q->read_buf->size - q->read_off;

        vaddr = mem->vaddr;

        if (copy_to_user(data, vaddr + q->read_off, count))
                return -EFAULT;

        return count;
}

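/* Stream read helper: optionally patch the frame counter into the last
   word of a VBI buffer, then hand off to __videobuf_copy_to_user(). */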
static int __videobuf_copy_stream(struct videobuf_queue *q,
                                  char __user *data, size_t count, size_t pos,
                                  int vbihack, int nonblocking)
{
        unsigned int  *fc;
        struct videobuf_dma_contig_memory *mem = q->read_buf->priv;

        BUG_ON(!mem);
        MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

        if (vbihack) {
                /* dirty, undocumented hack -- pass the frame counter
                 * within the last four bytes of each vbi data block.
                 * We need that one to maintain backward compatibility
                 * to all vbi decoding software out there ... */
                fc = (unsigned int *)mem->vaddr;
                fc += (q->read_buf->size >> 2) - 1;
                *fc = q->read_buf->field_count >> 1;
                dev_dbg(q->dev, "vbihack: %d\n", *fc);
        }

        /* copy stuff using the common method */
        count = __videobuf_copy_to_user(q, data, count, nonblocking);

        if ((count == -EFAULT) && (pos == 0))
                return -EFAULT;

        return count;
}

static struct videobuf_qtype_ops qops = {
        .magic        = MAGIC_QTYPE_OPS,

        .alloc        = __videobuf_alloc,
        .iolock       = __videobuf_iolock,
        .mmap_free    = __videobuf_mmap_free,
        .mmap_mapper  = __videobuf_mmap_mapper,
        .video_copy_to_user = __videobuf_copy_to_user,
        .copy_stream  = __videobuf_copy_stream,
        .vmalloc      = __videobuf_to_vmalloc,
};

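/**
 * videobuf_queue_dma_contig_init() - initialize a videobuf queue
 * @q: videobuf queue to initialize
 * @ops: driver supplied queue operations
 * @dev: device used for coherent DMA allocations
 * @irqlock: driver spinlock protecting data shared with the interrupt handler
 * @type: V4L2 buffer type of the queue
 * @field: V4L2 field order
 * @msize: size of the driver specific per-buffer structure
 * @priv: driver private data attached to the queue
 *
 * Sets up @q with the dma-contig memory handlers (qops above) via
 * videobuf_queue_core_init().
 */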
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
                                    struct videobuf_queue_ops *ops,
                                    struct device *dev,
                                    spinlock_t *irqlock,
                                    enum v4l2_buf_type type,
                                    enum v4l2_field field,
                                    unsigned int msize,
                                    void *priv)
{
        videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
                                 priv, &qops);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);

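/**
 * videobuf_to_dma_contig() - return the DMA address of a buffer
 * @buf: videobuf buffer
 *
 * Returns the bus/DMA address (dma_handle) the hardware should use to
 * access the physically contiguous buffer memory.
 */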
dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
        struct videobuf_dma_contig_memory *mem = buf->priv;

        BUG_ON(!mem);
        MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

        return mem->dma_handle;
}
EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);

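/**
 * videobuf_dma_contig_free() - free the memory backing a buffer
 * @q: videobuf queue the buffer belongs to
 * @buf: videobuf buffer
 *
 * For USERPTR buffers this either resets the user space pointer or, if
 * the memory was allocated for the read() method, frees the coherent DMA
 * memory.  MMAP buffers are freed in videobuf_vm_close() instead.
 */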
void videobuf_dma_contig_free(struct videobuf_queue *q,
                              struct videobuf_buffer *buf)
{
        struct videobuf_dma_contig_memory *mem = buf->priv;

        /* mmapped memory can't be freed here, otherwise the mmapped region
           would be released while it is still needed; in that case the
           memory is released in videobuf_vm_close() instead.
           So free memory here only if it was allocated for the read()
           method.
         */
        if (buf->memory != V4L2_MEMORY_USERPTR)
                return;

        if (!mem)
                return;

        MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

        /* handle user space pointer case */
        if (buf->baddr) {
                videobuf_dma_contig_user_put(mem);
                return;
        }

        /* read() method */
        dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle);
        mem->vaddr = NULL;
}
EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);

MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");