/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

/**
 * \c nopage method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
                                                unsigned long address)
{
        drm_file_t *priv  = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t *map    = NULL;
        drm_map_list_t *r_list;
        struct list_head *list;

        /*
         * Find the right map
         */
        if (!drm_core_has_AGP(dev))
                goto vm_nopage_error;

        if (!dev->agp || !dev->agp->cant_use_aperture)
                goto vm_nopage_error;

        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);
                map = r_list->map;
                if (!map) continue;
                if (r_list->user_token == VM_OFFSET(vma))
                        break;
        }

        if (map && map->type == _DRM_AGP) {
                unsigned long offset = address - vma->vm_start;
                unsigned long baddr = map->offset + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#ifdef __alpha__
                /*
                 * Adjust to a bus-relative address
                 */
                baddr -= dev->hose->mem_space->start;
#endif

                /*
                 * It's AGP memory - find the real physical page to map
                 */
                for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                if (!agpmem) goto vm_nopage_error;

                /*
                 * Get the page, inc the use count, and return it
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                page = virt_to_page(__va(agpmem->memory->memory[offset]));
                get_page(page);

                DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
                          baddr, __va(agpmem->memory->memory[offset]), offset,
                          page_count(page));

                return page;
        }
vm_nopage_error:
        return NOPAGE_SIGBUS;   /* Disallow mremap */
}
#else /* __OS_HAS_AGP */
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
                                                unsigned long address)
{
        return NOPAGE_SIGBUS;
}
#endif /* __OS_HAS_AGP */
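
/*
 * Worked example of the lookup above (illustrative numbers only, not from
 * the original source): with map->offset = 0xf0000000 and a fault at
 * vma->vm_start + 0x3000, baddr is 0xf0003000.  If the matching agpmem
 * entry is bound at 0xf0000000, the page index is
 * (baddr - agpmem->bound) >> PAGE_SHIFT = 3, so the struct page backing
 * the fault is recovered from agpmem->memory->memory[3].
 */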

/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
                                                    unsigned long address)
{
        drm_map_t *map = (drm_map_t *)vma->vm_private_data;
        unsigned long offset;
        unsigned long i;
        struct page *page;

        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!map)                  return NOPAGE_OOM;  /* Nothing allocated */

        offset = address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = (map->type == _DRM_CONSISTENT) ?
                virt_to_page((void *)i) : vmalloc_to_page((void *)i);
        if (!page)
                return NOPAGE_OOM;
        get_page(page);

        DRM_DEBUG("shm_nopage 0x%lx\n", address);
        return page;
}
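
/*
 * The page lookup above differs by map type: _DRM_CONSISTENT memory comes
 * from the PCI consistent-DMA allocator and is expected to live in the
 * kernel direct mapping, hence virt_to_page(); _DRM_SHM memory is
 * vmalloc()ed (and vfree()d in drm_vm_shm_close() below), hence
 * vmalloc_to_page().
 */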

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
        drm_file_t      *priv   = vma->vm_file->private_data;
        drm_device_t    *dev    = priv->head->dev;
        drm_vma_entry_t *pt, *prev, *next;
        drm_map_t *map;
        drm_map_list_t *r_list;
        struct list_head *list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        map = vma->vm_private_data;

        down(&dev->struct_sem);
        for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
                next = pt->next;
                if (pt->vma->vm_private_data == map) found_maps++;
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
                } else {
                        prev = pt;
                }
        }
        /* We were the only map that was found */
        if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
                /* Check to see if we are in the maplist; if we are not,
                 * then we delete this mapping's information.
                 */
                found_maps = 0;
                list = &dev->maplist->head;
                list_for_each(list, &dev->maplist->head) {
                        r_list = list_entry(list, drm_map_list_t, head);
                        if (r_list->map == map) found_maps++;
                }

                if (!found_maps) {
                        drm_dma_handle_t dmah;

                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
                                if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
                                drm_ioremapfree(map->handle, map->size, dev);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
                                break;
                        case _DRM_CONSISTENT:
                                dmah.vaddr = map->handle;
                                dmah.busaddr = map->offset;
                                dmah.size = map->size;
                                __drm_pci_free(dev, &dmah);
                                break;
                        }
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                }
        }
        up(&dev->struct_sem);
}

/**
 * \c nopage method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from
 * drm_device_dma::pagelist.
 */
static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
                                                    unsigned long address)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        unsigned long offset;
        unsigned long page_nr;
        struct page *page;

        if (!dma)                  return NOPAGE_SIGBUS; /* Error */
        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!dma->pagelist)        return NOPAGE_OOM;  /* Nothing allocated */

        offset = address - vma->vm_start;       /* vm_[pg]off[set] should be 0 */
        page_nr = offset >> PAGE_SHIFT;
        page = virt_to_page((dma->pagelist[page_nr] +
                             (offset & (~PAGE_MASK))));

        get_page(page);

        DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
        return page;
}
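
/*
 * Example of the arithmetic above (illustrative, assuming 4 KiB pages): a
 * fault at vma->vm_start + 0x5010 gives offset 0x5010 and page_nr 5; the
 * low bits 0x010 select the byte within dma->pagelist[5], and
 * virt_to_page() then yields the struct page for that DMA buffer page.
 */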

/**
 * \c nopage method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from
 * drm_sg_mem::pagelist.
 */
static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
                                                   unsigned long address)
{
        drm_map_t *map = (drm_map_t *)vma->vm_private_data;
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_sg_mem_t *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        if (!entry)                return NOPAGE_SIGBUS; /* Error */
        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!entry->pagelist)      return NOPAGE_OOM;  /* Nothing allocated */

        offset = address - vma->vm_start;
        map_offset = map->offset - (unsigned long)dev->sg->virtual;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);

        return page;
}
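
/*
 * Example of the offset calculation above (illustrative numbers): if
 * dev->sg->virtual is 0xd0000000 and map->offset is 0xd0040000, then
 * map_offset is 0x40000 (page 64 with 4 KiB pages); a fault 0x2000 bytes
 * into the VMA adds 2, so entry->pagelist[66] backs the faulting address.
 */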

static struct page *drm_vm_nopage(struct vm_area_struct *vma,
                                  unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_nopage(vma, address);
}

static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
                                      unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_shm_nopage(vma, address);
}

static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
                                      unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_dma_nopage(vma, address);
}

static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
                                     unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_sg_nopage(vma, address);
}

/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
        .nopage = drm_vm_nopage,
        .open   = drm_vm_open,
        .close  = drm_vm_close,
};

/** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = {
        .nopage = drm_vm_shm_nopage,
        .open   = drm_vm_open,
        .close  = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = {
        .nopage = drm_vm_dma_nopage,
        .open   = drm_vm_open,
        .close  = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = {
        .nopage = drm_vm_sg_nopage,
        .open   = drm_vm_open,
        .close  = drm_vm_close,
};

/**
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
static void drm_vm_open(struct vm_area_struct *vma)
{
        drm_file_t      *priv   = vma->vm_file->private_data;
        drm_device_t    *dev    = priv->head->dev;
        drm_vma_entry_t *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_inc(&dev->vma_count);

        vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
        if (vma_entry) {
                down(&dev->struct_sem);
                vma_entry->vma  = vma;
                vma_entry->next = dev->vmalist;
                vma_entry->pid  = current->pid;
                dev->vmalist    = vma_entry;
                up(&dev->struct_sem);
        }
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
        drm_file_t      *priv   = vma->vm_file->private_data;
        drm_device_t    *dev    = priv->head->dev;
        drm_vma_entry_t *pt, *prev;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        down(&dev->struct_sem);
        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
                        break;
                }
        }
        up(&dev->struct_sem);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops and the
 * file pointer, then calls drm_vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t       *priv   = filp->private_data;
        drm_device_t     *dev;
        drm_device_dma_t *dma;
        unsigned long    length  = vma->vm_end - vma->vm_start;

        lock_kernel();
        dev = priv->head->dev;
        dma = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

        /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                unlock_kernel();
                return -EINVAL;
        }
        unlock_kernel();

        vma->vm_ops = &drm_vm_dma_ops;

        vma->vm_flags |= VM_RESERVED;   /* Don't swap */

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open(vma);
        return 0;
}
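
/*
 * Example of the length check above (illustrative): with 4 KiB pages and
 * dma->page_count == 32, only a mapping of exactly 32 << PAGE_SHIFT
 * (128 KiB) is accepted; any other length fails with -EINVAL.
 */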

unsigned long drm_core_get_map_ofs(drm_map_t *map)
{
        return map->offset;
}
EXPORT_SYMBOL(drm_core_get_map_ofs);

unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
        return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
        return 0;
#endif
}
EXPORT_SYMBOL(drm_core_get_reg_ofs);

/**
 * mmap DRM memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so drm_mmap_dma() is called. Otherwise the map is looked up in
 * drm_device::maplist, the restricted flag is checked, the virtual memory
 * operations are set according to the mapping type and the pages are remapped.
 * Finally the file pointer is set and drm_vm_open() is called.
 */
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t      *priv   = filp->private_data;
        drm_device_t    *dev    = priv->head->dev;
        drm_map_t       *map    = NULL;
        drm_map_list_t  *r_list;
        unsigned long   offset  = 0;
        struct list_head *list;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

        if (!priv->authenticated)
                return -EACCES;

        /* We check for "dma". On Apple's UniNorth, it's valid to have
         * the AGP mapped at physical address 0
         * --BenH.
         */
        if (!VM_OFFSET(vma)
#if __OS_HAS_AGP
            && (!dev->agp || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
            )
                return drm_mmap_dma(filp, vma);

        /* A sequential search of a linked list is
           fine here because: 1) there will only be
           about 5-10 entries in the list and, 2) a
           DRI client only has to do this mapping
           once, so it doesn't have to be optimized
           for performance, even if the list was a
           bit longer. */
        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);
                map = r_list->map;
                if (!map) continue;
                if (r_list->user_token == VM_OFFSET(vma))
                        break;
        }

        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

        /* Check for valid size. */
        if (map->size != vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
                        __pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        switch (map->type) {
        case _DRM_AGP:
                if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms we can't talk to the bus DMA
                         * address from the CPU, so for memory of type
                         * _DRM_AGP we'll deal with sorting out the real
                         * physical pages and mappings in nopage()
                         */
#if defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
                        vma->vm_ops = &drm_vm_ops;
                        break;
                }
                /* fall through to _DRM_FRAME_BUFFER... */
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
#if defined(__i386__) || defined(__x86_64__)
                if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
                        pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
                        pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
                }
#elif defined(__powerpc__)
                pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
                if (map->type == _DRM_REGISTERS)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;
#endif
                vma->vm_flags |= VM_IO; /* not in core dump */
#if defined(__ia64__)
                if (efi_range_is_wc(vma->vm_start, vma->vm_end -
                                    vma->vm_start))
                        vma->vm_page_prot =
                                pgprot_writecombine(vma->vm_page_prot);
                else
                        vma->vm_page_prot =
                                pgprot_noncached(vma->vm_page_prot);
#endif
                offset = dev->driver->get_reg_ofs(dev);
#ifdef __sparc__
                if (io_remap_pfn_range(DRM_RPR_ARG(vma) vma->vm_start,
                                       (map->offset + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
#else
                if (io_remap_pfn_range(vma, vma->vm_start,
                                       (map->offset + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
#endif
                        return -EAGAIN;
                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%lx\n",
                          map->type,
                          vma->vm_start, vma->vm_end, map->offset + offset);
                vma->vm_ops = &drm_vm_ops;
                break;
        case _DRM_SHM:
        case _DRM_CONSISTENT:
                /* Consistent memory is really like shared memory. It's only
                 * allocated in a different way. */
                vma->vm_ops = &drm_vm_shm_ops;
                vma->vm_private_data = (void *)map;
                /* Don't let this area swap.  Change when
                   DRM_KERNEL advisory is supported. */
                vma->vm_flags |= VM_RESERVED;
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &drm_vm_sg_ops;
                vma->vm_private_data = (void *)map;
                vma->vm_flags |= VM_RESERVED;
                break;
        default:
                return -EINVAL; /* This should never happen. */
        }
        vma->vm_flags |= VM_RESERVED;   /* Don't swap */

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open(vma);
        return 0;
}
EXPORT_SYMBOL(drm_mmap);
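
/*
 * Illustrative userspace usage (a sketch, not part of the original file):
 * a DRI client typically reaches drm_mmap() through libdrm's drmMap(),
 * which amounts to an mmap() on the DRM file descriptor with the map's
 * user token as the offset, e.g.
 *
 *     void *addr = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, drm_fd, user_token);
 *
 * map_size and user_token are assumed to have been obtained from the
 * map-listing ioctls; the token is what drm_mmap() matches against
 * r_list->user_token above.
 */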