/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
/**
 * \c nopage method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
                                                unsigned long address)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t *map = NULL;
        drm_map_list_t *r_list;
        struct list_head *list;

        /*
         * Find the right map
         */
        if (!drm_core_has_AGP(dev))
                goto vm_nopage_error;

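        /*
         * Only resolve faults here when the aperture is not CPU-accessible
         * (cant_use_aperture); otherwise drm_mmap() already remapped the
         * aperture pages directly and no nopage handling is required.
         */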
        if (!dev->agp || !dev->agp->cant_use_aperture)
                goto vm_nopage_error;

        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);
                map = r_list->map;
                if (!map)
                        continue;
                if (r_list->user_token == VM_OFFSET(vma))
                        break;
        }

        if (map && map->type == _DRM_AGP) {
                unsigned long offset = address - vma->vm_start;
                unsigned long baddr = map->offset + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#ifdef __alpha__
                /*
                 * Adjust to a bus-relative address
                 */
                baddr -= dev->hose->mem_space->start;
#endif

                /*
                 * It's AGP memory - find the real physical page to map
                 */
                for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                if (!agpmem)
                        goto vm_nopage_error;

                /*
                 * Get the page, inc the use count, and return it
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                page = virt_to_page(__va(agpmem->memory->memory[offset]));
                get_page(page);

                DRM_DEBUG
                    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
                     baddr, __va(agpmem->memory->memory[offset]), offset,
                     page_count(page));

                return page;
        }
      vm_nopage_error:
        return NOPAGE_SIGBUS;   /* Disallow mremap */
}
#else                           /* __OS_HAS_AGP */
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
                                                unsigned long address)
{
        return NOPAGE_SIGBUS;
}
#endif                          /* __OS_HAS_AGP */

/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
                                                    unsigned long address)
{
        drm_map_t *map = (drm_map_t *) vma->vm_private_data;
        unsigned long offset;
        unsigned long i;
        struct page *page;

        if (address > vma->vm_end)
                return NOPAGE_SIGBUS;   /* Disallow mremap */
        if (!map)
                return NOPAGE_OOM;      /* Nothing allocated */

        offset = address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
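        /*
         * _DRM_SHM maps are vmalloc()ed, so the page must be looked up with
         * vmalloc_to_page(); _DRM_CONSISTENT memory lives in the kernel's
         * direct mapping, where virt_to_page() is valid.
         */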
        page = (map->type == _DRM_CONSISTENT) ?
                virt_to_page((void *)i) : vmalloc_to_page((void *)i);
        if (!page)
                return NOPAGE_OOM;
        get_page(page);

        DRM_DEBUG("shm_nopage 0x%lx\n", address);
        return page;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_vma_entry_t *pt, *prev, *next;
        drm_map_t *map;
        drm_map_list_t *r_list;
        struct list_head *list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        map = vma->vm_private_data;

        mutex_lock(&dev->struct_mutex);
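        /*
         * Walk the VMA list: count every mapping that references this map
         * (including the one being closed) and unlink/free the entry for
         * this vma.  found_maps == 1 below means we were the last user.
         */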
        for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
                next = pt->next;
                if (pt->vma->vm_private_data == map)
                        found_maps++;
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
                } else {
                        prev = pt;
                }
        }
        /* We were the only mapping that was found */
        if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
                /* Check to see if we are in the maplist; if we are not,
                 * then we delete this mapping's information.
                 */
                found_maps = 0;
                list_for_each(list, &dev->maplist->head) {
                        r_list = list_entry(list, drm_map_list_t, head);
                        if (r_list->map == map)
                                found_maps++;
                }

                if (!found_maps) {
                        drm_dma_handle_t dmah;

                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
                                if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
                                drm_ioremapfree(map->handle, map->size, dev);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
                                break;
                        case _DRM_CONSISTENT:
                                dmah.vaddr = map->handle;
                                dmah.busaddr = map->offset;
                                dmah.size = map->size;
                                __drm_pci_free(dev, &dmah);
                                break;
                        }
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

/**
 * \c nopage method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
                                                    unsigned long address)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        unsigned long offset;
        unsigned long page_nr;
        struct page *page;

        if (!dma)
                return NOPAGE_SIGBUS;   /* Error */
        if (address > vma->vm_end)
                return NOPAGE_SIGBUS;   /* Disallow mremap */
        if (!dma->pagelist)
                return NOPAGE_OOM;      /* Nothing allocated */

        offset = address - vma->vm_start;       /* vm_[pg]off[set] should be 0 */
        page_nr = offset >> PAGE_SHIFT;
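        /*
         * dma->pagelist[] holds the kernel virtual address of each DMA
         * buffer page, so virt_to_page() on the entry (the in-page offset
         * does not change the page) yields the backing struct page.
         */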
        page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

        get_page(page);

        DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
        return page;
}

/**
 * \c nopage method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
                                                   unsigned long address)
{
        drm_map_t *map = (drm_map_t *) vma->vm_private_data;
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_sg_mem_t *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        if (!entry)
                return NOPAGE_SIGBUS;   /* Error */
        if (address > vma->vm_end)
                return NOPAGE_SIGBUS;   /* Disallow mremap */
        if (!entry->pagelist)
                return NOPAGE_OOM;      /* Nothing allocated */

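        /*
         * The map is a window into the scatter-gather region: translate the
         * fault offset within the vma into an offset within the SG area
         * (map->offset lies inside dev->sg->virtual) and index the pagelist.
         */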
        offset = address - vma->vm_start;
        map_offset = map->offset - (unsigned long)dev->sg->virtual;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);

        return page;
}

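/*
 * Thin wrappers adapting the drm_do_vm_*_nopage() helpers to the ->nopage
 * signature; they report VM_FAULT_MINOR since the page already exists and
 * no I/O is needed to bring it in.
 */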
static struct page *drm_vm_nopage(struct vm_area_struct *vma,
                                  unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_nopage(vma, address);
}

static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
                                      unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_shm_nopage(vma, address);
}

static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
                                      unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_dma_nopage(vma, address);
}

static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
                                     unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_sg_nopage(vma, address);
}

/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
        .nopage = drm_vm_nopage,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = {
        .nopage = drm_vm_shm_nopage,
        .open = drm_vm_open,
        .close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = {
        .nopage = drm_vm_dma_nopage,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = {
        .nopage = drm_vm_sg_nopage,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/**
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
static void drm_vm_open(struct vm_area_struct *vma)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_vma_entry_t *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_inc(&dev->vma_count);

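        /*
         * The vmalist is bookkeeping only (it feeds DRM's debug/proc
         * output), so an allocation failure is silently tolerated rather
         * than failing the mapping.
         */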
        vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
        if (vma_entry) {
                mutex_lock(&dev->struct_mutex);
                vma_entry->vma = vma;
                vma_entry->next = dev->vmalist;
                vma_entry->pid = current->pid;
                dev->vmalist = vma_entry;
                mutex_unlock(&dev->struct_mutex);
        }
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_vma_entry_t *pt, *prev;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        mutex_lock(&dev->struct_mutex);
        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
                        break;
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops, sets
 * the file pointer, and calls drm_vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev;
        drm_device_dma_t *dma;
        unsigned long length = vma->vm_end - vma->vm_start;

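        /*
         * Hold the BKL around the dev->dma lookup and the length check so
         * the buffer set cannot change underneath us; the mapping must
         * cover the DMA buffer pages exactly.
         */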
        lock_kernel();
        dev = priv->head->dev;
        dma = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

        /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                unlock_kernel();
                return -EINVAL;
        }
        unlock_kernel();

        vma->vm_ops = &drm_vm_dma_ops;

        vma->vm_flags |= VM_RESERVED;   /* Don't swap */

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open(vma);
        return 0;
}

unsigned long drm_core_get_map_ofs(drm_map_t * map)
{
        return map->offset;
}

EXPORT_SYMBOL(drm_core_get_map_ofs);

unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
{
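        /*
         * On Alpha, register access goes through the hose's dense memory
         * space: return the delta that converts a bus address into a
         * dense-space physical address suitable for remapping.
         */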
#ifdef __alpha__
        return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
        return 0;
#endif
}

EXPORT_SYMBOL(drm_core_get_reg_ofs);

/**
 * mmap DRM memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so it calls drm_mmap_dma(). Otherwise it searches for the map in
 * drm_device::maplist, checks that the restricted flag is not set, sets the
 * virtual memory operations according to the map type, and remaps the pages.
 * Finally it sets the file pointer and calls drm_vm_open().
 */
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t *map = NULL;
        drm_map_list_t *r_list;
        unsigned long offset = 0;
        struct list_head *list;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

        if (!priv->authenticated)
                return -EACCES;

        /* We check for "dma". On Apple's UniNorth, it's valid to have
         * the AGP mapped at physical address 0
         * --BenH.
         */
        if (!VM_OFFSET(vma)
#if __OS_HAS_AGP
            && (!dev->agp
                || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
            )
                return drm_mmap_dma(filp, vma);

        /* A sequential search of a linked list is
           fine here because: 1) there will only be
           about 5-10 entries in the list and, 2) a
           DRI client only has to do this mapping
           once, so it doesn't have to be optimized
           for performance, even if the list was a
           bit longer. */
        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);
                map = r_list->map;
                if (!map)
                        continue;
                if (r_list->user_token == VM_OFFSET(vma))
                        break;
        }

        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

        /* Check for valid size. */
        if (map->size != vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        switch (map->type) {
        case _DRM_AGP:
                if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms the CPU cannot access the AGP
                         * aperture's bus addresses directly, so for memory
                         * of type _DRM_AGP we sort out the real physical
                         * pages and mappings in nopage()
                         */
#if defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
                        vma->vm_ops = &drm_vm_ops;
                        break;
                }
                /* fall through to _DRM_FRAME_BUFFER... */
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
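                /*
                 * MMIO and framebuffer ranges need uncached (or, on ia64,
                 * possibly write-combining) access, so adjust the page
                 * protection bits per architecture before remapping.
                 */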
#if defined(__i386__) || defined(__x86_64__)
                if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
                        pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
                        pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
                }
#elif defined(__powerpc__)
                pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
                if (map->type == _DRM_REGISTERS)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;
#endif
                vma->vm_flags |= VM_IO; /* not in core dump */
#if defined(__ia64__)
                if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
                        vma->vm_page_prot =
                            pgprot_writecombine(vma->vm_page_prot);
                else
                        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
                offset = dev->driver->get_reg_ofs(dev);
#ifdef __sparc__
                if (io_remap_pfn_range(DRM_RPR_ARG(vma) vma->vm_start,
                                       (map->offset + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
#else
                if (io_remap_pfn_range(vma, vma->vm_start,
                                       (map->offset + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
#endif
                        return -EAGAIN;
                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%lx\n",
                          map->type,
                          vma->vm_start, vma->vm_end, map->offset + offset);
                vma->vm_ops = &drm_vm_ops;
                break;
        case _DRM_SHM:
        case _DRM_CONSISTENT:
                /* Consistent memory is really like shared memory. It's only
                 * allocated in a different way */
                vma->vm_ops = &drm_vm_shm_ops;
                vma->vm_private_data = (void *)map;
                /* Don't let this area swap.  Change when
                   DRM_KERNEL advisory is supported. */
                vma->vm_flags |= VM_RESERVED;
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &drm_vm_sg_ops;
                vma->vm_private_data = (void *)map;
                vma->vm_flags |= VM_RESERVED;
                break;
        default:
                return -EINVAL; /* This should never happen. */
        }
        vma->vm_flags |= VM_RESERVED;   /* Don't swap */

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open(vma);
        return 0;
}

EXPORT_SYMBOL(drm_mmap);
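
/*
 * For illustration: userspace reaches drm_mmap() by mmap()ing the DRM device
 * fd with a map's user_token as the file offset (the token is handed out by
 * the map-creation ioctls; exact retrieval varies by driver and libdrm
 * version).  A minimal, hypothetical sketch with illustrative names:
 *
 *	// token and map_size obtained from the X server or a DRM map ioctl
 *	void *ptr = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *	                 MAP_SHARED, fd, (off_t)token);
 *
 * The token lookup above is what the r_list->user_token == VM_OFFSET(vma)
 * comparison in drm_mmap() resolves.
 */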