/**
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
		pgprot_val(tmp) |= _PAGE_PCD;
		pgprot_val(tmp) &= ~_PAGE_PWT;
	}
#elif defined(__powerpc__)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
	if (map_type == _DRM_REGISTERS)
		pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
#endif
	return tmp;
}

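/*
 * Note: drm_io_prot()/drm_dma_prot() only compute a pgprot_t; the caller is
 * expected to install the result on the VMA before remapping. The typical
 * call pattern (as drm_mmap_locked() does further down) is:
 *
 *	vma->vm_page_prot = drm_io_prot(map->type, vma);
 *	if (io_remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot))
 *		return -EAGAIN;
 *
 * where pfn stands for the page frame number of the map being inserted
 * ((map->offset + offset) >> PAGE_SHIFT in drm_mmap_locked()).
 */
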
/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	if (!drm_core_has_AGP(dev))
		goto vm_fault_error;
	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;
	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		unsigned long offset = (unsigned long)vmf->virtual_address -
			vma->vm_start;
		unsigned long baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif
		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}
		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = virt_to_page(__va(agpmem->memory->memory[offset]));
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
		     baddr, __va(agpmem->memory->memory[offset]), offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else				/* __OS_HAS_AGP */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif				/* __OS_HAS_AGP */

/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist, if we are not, then
		 * we delete this mappings information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_pci_free(dev, &dmah);
				break;
			case _DRM_GEM:
				DRM_ERROR("tried to rmmap GEM object\n");
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}

/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}

static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_sg_fault(vma, vmf);
}

/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/**
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
void drm_vm_open_locked(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);

	vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(vma);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
			break;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
	vma->vm_flags |= VM_DONTEXPAND;

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);
	return 0;
}

unsigned long drm_core_get_map_ofs(struct drm_map * map)
{
	return map->offset;
}

EXPORT_SYMBOL(drm_core_get_map_ofs);

unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
	return 0;
#endif
}

EXPORT_SYMBOL(drm_core_get_reg_ofs);

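/*
 * For reference: these two helpers are the stock implementations that a
 * driver without special address translation can plug into its
 * struct drm_driver, alongside drm_mmap() as the fops mmap handler.
 * A minimal sketch (the "example_driver" name and the surrounding fields
 * are illustrative only, not part of this file):
 *
 *	static struct drm_driver example_driver = {
 *		.get_map_ofs = drm_core_get_map_ofs,
 *		.get_reg_ofs = drm_core_get_reg_ofs,
 *		.fops = {
 *			.owner = THIS_MODULE,
 *			.mmap = drm_mmap,
 *		},
 *	};
 *
 * drm_mmap_locked() below relies on dev->driver->get_reg_ofs() being set.
 */
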
/**
 * mmap a DRM map.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_map *map = NULL;
	unsigned long offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
	case _DRM_AGP:
		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in fault()
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = dev->driver->get_reg_ofs(dev);
		vma->vm_flags |= VM_IO;	/* not in core dump */
		vma->vm_page_prot = drm_io_prot(map->type, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%lx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, map->offset + offset);
		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		/* Don't let this area swap.  Change when
		   DRM_KERNEL advisory is supported. */
		vma->vm_flags |= VM_RESERVED;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_flags |= VM_RESERVED;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
	vma->vm_flags |= VM_DONTEXPAND;

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);
	return 0;
}

int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

EXPORT_SYMBOL(drm_mmap);

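/*
 * For illustration only: this is the path a legacy (pre-GEM) DRM client
 * exercises when it maps one of these ranges.  The client obtains a map
 * token (the fake offset hashed into dev->map_hash) from the kernel, for
 * example via the AddMap/GetMap ioctls or libdrm's drmMap(), and hands it
 * to mmap(2) on the DRM device node.  A minimal userspace sketch, assuming
 * map_token and map_size were obtained that way:
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *	void *regs = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, map_token);
 *
 * drm_mmap_locked() looks the offset up in dev->map_hash and installs the
 * matching vm_ops table; an offset of zero (on non-UniNorth systems) maps
 * the DMA buffer pool via drm_mmap_dma() instead.
 */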