3 * Memory mapping for DRM
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
10 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
12 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
38 #include <linux/efi.h>
41 static void drm_vm_open(struct vm_area_struct *vma);
42 static void drm_vm_close(struct vm_area_struct *vma);
44 static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
46 pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
48 #if defined(__i386__) || defined(__x86_64__)
49 if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
50 pgprot_val(tmp) |= _PAGE_PCD;
51 pgprot_val(tmp) &= ~_PAGE_PWT;
53 #elif defined(__powerpc__)
54 pgprot_val(tmp) |= _PAGE_NO_CACHE;
55 if (map_type == _DRM_REGISTERS)
56 pgprot_val(tmp) |= _PAGE_GUARDED;
59 	if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
61 tmp = pgprot_writecombine(tmp);
63 tmp = pgprot_noncached(tmp);
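/*
 * Illustrative sketch, restating the _DRM_FRAME_BUFFER/_DRM_REGISTERS branch
 * of drm_mmap_locked() further below (no new behavior): the protection
 * computed here is applied before the physical range is remapped.
 *
 * \code
 *	vma->vm_page_prot = drm_io_prot(map->type, vma);
 *	io_remap_pfn_range(vma, vma->vm_start,
 *			   (map->offset + offset) >> PAGE_SHIFT,
 *			   vma->vm_end - vma->vm_start, vma->vm_page_prot);
 * \endcode
 */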
69 * \c nopage method for AGP virtual memory.
71 * \param vma virtual memory area.
72 * \param address access address.
73 * \return pointer to the page structure.
75 * Find the right map, and if it's AGP memory, find the real physical page to
76 * map; get the page, increment the use count, and return it.
79 static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
80 unsigned long address)
82 drm_file_t *priv = vma->vm_file->private_data;
83 drm_device_t *dev = priv->head->dev;
84 drm_map_t *map = NULL;
85 drm_map_list_t *r_list;
86 drm_hash_item_t *hash;
91 if (!drm_core_has_AGP(dev))
94 if (!dev->agp || !dev->agp->cant_use_aperture)
97 if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
100 r_list = drm_hash_entry(hash, drm_map_list_t, hash);
103 if (map && map->type == _DRM_AGP) {
104 unsigned long offset = address - vma->vm_start;
105 unsigned long baddr = map->offset + offset;
106 struct drm_agp_mem *agpmem;
111 * Adjust to a bus-relative address
113 baddr -= dev->hose->mem_space->start;
117 * It's AGP memory - find the real physical page to map
119 for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
120 if (agpmem->bound <= baddr &&
121 agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
126 goto vm_nopage_error;
129 * Get the page, inc the use count, and return it
131 offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
132 page = virt_to_page(__va(agpmem->memory->memory[offset]));
136 ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
137 baddr, __va(agpmem->memory->memory[offset]), offset,
143 return NOPAGE_SIGBUS; /* Disallow mremap */
145 #else /* __OS_HAS_AGP */
146 static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
147 unsigned long address)
149 return NOPAGE_SIGBUS;
151 #endif /* __OS_HAS_AGP */
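/*
 * Worked example for the AGP lookup above (hypothetical numbers, assuming
 * 4 KiB pages): a fault at address == vma->vm_start + 0x3000 on a map with
 * map->offset == 0xd0000000, backed by an AGP block bound at 0xd0000000,
 * resolves as
 *
 *	baddr  = 0xd0000000 + 0x3000 = 0xd0003000
 *	offset = (baddr - agpmem->bound) >> PAGE_SHIFT = 3
 *	page   = virt_to_page(__va(agpmem->memory->memory[3]))
 *
 * i.e. page index 3 of the bound AGP block backs the faulting address.
 */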
154 * \c nopage method for shared virtual memory.
156 * \param vma virtual memory area.
157 * \param address access address.
158 * \return pointer to the page structure.
160 * Get the mapping, find the real physical page to map, get the page, and return it.
163 static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
164 unsigned long address)
166 drm_map_t *map = (drm_map_t *) vma->vm_private_data;
167 unsigned long offset;
171 if (address > vma->vm_end)
172 return NOPAGE_SIGBUS; /* Disallow mremap */
174 return NOPAGE_SIGBUS; /* Nothing allocated */
176 offset = address - vma->vm_start;
177 i = (unsigned long)map->handle + offset;
178 page = vmalloc_to_page((void *)i);
180 return NOPAGE_SIGBUS;
183 DRM_DEBUG("shm_nopage 0x%lx\n", address);
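/*
 * Note (restating the lookup above): _DRM_SHM maps are vmalloc()-backed, so
 * the page backing a fault at byte offset off into the VMA is simply
 * vmalloc_to_page((void *)((unsigned long)map->handle + off)); no physical
 * contiguity is assumed.
 */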
188 * \c close method for shared virtual memory.
190 * \param vma virtual memory area.
192 * Deletes map information if we are the last
193 * person to close a mapping and it's not in the global maplist.
195 static void drm_vm_shm_close(struct vm_area_struct *vma)
197 drm_file_t *priv = vma->vm_file->private_data;
198 drm_device_t *dev = priv->head->dev;
199 drm_vma_entry_t *pt, *prev, *next;
201 drm_map_list_t *r_list;
202 struct list_head *list;
205 DRM_DEBUG("0x%08lx,0x%08lx\n",
206 vma->vm_start, vma->vm_end - vma->vm_start);
207 atomic_dec(&dev->vma_count);
209 map = vma->vm_private_data;
211 mutex_lock(&dev->struct_mutex);
212 for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
214 if (pt->vma->vm_private_data == map)
216 if (pt->vma == vma) {
218 prev->next = pt->next;
220 dev->vmalist = pt->next;
222 drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
227 /* We were the only map that was found */
228 if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
229 		/* Check to see if we are in the maplist; if we are not, then
230 		 * we delete this mapping's information.
233 list = &dev->maplist->head;
234 list_for_each(list, &dev->maplist->head) {
235 r_list = list_entry(list, drm_map_list_t, head);
236 if (r_list->map == map)
241 drm_dma_handle_t dmah;
245 case _DRM_FRAME_BUFFER:
246 if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
248 retcode = mtrr_del(map->mtrr,
251 DRM_DEBUG("mtrr_del = %d\n", retcode);
253 iounmap(map->handle);
259 case _DRM_SCATTER_GATHER:
261 case _DRM_CONSISTENT:
262 dmah.vaddr = map->handle;
263 dmah.busaddr = map->offset;
264 dmah.size = map->size;
265 __drm_pci_free(dev, &dmah);
268 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
271 mutex_unlock(&dev->struct_mutex);
275 * \c nopage method for DMA virtual memory.
277 * \param vma virtual memory area.
278 * \param address access address.
279 * \return pointer to the page structure.
281 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
283 static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
284 unsigned long address)
286 drm_file_t *priv = vma->vm_file->private_data;
287 drm_device_t *dev = priv->head->dev;
288 drm_device_dma_t *dma = dev->dma;
289 unsigned long offset;
290 unsigned long page_nr;
294 return NOPAGE_SIGBUS; /* Error */
295 if (address > vma->vm_end)
296 return NOPAGE_SIGBUS; /* Disallow mremap */
298 return NOPAGE_SIGBUS; /* Nothing allocated */
300 offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
301 page_nr = offset >> PAGE_SHIFT;
302 page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
306 DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
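/*
 * Worked example (hypothetical numbers, assuming 4 KiB pages): a fault
 * 0x5123 bytes into the VMA gives page_nr = 0x5123 >> PAGE_SHIFT = 5, so
 * the backing page is virt_to_page(dma->pagelist[5] + 0x123).
 */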
311 * \c nopage method for scatter-gather virtual memory.
313 * \param vma virtual memory area.
314 * \param address access address.
315 * \return pointer to the page structure.
317 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
319 static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
320 unsigned long address)
322 drm_map_t *map = (drm_map_t *) vma->vm_private_data;
323 drm_file_t *priv = vma->vm_file->private_data;
324 drm_device_t *dev = priv->head->dev;
325 drm_sg_mem_t *entry = dev->sg;
326 unsigned long offset;
327 unsigned long map_offset;
328 unsigned long page_offset;
332 return NOPAGE_SIGBUS; /* Error */
333 if (address > vma->vm_end)
334 return NOPAGE_SIGBUS; /* Disallow mremap */
335 if (!entry->pagelist)
336 return NOPAGE_SIGBUS; /* Nothing allocated */
338 offset = address - vma->vm_start;
339 map_offset = map->offset - (unsigned long)dev->sg->virtual;
340 page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
341 page = entry->pagelist[page_offset];
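/*
 * Worked example (hypothetical numbers, assuming 4 KiB pages): with
 * dev->sg->virtual == 0xc0800000, map->offset == 0xc0810000 and a fault
 * 0x2000 bytes into the VMA,
 *
 *	map_offset  = 0xc0810000 - 0xc0800000 = 0x10000
 *	page_offset = (0x2000 >> PAGE_SHIFT) + (0x10000 >> PAGE_SHIFT) = 18
 *
 * so entry->pagelist[18] backs the faulting address.
 */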
347 static struct page *drm_vm_nopage(struct vm_area_struct *vma,
348 unsigned long address, int *type)
351 *type = VM_FAULT_MINOR;
352 return drm_do_vm_nopage(vma, address);
355 static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
356 unsigned long address, int *type)
359 *type = VM_FAULT_MINOR;
360 return drm_do_vm_shm_nopage(vma, address);
363 static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
364 unsigned long address, int *type)
367 *type = VM_FAULT_MINOR;
368 return drm_do_vm_dma_nopage(vma, address);
371 static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
372 unsigned long address, int *type)
375 *type = VM_FAULT_MINOR;
376 return drm_do_vm_sg_nopage(vma, address);
379 /** AGP virtual memory operations */
380 static struct vm_operations_struct drm_vm_ops = {
381 .nopage = drm_vm_nopage,
383 .close = drm_vm_close,
386 /** Shared virtual memory operations */
387 static struct vm_operations_struct drm_vm_shm_ops = {
388 .nopage = drm_vm_shm_nopage,
390 .close = drm_vm_shm_close,
393 /** DMA virtual memory operations */
394 static struct vm_operations_struct drm_vm_dma_ops = {
395 .nopage = drm_vm_dma_nopage,
397 .close = drm_vm_close,
400 /** Scatter-gather virtual memory operations */
401 static struct vm_operations_struct drm_vm_sg_ops = {
402 .nopage = drm_vm_sg_nopage,
404 .close = drm_vm_close,
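/*
 * How drm_mmap_locked() below selects among these operation tables
 * (a restatement of its switch on map->type, not new behavior):
 *
 *	DMA area (zero page offset)              -> drm_vm_dma_ops
 *	_DRM_AGP (cant_use_aperture),
 *	_DRM_FRAME_BUFFER, _DRM_REGISTERS        -> drm_vm_ops
 *	_DRM_SHM, _DRM_CONSISTENT                -> drm_vm_shm_ops
 *	_DRM_SCATTER_GATHER                      -> drm_vm_sg_ops
 */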
408 * \c open method for shared virtual memory.
410 * \param vma virtual memory area.
412 * Create a new drm_vma_entry structure as the \p vma private data entry and
413 * add it to drm_device::vmalist.
415 static void drm_vm_open_locked(struct vm_area_struct *vma)
417 drm_file_t *priv = vma->vm_file->private_data;
418 drm_device_t *dev = priv->head->dev;
419 drm_vma_entry_t *vma_entry;
421 DRM_DEBUG("0x%08lx,0x%08lx\n",
422 vma->vm_start, vma->vm_end - vma->vm_start);
423 atomic_inc(&dev->vma_count);
425 vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
427 vma_entry->vma = vma;
428 vma_entry->next = dev->vmalist;
429 vma_entry->pid = current->pid;
430 dev->vmalist = vma_entry;
434 static void drm_vm_open(struct vm_area_struct *vma)
436 drm_file_t *priv = vma->vm_file->private_data;
437 drm_device_t *dev = priv->head->dev;
439 mutex_lock(&dev->struct_mutex);
440 drm_vm_open_locked(vma);
441 mutex_unlock(&dev->struct_mutex);
445 * \c close method for all virtual memory types.
447 * \param vma virtual memory area.
449 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
452 static void drm_vm_close(struct vm_area_struct *vma)
454 drm_file_t *priv = vma->vm_file->private_data;
455 drm_device_t *dev = priv->head->dev;
456 drm_vma_entry_t *pt, *prev;
458 DRM_DEBUG("0x%08lx,0x%08lx\n",
459 vma->vm_start, vma->vm_end - vma->vm_start);
460 atomic_dec(&dev->vma_count);
462 mutex_lock(&dev->struct_mutex);
463 for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
464 if (pt->vma == vma) {
466 prev->next = pt->next;
468 dev->vmalist = pt->next;
470 drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
474 mutex_unlock(&dev->struct_mutex);
480 * \param filp file pointer.
481 * \param vma virtual memory area.
482 * \return zero on success or a negative number on failure.
484 * Sets the virtual memory area operations structure to drm_vm_dma_ops, sets
485 * the file pointer, and calls drm_vm_open_locked().
487 static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
489 drm_file_t *priv = filp->private_data;
491 drm_device_dma_t *dma;
492 unsigned long length = vma->vm_end - vma->vm_start;
494 dev = priv->head->dev;
496 DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
497 vma->vm_start, vma->vm_end, vma->vm_pgoff);
499 /* Length must match exact page count */
500 if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
504 if (!capable(CAP_SYS_ADMIN) &&
505 (dma->flags & _DRM_DMA_USE_PCI_RO)) {
506 vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
507 #if defined(__i386__) || defined(__x86_64__)
508 pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
510 /* Ye gads this is ugly. With more thought
511 we could move this up higher and use
512 `protection_map' instead. */
516 (__pte(pgprot_val(vma->vm_page_prot)))));
520 vma->vm_ops = &drm_vm_dma_ops;
522 vma->vm_flags |= VM_RESERVED; /* Don't swap */
524 vma->vm_file = filp; /* Needed for drm_vm_open() */
525 drm_vm_open_locked(vma);
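/*
 * Userspace sketch (illustrative only; drm_fd and dma_area_size are
 * hypothetical names): because drm_mmap() treats a zero page offset as the
 * DMA buffer area, a client maps it roughly like this.
 *
 * \code
 *	void *buffers = mmap(NULL, dma_area_size, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED, drm_fd, 0);
 * \endcode
 */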
529 unsigned long drm_core_get_map_ofs(drm_map_t * map)
534 EXPORT_SYMBOL(drm_core_get_map_ofs);
536 unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
539 return dev->hose->dense_mem_base - dev->hose->mem_space->start;
545 EXPORT_SYMBOL(drm_core_get_reg_ofs);
550 * \param filp file pointer.
551 * \param vma virtual memory area.
552 * \return zero on success or a negative number on failure.
554 * If the virtual memory area has no offset associated with it then it's a DMA
555 * area, so it calls drm_mmap_dma(). Otherwise it looks the map up in
556 * drm_device::maplist, checks that the restricted flag is not set, sets the
557 * virtual memory operations according to the mapping type, and remaps the
558 * pages. Finally it sets the file pointer and calls drm_vm_open_locked().
560 static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
562 drm_file_t *priv = filp->private_data;
563 drm_device_t *dev = priv->head->dev;
564 drm_map_t *map = NULL;
565 unsigned long offset = 0;
566 drm_hash_item_t *hash;
568 DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
569 vma->vm_start, vma->vm_end, vma->vm_pgoff);
571 if (!priv->authenticated)
574 /* We check for "dma". On Apple's UniNorth, it's valid to have
575 * the AGP mapped at physical address 0
581 || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
584 return drm_mmap_dma(filp, vma);
586 if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
587 DRM_ERROR("Could not find map\n");
591 map = drm_hash_entry(hash, drm_map_list_t, hash)->map;
592 if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
595 /* Check for valid size. */
596 if (map->size < vma->vm_end - vma->vm_start)
599 if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
600 vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
601 #if defined(__i386__) || defined(__x86_64__)
602 pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
604 /* Ye gads this is ugly. With more thought
605 we could move this up higher and use
606 `protection_map' instead. */
610 (__pte(pgprot_val(vma->vm_page_prot)))));
616 if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
618 		 * On some platforms the CPU can't access bus DMA addresses directly, so for
619 		 * memory of type DRM_AGP we'll sort out the real physical
620 		 * pages and mappings in nopage()
622 #if defined(__powerpc__)
623 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
625 vma->vm_ops = &drm_vm_ops;
628 /* fall through to _DRM_FRAME_BUFFER... */
629 case _DRM_FRAME_BUFFER:
631 offset = dev->driver->get_reg_ofs(dev);
632 vma->vm_flags |= VM_IO; /* not in core dump */
633 vma->vm_page_prot = drm_io_prot(map->type, vma);
635 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
637 if (io_remap_pfn_range(vma, vma->vm_start,
638 (map->offset + offset) >> PAGE_SHIFT,
639 vma->vm_end - vma->vm_start,
642 DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
645 vma->vm_start, vma->vm_end, map->offset + offset);
646 vma->vm_ops = &drm_vm_ops;
648 case _DRM_CONSISTENT:
649 		/* Consistent memory is really like shared memory, but
650 		 * it's allocated in a different way, so avoid nopage */
651 if (remap_pfn_range(vma, vma->vm_start,
652 page_to_pfn(virt_to_page(map->handle)),
653 vma->vm_end - vma->vm_start, vma->vm_page_prot))
655 /* fall through to _DRM_SHM */
657 vma->vm_ops = &drm_vm_shm_ops;
658 vma->vm_private_data = (void *)map;
659 /* Don't let this area swap. Change when
660 DRM_KERNEL advisory is supported. */
661 vma->vm_flags |= VM_RESERVED;
663 case _DRM_SCATTER_GATHER:
664 vma->vm_ops = &drm_vm_sg_ops;
665 vma->vm_private_data = (void *)map;
666 vma->vm_flags |= VM_RESERVED;
669 return -EINVAL; /* This should never happen. */
671 vma->vm_flags |= VM_RESERVED; /* Don't swap */
673 vma->vm_file = filp; /* Needed for drm_vm_open() */
674 drm_vm_open_locked(vma);
678 int drm_mmap(struct file *filp, struct vm_area_struct *vma)
680 drm_file_t *priv = filp->private_data;
681 drm_device_t *dev = priv->head->dev;
684 mutex_lock(&dev->struct_mutex);
685 ret = drm_mmap_locked(filp, vma);
686 mutex_unlock(&dev->struct_mutex);
690 EXPORT_SYMBOL(drm_mmap);
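/*
 * Userspace sketch (illustrative only; drm_fd, map_offset and map_size are
 * hypothetical names, and the map offset is assumed to have been obtained
 * from the device beforehand): drm_mmap() looks the map up by the mmap
 * offset, so registers or a frame buffer are mapped roughly like this.
 *
 * \code
 *	void *regs = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, drm_fd, map_offset);
 * \endcode
 */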