/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
#if __OS_HAS_AGP
/**
 * \c nopage method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
						unsigned long address)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t *map = NULL;
	drm_map_list_t *r_list;
	struct list_head *list;

	/* Find the right map */
	if (!drm_core_has_AGP(dev))
		goto vm_nopage_error;

	if (!dev->agp || !dev->agp->cant_use_aperture) goto vm_nopage_error;

	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);
		map = r_list->map;
		if (!map) continue;
		if (r_list->user_token == VM_OFFSET(vma))
			break;
	}

	if (map && map->type == _DRM_AGP) {
		unsigned long offset = address - vma->vm_start;
		unsigned long baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/* Adjust to a bus-relative address */
		baddr -= dev->hose->mem_space->start;
#endif

		/* It's AGP memory - find the real physical page to map */
		for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (!agpmem) goto vm_nopage_error;

		/* Get the page, inc the use count, and return it */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = virt_to_page(__va(agpmem->memory->memory[offset]));
		get_page(page);

		DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
			  baddr, __va(agpmem->memory->memory[offset]), offset,
			  page_count(page));

		return page;
	}
vm_nopage_error:
	return NOPAGE_SIGBUS;	/* Disallow mremap */
}
#else				/* __OS_HAS_AGP */
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
						unsigned long address)
{
	return NOPAGE_SIGBUS;
}
#endif				/* __OS_HAS_AGP */
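/*
 * Illustrative sketch only (not part of the original driver): the bound
 * test and index arithmetic from drm_do_vm_nopage() above, pulled out into
 * a hypothetical helper.  A bus address belongs to an AGP allocation when
 * it falls in [bound, bound + pages * PAGE_SIZE); the page index is the
 * distance from the start of the allocation in whole pages.
 */
static __inline__ long drm_agp_baddr_to_index(unsigned long bound,
					      unsigned long pages,
					      unsigned long baddr)
{
	if (baddr < bound || baddr >= bound + pages * PAGE_SIZE)
		return -1;	/* not backed by this allocation */
	return (baddr - bound) >> PAGE_SHIFT;
}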
/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
						    unsigned long address)
{
	drm_map_t *map = (drm_map_t *)vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
	if (!map) return NOPAGE_OOM;	/* Nothing allocated */

	offset = address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = (map->type == _DRM_CONSISTENT) ?
		virt_to_page((void *)i) : vmalloc_to_page((void *)i);
	if (!page)
		return NOPAGE_OOM;
	get_page(page);

	DRM_DEBUG("shm_nopage 0x%lx\n", address);
	return page;
}
/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_vma_entry_t *pt, *prev, *next;
	drm_map_t *map;
	drm_map_list_t *r_list;
	struct list_head *list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	map = vma->vm_private_data;

	down(&dev->struct_sem);
	for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
		next = pt->next;
		if (pt->vma->vm_private_data == map) found_maps++;
		if (pt->vma == vma) {
			if (prev) {
				prev->next = pt->next;
			} else {
				dev->vmalist = pt->next;
			}
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
		} else {
			prev = pt;
		}
	}
	/* We were the only map that was found */
	if (found_maps == 1 &&
	    map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist, if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list = &dev->maplist->head;
		list_for_each(list, &dev->maplist->head) {
			r_list = list_entry(list, drm_map_list_t, head);
			if (r_list->map == map) found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
				drm_ioremapfree(map->handle, map->size, dev);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_pci_free(dev, &dmah);
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
	}
	up(&dev->struct_sem);
}
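/*
 * Note on drm_vm_shm_close(): found_maps is used twice -- first to count
 * VMAs that still reference the map, then (reset to zero) to count entries
 * for it left on the global maplist.  The backing store is released only
 * when this was the last VMA and nothing on the maplist still refers to
 * the map.
 */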
/**
 * \c nopage method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
						    unsigned long address)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma) return NOPAGE_SIGBUS;	/* Error */
	if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
	if (!dma->pagelist) return NOPAGE_OOM;	/* Nothing allocated */

	offset = address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT;
	page = virt_to_page((dma->pagelist[page_nr] +
			     (offset & (~PAGE_MASK))));
	get_page(page);

	DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
	return page;
}
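/*
 * Worked example (illustration only): with 4 KiB pages, a fault at
 * vma->vm_start + 0x3800 gives offset = 0x3800, so page_nr = 3 and the
 * offset within the page is 0x800; virt_to_page() then recovers the
 * struct page backing dma->pagelist[3].
 */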
/**
 * \c nopage method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
						   unsigned long address)
{
	drm_map_t *map = (drm_map_t *)vma->vm_private_data;
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_sg_mem_t *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry) return NOPAGE_SIGBUS;	/* Error */
	if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
	if (!entry->pagelist) return NOPAGE_OOM;	/* Nothing allocated */

	offset = address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);

	return page;
}
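/*
 * Note: map->offset for a scatter-gather map is a kernel virtual address
 * inside the SG area, so map_offset above is simply the map's byte offset
 * from dev->sg->virtual; adding the fault offset (both in whole pages)
 * indexes the right entry in entry->pagelist.
 */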
static struct page *drm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address, int *type)
{
	if (type) *type = VM_FAULT_MINOR;
	return drm_do_vm_nopage(vma, address);
}

static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
				      unsigned long address, int *type)
{
	if (type) *type = VM_FAULT_MINOR;
	return drm_do_vm_shm_nopage(vma, address);
}

static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
				      unsigned long address, int *type)
{
	if (type) *type = VM_FAULT_MINOR;
	return drm_do_vm_dma_nopage(vma, address);
}

static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
				     unsigned long address, int *type)
{
	if (type) *type = VM_FAULT_MINOR;
	return drm_do_vm_sg_nopage(vma, address);
}
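/*
 * The wrappers above adapt the handlers to the ->nopage() signature that
 * takes an optional int *type out-parameter.  Every DRM fault is reported
 * as VM_FAULT_MINOR: the backing page already exists, so no I/O is needed
 * to satisfy the fault.
 */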
/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
	.nopage = drm_vm_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = {
	.nopage = drm_vm_shm_nopage,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = {
	.nopage = drm_vm_dma_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = {
	.nopage = drm_vm_sg_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};
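/*
 * All four tables share drm_vm_open() for VMA bookkeeping and differ only
 * in their nopage handler; shared maps additionally get a specialized
 * close (drm_vm_shm_close) so removable maps can be torn down when the
 * last user unmaps them.
 */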
/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
static void drm_vm_open(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_vma_entry_t *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);

	vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
	if (vma_entry) {
		down(&dev->struct_sem);
		vma_entry->vma = vma;
		vma_entry->next = dev->vmalist;
		vma_entry->pid = current->pid;
		dev->vmalist = vma_entry;
		up(&dev->struct_sem);
	}
}
/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_vma_entry_t *pt, *prev;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	down(&dev->struct_sem);
	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
		if (pt->vma == vma) {
			if (prev) {
				prev->next = pt->next;
			} else {
				dev->vmalist = pt->next;
			}
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
			break;
		}
	}
	up(&dev->struct_sem);
}
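/*
 * drm_vm_close() undoes exactly what drm_vm_open() recorded: the VMA is
 * unlinked from dev->vmalist and dev->vma_count is decremented, keeping
 * the bookkeeping balanced across fork() and munmap() of client mappings.
 */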
/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev;
	drm_device_dma_t *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	lock_kernel();
	dev = priv->head->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();

	vma->vm_ops = &drm_vm_dma_ops;
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open(vma);
	return 0;
}
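/*
 * Usage sketch (assumption, user-space side): a client maps the DMA
 * buffer pool by passing offset 0, e.g.
 *
 *	buf = mmap(NULL, dma_size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, drm_fd, 0);
 *
 * where dma_size must equal dma->page_count << PAGE_SHIFT exactly, or the
 * length check above rejects the mapping with -EINVAL.
 */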
unsigned long drm_core_get_map_ofs(drm_map_t *map)
{
	return map->offset;
}

EXPORT_SYMBOL(drm_core_get_map_ofs);

unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
	return 0;
#endif
}

EXPORT_SYMBOL(drm_core_get_reg_ofs);
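/*
 * On Alpha, register apertures are reached through the hose's dense memory
 * window, so the register offset is the delta between dense_mem_base and
 * the start of the hose's bus address space; on every other architecture
 * the offset is zero (see the #ifdef above).
 */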
/**
 * mmap a DRM memory map.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t *map = NULL;
	drm_map_list_t *r_list;
	unsigned long offset = 0;
	struct list_head *list;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

	if (!priv->authenticated) return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!VM_OFFSET(vma)
#if __OS_HAS_AGP
	    && (!dev->agp || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	/* A sequential search of a linked list is
	   fine here because: 1) there will only be
	   about 5-10 entries in the list and, 2) a
	   DRI client only has to do this mapping
	   once, so it doesn't have to be optimized
	   for performance, even if the list was a
	   bit longer. */
	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);

		map = r_list->map;
		if (!map) continue;
		if (r_list->user_token == VM_OFFSET(vma))
			break;
	}

	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
			__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}
	switch (map->type) {
	case _DRM_AGP:
		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in nopage()
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
#if defined(__i386__) || defined(__x86_64__)
		if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
			pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
			pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
		}
#elif defined(__powerpc__)
		pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
		if (map->type == _DRM_REGISTERS)
			pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;
#endif
		vma->vm_flags |= VM_IO;	/* not in core dump */
#if defined(__ia64__)
		if (efi_range_is_wc(vma->vm_start, vma->vm_end -
				    vma->vm_start))
			vma->vm_page_prot =
				pgprot_writecombine(vma->vm_page_prot);
		else
			vma->vm_page_prot =
				pgprot_noncached(vma->vm_page_prot);
#endif
		offset = dev->driver->get_reg_ofs(dev);
#ifdef __sparc__
		if (io_remap_pfn_range(DRM_RPR_ARG(vma) vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
#else
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
#endif
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%lx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, map->offset + offset);
		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_SHM:
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. It's only
		 * allocated in a different way */
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		/* Don't let this area swap.  Change when
		   DRM_KERNEL advisory is supported. */
		vma->vm_flags |= VM_RESERVED;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_flags |= VM_RESERVED;
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open(vma);
	return 0;
}
EXPORT_SYMBOL(drm_mmap);
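/*
 * Wiring sketch (assumption, not from this file): drm_mmap() is typically
 * installed as the .mmap handler in the DRM core's file_operations, e.g.
 *
 *	static struct file_operations drm_example_fops = {
 *		.owner   = THIS_MODULE,
 *		.open    = drm_open,
 *		.release = drm_release,
 *		.ioctl   = drm_ioctl,
 *		.mmap    = drm_mmap,
 *		.poll    = drm_poll,
 *	};
 *
 * so every mmap() on a DRM device node funnels through the map-type
 * dispatch above.
 */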