/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 *
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"

unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource)
{
	return pci_resource_start(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_start);

unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource)
{
	return pci_resource_len(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_len);
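
/*
 * Illustrative sketch (not part of the original file): a PCI DRM driver
 * would typically feed these helpers into drm_addmap() to expose a
 * register BAR.  The BAR index 0 is an assumption for the example.
 *
 *	drm_local_map_t *regs;
 *	int err = drm_addmap(dev,
 *			     drm_get_resource_start(dev, 0),
 *			     drm_get_resource_len(dev, 0),
 *			     _DRM_REGISTERS, 0, &regs);
 */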

static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  drm_local_map_t *map)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map && map->type == entry->map->type &&
		    ((entry->map->offset == map->offset) ||
		     (map->type == _DRM_SHM && map->flags == _DRM_CONTAINS_LOCK))) {
			return entry;
		}
	}

	return NULL;
}

static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle)
{
	int use_hashed_handle;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;

		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}
	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
}
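
/*
 * Worked example (added note): with 4 KiB pages, a map at offset
 * 0x100000000 on a 64-bit kernel has bits set above the low 32, so
 * user_token >> PAGE_SHIFT cannot be handed back to user space as a
 * 32-bit mmap() offset; drm_ht_just_insert_please() instead picks a
 * free key from a 2^(32 - PAGE_SHIFT - 3)-slot window starting at
 * DRM_MAP_HASH_OFFSET.
 */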

/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRRs where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list ** maplist)
{
	struct drm_map *map;
	struct drm_map_list *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
		  map->offset, map->size, map->type);
	if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
#endif
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}

		if (drm_core_has_MTRR(dev)) {
			if (map->type == _DRM_FRAME_BUFFER ||
			    (map->flags & _DRM_WRITE_COMBINING)) {
				map->mtrr = mtrr_add(map->offset, map->size,
						     MTRR_TYPE_WRCOMB, 1);
			}
		}
		if (map->type == _DRM_REGISTERS)
			map->handle = ioremap(map->offset, map->size);
		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}

		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				vfree(map->handle);
				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!drm_core_has_AGP(dev)) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* Note: dev->agp->base may actually be 0 when the DRM
		 * is not in control of AGP space.  But if user space is
		 * it should already have added the AGP base itself.
		 */
		map->offset += dev->agp->base;
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. the i810 driver).  In that case this
		 * loop gets skipped, so we double-check that dev->agp->memory
		 * is actually set, as well as being invalid, before EPERM'ing.
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);

		break;
	}
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64-bit variable first.
		 */
		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
		if (!dmah) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}

	list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	memset(list, 0, sizeof(*list));
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0);
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		drm_free(list, sizeof(*list), DRM_MEM_MAPS);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	*maplist = list;
	return 0;
}

int drm_addmap(struct drm_device * dev, unsigned int offset,
	       unsigned int size, enum drm_map_type type,
	       enum drm_map_flags flags, drm_local_map_t ** map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_addmap);
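
/*
 * Illustrative sketch (not part of the original file): drivers pair
 * drm_addmap() with drm_rmmap() for maps they manage themselves; the
 * shared-memory SAREA below is an assumed example.
 *
 *	drm_local_map_t *sarea;
 *	if (drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
 *		       _DRM_CONTAINS_LOCK, &sarea) == 0) {
 *		// ... use sarea->handle ...
 *		drm_rmmap(dev, sarea);	// later, at teardown
 *	}
 */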

int drm_addmap_ioctl(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_map map;
	struct drm_map_list *maplist;
	struct drm_map __user *argp = (void __user *)arg;
	int err;

	if (!(filp->f_mode & 3))
		return -EACCES;	/* Require read/write */

	if (copy_from_user(&map, argp, sizeof(map))) {
		return -EFAULT;
	}

	if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP))
		return -EPERM;

	err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
			      &maplist);
	if (err)
		return err;

	if (copy_to_user(argp, maplist->map, sizeof(struct drm_map)))
		return -EFAULT;

	/* Avoid a warning on 64-bit: this cast isn't pretty, but the API
	 * was set in stone too long ago to change.
	 */
	if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle))
		return -EFAULT;
	return 0;
}

/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list, and
 * frees any associated resources (such as MTRRs) if the map is no longer in
 * use.
 *
 * \sa drm_addmap
 */
int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
			int retcode;

			retcode = mtrr_del(map->mtrr, map->offset, map->size);
			DRM_DEBUG("mtrr_del=%d\n", retcode);
		}
		break;
	case _DRM_SHM:
		vfree(map->handle);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	}
	drm_free(map, sizeof(*map), DRM_MEM_MAPS);

	return 0;
}
EXPORT_SYMBOL(drm_rmmap_locked);

int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_rmmap);

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 */
int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_map request;
	drm_local_map_t *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	if (copy_from_user(&request, (struct drm_map __user *) arg, sizeof(request))) {
		return -EFAULT;
	}

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request.handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device * dev,
				  struct drm_buf_entry * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		drm_free(entry->seglist,
			 entry->seg_count *
			 sizeof(*entry->seglist), DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
					 entry->buflist[i].dev_priv_size,
					 DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
			 entry->buf_count *
			 sizeof(*entry->buflist), DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}

#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif				/* __OS_HAS_AGP */
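
/*
 * Illustrative sketch (not part of the original file): a driver with
 * bound AGP memory might populate its DMA buffer pool at init time.
 * The count, size, and agp_start values are assumptions for the example.
 *
 *	struct drm_buf_desc req = {
 *		.count = 32,
 *		.size = 65536,
 *		.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN,
 *		.agp_start = 0,
 *	};
 *	int err = drm_addbufs_agp(dev, &req);
 *	// on success, req.count and req.size report what was allocated
 */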

int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		  request->count, request->size, size, order, dev->queue_count);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
				   DRM_MEM_SEGS);
	if (!entry->seglist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->seglist, 0, count * sizeof(*entry->seglist));

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded.
	 */
	temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
				  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
	if (!temp_pagelist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
		drm_free(entry->seglist,
			 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			drm_free(temp_pagelist,
				 (dma->page_count + (count << page_order))
				 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
			    = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head(&buf->dma_wait);
			buf->filp = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = drm_alloc(buf->dev_priv_size,
						     DRM_MEM_BUFS);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				drm_free(temp_pagelist,
					 (dma->page_count +
					  (count << page_order))
					 * sizeof(*dma->pagelist),
					 DRM_MEM_PAGES);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}
			memset(buf->dev_private, 0, buf->dev_priv_size);

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		drm_free(temp_pagelist,
			 (dma->page_count + (count << page_order))
			 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		drm_free(dma->pagelist,
			 dma->page_count * sizeof(*dma->pagelist),
			 DRM_MEM_PAGES);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_pci);
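
/*
 * Illustrative note (added): because drm_addbufs_pci() rounds the
 * requested size up to a power of two via drm_order(), asking for
 * 48 KiB buffers actually yields 64 KiB buffers.
 *
 *	struct drm_buf_desc req = { .count = 16, .size = 49152 };
 *	drm_addbufs_pci(dev, &req);	// req.size becomes 65536 on success
 */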

static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * Depending on the memory type specified in drm_buf_desc::flags and the
 * build options, dispatches the call either to addbufs_agp(), addbufs_sg()
 * or addbufs_pci() for AGP, scatter-gather or consistent PCI memory
 * respectively.
 */
int drm_addbufs(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	struct drm_buf_desc request;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->head->dev;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (copy_from_user(&request, (struct drm_buf_desc __user *) arg,
			   sizeof(request)))
		return -EFAULT;

#if __OS_HAS_AGP
	if (request.flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, &request);
	else
#endif
	if (request.flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, &request);
	else if (request.flags & _DRM_FB_BUFFER)
		ret = drm_addbufs_fb(dev, &request);
	else
		ret = drm_addbufs_pci(dev, &request);

	if (ret == 0) {
		if (copy_to_user((void __user *)arg, &request, sizeof(request))) {
			ret = -EFAULT;
		}
	}
	return ret;
}
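
/*
 * Illustrative user-space sketch (not part of this file): the dispatch
 * above is normally reached through libdrm's drmAddBufs(); a raw call
 * against an already-open DRM file descriptor would look roughly like:
 *
 *	struct drm_buf_desc desc = { .count = 16, .size = 4096 };
 *	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc) == 0)
 *		printf("got %d buffers of %d bytes\n", desc.count, desc.size);
 */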

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_info request;
	struct drm_buf_info __user *argp = (void __user *)arg;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request.count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc __user *to =
				    &request.list[count];
				struct drm_buf_entry *from = &dma->bufs[i];
				struct drm_freelist *list = &dma->bufs[i].freelist;
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &list->low_mark,
						 sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &list->high_mark,
						 sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request.count = count;

	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;

	return 0;
}

/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water marks.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc request;
	int order;
	struct drm_buf_entry *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (copy_from_user(&request,
			   (struct drm_buf_desc __user *) arg, sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d, %d, %d\n",
		  request.size, request.low_mark, request.high_mark);
	order = drm_order(request.size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request.low_mark < 0 || request.low_mark > entry->buf_count)
		return -EINVAL;
	if (request.high_mark < 0 || request.high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request.low_mark;
	entry->freelist.high_mark = request.high_mark;

	return 0;
}
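
/*
 * Illustrative sketch (not part of the original file): user space would
 * reach this through the DRM_IOCTL_MARK_BUFS ioctl; the watermark values,
 * consulted by the DMA freelist code, are example choices here for the
 * order matching 4 KiB buffers.
 *
 *	struct drm_buf_desc req = {
 *		.size = 4096, .low_mark = 4, .high_mark = 16,
 *	};
 *	ioctl(fd, DRM_IOCTL_MARK_BUFS, &req);
 */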

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free request;
	int i;
	int idx;
	struct drm_buf *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (copy_from_user(&request,
			   (struct drm_buf_free __user *) arg, sizeof(request)))
		return -EFAULT;

	DRM_DEBUG("%d\n", request.count);
	for (i = 0; i < request.count; i++) {
		if (copy_from_user(&idx, &request.list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->filp != filp) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  current->pid);
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
 * about each buffer into user space.  For PCI buffers, it calls do_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_mapbufs(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_map __user *argp = (void __user *)arg;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	struct drm_buf_map request;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

	if (request.count >= dma->buf_count) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			struct drm_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, token);
			up_write(&current->mm->mmap_sem);
		} else {
			down_write(&current->mm->mmap_sem);
			virtual = do_mmap(filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
			up_write(&current->mm->mmap_sem);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request.virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request.list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request.list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request.list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request.list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request.count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);

	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;

	return retcode;
}

/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);
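
/*
 * Worked example (added note): drm_order() rounds up to the next power
 * of two, e.g. drm_order(4096) == 12 while drm_order(4097) == 13, so a
 * 4097-byte request is served with 8 KiB buffers.
 */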