/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
		kfree(bo);
	}
}

int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
	if (interruptible) {
		int ret = 0;

		ret = wait_event_interruptible(bo->event_queue,
					       atomic_read(&bo->reserved) == 0);
		if (unlikely(ret != 0))
			return -ERESTART;
	} else {
		wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
	}
	return 0;
}

static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	BUG_ON(!atomic_read(&bo->reserved));

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bdev->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}

/**
 * Call with the lru_lock held.
 */

static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */

	return put_count;
}

int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
		if (use_sequence && bo->seq_valid &&
		    (sequence - bo->val_seq < (1 << 31))) {
			return -EAGAIN;
		}

		if (no_wait)
			return -EBUSY;

		spin_unlock(&bdev->lru_lock);
		ret = ttm_bo_wait_unreserved(bo, interruptible);
		spin_lock(&bdev->lru_lock);

		if (unlikely(ret))
			return ret;
	}

	if (use_sequence) {
		bo->val_seq = sequence;
		bo->seq_valid = true;
	} else {
		bo->seq_valid = false;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_reserve);

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible,
		   bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int put_count = 0;
	int ret;

	spin_lock(&bdev->lru_lock);
	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
				    sequence);
	if (likely(ret == 0))
		put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&bdev->lru_lock);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	return ret;
}

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	spin_lock(&bdev->lru_lock);
	ttm_bo_add_to_lru(bo);
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);

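/*
 * Usage sketch (illustrative; "bo" stands in for any buffer object): the
 * reserve/unreserve pair brackets all placement manipulation, and the two
 * calls above take care of the LRU bookkeeping:
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... validate or otherwise manipulate bo ...
 *	ttm_bo_unreserve(bo);
 */
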
/*
 * Call bo->mutex locked.
 */

static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		/* fall through: device bos use the same ttm as kernel bos */
	case ttm_bo_type_kernel:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags, bdev->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_user:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags | TTM_PAGE_FLAG_USER,
					bdev->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}

		ret = ttm_tt_set_user(bo->ttm, current,
				      bo->buffer_start, bo->num_pages);
		if (unlikely(ret != 0))
			ttm_tt_destroy(bo->ttm);
		break;
	default:
		printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
		ttm_bo_unmap_virtual(bo);

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
		ret = ttm_bo_add_ttm(bo, false);
		if (ret)
			goto out_err;

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {

			struct ttm_mem_reg *old_mem = &bo->mem;
			uint32_t save_flags = old_mem->placement;

			*old_mem = *mem;
			mem->mm_node = NULL;
			ttm_flag_masked(&save_flags, mem->placement,
					TTM_PL_MASK_MEMTYPE);
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);

	if (ret)
		goto out_err;

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		spin_lock(&bo->lock);
		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
		spin_unlock(&bo->lock);
	}

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

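/*
 * Note (summary of the move path above, for orientation): when both the
 * old and the new memory type are unfixed (system/TT-like), the move is
 * done by ttm_bo_move_ttm(); a driver-supplied bdev->driver->move() hook
 * is preferred for anything involving fixed (e.g. VRAM) memory; and
 * ttm_bo_move_memcpy() is the fallback when no accelerated hook exists.
 */
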
/**
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, and already on delayed list, do nothing.
 * If not idle, and not on delayed list, put on delayed list,
 *   up the list_kref and schedule a delayed list check.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	spin_lock(&bo->lock);
	(void) ttm_bo_wait(bo, false, false, !remove_all);

	if (!bo->sync_obj) {
		int put_count;

		spin_unlock(&bo->lock);

		spin_lock(&bdev->lru_lock);
		ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
		BUG_ON(ret);
		if (bo->ttm)
			ttm_tt_unbind(bo->ttm);

		if (!list_empty(&bo->ddestroy)) {
			list_del_init(&bo->ddestroy);
			kref_put(&bo->list_kref, ttm_bo_ref_bug);
		}
		if (bo->mem.mm_node) {
			drm_mm_put_block(bo->mem.mm_node);
			bo->mem.mm_node = NULL;
		}
		put_count = ttm_bo_del_from_lru(bo);
		spin_unlock(&bdev->lru_lock);

		atomic_set(&bo->reserved, 0);

		while (put_count--)
			kref_put(&bo->list_kref, ttm_bo_release_list);

		return 0;
	}

	spin_lock(&bdev->lru_lock);
	if (list_empty(&bo->ddestroy)) {
		void *sync_obj = bo->sync_obj;
		void *sync_obj_arg = bo->sync_obj_arg;

		kref_get(&bo->list_kref);
		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
		spin_unlock(&bdev->lru_lock);
		spin_unlock(&bo->lock);

		if (sync_obj)
			driver->sync_obj_flush(sync_obj, sync_obj_arg);
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
		ret = 0;

	} else {
		spin_unlock(&bdev->lru_lock);
		spin_unlock(&bo->lock);
		ret = -EBUSY;
	}

	return ret;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_buffer_object *entry, *nentry;
	struct list_head *list, *next;
	int ret;

	spin_lock(&bdev->lru_lock);
	list_for_each_safe(list, next, &bdev->ddestroy) {
		entry = list_entry(list, struct ttm_buffer_object, ddestroy);
		nentry = NULL;

		/*
		 * Protect the next list entry from destruction while we
		 * unlock the lru_lock.
		 */

		if (next != &bdev->ddestroy) {
			nentry = list_entry(next, struct ttm_buffer_object,
					    ddestroy);
			kref_get(&nentry->list_kref);
		}
		kref_get(&entry->list_kref);

		spin_unlock(&bdev->lru_lock);
		ret = ttm_bo_cleanup_refs(entry, remove_all);
		kref_put(&entry->list_kref, ttm_bo_release_list);

		spin_lock(&bdev->lru_lock);
		if (nentry) {
			bool next_onlist = !list_empty(next);
			spin_unlock(&bdev->lru_lock);
			kref_put(&nentry->list_kref, ttm_bo_release_list);
			spin_lock(&bdev->lru_lock);
			/*
			 * Someone might have raced us and removed the
			 * next entry from the list. We don't bother restarting
			 * list traversal.
			 */

			if (!next_onlist)
				break;
		}
		if (ret)
			break;
	}
	ret = !list_empty(&bdev->ddestroy);
	spin_unlock(&bdev->lru_lock);

	return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;

	if (likely(bo->vm_node != NULL)) {
		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
		drm_mm_put_block(bo->vm_node);
		bo->vm_node = NULL;
	}
	write_unlock(&bdev->vm_lock);
	ttm_bo_cleanup_refs(bo, false);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	write_lock(&bdev->vm_lock);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;
	struct ttm_bo_device *bdev = bo->bdev;

	*p_bo = NULL;
	write_lock(&bdev->vm_lock);
	kref_put(&bo->kref, ttm_bo_release);
	write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);

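/*
 * Note (usage, illustrative): ttm_bo_unref() clears the caller's pointer
 * before dropping the reference, so a stale pointer cannot be reused
 * after what may have been the last unref; after
 *
 *	ttm_bo_unref(&bo);
 *
 * bo is guaranteed to be NULL.
 */
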
static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
			bool interruptible, bool no_wait)
{
	int ret = 0;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	uint32_t proposed_placement;

	if (bo->mem.mem_type != mem_type)
		goto out;

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
	spin_unlock(&bo->lock);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTART) {
			printk(KERN_ERR TTM_PFX
			       "Failed to expire sync object before "
			       "buffer eviction.\n");
		}
		goto out;
	}

	BUG_ON(!atomic_read(&bo->reserved));

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;

	proposed_placement = bdev->driver->evict_flags(bo);

	ret = ttm_bo_mem_space(bo, proposed_placement,
			       &evict_mem, interruptible, no_wait);
	if (unlikely(ret != 0 && ret != -ERESTART))
		ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
				       &evict_mem, interruptible, no_wait);

	if (ret) {
		if (ret != -ERESTART)
			printk(KERN_ERR TTM_PFX
			       "Failed to find memory space for "
			       "buffer 0x%p eviction.\n", bo);
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait);
	if (ret) {
		if (ret != -ERESTART)
			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
		goto out;
	}

	spin_lock(&bdev->lru_lock);
	if (evict_mem.mm_node) {
		drm_mm_put_block(evict_mem.mm_node);
		evict_mem.mm_node = NULL;
	}
	spin_unlock(&bdev->lru_lock);
	bo->evicted = true;
out:
	return ret;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */

static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem,
				  uint32_t mem_type,
				  bool interruptible, bool no_wait)
{
	struct drm_mm_node *node;
	struct ttm_buffer_object *entry;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct list_head *lru;
	unsigned long num_pages = mem->num_pages;
	int put_count = 0;
	int ret;

retry_pre_get:
	ret = drm_mm_pre_get(&man->manager);
	if (unlikely(ret != 0))
		return ret;

	spin_lock(&bdev->lru_lock);
	do {
		node = drm_mm_search_free(&man->manager, num_pages,
					  mem->page_alignment, 1);
		if (node)
			break;

		lru = &man->lru;
		if (list_empty(lru))
			break;

		entry = list_first_entry(lru, struct ttm_buffer_object, lru);
		kref_get(&entry->list_kref);

		ret = ttm_bo_reserve_locked(entry, interruptible, no_wait,
					    false, 0);

		if (likely(ret == 0))
			put_count = ttm_bo_del_from_lru(entry);

		spin_unlock(&bdev->lru_lock);

		if (unlikely(ret != 0))
			return ret;

		while (put_count--)
			kref_put(&entry->list_kref, ttm_bo_ref_bug);

		ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);

		ttm_bo_unreserve(entry);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		if (ret)
			return ret;

		spin_lock(&bdev->lru_lock);
	} while (1);

	if (!node) {
		spin_unlock(&bdev->lru_lock);
		return -ENOMEM;
	}

	node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
	if (unlikely(!node)) {
		spin_unlock(&bdev->lru_lock);
		goto retry_pre_get;
	}

	spin_unlock(&bdev->lru_lock);
	mem->mm_node = node;
	mem->mem_type = mem_type;
	return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

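/*
 * Worked example (illustrative): if the buffer is currently cached
 * (cur_placement contains TTM_PL_FLAG_CACHED) and the proposal offers
 * TTM_PL_FLAG_CACHED | TTM_PL_FLAG_WC, the intersection is non-empty and
 * the buffer keeps TTM_PL_FLAG_CACHED, avoiding a caching transition.
 * Only when the current mode is not offered does the code fall back to
 * the manager's default, then cached, write-combined and uncached, in
 * that order.
 */
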
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 bool disallow_fixed,
				 uint32_t mem_type,
				 uint32_t proposed_placement,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
		return false;

	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((proposed_placement & man->available_caching) == 0)
		return false;

	cur_flags |= (proposed_placement & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */

int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     uint32_t proposed_placement,
		     struct ttm_mem_reg *mem,
		     bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	uint32_t num_prios = bdev->driver->num_mem_type_prio;
	const uint32_t *prios = bdev->driver->mem_type_prio;
	uint32_t i;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_eagain = false;
	struct drm_mm_node *node = NULL;
	int ret;

	mem->mm_node = NULL;
	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man,
					       bo->type == ttm_bo_type_user,
					       mem_type, proposed_placement,
					       &cur_flags);

		if (!type_ok)
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			do {
				ret = drm_mm_pre_get(&man->manager);
				if (unlikely(ret))
					return ret;

				spin_lock(&bdev->lru_lock);
				node = drm_mm_search_free(&man->manager,
							  mem->num_pages,
							  mem->page_alignment,
							  1);
				if (unlikely(!node)) {
					spin_unlock(&bdev->lru_lock);
					break;
				}
				node = drm_mm_get_block_atomic(node,
							       mem->num_pages,
							       mem->page_alignment);
				spin_unlock(&bdev->lru_lock);
			} while (!node);
		}
		if (node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
		mem->mm_node = node;
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	num_prios = bdev->driver->num_mem_busy_prio;
	prios = bdev->driver->mem_busy_prio;

	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bdev->man[mem_type];

		if (!man->has_type)
			continue;

		if (!ttm_bo_mt_compatible(man,
					  bo->type == ttm_bo_type_user,
					  mem_type,
					  proposed_placement, &cur_flags))
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);

		ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
					     interruptible, no_wait);

		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}

		if (ret == -ERESTART)
			has_eagain = true;
	}

	ret = (has_eagain) ? -ERESTART : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

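/*
 * Illustrative example (assumed driver tables, not defined in this file):
 * a driver exporting mem_type_prio = { TTM_PL_VRAM, TTM_PL_TT,
 * TTM_PL_SYSTEM } is first probed for free VRAM, then TT, then system
 * memory. Only when every compatible type is full does the second loop
 * walk mem_busy_prio and evict buffers via ttm_bo_mem_force_space().
 */
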
int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
		return -EBUSY;

	ret = wait_event_interruptible(bo->event_queue,
				       atomic_read(&bo->cpu_writers) == 0);

	if (ret == -ERESTARTSYS)
		ret = -ERESTART;

	return ret;
}

int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
		       uint32_t proposed_placement,
		       bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;
	struct ttm_mem_reg mem;

	BUG_ON(!atomic_read(&bo->reserved));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
	spin_unlock(&bo->lock);

	if (ret)
		return ret;

	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;

	/*
	 * Determine where to move the buffer.
	 */

	ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
			       interruptible, no_wait);
	if (ret)
		goto out_unlock;

	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);

out_unlock:
	if (ret && mem.mm_node) {
		spin_lock(&bdev->lru_lock);
		drm_mm_put_block(mem.mm_node);
		spin_unlock(&bdev->lru_lock);
	}
	return ret;
}

static int ttm_bo_mem_compat(uint32_t proposed_placement,
			     struct ttm_mem_reg *mem)
{
	if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
		return 0;
	if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
		return 0;

	return 1;
}

int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
			       uint32_t proposed_placement,
			       bool interruptible, bool no_wait)
{
	int ret;

	BUG_ON(!atomic_read(&bo->reserved));
	bo->proposed_placement = proposed_placement;

	TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
		  (unsigned long)proposed_placement,
		  (unsigned long)bo->mem.placement);

	/*
	 * Check whether we need to move buffer.
	 */

	if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
		ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
					 interruptible, no_wait);
		if (ret) {
			if (ret != -ERESTART)
				printk(KERN_ERR TTM_PFX
				       "Failed moving buffer. "
				       "Proposed placement 0x%08x\n",
				       bo->proposed_placement);
			if (ret == -ENOMEM)
				printk(KERN_ERR TTM_PFX
				       "Out of aperture space or "
				       "DRM memory quota.\n");
			return ret;
		}
	}

	/*
	 * We might need to add a TTM.
	 */

	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	/*
	 * Validation has succeeded, move the access and other
	 * non-mapping-related flag bits from the proposed flags to
	 * the active flags.
	 */

	ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
			~TTM_PL_MASK_MEMTYPE);

	return 0;
}
EXPORT_SYMBOL(ttm_buffer_object_validate);

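/*
 * Usage sketch (illustrative; assumes "bo" is already reserved, as the
 * BUG_ON above requires): moving a buffer into write-combined VRAM could
 * look like
 *
 *	ret = ttm_buffer_object_validate(bo,
 *					 TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
 *					 true, false);
 *	if (unlikely(ret != 0))
 *		return ret;
 */
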
int
ttm_bo_check_placement(struct ttm_buffer_object *bo,
		       uint32_t set_flags, uint32_t clr_flags)
{
	uint32_t new_mask = set_flags | clr_flags;

	if ((bo->type == ttm_bo_type_user) &&
	    (clr_flags & TTM_PL_FLAG_CACHED)) {
		printk(KERN_ERR TTM_PFX
		       "User buffers require cache-coherent memory.\n");
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN)) {
		if (new_mask & TTM_PL_FLAG_NO_EVICT) {
			printk(KERN_ERR TTM_PFX "Need to be root to modify"
			       " NO_EVICT status.\n");
			return -EINVAL;
		}

		if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
		    (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
			printk(KERN_ERR TTM_PFX
			       "Incompatible memory specification"
			       " for NO_EVICT buffer.\n");
			return -EINVAL;
		}
	}
	return 0;
}

int ttm_buffer_object_init(struct ttm_bo_device *bdev,
			   struct ttm_buffer_object *bo,
			   unsigned long size,
			   enum ttm_bo_type type,
			   uint32_t flags,
			   uint32_t page_alignment,
			   unsigned long buffer_start,
			   bool interruptible,
			   struct file *persistant_swap_storage,
			   size_t acc_size,
			   void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;

	size += buffer_start & ~PAGE_MASK;
	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
		return -EINVAL;
	}
	bo->destroy = destroy;

	spin_lock_init(&bo->lock);
	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	atomic_set(&bo->reserved, 1);
	init_waitqueue_head(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	bo->bdev = bdev;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->buffer_start = buffer_start & PAGE_MASK;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->seq_valid = false;
	bo->persistant_swap_storage = persistant_swap_storage;
	bo->acc_size = acc_size;

	ret = ttm_bo_check_placement(bo, flags, 0ULL);
	if (unlikely(ret != 0))
		goto out_err;

	/*
	 * If no caching attributes are set, accept any form of caching.
	 */

	if ((flags & TTM_PL_MASK_CACHING) == 0)
		flags |= TTM_PL_MASK_CACHING;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */

	if (bo->type == ttm_bo_type_device) {
		ret = ttm_bo_setup_vm(bo);
		if (ret)
			goto out_err;
	}

	ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
	if (ret)
		goto out_err;

	ttm_bo_unreserve(bo);
	return 0;

out_err:
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_buffer_object_init);

static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
				 unsigned long num_pages)
{
	size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
	    PAGE_MASK;

	return bdev->ttm_bo_size + 2 * page_array_size;
}

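/*
 * Worked example (illustrative, 64-bit with 4 KiB pages): a 1 MiB buffer
 * has 256 pages, so 256 * sizeof(void *) = 2048 bytes of page array,
 * which the expression above rounds up to one 4 KiB page; the accounted
 * size is then bdev->ttm_bo_size + 2 * 4096 bytes.
 */
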
int ttm_buffer_object_create(struct ttm_bo_device *bdev,
			     unsigned long size,
			     enum ttm_bo_type type,
			     uint32_t flags,
			     uint32_t page_alignment,
			     unsigned long buffer_start,
			     bool interruptible,
			     struct file *persistant_swap_storage,
			     struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	int ret;
	struct ttm_mem_global *mem_glob = bdev->mem_glob;

	size_t acc_size =
	    ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
	if (unlikely(ret != 0))
		return ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (unlikely(bo == NULL)) {
		ttm_mem_global_free(mem_glob, acc_size, false);
		return -ENOMEM;
	}

	ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
				     page_alignment, buffer_start,
				     interruptible,
				     persistant_swap_storage, acc_size, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}

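/*
 * Usage sketch (illustrative; "hypothetical_bdev" is not defined in this
 * file): creating a page-sized, cached kernel buffer:
 *
 *	struct ttm_buffer_object *bo;
 *	int ret;
 *
 *	ret = ttm_buffer_object_create(hypothetical_bdev, PAGE_SIZE,
 *				       ttm_bo_type_kernel,
 *				       TTM_PL_FLAG_SYSTEM |
 *				       TTM_PL_FLAG_CACHED,
 *				       0, 0, false, NULL, &bo);
 */
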
static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
			     uint32_t mem_type, bool allow_errors)
{
	int ret;

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->lock);

	if (ret && allow_errors)
		goto out;

	if (bo->mem.mem_type == mem_type)
		ret = ttm_bo_evict(bo, mem_type, false, false);

	if (ret) {
		if (allow_errors) {
			goto out;
		} else {
			ret = 0;
			printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
		}
	}

out:
	return ret;
}

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   struct list_head *head,
				   unsigned mem_type, bool allow_errors)
{
	struct ttm_buffer_object *entry;
	int ret;
	int put_count;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&bdev->lru_lock);

	while (!list_empty(head)) {
		entry = list_first_entry(head, struct ttm_buffer_object, lru);
		kref_get(&entry->list_kref);
		ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
		put_count = ttm_bo_del_from_lru(entry);
		spin_unlock(&bdev->lru_lock);
		while (put_count--)
			kref_put(&entry->list_kref, ttm_bo_ref_bug);
		BUG_ON(ret);
		ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
		ttm_bo_unreserve(entry);
		kref_put(&entry->list_kref, ttm_bo_release_list);
		spin_lock(&bdev->lru_lock);
	}

	spin_unlock(&bdev->lru_lock);

	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
		return ret;
	}

	if (!man->has_type) {
		printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
		       "memory manager type %u\n", mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);

		spin_lock(&bdev->lru_lock);
		if (drm_mm_clean(&man->manager))
			drm_mm_takedown(&man->manager);
		else
			ret = -EBUSY;

		spin_unlock(&bdev->lru_lock);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX
		       "Illegal memory manager memory type %u.\n",
		       mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		printk(KERN_ERR TTM_PFX
		       "Memory type %u has not been initialized.\n",
		       mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
		   unsigned long p_offset, unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	if (type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
		return ret;
	}

	man = &bdev->man[type];
	if (man->has_type) {
		printk(KERN_ERR TTM_PFX
		       "Memory manager already initialized for type %d\n",
		       type);
		return ret;
	}

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		if (!p_size) {
			printk(KERN_ERR TTM_PFX
			       "Zero size memory manager type %d\n",
			       type);
			return ret;
		}
		ret = drm_mm_init(&man->manager, p_offset, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

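/*
 * Illustrative sketch (an assumption about a typical driver, not code
 * from this file): a device with 256 MiB of VRAM would set up its manager
 * from the load hook as
 *
 *	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, 0,
 *			     (256 * 1024 * 1024) >> PAGE_SHIFT);
 *
 * p_offset and p_size are in pages; they seed drm_mm_init() directly.
 */
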
int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				printk(KERN_ERR TTM_PFX
				       "DRM memory manager type %d "
				       "is not clean.\n", i);
			}
			man->has_type = false;
		}
	}

	if (!cancel_delayed_work(&bdev->wq))
		flush_scheduled_work();

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&bdev->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&bdev->lru_lock);

	ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
	write_lock(&bdev->vm_lock);
	drm_mm_takedown(&bdev->addr_space_mm);
	write_unlock(&bdev->vm_lock);

	__free_page(bdev->dummy_read_page);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

/**
 * This function is intended to be called on drm driver load.
 * If you decide to call it from firstopen, you must protect the call
 * from a potentially racing ttm_bo_driver_finish in lastclose.
 * (This may happen on X server restart).
 */

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_mem_global *mem_glob,
		       struct ttm_bo_driver *driver, uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	bdev->dummy_read_page = NULL;
	rwlock_init(&bdev->vm_lock);
	spin_lock_init(&bdev->lru_lock);

	bdev->driver = driver;
	bdev->mem_glob = mem_glob;

	memset(bdev->man, 0, sizeof(bdev->man));

	bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
	if (unlikely(bdev->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
	if (unlikely(ret != 0))
		goto out_err1;

	bdev->addr_space_rb = RB_ROOT;
	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
	if (unlikely(ret != 0))
		goto out_err2;

	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	bdev->nice_mode = true;
	INIT_LIST_HEAD(&bdev->ddestroy);
	INIT_LIST_HEAD(&bdev->swap_lru);
	bdev->dev_mapping = NULL;
	bdev->need_dma32 = need_dma32;
	ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
	if (unlikely(ret != 0)) {
		printk(KERN_ERR TTM_PFX
		       "Could not register buffer object swapout.\n");
		goto out_err2;
	}

	bdev->ttm_bo_extra_size =
		ttm_round_pot(sizeof(struct ttm_tt)) +
		ttm_round_pot(sizeof(struct ttm_backend));

	bdev->ttm_bo_size = bdev->ttm_bo_extra_size +
		ttm_round_pot(sizeof(struct ttm_buffer_object));

	return 0;
out_err2:
	ttm_bo_clean_mm(bdev, 0);
out_err1:
	__free_page(bdev->dummy_read_page);
out_err0:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);

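/*
 * Illustrative sketch (the names are driver-side assumptions, not part of
 * this file): called once at driver load, e.g.
 *
 *	ret = ttm_bo_device_init(&hypothetical_priv->bdev,
 *				 hypothetical_mem_glob,
 *				 &hypothetical_bo_driver,
 *				 HYPOTHETICAL_FILE_PAGE_OFFSET, false);
 *
 * where the last-but-one argument is the driver's mmap offset base for
 * buffer objects.
 */
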
/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
		      struct ttm_mem_reg *mem,
		      unsigned long *bus_base,
		      unsigned long *bus_offset, unsigned long *bus_size)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	*bus_size = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;

	if (ttm_mem_reg_is_pci(bdev, mem)) {
		*bus_offset = mem->mm_node->start << PAGE_SHIFT;
		*bus_size = mem->num_pages << PAGE_SHIFT;
		*bus_base = man->io_offset;
	}

	return 0;
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	loff_t offset = (loff_t) bo->addr_space_offset;
	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

	if (!bdev->dev_mapping)
		return;

	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct ttm_buffer_object *cur_bo;
	unsigned long offset = bo->vm_node->start;
	unsigned long cur_offset;

	while (*cur) {
		parent = *cur;
		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
		cur_offset = cur_bo->vm_node->start;
		if (offset < cur_offset)
			cur = &parent->rb_left;
		else if (offset > cur_offset)
			cur = &parent->rb_right;
		else
			BUG();
	}

	rb_link_node(&bo->vm_rb, parent, cur);
	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
}

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

retry_pre_get:
	ret = drm_mm_pre_get(&bdev->addr_space_mm);
	if (unlikely(ret != 0))
		return ret;

	write_lock(&bdev->vm_lock);
	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
					 bo->mem.num_pages, 0, 0);

	if (unlikely(bo->vm_node == NULL)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
					      bo->mem.num_pages, 0);

	if (unlikely(bo->vm_node == NULL)) {
		write_unlock(&bdev->vm_lock);
		goto retry_pre_get;
	}

	ttm_bo_vm_insert_rb(bo);
	write_unlock(&bdev->vm_lock);
	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

	return 0;
out_unlock:
	write_unlock(&bdev->vm_lock);
	return ret;
}

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	void *sync_obj;
	void *sync_obj_arg;
	int ret = 0;

	if (likely(bo->sync_obj == NULL))
		return 0;

	while (bo->sync_obj) {

		if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bo->lock);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		sync_obj_arg = bo->sync_obj_arg;
		spin_unlock(&bo->lock);
		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bo->lock);
			return ret;
		}
		spin_lock(&bo->lock);
		if (likely(bo->sync_obj == sync_obj &&
			   bo->sync_obj_arg == sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bo->lock);
		} else {
			/* Drop our extra reference if the fence changed. */
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bo->lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
{
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
}

int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
			     bool no_wait)
{
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
		if (no_wait)
			return -EBUSY;
		else if (interruptible) {
			ret = wait_event_interruptible
			    (bo->event_queue, atomic_read(&bo->reserved) == 0);
			if (unlikely(ret != 0))
				return -ERESTART;
		} else {
			wait_event(bo->event_queue,
				   atomic_read(&bo->reserved) == 0);
		}
	}
	return 0;
}

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve instead of ttm_bo_block_reservation
	 * makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		return ret;
	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	spin_unlock(&bo->lock);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}

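/*
 * Usage sketch (illustrative): CPU writes are bracketed by a grab/release
 * pair so they stay ordered against GPU access and eviction:
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, false);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... write through a ttm_bo_kmap() mapping ...
 *	ttm_bo_synccpu_write_release(bo);
 */
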
void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	if (atomic_dec_and_test(&bo->cpu_writers))
		wake_up_all(&bo->event_queue);
}

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo device's swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_device *bdev =
	    container_of(shrink, struct ttm_bo_device, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&bdev->lru_lock);
	while (ret == -EBUSY) {
		if (unlikely(list_empty(&bdev->swap_lru))) {
			spin_unlock(&bdev->lru_lock);
			return -EBUSY;
		}

		bo = list_first_entry(&bdev->swap_lru,
				      struct ttm_buffer_object, swap);
		kref_get(&bo->list_kref);

		/**
		 * Reserve buffer. Since we unlock while sleeping, we need
		 * to re-check that nobody removed us from the swap-list while
		 * we slept.
		 */

		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
		if (unlikely(ret == -EBUSY)) {
			spin_unlock(&bdev->lru_lock);
			ttm_bo_wait_unreserved(bo, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			spin_lock(&bdev->lru_lock);
		}
	}

	BUG_ON(ret != 0);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&bdev->lru_lock);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->lock);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
out:

	/**
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->shrink) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);