/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (old_mem->mm_node) {
		spin_lock(&bo->bdev->lru_lock);
		drm_mm_put_block(old_mem->mm_node);
		spin_unlock(&bo->bdev->lru_lock);
	}
	old_mem->mm_node = NULL;
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	uint32_t save_flags = old_mem->placement;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
		save_flags = old_mem->placement;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long bus_base;
	int ret;
	void *addr;

	*virtual = NULL;
	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
	if (ret || bus_size == 0)
		return ret;

	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
		addr = (void *)(((u8 *) man->io_addr) + bus_offset);
	else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(bus_base + bus_offset, bus_size);
		else
			addr = ioremap_nocache(bus_base + bus_offset, bus_size);
		if (!addr)
			return -ENOMEM;
	}
	*virtual = addr;
	return 0;
}

void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
		iounmap(virtual);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP = (uint32_t *)((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP = (uint32_t *)((unsigned long)src + (page << PAGE_SHIFT));
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm_tt_get_page(ttm, page);
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, KM_USER0, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst, KM_USER0);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm_tt_get_page(ttm, page);
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, KM_USER0, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src, KM_USER0);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	uint32_t save_flags = old_mem->placement;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir = 1;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	/*
	 * Copy backwards (memmove-style) when both ranges live in the
	 * same memory type and the destination starts below the end of
	 * the source, so an overlapping move doesn't corrupt data.
	 */
	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->mm_node->start <
	     old_mem->mm_node->start + old_mem->mm_node->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page, prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page, prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	ttm_bo_free_old_node(bo);

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

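/*
 * Illustrative sketch, not part of the original file: a minimal,
 * hypothetical ttm_bo_driver::move() hook built from the helpers above.
 * The function name and the fallback policy are assumptions; real drivers
 * of this era (e.g. radeon) attempted a hardware blit first and only then
 * fell back to ttm_bo_move_ttm / ttm_bo_move_memcpy.
 */
static int __maybe_unused example_bo_move(struct ttm_buffer_object *bo,
					  bool evict, bool no_wait,
					  struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	/* SYSTEM <-> TT moves just (un)bind the existing ttm pages. */
	if ((old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT) ||
	    (old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM))
		return ttm_bo_move_ttm(bo, evict, no_wait, new_mem);

	/* Last resort: CPU copy through the ioremapped apertures. */
	return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
}
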
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 *
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	spin_lock_init(&fbo->lock);
	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	fbo->vm_node = NULL;

	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	if (fbo->mem.mm_node)
		fbo->mem.mm_node->private = (void *)fbo;
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;

	*new_obj = fbo;
	return 0;
}

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long bus_base,
			  unsigned long bus_offset,
			  unsigned long bus_size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bus_base + bus_offset,
						  bus_size);
		else
			map->virtual = ioremap_nocache(bus_base + bus_offset,
						       bus_size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_tt *ttm = bo->ttm;
	struct page *d;
	pgprot_t prot;
	int i;

	BUG_ON(!ttm);
	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */
		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm_tt_get_page(ttm, start_page);
		map->virtual = kmap(map->page);
	} else {
		/*
		 * Populate the part we're mapping:
		 */
		for (i = start_page; i < start_page + num_pages; ++i) {
			d = ttm_tt_get_page(ttm, i);
			if (!d)
				return -ENOMEM;
		}

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	int ret;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
				&bus_offset, &bus_size);
	if (ret)
		return ret;
	if (bus_size == 0) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		bus_offset += start_page << PAGE_SHIFT;
		bus_size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (map->virtual == NULL)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

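/*
 * Illustrative sketch, not part of the original file: the typical
 * kmap/kunmap pattern for filling one page of a buffer object from the
 * CPU. The function name is hypothetical; only the TTM calls and types
 * come from this file's API.
 */
static int __maybe_unused example_fill_page(struct ttm_buffer_object *bo,
					    unsigned long page_index,
					    const void *data)
{
	struct ttm_bo_kmap_obj map;
	int ret;

	ret = ttm_bo_kmap(bo, page_index, 1, &map);
	if (ret)
		return ret;

	/* The mapping may be I/O memory or ordinary kernel memory. */
	if (map.bo_kmap_type == ttm_bo_map_iomap ||
	    map.bo_kmap_type == ttm_bo_map_premapped)
		memcpy_toio((void __iomem *)map.virtual, data, PAGE_SIZE);
	else
		memcpy(map.virtual, data, PAGE_SIZE);

	ttm_bo_kunmap(&map);
	return 0;
}
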
int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
		    unsigned long dst_offset,
		    unsigned long *pfn, pgprot_t *prot)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long bus_base;
	int ret;

	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
				&bus_size);
	if (ret)
		return -EINVAL;
	if (bus_size != 0)
		*pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
	else {
		if (!bo->ttm)
			return -EINVAL;
		*pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
						   dst_offset >> PAGE_SHIFT));
	}
	*prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
		PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
	return 0;
}

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      void *sync_obj_arg,
			      bool evict, bool no_wait,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	uint32_t save_flags = old_mem->placement;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bo->lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	bo->sync_obj_arg = sync_obj_arg;
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bo->lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		ttm_bo_free_old_node(bo);
		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bo->lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
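
/*
 * Illustrative sketch, not part of the original file: how a driver might
 * finish an accelerated move. The function name is a hypothetical
 * placeholder, and 'fence' stands in for the driver's sync object for a
 * blit it has already queued to its DMA engine.
 */
static int __maybe_unused example_move_blit(struct ttm_buffer_object *bo,
					    bool evict, bool no_wait,
					    struct ttm_mem_reg *new_mem,
					    void *fence)
{
	/*
	 * Hand the fence to TTM. On eviction TTM waits for it; otherwise
	 * TTM hangs the old memory on a ghost object that is released
	 * once the fence has signaled, pipelining the move.
	 */
	return ttm_bo_move_accel_cleanup(bo, fence, NULL, evict,
					 no_wait, new_mem);
}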