/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"

static int ttm_tt_swapin(struct ttm_tt *ttm);

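/*
 * Per-architecture cache flush helpers. On x86 each page of the ttm is
 * flushed line by line with clflush; other architectures are handled in
 * ttm_tt_cache_flush() below.
 */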
#if defined(CONFIG_X86)
static void ttm_tt_clflush_page(struct page *page)
{
        uint8_t *page_virtual;
        unsigned int i;

        if (unlikely(page == NULL))
                return;

        page_virtual = kmap_atomic(page, KM_USER0);

        for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
                clflush(page_virtual + i);

        kunmap_atomic(page_virtual, KM_USER0);
}

static void ttm_tt_cache_flush_clflush(struct page *pages[],
                                       unsigned long num_pages)
{
        unsigned long i;

        mb();
        for (i = 0; i < num_pages; ++i)
                ttm_tt_clflush_page(*pages++);
        mb();
}
#elif !defined(__powerpc__)
static void ttm_tt_ipi_handler(void *null)
{
        ;
}
#endif

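/*
 * Flush the kernel map of the given pages before their caching
 * attributes are changed. x86 uses clflush when the CPU supports it,
 * powerpc flushes the dcache range, and the generic fallback merely
 * runs an empty IPI handler on every CPU as a synchronization point.
 */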
void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
{
#if defined(CONFIG_X86)
        if (cpu_has_clflush) {
                ttm_tt_cache_flush_clflush(pages, num_pages);
                return;
        }
#elif defined(__powerpc__)
        unsigned long i;

        for (i = 0; i < num_pages; ++i) {
                unsigned long start = (unsigned long)page_address(pages[i]);

                flush_dcache_range(start, start + PAGE_SIZE);
        }
#else
        if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
                printk(KERN_ERR TTM_PFX
                       "Timed out waiting for drm cache flush.\n");
#endif
}

/**
 * Allocates storage for pointers to the pages that back the ttm.
 *
 * Uses kmalloc if possible. Otherwise falls back to vmalloc.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
        unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
        ttm->pages = NULL;

        if (size <= PAGE_SIZE)
                ttm->pages = kzalloc(size, GFP_KERNEL);

        if (!ttm->pages) {
                ttm->pages = vmalloc_user(size);
                if (ttm->pages)
                        ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
        }
}

static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
        if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
                vfree(ttm->pages);
                ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
        } else {
                kfree(ttm->pages);
        }
        ttm->pages = NULL;
}

static struct page *ttm_tt_alloc_page(unsigned page_flags)
{
        if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
                return alloc_page(GFP_HIGHUSER | __GFP_ZERO);

        return alloc_page(GFP_HIGHUSER);
}

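/*
 * Unpin pages previously obtained with get_user_pages(), marking them
 * dirty when the ttm was both mapped for write and actually written
 * to, and return their accounting to the global memory zone.
 */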
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
        int i, write, dirty;
        struct page *page;
        struct ttm_backend *be = ttm->be;

        BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
        write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
        dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

        if (be)
                be->func->clear(be);

        for (i = 0; i < ttm->num_pages; ++i) {
                page = ttm->pages[i];
                if (page == NULL)
                        continue;

                if (page == ttm->dummy_read_page) {
                        BUG_ON(write);
                        continue;
                }

                if (write && dirty && !PageReserved(page))
                        set_page_dirty_lock(page);

                ttm->pages[i] = NULL;
                ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
                put_page(page);
        }
        ttm->state = tt_unpopulated;
        ttm->first_himem_page = ttm->num_pages;
        ttm->last_lomem_page = -1;
}

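/*
 * Allocate pages until the slot at @index is populated. Newly allocated
 * highmem pages are placed from the top of the page array downwards and
 * lowmem pages from the bottom upwards, so the slot eventually fills.
 */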
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
        struct page *p;
        struct ttm_bo_device *bdev = ttm->bdev;
        struct ttm_mem_global *mem_glob = bdev->mem_glob;
        int ret;

        while (NULL == (p = ttm->pages[index])) {
                p = ttm_tt_alloc_page(ttm->page_flags);
                if (!p)
                        return NULL;

                if (PageHighMem(p)) {
                        ret = ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
                                                   false, false, true);
                        if (unlikely(ret != 0))
                                goto out_err;
                        ttm->pages[--ttm->first_himem_page] = p;
                } else {
                        ret = ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
                                                   false, false, false);
                        if (unlikely(ret != 0))
                                goto out_err;
                        ttm->pages[++ttm->last_lomem_page] = p;
                }
        }
        return p;
out_err:
        put_page(p);
        return NULL;
}

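/*
 * As __ttm_tt_get_page(), but first swap the ttm contents back in if
 * they have been swapped out.
 */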
struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
        int ret;

        if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
                ret = ttm_tt_swapin(ttm);
                if (unlikely(ret != 0))
                        return NULL;
        }
        return __ttm_tt_get_page(ttm, index);
}

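/*
 * Make sure all pages of the ttm are allocated (swapping in first if
 * needed) and hand the complete page array to the backend. On success
 * the ttm moves to the tt_unbound state.
 */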
int ttm_tt_populate(struct ttm_tt *ttm)
{
        struct page *page;
        unsigned long i;
        struct ttm_backend *be;
        int ret;

        if (ttm->state != tt_unpopulated)
                return 0;

        if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
                ret = ttm_tt_swapin(ttm);
                if (unlikely(ret != 0))
                        return ret;
        }

        be = ttm->be;

        for (i = 0; i < ttm->num_pages; ++i) {
                page = __ttm_tt_get_page(ttm, i);
                if (!page)
                        return -ENOMEM;
        }

        be->func->populate(be, ttm->num_pages, ttm->pages,
                           ttm->dummy_read_page);
        ttm->state = tt_unbound;
        return 0;
}

#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
                                          enum ttm_caching_state c_state)
{
        if (PageHighMem(p))
                return 0;

        switch (c_state) {
        case tt_cached:
                return set_pages_wb(p, 1);
        case tt_wc:
                return set_memory_wc((unsigned long) page_address(p), 1);
        default:
                return set_pages_uc(p, 1);
        }
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
                                          enum ttm_caching_state c_state)
{
        return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
                              enum ttm_caching_state c_state)
{
        int i, j;
        struct page *cur_page;
        int ret;

        if (ttm->caching_state == c_state)
                return 0;

        if (c_state != tt_cached) {
                ret = ttm_tt_populate(ttm);
                if (unlikely(ret != 0))
                        return ret;
        }

        if (ttm->caching_state == tt_cached)
                ttm_tt_cache_flush(ttm->pages, ttm->num_pages);

        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages[i];
                if (likely(cur_page != NULL)) {
                        ret = ttm_tt_set_page_caching(cur_page, c_state);
                        if (unlikely(ret != 0))
                                goto out_err;
                }
        }

        ttm->caching_state = c_state;
        return 0;

out_err:
        /* Roll back pages already converted to the previous state. */
        for (j = 0; j < i; ++j) {
                cur_page = ttm->pages[j];
                if (likely(cur_page != NULL)) {
                        (void)ttm_tt_set_page_caching(cur_page,
                                                      ttm->caching_state);
                }
        }
        return ret;
}

int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
        enum ttm_caching_state state;

        if (placement & TTM_PL_FLAG_WC)
                state = tt_wc;
        else if (placement & TTM_PL_FLAG_UNCACHED)
                state = tt_uncached;
        else
                state = tt_cached;

        return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

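/*
 * Free all pages the ttm allocated itself, first switching them back to
 * the cached attribute so they return to the allocator in their default
 * state.
 */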
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
        int i;
        struct page *cur_page;
        struct ttm_backend *be = ttm->be;

        if (be)
                be->func->clear(be);
        (void)ttm_tt_set_caching(ttm, tt_cached);
        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages[i];
                ttm->pages[i] = NULL;
                if (cur_page) {
                        if (page_count(cur_page) != 1)
                                printk(KERN_ERR TTM_PFX
                                       "Erroneous page count. "
                                       "Leaking pages.\n");
                        ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
                                            PageHighMem(cur_page));
                        __free_page(cur_page);
                }
        }
        ttm->state = tt_unpopulated;
        ttm->first_himem_page = ttm->num_pages;
        ttm->last_lomem_page = -1;
}

void ttm_tt_destroy(struct ttm_tt *ttm)
{
        struct ttm_backend *be;

        if (unlikely(ttm == NULL))
                return;

        be = ttm->be;
        if (likely(be != NULL)) {
                be->func->destroy(be);
                ttm->be = NULL;
        }

        if (likely(ttm->pages != NULL)) {
                if (ttm->page_flags & TTM_PAGE_FLAG_USER)
                        ttm_tt_free_user_pages(ttm);
                else
                        ttm_tt_free_alloced_pages(ttm);

                ttm_tt_free_page_directory(ttm);
        }

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
            ttm->swap_storage)
                fput(ttm->swap_storage);

        kfree(ttm);
}

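/*
 * Back the ttm with pages pinned from @tsk's address space, starting at
 * @start. The pinned pages are accounted as lowmem pages.
 */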
int ttm_tt_set_user(struct ttm_tt *ttm,
                    struct task_struct *tsk,
                    unsigned long start, unsigned long num_pages)
{
        struct mm_struct *mm = tsk->mm;
        int ret;
        int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
        struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;

        BUG_ON(num_pages != ttm->num_pages);
        BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

        /**
         * Account user pages as lowmem pages for now.
         */

        ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
                                   false, false, false);
        if (unlikely(ret != 0))
                return ret;

        down_read(&mm->mmap_sem);
        ret = get_user_pages(tsk, mm, start, num_pages,
                             write, 0, ttm->pages, NULL);
        up_read(&mm->mmap_sem);

        if (ret != num_pages && write) {
                ttm_tt_free_user_pages(ttm);
                ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
                return -ENOMEM;
        }

        ttm->tsk = tsk;
        ttm->start = start;
        ttm->state = tt_unbound;

        return 0;
}

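/*
 * Create a ttm large enough to back @size bytes, rounded up to whole
 * pages. A typical caller allocates it when setting up a buffer object;
 * a rough sketch (bo and glob are illustrative names, not part of this
 * file):
 *
 *      ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
 *                          page_flags, glob->dummy_read_page);
 *      if (unlikely(ttm == NULL))
 *              return -ENOMEM;
 *
 * Returns NULL on failure, after logging the cause.
 */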
struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
                             uint32_t page_flags, struct page *dummy_read_page)
{
        struct ttm_bo_driver *bo_driver = bdev->driver;
        struct ttm_tt *ttm;

        if (!bo_driver)
                return NULL;

        ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
        if (!ttm)
                return NULL;

        ttm->bdev = bdev;
        ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        ttm->first_himem_page = ttm->num_pages;
        ttm->last_lomem_page = -1;
        ttm->caching_state = tt_cached;
        ttm->page_flags = page_flags;
        ttm->dummy_read_page = dummy_read_page;

        ttm_tt_alloc_page_directory(ttm);
        if (ttm->pages == NULL) {
                ttm_tt_destroy(ttm);
                printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
                return NULL;
        }
        ttm->be = bo_driver->create_ttm_backend_entry(bdev);
        if (ttm->be == NULL) {
                ttm_tt_destroy(ttm);
                printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
                return NULL;
        }
        ttm->state = tt_unpopulated;
        return ttm;
}

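/*
 * Ask the backend to unbind the ttm from the aperture. The pages stay
 * allocated; only the state drops back to tt_unbound.
 */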
void ttm_tt_unbind(struct ttm_tt *ttm)
{
        int ret;
        struct ttm_backend *be = ttm->be;

        if (ttm->state == tt_bound) {
                ret = be->func->unbind(be);
                BUG_ON(ret);
                ttm->state = tt_unbound;
        }
}

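/*
 * Populate the ttm if necessary and bind it at the location described
 * by @bo_mem. An illustrative call from a buffer-object move path:
 *
 *      ret = ttm_tt_bind(bo->ttm, &bo->mem);
 *      if (unlikely(ret != 0))
 *              return ret;
 */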
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
        int ret = 0;
        struct ttm_backend *be;

        if (!ttm)
                return -EINVAL;

        if (ttm->state == tt_bound)
                return 0;

        be = ttm->be;

        ret = ttm_tt_populate(ttm);
        if (ret)
                return ret;

        ret = be->func->bind(be, bo_mem);
        if (ret) {
                printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
                return ret;
        }

        ttm->state = tt_bound;

        if (ttm->page_flags & TTM_PAGE_FLAG_USER)
                ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
        return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

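/*
 * Swap the ttm contents back in: re-pin user pages for user ttms, or
 * copy each page from the shmem swap storage, then clear the
 * TTM_PAGE_FLAG_SWAPPED flag.
 */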
static int ttm_tt_swapin(struct ttm_tt *ttm)
{
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
        void *from_virtual;
        void *to_virtual;
        int i;
        int ret;

        if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
                ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
                                      ttm->num_pages);
                if (unlikely(ret != 0))
                        return ret;

                ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
                return 0;
        }

        swap_storage = ttm->swap_storage;
        BUG_ON(swap_storage == NULL);

        swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = read_mapping_page(swap_space, i, NULL);
                if (IS_ERR(from_page))
                        goto out_err;
                to_page = __ttm_tt_get_page(ttm, i);
                if (unlikely(to_page == NULL))
                        goto out_err;

                preempt_disable();
                from_virtual = kmap_atomic(from_page, KM_USER0);
                to_virtual = kmap_atomic(to_page, KM_USER1);
                memcpy(to_virtual, from_virtual, PAGE_SIZE);
                kunmap_atomic(to_virtual, KM_USER1);
                kunmap_atomic(from_virtual, KM_USER0);
                preempt_enable();
                page_cache_release(from_page);
        }

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
                fput(swap_storage);
        ttm->swap_storage = NULL;
        ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

        return 0;
out_err:
        ttm_tt_free_alloced_pages(ttm);
        return -ENOMEM;
}

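/*
 * Copy the ttm contents out to shmem-backed swap storage so the pages
 * can be reclaimed. With @persistant_swap_storage == NULL a private
 * shmem file is created; otherwise the given file is reused and marked
 * persistent, so it survives ttm_tt_destroy().
 */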
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
{
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
        void *from_virtual;
        void *to_virtual;
        int i;

        BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
        BUG_ON(ttm->caching_state != tt_cached);

        /*
         * For user buffers, just unpin the pages, as there should be
         * vma references.
         */

        if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
                ttm_tt_free_user_pages(ttm);
                ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
                ttm->swap_storage = NULL;
                return 0;
        }

        if (!persistant_swap_storage) {
                swap_storage = shmem_file_setup("ttm swap",
                                                ttm->num_pages << PAGE_SHIFT,
                                                0);
                if (unlikely(IS_ERR(swap_storage))) {
                        printk(KERN_ERR "Failed allocating swap storage.\n");
                        return -ENOMEM;
                }
        } else
                swap_storage = persistant_swap_storage;

        swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

        for (i = 0; i < ttm->num_pages; ++i) {
                from_page = ttm->pages[i];
                if (unlikely(from_page == NULL))
                        continue;
                /* read_mapping_page() returns ERR_PTR on failure, never NULL. */
                to_page = read_mapping_page(swap_space, i, NULL);
                if (unlikely(IS_ERR(to_page)))
                        goto out_err;

                preempt_disable();
                from_virtual = kmap_atomic(from_page, KM_USER0);
                to_virtual = kmap_atomic(to_page, KM_USER1);
                memcpy(to_virtual, from_virtual, PAGE_SIZE);
                kunmap_atomic(to_virtual, KM_USER1);
                kunmap_atomic(from_virtual, KM_USER0);
                preempt_enable();
                set_page_dirty(to_page);
                mark_page_accessed(to_page);
                page_cache_release(to_page);
        }

        ttm_tt_free_alloced_pages(ttm);
        ttm->swap_storage = swap_storage;
        ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
        if (persistant_swap_storage)
                ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;

        return 0;
out_err:
        if (!persistant_swap_storage)
                fput(swap_storage);

        return -ENOMEM;
}