/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <linux/mm.h>		/* vm_insert_mixed(), vm_get_page_prot() */
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#define TTM_BO_VM_NUM_PREFAULT 16
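
/*
 * ttm_bo_vm_lookup_rb - find the buffer object backing a device-address range
 *
 * Walks bdev->addr_space_rb (keyed on each object's vm_node->start) for the
 * rightmost object starting at or below @page_start, then checks that the
 * requested @num_pages fit entirely inside it. Returns NULL when no object
 * covers the whole range. Must be called with bdev->vm_lock held.
 */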
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
						     unsigned long page_start,
						     unsigned long num_pages)
{
	struct rb_node *cur = bdev->addr_space_rb.rb_node;
	unsigned long cur_offset;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *best_bo = NULL;

	while (likely(cur != NULL)) {
		bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
		cur_offset = bo->vm_node->start;
		if (page_start >= cur_offset) {
			/* Candidate match; keep searching to the right. */
			cur = cur->rb_right;
			best_bo = bo;
			if (page_start == cur_offset)
				break;
		} else
			cur = cur->rb_left;
	}

	if (unlikely(best_bo == NULL))
		return NULL;

	/* The requested range must lie entirely within the object. */
	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
		     (page_start + num_pages)))
		return NULL;

	return best_bo;
}
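
/*
 * ttm_bo_vm_fault - .fault handler for TTM buffer object vmas
 *
 * Resolves a fault on a bo mapping by inserting the backing page (or iomem
 * pfn) into the page tables with vm_insert_mixed(), speculatively
 * prefaulting up to TTM_BO_VM_NUM_PREFAULT pages per fault. Returns
 * VM_FAULT_NOPAGE on success or when the fault should simply be retried,
 * and VM_FAULT_OOM or VM_FAULT_SIGBUS on errors.
 */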
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	bool is_iomem;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after scheduling.
	 */

	ret = ttm_bo_reserve(bo, true, true, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY)
			set_need_resched();
		return VM_FAULT_NOPAGE;
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	spin_lock(&bo->lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		ret = ttm_bo_wait(bo, false, true, false);
		spin_unlock(&bo->lock);
		if (unlikely(ret != 0)) {
			retval = (ret != -ERESTART) ?
			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
			goto out_unlock;
		}
	} else
		spin_unlock(&bo->lock);

	ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
				&bus_size);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	is_iomem = (bus_size != 0);

	/*
	 * Translate the fault address into a page offset within the
	 * buffer object, and compute the first page past the vma.
	 */
	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;
	page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */

	if (is_iomem) {
		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
						vma->vm_page_prot);
	} else {
		ttm = bo->ttm;
		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    vm_get_page_prot(vma->vm_flags) :
		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */

	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {

		if (is_iomem)
			pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
			    page_offset;
		else {
			page = ttm_tt_get_page(ttm, page_offset);
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			pfn = page_to_pfn(page);
		}

		ret = vm_insert_mixed(vma, address, pfn);

		/*
		 * Somebody beat us to this PTE or prefaulting to
		 * an already populated PTE, or prefaulting error.
		 */

		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval = (ret == -ENOMEM) ?
			    VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}

out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
}
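
/*
 * ttm_bo_vm_open / ttm_bo_vm_close - vma lifetime hooks
 *
 * Each vma holds one reference on the buffer object, taken on mmap (or on
 * vma duplication, e.g. at fork) and dropped when the vma goes away.
 */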
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	(void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}

static struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};
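
/*
 * ttm_bo_mmap - mmap helper for TTM-based drivers
 *
 * Looks up the buffer object at vma->vm_pgoff, lets the driver veto the
 * mapping via its verify_access hook, and wires the vma to ttm_bo_vm_ops.
 * A driver would typically call this straight from its
 * file_operations::mmap hook; a minimal sketch (the "foo" names below are
 * illustrative only, not part of any real driver):
 *
 *	static int foo_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct foo_device *fdev = filp->private_data;
 *
 *		return ttm_bo_mmap(filp, vma, &fdev->bdev);
 *	}
 */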
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
				 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL)) {
		printk(KERN_ERR TTM_PFX
		       "Could not find buffer object to map.\n");
		return -EINVAL;
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
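
/*
 * ttm_fbdev_mmap - mmap a known buffer object at offset zero
 *
 * Like ttm_bo_mmap(), but for a buffer object the caller already holds
 * (typically a fbdev framebuffer bo), so no address-space lookup or
 * verify_access check is performed; only offset-zero mappings are allowed.
 */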
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
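
/*
 * ttm_bo_io - read/write helper operating on a buffer object's backing store
 *
 * Looks up the buffer object containing *f_pos, kmaps the affected page
 * range and copies up to @count bytes from @wbuf (write) or to @rbuf
 * (read). A driver could back its file_operations::read with it along
 * these lines (sketch; the "foo" names are illustrative only):
 *
 *	static ssize_t foo_read(struct file *filp, char __user *buf,
 *				size_t count, loff_t *f_pos)
 *	{
 *		struct foo_device *fdev = filp->private_data;
 *
 *		return ttm_bo_io(&fdev->bdev, filp, NULL, buf, count,
 *				 f_pos, false);
 *	}
 */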
ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	void *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	/* Clamp the transfer to what is left of the object past *f_pos. */
	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -ERESTART:
		ret = -EINTR;
		goto out_unref;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
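
/*
 * ttm_bo_fbdev_io - read/write helper for a known buffer object
 *
 * Same copy path as ttm_bo_io(), but with the buffer object passed in
 * directly and *f_pos interpreted relative to the start of that object,
 * so no address-space lookup or verify_access check is needed.
 */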
ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, loff_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	void *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	/* Clamp the transfer to what is left of the object past *f_pos. */
	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -ERESTART:
		return -EINTR;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;
}