/* drivers/gpu/drm/ttm/ttm_bo_vm.c */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <linux/mm.h>
#include <linux/version.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

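/* Number of pages mapped speculatively per fault, starting at the faulting page. */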
#define TTM_BO_VM_NUM_PREFAULT 16

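/*
 * Look up the buffer object whose device address space range covers
 * [page_start, page_start + num_pages) by walking the bdev rb_tree.
 * Returns NULL if no single object covers the whole range. Callers in
 * this file hold bdev->vm_lock (read) around the lookup.
 */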
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
                                                     unsigned long page_start,
                                                     unsigned long num_pages)
{
        struct rb_node *cur = bdev->addr_space_rb.rb_node;
        unsigned long cur_offset;
        struct ttm_buffer_object *bo;
        struct ttm_buffer_object *best_bo = NULL;

        while (likely(cur != NULL)) {
                bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
                cur_offset = bo->vm_node->start;
                if (page_start >= cur_offset) {
                        cur = cur->rb_right;
                        best_bo = bo;
                        if (page_start == cur_offset)
                                break;
                } else
                        cur = cur->rb_left;
        }

        if (unlikely(best_bo == NULL))
                return NULL;

        if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
                     (page_start + num_pages)))
                return NULL;

        return best_bo;
}

static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
            vma->vm_private_data;
        struct ttm_bo_device *bdev = bo->bdev;
        unsigned long bus_base;
        unsigned long bus_offset;
        unsigned long bus_size;
        unsigned long page_offset;
        unsigned long page_last;
        unsigned long pfn;
        struct ttm_tt *ttm = NULL;
        struct page *page;
        int ret;
        int i;
        bool is_iomem;
        unsigned long address = (unsigned long)vmf->virtual_address;
        int retval = VM_FAULT_NOPAGE;

        /*
         * Work around locking order reversal in fault / nopfn
         * between mmap_sem and bo_reserve: Perform a trylock operation
         * for reserve, and if it fails, retry the fault after scheduling.
         */

        ret = ttm_bo_reserve(bo, true, true, false, 0);
        if (unlikely(ret != 0)) {
                if (ret == -EBUSY)
                        set_need_resched();
                return VM_FAULT_NOPAGE;
        }

        /*
         * Wait for buffer data in transit, due to a pipelined
         * move.
         */

        spin_lock(&bo->lock);
        if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
                ret = ttm_bo_wait(bo, false, true, false);
                spin_unlock(&bo->lock);
                if (unlikely(ret != 0)) {
                        retval = (ret != -ERESTART) ?
                            VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
                        goto out_unlock;
                }
        } else
                spin_unlock(&bo->lock);

        ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
                                &bus_size);
        if (unlikely(ret != 0)) {
                retval = VM_FAULT_SIGBUS;
                goto out_unlock;
        }

        is_iomem = (bus_size != 0);

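        /*
         * Translate the faulting address into a page offset inside the
         * buffer object: vma->vm_pgoff is the device address space offset
         * the vma was mapped at, and bo->vm_node->start is the object's
         * base offset in that space. page_last is the first offset past
         * the range this vma maps.
         */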
        page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
            bo->vm_node->start - vma->vm_pgoff;
        page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
            bo->vm_node->start - vma->vm_pgoff;

        if (unlikely(page_offset >= bo->num_pages)) {
                retval = VM_FAULT_SIGBUS;
                goto out_unlock;
        }

        /*
         * Strictly, we're not allowed to modify vma->vm_page_prot here,
         * since the mmap_sem is only held in read mode. However, we
         * modify only the caching bits of vma->vm_page_prot and
         * consider those bits protected by the bo->mutex, as we should
         * be the only writers.
         * There shouldn't really be any readers of these bits except
         * within vm_insert_mixed()? fork?
         *
         * TODO: Add a list of vmas to the bo, and change the
         * vma->vm_page_prot when the object changes caching policy, with
         * the correct locks held.
         */

        if (is_iomem) {
                vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
                                                vma->vm_page_prot);
        } else {
                ttm = bo->ttm;
                vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
                    vm_get_page_prot(vma->vm_flags) :
                    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
        }

        /*
         * Speculatively prefault a number of pages. Only error on
         * first page.
         */

        for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {

                if (is_iomem)
                        pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
                            page_offset;
                else {
                        page = ttm_tt_get_page(ttm, page_offset);
                        if (unlikely(!page && i == 0)) {
                                retval = VM_FAULT_OOM;
                                goto out_unlock;
                        } else if (unlikely(!page)) {
                                break;
                        }
                        pfn = page_to_pfn(page);
                }

                ret = vm_insert_mixed(vma, address, pfn);
                /*
                 * Somebody beat us to this PTE or prefaulting to
                 * an already populated PTE, or prefaulting error.
                 */

                if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
                        break;
                else if (unlikely(ret != 0)) {
                        retval =
                            (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
                        goto out_unlock;
                }

                address += PAGE_SIZE;
                if (unlikely(++page_offset >= page_last))
                        break;
        }

out_unlock:
        ttm_bo_unreserve(bo);
        return retval;
}

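/*
 * Each vma holds a reference on the buffer object it maps; open/close keep
 * that reference balanced across vma duplication (e.g. fork) and unmap.
 */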
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo =
            (struct ttm_buffer_object *)vma->vm_private_data;

        (void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo =
            (struct ttm_buffer_object *)vma->vm_private_data;

        ttm_bo_unref(&bo);
        vma->vm_private_data = NULL;
}

static struct vm_operations_struct ttm_bo_vm_ops = {
        .fault = ttm_bo_vm_fault,
        .open = ttm_bo_vm_open,
        .close = ttm_bo_vm_close
};

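/*
 * mmap entry point for TTM-based drivers: look up the buffer object backing
 * the fake mmap offset (vma->vm_pgoff), let the driver verify access
 * permissions, and wire the vma up to ttm_bo_vm_ops.
 */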
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
                struct ttm_bo_device *bdev)
{
        struct ttm_bo_driver *driver;
        struct ttm_buffer_object *bo;
        int ret;

        read_lock(&bdev->vm_lock);
        bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
                                 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
        if (likely(bo != NULL))
                ttm_bo_reference(bo);
        read_unlock(&bdev->vm_lock);

        if (unlikely(bo == NULL)) {
                printk(KERN_ERR TTM_PFX
                       "Could not find buffer object to map.\n");
                return -EINVAL;
        }

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = -EPERM;
                goto out_unref;
        }
        ret = driver->verify_access(bo, filp);
        if (unlikely(ret != 0))
                goto out_unref;

        vma->vm_ops = &ttm_bo_vm_ops;

        /*
         * Note: We're transferring the bo reference to
         * vma->vm_private_data here.
         */

        vma->vm_private_data = bo;
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
        return 0;
out_unref:
        ttm_bo_unref(&bo);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
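
/*
 * Typical driver usage (illustrative sketch only; the "foo" names below are
 * hypothetical and not part of this file): a driver's file_operations::mmap
 * handler can simply resolve its device and forward to ttm_bo_mmap():
 *
 *	static int foo_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct foo_device *fdev = foo_device_from_file(filp);
 *
 *		return ttm_bo_mmap(filp, vma, &fdev->bdev);
 *	}
 */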
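/*
 * Map a single, known buffer object (e.g. an fbdev framebuffer) without
 * going through the address space lookup; the vma must start at offset 0
 * within the object.
 */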
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
        if (vma->vm_pgoff != 0)
                return -EACCES;

        vma->vm_ops = &ttm_bo_vm_ops;
        vma->vm_private_data = ttm_bo_reference(bo);
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
        return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);

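/*
 * read/write helper for TTM device files: find the buffer object at the
 * current file position, kmap the affected page range, and copy the data
 * to or from user space. Returns the number of bytes transferred or a
 * negative error code.
 */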
ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
                  const char __user *wbuf, char __user *rbuf, size_t count,
                  loff_t *f_pos, bool write)
{
        struct ttm_buffer_object *bo;
        struct ttm_bo_driver *driver;
        struct ttm_bo_kmap_obj map;
        unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
        unsigned long kmap_offset;
        unsigned long kmap_end;
        unsigned long kmap_num;
        size_t io_size;
        unsigned int page_offset;
        char *virtual;
        int ret;
        bool no_wait = false;
        bool dummy;

        read_lock(&bdev->vm_lock);
        bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
        if (likely(bo != NULL))
                ttm_bo_reference(bo);
        read_unlock(&bdev->vm_lock);

        if (unlikely(bo == NULL))
                return -EFAULT;

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = -EPERM;
                goto out_unref;
        }

        ret = driver->verify_access(bo, filp);
        if (unlikely(ret != 0))
                goto out_unref;

        kmap_offset = dev_offset - bo->vm_node->start;
        if (unlikely(kmap_offset >= bo->num_pages)) {
                ret = -EFBIG;
                goto out_unref;
        }

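        /*
         * Clamp the transfer to what remains of the object past the file
         * position, then to the caller's count, and work out which pages
         * of the object must be kmapped to cover it.
         */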
        page_offset = *f_pos & ~PAGE_MASK;
        io_size = bo->num_pages - kmap_offset;
        io_size = (io_size << PAGE_SHIFT) - page_offset;
        if (count < io_size)
                io_size = count;

        kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
        kmap_num = kmap_end - kmap_offset + 1;

        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

        switch (ret) {
        case 0:
                break;
        case -ERESTART:
                ret = -EINTR;
                goto out_unref;
        case -EBUSY:
                ret = -EAGAIN;
                goto out_unref;
        default:
                goto out_unref;
        }

        ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
        if (unlikely(ret != 0)) {
                ttm_bo_unreserve(bo);
                goto out_unref;
        }

        virtual = ttm_kmap_obj_virtual(&map, &dummy);
        virtual += page_offset;

        if (write)
                ret = copy_from_user(virtual, wbuf, io_size);
        else
                ret = copy_to_user(rbuf, virtual, io_size);

        ttm_bo_kunmap(&map);
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        if (unlikely(ret != 0))
                return -EFAULT;

        *f_pos += io_size;

        return io_size;
out_unref:
        ttm_bo_unref(&bo);
        return ret;
}

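/*
 * Same as ttm_bo_io(), but for a caller that already holds the buffer
 * object (e.g. the fbdev layer), so no address space lookup or
 * verify_access check is performed.
 */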
ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
                        char __user *rbuf, size_t count, loff_t *f_pos,
                        bool write)
{
        struct ttm_bo_kmap_obj map;
        unsigned long kmap_offset;
        unsigned long kmap_end;
        unsigned long kmap_num;
        size_t io_size;
        unsigned int page_offset;
        char *virtual;
        int ret;
        bool no_wait = false;
        bool dummy;

        kmap_offset = (*f_pos >> PAGE_SHIFT);
        if (unlikely(kmap_offset >= bo->num_pages))
                return -EFBIG;

        page_offset = *f_pos & ~PAGE_MASK;
        io_size = bo->num_pages - kmap_offset;
        io_size = (io_size << PAGE_SHIFT) - page_offset;
        if (count < io_size)
                io_size = count;

        kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
        kmap_num = kmap_end - kmap_offset + 1;

        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

        switch (ret) {
        case 0:
                break;
        case -ERESTART:
                return -EINTR;
        case -EBUSY:
                return -EAGAIN;
        default:
                return ret;
        }

        ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
        if (unlikely(ret != 0)) {
                ttm_bo_unreserve(bo);
                return ret;
        }

        virtual = ttm_kmap_obj_virtual(&map, &dummy);
        virtual += page_offset;

        if (write)
                ret = copy_from_user(virtual, wbuf, io_size);
        else
                ret = copy_to_user(rbuf, virtual, io_size);

        ttm_bo_kunmap(&map);
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        if (unlikely(ret != 0))
                return -EFAULT;

        *f_pos += io_size;

        return io_size;
}