/*
 * Copyright 2007 Nouveau Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
29 #include <sys/ioctl.h>
31 #include "nouveau_drmif.h"
32 #include "nouveau_local.h"
/*
 * Set up per-device buffer-object state for `dev`.
 * NOTE(review): function body is not visible in this chunk, so its
 * actions cannot be documented here.
 */
35 nouveau_bo_init(struct nouveau_device *dev)
/*
 * Tear down per-device buffer-object state (counterpart to
 * nouveau_bo_init). NOTE(review): body not visible in this chunk.
 */
41 nouveau_bo_takedown(struct nouveau_device *dev)
/*
 * Report whether the BO already has backing storage: either a
 * user-space/system-memory buffer (nvbo->sysmem) or a kernel GEM
 * object (nvbo->handle).
 */
46 nouveau_bo_allocated(struct nouveau_bo_priv *nvbo)
48 if (nvbo->sysmem || nvbo->handle)
/*
 * Allocate system-memory ("user") backing for the BO.
 * A user-supplied or already-allocated buffer takes the early path
 * below; otherwise nvbo->size bytes are malloc()'d into nvbo->sysmem.
 * NOTE(review): the check on the malloc() result is not visible in
 * this chunk — confirm allocation failure is handled.
 */
54 nouveau_bo_ualloc(struct nouveau_bo_priv *nvbo)
/* Already user-backed or already allocated: nothing new to allocate. */
56 if (nvbo->user || nvbo->sysmem) {
61 nvbo->sysmem = malloc(nvbo->size);
/*
 * Release the BO's system-memory backing (counterpart to
 * nouveau_bo_ualloc). NOTE(review): body not visible in this chunk;
 * presumably it skips user-owned buffers — confirm.
 */
69 nouveau_bo_ufree(struct nouveau_bo_priv *nvbo)
/*
 * Release the kernel GEM object backing this BO: drop any CPU mapping
 * with munmap(), then close the GEM handle via DRM_IOCTL_GEM_CLOSE.
 */
79 nouveau_bo_kfree(struct nouveau_bo_priv *nvbo)
81 struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
82 struct drm_gem_close req;
/* Unmap the CPU view before closing the kernel object. */
88 munmap(nvbo->map, nvbo->size);
92 req.handle = nvbo->handle;
/* Return value ignored: close is best-effort during teardown. */
94 ioctl(nvdev->fd, DRM_IOCTL_GEM_CLOSE, &req);
/*
 * Create a kernel GEM object for the BO via DRM_NOUVEAU_GEM_NEW.
 * The placement domain mask is built from nvbo->flags:
 *   NOUVEAU_BO_VRAM  -> NOUVEAU_GEM_DOMAIN_VRAM
 *   NOUVEAU_BO_GART  -> NOUVEAU_GEM_DOMAIN_GART
 *   NOUVEAU_BO_TILED -> NOUVEAU_GEM_DOMAIN_TILE (plus _TILE_ZETA for
 *                       NOUVEAU_BO_ZTILE depth buffers)
 * With neither VRAM nor GART requested, both domains are allowed so
 * the kernel may place the object anywhere. On success the kernel's
 * chosen handle/size/domain are cached back into nvbo.
 */
98 nouveau_bo_kalloc(struct nouveau_bo_priv *nvbo)
100 struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
101 struct drm_nouveau_gem_new req;
107 req.size = nvbo->size;
108 req.align = nvbo->align;
/* Translate libdrm placement flags into kernel GEM domain bits. */
112 if (nvbo->flags & NOUVEAU_BO_VRAM)
113 req.domain |= NOUVEAU_GEM_DOMAIN_VRAM;
115 if (nvbo->flags & NOUVEAU_BO_GART)
116 req.domain |= NOUVEAU_GEM_DOMAIN_GART;
118 if (nvbo->flags & NOUVEAU_BO_TILED) {
119 req.domain |= NOUVEAU_GEM_DOMAIN_TILE;
/* Depth/stencil surfaces need the ZETA tiling variant. */
120 if (nvbo->flags & NOUVEAU_BO_ZTILE)
121 req.domain |= NOUVEAU_GEM_DOMAIN_TILE_ZETA;
/* No explicit placement requested: let the kernel pick VRAM or GART. */
125 req.domain |= (NOUVEAU_GEM_DOMAIN_VRAM |
126 NOUVEAU_GEM_DOMAIN_GART);
129 ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_GEM_NEW,
/* Cache kernel-assigned identity back into the private BO. */
133 nvbo->handle = req.handle;
134 nvbo->size = req.size;
135 nvbo->domain = req.domain;
/*
 * CPU-map the BO's kernel GEM object via DRM_NOUVEAU_GEM_MMAP and
 * stash the returned virtual address in nvbo->map.
 */
141 nouveau_bo_kmap(struct nouveau_bo_priv *nvbo)
143 struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
144 struct drm_nouveau_gem_mmap req;
153 req.handle = nvbo->handle;
154 ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_GEM_MMAP,
/* Kernel returns the address as a 64-bit value; narrow to a pointer. */
159 nvbo->map = (void *)(unsigned long)req.vaddr;
/*
 * Allocate and initialise a new buffer object of `size` bytes with
 * the given placement `flags` and `align`ment, returned in *bo.
 *
 * Rejects a NULL dev/bo or a non-NULL *bo. TILED/ZTILE flags are
 * mirrored into base.tiled (bit 0 = tiled, bit 1 = zeta tiling);
 * NOUVEAU_BO_PIN pins the BO immediately, dropping the new reference
 * if pinning fails.
 */
164 nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, int align,
165 int size, struct nouveau_bo **bo)
167 struct nouveau_bo_priv *nvbo;
/* Caller must pass an empty slot; refuse to overwrite *bo. */
170 if (!dev || !bo || *bo)
173 nvbo = calloc(1, sizeof(struct nouveau_bo_priv));
176 nvbo->base.device = dev;
177 nvbo->base.size = size;
/* Public handle is the private struct's address, opaque to callers. */
178 nvbo->base.handle = bo_to_ptr(nvbo);
/*XXX: murder me violently */
186 if (flags & NOUVEAU_BO_TILED) {
187 nvbo->base.tiled = 1;
188 if (flags & NOUVEAU_BO_ZTILE)
189 nvbo->base.tiled |= 2;
/* Pin immediately if requested; failure destroys the fresh BO. */
192 if (flags & NOUVEAU_BO_PIN) {
193 ret = nouveau_bo_pin((void *)nvbo, nvbo->flags);
195 nouveau_bo_ref(NULL, (void *)nvbo);
/*
 * Wrap an existing user pointer `ptr` of `size` bytes as a BO:
 * creates a plain BO (no placement flags) and then marks it as
 * user-backed. NOTE(review): the lines storing `ptr` into the BO are
 * not visible in this chunk — confirm they set sysmem/user state.
 */
205 nouveau_bo_user(struct nouveau_device *dev, void *ptr, int size,
206 struct nouveau_bo **bo)
208 struct nouveau_bo_priv *nvbo;
211 ret = nouveau_bo_new(dev, 0, 0, size, bo);
214 nvbo = nouveau_bo(*bo);
/*
 * Final-destruction callback for a BO (invoked directly or once a
 * protecting fence signals — see nouveau_bo_del): drops the fence
 * reference and frees both the user and kernel backing storage.
 */
222 nouveau_bo_del_cb(void *priv)
224 struct nouveau_bo_priv *nvbo = priv;
226 nouveau_fence_ref(NULL, &nvbo->fence);
227 nouveau_bo_ufree(nvbo);
228 nouveau_bo_kfree(nvbo);
/*
 * Return a global (flink) name for the BO in *handle, suitable for
 * sharing across processes. On first use the kernel object is
 * created if needed and named via DRM_IOCTL_GEM_FLINK; the name is
 * cached in nvbo->global_handle for later calls. A FLINK failure
 * releases the just-created kernel object again.
 */
233 nouveau_bo_handle_get(struct nouveau_bo *bo, uint32_t *handle)
235 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
241 if (!nvbo->global_handle) {
242 struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
243 struct drm_gem_flink req;
/* Flink needs a kernel object; create one on demand. */
245 ret = nouveau_bo_kalloc(nvbo);
249 req.handle = nvbo->handle;
250 ret = ioctl(nvdev->fd, DRM_IOCTL_GEM_FLINK, &req);
/* Undo the on-demand allocation if naming failed. */
252 nouveau_bo_kfree(nvbo);
256 nvbo->global_handle = req.name;
259 *handle = nvbo->global_handle;
/*
 * Open a shared BO from a global flink name `handle` via
 * DRM_IOCTL_GEM_OPEN and return it in *bo. A fresh wrapper BO is
 * created first (size 0; the kernel reports the real size); on ioctl
 * failure that reference is dropped again.
 */
264 nouveau_bo_handle_ref(struct nouveau_device *dev, uint32_t handle,
265 struct nouveau_bo **bo)
267 struct nouveau_device_priv *nvdev = nouveau_device(dev);
268 struct nouveau_bo_priv *nvbo;
269 struct drm_gem_open req;
272 ret = nouveau_bo_new(dev, 0, 0, 0, bo);
275 nvbo = nouveau_bo(*bo);
278 ret = ioctl(nvdev->fd, DRM_IOCTL_GEM_OPEN, &req);
/* Open failed: drop the wrapper we just created. */
280 nouveau_bo_ref(NULL, bo);
/* Adopt the kernel-reported identity of the shared object. */
284 nvbo->size = req.size;
285 nvbo->handle = req.handle;
/*
 * Drop one reference to *bo; only the final reference destroys it.
 * Destruction flushes any pushbuf the BO is pending in, then either
 * defers the actual teardown to nouveau_bo_del_cb via the BO's fence
 * (when work is outstanding) or runs the callback immediately.
 */
290 nouveau_bo_del(struct nouveau_bo **bo)
292 struct nouveau_bo_priv *nvbo;
296 nvbo = nouveau_bo(*bo);
/* Still referenced elsewhere: nothing more to do. */
299 if (--nvbo->refcount)
/* Submit any pending GPU work that references this BO. */
303 nouveau_pushbuf_flush(nvbo->pending_channel, 0);
/* Fence outstanding: destroy only once the GPU is done with the BO. */
306 nouveau_fence_signal_cb(nvbo->fence, nouveau_bo_del_cb, nvbo);
308 nouveau_bo_del_cb(nvbo);
/*
 * Reference management: take a reference on `ref` (when non-NULL) and
 * store it in *pbo. NOTE(review): the path releasing any previous
 * *pbo (and the ref==NULL unref case) is not visible in this chunk.
 */
312 nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pbo)
318 nouveau_bo(ref)->refcount++;
/*
 * Map the BO for CPU access with the given NOUVEAU_BO_RD/WR intent
 * flags, leaving the address in bo->map.
 *
 * Backing is created lazily: a kernel object for VRAM/GART BOs,
 * otherwise plain system memory (which is then exposed directly as
 * the mapping). Before mapping a kernel object, pending pushbuf work
 * that conflicts with the access is flushed, and the appropriate
 * fence is waited on (full fence for writes, write-fence for reads).
 */
328 nouveau_bo_map(struct nouveau_bo *bo, uint32_t flags)
330 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
/* Invalid BO, or already mapped. */
333 if (!nvbo || bo->map)
336 if (!nouveau_bo_allocated(nvbo)) {
/* Explicit VRAM/GART placement wants a kernel object. */
337 if (nvbo->flags & (NOUVEAU_BO_VRAM | NOUVEAU_BO_GART))
338 nouveau_bo_kalloc(nvbo);
/* Fall back to (or default to) system-memory backing. */
340 if (!nouveau_bo_allocated(nvbo)) {
341 ret = nouveau_bo_ualloc(nvbo);
/* Sysmem-backed BO: the buffer itself is the mapping. */
348 bo->map = nvbo->sysmem;
/* Flush pending GPU work that would conflict with this access. */
351 (nvbo->pending->write_domains || flags & NOUVEAU_BO_WR)) {
352 nouveau_pushbuf_flush(nvbo->pending_channel, 0);
355 nouveau_bo_kmap(nvbo);
/* Writers must wait for all GPU access, readers only for GPU writes. */
357 if (flags & NOUVEAU_BO_WR)
358 nouveau_fence_wait(&nvbo->fence);
360 nouveau_fence_wait(&nvbo->wr_fence);
/*
 * Release a CPU mapping made by nouveau_bo_map.
 * NOTE(review): body not visible in this chunk.
 */
369 nouveau_bo_unmap(struct nouveau_bo *bo)
/*
 * Legacy DRM-map query — not supported by this GEM-based backend;
 * only logs an error for any caller that still uses it.
 */
375 nouveau_bo_get_drm_map(struct nouveau_bo *bo)
377 NOUVEAU_ERR("-EINVAL :)\n");
/*
 * Pin the BO at a fixed GPU offset via DRM_NOUVEAU_GEM_PIN so it can
 * be referenced by engines that cannot relocate (e.g. scanout).
 * Ensures a kernel object exists first, translates the BO's placement
 * flags into a pin domain, then publishes the resulting offset and
 * domain through the public nouveau_bo.
 */
382 nouveau_bo_pin(struct nouveau_bo *bo, uint32_t flags)
384 struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
385 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
386 struct drm_nouveau_gem_pin req;
/* Ensure we have a kernel object... */
/* Pinning requires an explicit VRAM or GART placement. */
394 if (!(flags & (NOUVEAU_BO_VRAM | NOUVEAU_BO_GART)))
398 ret = nouveau_bo_kalloc(nvbo);
/* Now force it to stay put :) */
404 req.handle = nvbo->handle;
406 if (nvbo->flags & NOUVEAU_BO_VRAM)
407 req.domain |= NOUVEAU_GEM_DOMAIN_VRAM;
408 if (nvbo->flags & NOUVEAU_BO_GART)
409 req.domain |= NOUVEAU_GEM_DOMAIN_GART;
411 ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_GEM_PIN, &req,
412 sizeof(struct drm_nouveau_gem_pin));
415 nvbo->offset = req.offset;
416 nvbo->domain = req.domain;
/* Fill in public nouveau_bo members */
/* NOTE(review): both branches assign (=) rather than OR (|=), so a BO
 * resident in both domains reports only NOUVEAU_BO_GART — confirm
 * this is intended. */
420 if (nvbo->domain & NOUVEAU_GEM_DOMAIN_VRAM)
421 bo->flags = NOUVEAU_BO_VRAM;
422 if (nvbo->domain & NOUVEAU_GEM_DOMAIN_GART)
423 bo->flags = NOUVEAU_BO_GART;
424 bo->offset = nvbo->offset;
/*
 * Undo nouveau_bo_pin: ask the kernel to release the fixed placement
 * via DRM_NOUVEAU_GEM_UNPIN (return value deliberately ignored) and
 * clear the public pinned/offset/flags state.
 */
430 nouveau_bo_unpin(struct nouveau_bo *bo)
432 struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
433 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
434 struct drm_nouveau_gem_unpin req;
439 req.handle = nvbo->handle;
440 drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_UNPIN, &req, sizeof(req));
442 nvbo->pinned = bo->offset = bo->flags = 0;
/*
 * Ensure `bo` is tracked by `chan`'s pushbuf and return its
 * relocation entry. An existing pending entry is reused; otherwise a
 * new drm_nouveau_gem_pushbuf_bo is claimed from the pushbuf's table.
 *
 * A sysmem-backed BO is first migrated into a kernel object: map for
 * write, memcpy the old contents in, unmap, and free the user copy.
 * A BO reference is taken and stashed in pbbo->user_priv; it is the
 * pushbuf's to release once the submission completes.
 */
445 struct drm_nouveau_gem_pushbuf_bo *
446 nouveau_bo_emit_buffer(struct nouveau_channel *chan, struct nouveau_bo *bo)
448 struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(chan->pushbuf);
449 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
450 struct drm_nouveau_gem_pushbuf_bo *pbbo;
451 struct nouveau_bo *ref = NULL;
/* Already queued on a pushbuf: reuse the existing entry. */
455 return nvbo->pending;
/* Migrate sysmem contents into a fresh kernel object. */
458 nouveau_bo_kalloc(nvbo);
/* Hide sysmem from the map path so it maps the kernel object. */
460 void *sysmem_tmp = nvbo->sysmem;
463 ret = nouveau_bo_map(bo, NOUVEAU_BO_WR);
466 nvbo->sysmem = sysmem_tmp;
468 memcpy(bo->map, nvbo->sysmem, nvbo->size);
469 nouveau_bo_unmap(bo);
470 nouveau_bo_ufree(nvbo);
/* Pushbuf relocation table is full; cannot add another BO. */
474 if (nvpb->nr_buffers >= NOUVEAU_PUSHBUF_MAX_BUFFERS)
476 pbbo = nvpb->buffers + nvpb->nr_buffers++;
477 nvbo->pending = pbbo;
478 nvbo->pending_channel = chan;
/* Hold a reference for the lifetime of the pushbuf entry. */
480 nouveau_bo_ref(bo, &ref);
481 pbbo->user_priv = (uint64_t)(unsigned long)ref;
482 pbbo->handle = nvbo->handle;
483 pbbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART;
485 pbbo->write_domains = 0;
/* Presumed placement lets the kernel skip relocs when unchanged. */
486 pbbo->presumed_domain = nvbo->domain;
487 pbbo->presumed_offset = nvbo->offset;
488 pbbo->presumed_ok = 1;