2 * Copyright 2007 Nouveau Project
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
19 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
20 * SOFTWARE.
27 #include "nouveau_drmif.h"
29 #define PB_BUFMGR_DWORDS (4096 / 2)
30 #define PB_MIN_USER_DWORDS 2048
/* NOTE(review): this file looks like a sampled extract of libdrm's nouveau
 * push-buffer code; the leading number on each line is the original file's
 * line number, and the gaps show that the return type, braces, any free of
 * the previous buffer, and error handling are not visible here.
 *
 * Allocate a fresh CPU-side push buffer with room for at least `min`
 * dwords (never smaller than PB_MIN_USER_DWORDS), reset the public
 * cursor/remaining state, and attach a new fence for the commands that
 * will be written into it.
 */
33 nouveau_pushbuf_space(struct nouveau_channel *chan, unsigned min)
35 struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
36 struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
/* Clamp the requested size up to the minimum user buffer size. */
41 nvpb->size = min < PB_MIN_USER_DWORDS ? PB_MIN_USER_DWORDS : min;
/* NOTE(review): malloc result is not checked on any visible line — the
 * missing lines may handle it; confirm against the full file. */
42 nvpb->pushbuf = malloc(sizeof(uint32_t) * nvpb->size);
44 nvpb->base.channel = chan;
45 nvpb->base.remaining = nvpb->size;
46 nvpb->base.cur = nvpb->pushbuf;
48 /* Create a new fence object for this "frame" */
/* Drop the reference to the old fence (ref NULL) before creating a new one. */
49 nouveau_fence_ref(NULL, &nvpb->base.fence);
50 nouveau_fence_new(chan, &nvpb->base.fence);
/*
 * One-time channel setup: allocate the first push buffer plus the
 * fixed-capacity buffer/relocation tracking arrays, and publish the
 * push buffer on the channel.
 */
56 nouveau_pushbuf_init(struct nouveau_channel *chan)
58 struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
/* min = 0 still yields PB_MIN_USER_DWORDS worth of space. */
60 nouveau_pushbuf_space(chan, 0);
/* NOTE(review): neither calloc result is checked on the visible lines —
 * verify against the full file whether allocation failure is handled. */
62 nvchan->pb.buffers = calloc(NOUVEAU_PUSHBUF_MAX_BUFFERS,
63 sizeof(struct nouveau_pushbuf_bo));
64 nvchan->pb.relocs = calloc(NOUVEAU_PUSHBUF_MAX_RELOCS,
65 sizeof(struct nouveau_pushbuf_reloc));
/* Expose the private pushbuf through the public channel struct. */
67 chan->pushbuf = &nvchan->pb.base;
/*
 * Compute the 32-bit value to patch into the command stream for one
 * relocation, based on the buffer's current GPU offset.
 * NOTE(review): the declaration of `push`, the else-if chaining, the
 * body of the OR branch, and the return statement are all in lines not
 * visible here — this view shows only the branch skeletons.
 */
72 nouveau_pushbuf_calc_reloc(struct nouveau_bo *bo,
73 struct nouveau_pushbuf_reloc *r)
75 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
/* Low 32 bits of (offset + data). */
78 if (r->flags & NOUVEAU_BO_LOW) {
79 push = nvbo->offset + r->data;
/* High 32 bits of (offset + data). */
81 if (r->flags & NOUVEAU_BO_HIGH) {
82 push = (nvbo->offset + r->data) >> 32;
/* OR in a domain-dependent value (vor/tor) chosen by where the BO resides. */
87 if (r->flags & NOUVEAU_BO_OR) {
88 if (nvbo->domain & NOUVEAU_GEM_DOMAIN_VRAM)
97 /* This would be our TTM "superioctl" */
/*
 * Submit the accumulated push buffer to the kernel: validate every
 * referenced BO, patch relocations, fence, kick off via the
 * DRM_NOUVEAU_GEM_PUSHBUF ioctl, then allocate space for the next frame.
 * NOTE(review): many lines (declarations of i/ret/push, early returns,
 * error handling, the ioctl request's remaining fields) are not visible
 * in this extract.
 */
99 nouveau_pushbuf_flush(struct nouveau_channel *chan, unsigned min)
101 struct nouveau_device_priv *nvdev = nouveau_device(chan->device);
102 struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
103 struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
/* Nothing written since the last flush: nothing to submit. */
106 if (nvpb->base.remaining == nvpb->size)
/* Shrink `size` to the number of dwords actually emitted. */
108 nvpb->size -= nvpb->base.remaining;
110 nouveau_fence_flush(chan);
112 /* Validate buffers + apply relocations */
113 nvchan->user_charge = 0;
114 for (i = 0; i < nvpb->nr_relocs; i++) {
115 struct nouveau_pushbuf_reloc *r = &nvpb->relocs[i];
116 struct nouveau_pushbuf_bo *pbbo = r->pbbo;
117 struct nouveau_bo *bo = pbbo->bo;
119 /* Validated, mem matches presumed, no relocation necessary */
/* `handled` looks like a 2-bit state: bit 0 = validated, bit 1 = placement
 * unchanged — TODO confirm against the struct definition. */
120 if (pbbo->handled & 2) {
121 if (!(pbbo->handled & 1))
126 /* Not yet validated, do it now */
127 if (!(pbbo->handled & 1)) {
/* Remember pre-validate placement so we can skip the reloc if unmoved. */
128 uint64_t offset = nouveau_bo(bo)->offset;
129 unsigned domain = nouveau_bo(bo)->domain;
131 ret = nouveau_bo_validate(chan, bo, pbbo->flags);
/* BO didn't move: the presumed offset in the stream is still correct. */
138 if (offset == nouveau_bo(bo)->offset &&
139 domain == nouveau_bo(bo)->domain) {
145 /* Apply the relocation */
146 *r->ptr = nouveau_pushbuf_calc_reloc(bo, r);
150 /* Dereference all buffers on validate list */
151 for (i = 0; i < nvpb->nr_buffers; i++) {
152 struct nouveau_pushbuf_bo *pbbo = &nvpb->buffers[i];
/* Clear the back-pointer before dropping our reference. */
154 nouveau_bo(pbbo->bo)->pending = NULL;
155 nouveau_bo_del(&pbbo->bo);
157 nvpb->nr_buffers = 0;
159 /* Fence + kickoff */
160 nouveau_fence_emit(nvpb->base.fence);
163 struct drm_nouveau_gem_pushbuf req;
165 req.channel = chan->id;
166 req.size = nvpb->size;
/* NOTE(review): casting the pointer through (uint32_t) truncates it on
 * 64-bit hosts — confirm the kernel ABI's field type; a u64 field should
 * take (uint64_t)(unsigned long) instead. */
167 req.ptr = (uint32_t)(unsigned long)nvpb->pushbuf;
168 ret = drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_PUSHBUF,
173 /* Allocate space for next push buffer */
174 ret = nouveau_pushbuf_space(chan, min);
/*
 * Put `bo` on the push buffer's validate list (at most once per flush)
 * and return its tracking entry; if the BO is already pending, return
 * the existing entry.
 * NOTE(review): the `if` that guards the early `return nvbo->pending`,
 * the failure return when the list is full, and the final return of
 * `pbbo` are on lines not visible in this extract.
 */
180 static struct nouveau_pushbuf_bo *
181 nouveau_pushbuf_emit_buffer(struct nouveau_channel *chan, struct nouveau_bo *bo)
183 struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(chan->pushbuf);
184 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
185 struct nouveau_pushbuf_bo *pbbo;
/* Already on the list for this flush: reuse the existing entry. */
188 return nvbo->pending;
/* Fixed-capacity list — presumably fails (returns NULL) when full. */
190 if (nvpb->nr_buffers >= NOUVEAU_PUSHBUF_MAX_BUFFERS)
192 pbbo = nvpb->buffers + nvpb->nr_buffers++;
193 nvbo->pending = pbbo;
/* Take a reference so the BO survives until the flush dereferences it. */
195 nouveau_bo_ref(bo->device, bo->handle, &pbbo->bo);
196 pbbo->channel = chan;
/* Start permissive (either placement); relocs narrow this mask later. */
197 pbbo->flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_GART;
/*
 * Record a relocation at `ptr` against `bo` and write a provisional
 * value into the command stream immediately.
 * NOTE(review): the reloc field assignments (r->ptr, r->pbbo, r->data,
 * r->flags, r->vor, r->tor — original lines ~221-227), the failure
 * returns, and the `else` before the final write are not visible here.
 */
203 nouveau_pushbuf_emit_reloc(struct nouveau_channel *chan, void *ptr,
204 struct nouveau_bo *bo, uint32_t data, uint32_t flags,
205 uint32_t vor, uint32_t tor)
207 struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(chan->pushbuf);
208 struct nouveau_pushbuf_bo *pbbo;
209 struct nouveau_pushbuf_reloc *r;
/* Fixed-capacity reloc list — presumably fails when full. */
211 if (nvpb->nr_relocs >= NOUVEAU_PUSHBUF_MAX_RELOCS)
214 pbbo = nouveau_pushbuf_emit_buffer(chan, bo);
/* Accumulate access bits (RD/WR) across all relocs for this BO, then
 * intersect the placement domains with what this reloc allows — the
 * OR of NOUVEAU_BO_RDWR in the mask keeps the access bits intact. */
217 pbbo->flags |= (flags & NOUVEAU_BO_RDWR);
218 pbbo->flags &= (flags | NOUVEAU_BO_RDWR);
220 r = nvpb->relocs + nvpb->nr_relocs++;
/* Dummy relocs get a zero placeholder; otherwise write a provisional
 * value now (fixed up at flush time if the BO moves). */
228 if (flags & NOUVEAU_BO_DUMMY)
229 *(uint32_t *)ptr = 0;
231 *(uint32_t *)ptr = nouveau_pushbuf_calc_reloc(bo, r);