/* savage_state.c -- State and drawing support for Savage
 *
 * Copyright 2004  Felix Kuehling
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include "savage_drm.h"
#include "savage_drv.h"

void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
                               drm_clip_rect_t *pbox)
{
        uint32_t scstart = dev_priv->state.s3d.new_scstart;
        uint32_t scend   = dev_priv->state.s3d.new_scend;
        scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) |
                ((uint32_t)pbox->x1 & 0x000007ff) |
                (((uint32_t)pbox->y1 << 16) & 0x07ff0000);
        scend   = (scend   & ~SAVAGE_SCISSOR_MASK_S3D) |
                (((uint32_t)pbox->x2-1) & 0x000007ff) |
                ((((uint32_t)pbox->y2-1) << 16) & 0x07ff0000);
        if (scstart != dev_priv->state.s3d.scstart ||
            scend   != dev_priv->state.s3d.scend) {
                DMA_LOCALS;
                BEGIN_DMA(4);
                DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D);
                DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
                DMA_WRITE(scstart);
                DMA_WRITE(scend);
                dev_priv->state.s3d.scstart = scstart;
                dev_priv->state.s3d.scend   = scend;
                dev_priv->waiting = 1;
                DMA_COMMIT();
        }
}

void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
                              drm_clip_rect_t *pbox)
{
        uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0;
        uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1;
        drawctrl0 = (drawctrl0 & ~SAVAGE_SCISSOR_MASK_S4) |
                ((uint32_t)pbox->x1 & 0x000007ff) |
                (((uint32_t)pbox->y1 << 12) & 0x00fff000);
        drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) |
                (((uint32_t)pbox->x2-1) & 0x000007ff) |
                ((((uint32_t)pbox->y2-1) << 12) & 0x00fff000);
        if (drawctrl0 != dev_priv->state.s4.drawctrl0 ||
            drawctrl1 != dev_priv->state.s4.drawctrl1) {
                DMA_LOCALS;
                BEGIN_DMA(4);
                DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D);
                DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2);
                DMA_WRITE(drawctrl0);
                DMA_WRITE(drawctrl1);
                dev_priv->state.s4.drawctrl0 = drawctrl0;
                dev_priv->state.s4.drawctrl1 = drawctrl1;
                dev_priv->waiting = 1;
                DMA_COMMIT();
        }
}

static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,
                                 uint32_t addr)
{
        if ((addr & 6) != 2) { /* reserved bits */
                DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr);
                return DRM_ERR(EINVAL);
        }
        if (!(addr & 1)) { /* local */
                addr &= ~7;
                if (addr <  dev_priv->texture_offset ||
                    addr >= dev_priv->texture_offset+dev_priv->texture_size) {
                        DRM_ERROR("bad texAddr%d %08x (local addr out of range)\n",
                                  unit, addr);
                        return DRM_ERR(EINVAL);
                }
        } else { /* AGP */
                if (!dev_priv->agp_textures) {
                        DRM_ERROR("bad texAddr%d %08x (AGP not available)\n",
                                  unit, addr);
                        return DRM_ERR(EINVAL);
                }
                addr &= ~7;
                if (addr < dev_priv->agp_textures->offset ||
                    addr >= (dev_priv->agp_textures->offset +
                             dev_priv->agp_textures->size)) {
                        DRM_ERROR("bad texAddr%d %08x (AGP addr out of range)\n",
                                  unit, addr);
                        return DRM_ERR(EINVAL);
                }
        }
        return 0;
}

#define SAVE_STATE(reg,where)                   \
        if(start <= reg && start+count > reg)   \
                DRM_GET_USER_UNCHECKED(dev_priv->state.where, &regs[reg-start])
#define SAVE_STATE_MASK(reg,where,mask) do {                    \
        if(start <= reg && start+count > reg) {                 \
                uint32_t tmp;                                   \
                DRM_GET_USER_UNCHECKED(tmp, &regs[reg-start]);  \
                dev_priv->state.where = (tmp & (mask)) |        \
                        (dev_priv->state.where & ~(mask));      \
        }                                                       \
} while (0)
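/* These helpers copy register values supplied by the client into
 * dev_priv->state when the register lies in the verified range
 * [start, start+count); the buffer was access-checked by the caller,
 * hence the _UNCHECKED access.  SAVE_STATE_MASK keeps the bits outside
 * 'mask' from the previously saved value, e.g. the scissor bits are
 * masked off below so that kernel-emitted clip rectangles stay in
 * effect. */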
static int savage_verify_state_s3d(drm_savage_private_t *dev_priv,
                                   unsigned int start, unsigned int count,
                                   const uint32_t __user *regs)
{
        if (start < SAVAGE_TEXPALADDR_S3D ||
            start+count-1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
                DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
                          start, start+count-1);
                return DRM_ERR(EINVAL);
        }

        SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart,
                        ~SAVAGE_SCISSOR_MASK_S3D);
        SAVE_STATE_MASK(SAVAGE_SCEND_S3D, s3d.new_scend,
                        ~SAVAGE_SCISSOR_MASK_S3D);

        /* if any texture regs were changed ... */
        if (start <= SAVAGE_TEXCTRL_S3D &&
            start+count > SAVAGE_TEXPALADDR_S3D) {
                /* ... check texture state */
                SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl);
                SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
                if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK)
                        return savage_verify_texaddr(
                                dev_priv, 0, dev_priv->state.s3d.texaddr);
        }

        return 0;
}

static int savage_verify_state_s4(drm_savage_private_t *dev_priv,
                                  unsigned int start, unsigned int count,
                                  const uint32_t __user *regs)
{
        int ret = 0;

        if (start < SAVAGE_DRAWLOCALCTRL_S4 ||
            start+count-1 > SAVAGE_TEXBLENDCOLOR_S4) {
                DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
                          start, start+count-1);
                return DRM_ERR(EINVAL);
        }

        SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0,
                        ~SAVAGE_SCISSOR_MASK_S4);
        SAVE_STATE_MASK(SAVAGE_DRAWCTRL1_S4, s4.new_drawctrl1,
                        ~SAVAGE_SCISSOR_MASK_S4);

        /* if any texture regs were changed ... */
        if (start <= SAVAGE_TEXDESCR_S4 &&
            start+count > SAVAGE_TEXPALADDR_S4) {
                /* ... check texture state */
                SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr);
                SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0);
                SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1);
                if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK)
                        ret |= savage_verify_texaddr(
                                dev_priv, 0, dev_priv->state.s4.texaddr0);
                if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK)
                        ret |= savage_verify_texaddr(
                                dev_priv, 1, dev_priv->state.s4.texaddr1);
        }

        return ret;
}
#undef SAVE_STATE
#undef SAVE_STATE_MASK

static int savage_dispatch_state(drm_savage_private_t *dev_priv,
                                 const drm_savage_cmd_header_t *cmd_header,
                                 const uint32_t __user *regs)
{
        unsigned int count = cmd_header->state.count;
        unsigned int start = cmd_header->state.start;
        unsigned int count2 = 0;
        unsigned int bci_size;
        int ret;
        DMA_LOCALS;

        if (!count)
                return 0;

        if (DRM_VERIFYAREA_READ(regs, count*4))
                return DRM_ERR(EFAULT);

        if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
                ret = savage_verify_state_s3d(dev_priv, start, count, regs);
                if (ret != 0)
                        return ret;
                /* scissor regs are emitted in savage_dispatch_draw */
                if (start < SAVAGE_SCSTART_S3D) {
                        if (start+count > SAVAGE_SCEND_S3D+1)
                                count2 = count - (SAVAGE_SCEND_S3D+1 - start);
                        if (start+count > SAVAGE_SCSTART_S3D)
                                count = SAVAGE_SCSTART_S3D - start;
                } else if (start <= SAVAGE_SCEND_S3D) {
                        if (start+count > SAVAGE_SCEND_S3D+1) {
                                count -= SAVAGE_SCEND_S3D+1 - start;
                                start = SAVAGE_SCEND_S3D+1;
                        } else
                                return 0;
                }
        } else {
                ret = savage_verify_state_s4(dev_priv, start, count, regs);
                if (ret != 0)
                        return ret;
                /* scissor regs are emitted in savage_dispatch_draw */
                if (start < SAVAGE_DRAWCTRL0_S4) {
                        if (start+count > SAVAGE_DRAWCTRL1_S4+1)
                                count2 = count - (SAVAGE_DRAWCTRL1_S4+1 - start);
                        if (start+count > SAVAGE_DRAWCTRL0_S4)
                                count = SAVAGE_DRAWCTRL0_S4 - start;
                } else if (start <= SAVAGE_DRAWCTRL1_S4) {
                        if (start+count > SAVAGE_DRAWCTRL1_S4+1) {
                                count -= SAVAGE_DRAWCTRL1_S4+1 - start;
                                start = SAVAGE_DRAWCTRL1_S4+1;
                        } else
                                return 0;
                }
        }

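        /* Each run of up to 255 registers costs one SET_REGISTERS command
         * word plus the register values themselves, so a range of 'count'
         * registers needs count + (count+254)/255 DMA words; the same goes
         * for the second range (count2) left over after the scissor
         * registers were cut out above. */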
        bci_size = count + (count+254)/255 + count2 + (count2+254)/255;

        if (cmd_header->state.global) {
                BEGIN_DMA(bci_size+1);
                DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
                dev_priv->waiting = 1;
        } else {
                BEGIN_DMA(bci_size);
        }

        do {
                while (count > 0) {
                        unsigned int n = count < 255 ? count : 255;
                        DMA_SET_REGISTERS(start, n);
                        DMA_COPY_FROM_USER(regs, n);
                        count -= n;
                        start += n;
                        regs += n;
                }
                start += 2;
                regs += 2;
                count = count2;
                count2 = 0;
        } while (count);

        DMA_COMMIT();

        return 0;
}

static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
                                    const drm_savage_cmd_header_t *cmd_header,
                                    const drm_buf_t *dmabuf)
{
        unsigned char reorder = 0;
        unsigned int prim = cmd_header->prim.prim;
        unsigned int skip = cmd_header->prim.skip;
        unsigned int n = cmd_header->prim.count;
        unsigned int start = cmd_header->prim.start;
        unsigned int i;
        BCI_LOCALS;

        if (!dmabuf) {
                DRM_ERROR("called without dma buffers!\n");
                return DRM_ERR(EINVAL);
        }

        if (!n)
                return 0;

        switch (prim) {
        case SAVAGE_PRIM_TRILIST_201:
                reorder = 1;
                prim = SAVAGE_PRIM_TRILIST;
                /* fall through */
        case SAVAGE_PRIM_TRILIST:
                if (n % 3 != 0) {
                        DRM_ERROR("wrong number of vertices %u in TRILIST\n",
                                  n);
                        return DRM_ERR(EINVAL);
                }
                break;
        case SAVAGE_PRIM_TRISTRIP:
        case SAVAGE_PRIM_TRIFAN:
                if (n < 3) {
                        DRM_ERROR("wrong number of vertices %u in TRIFAN/STRIP\n",
                                  n);
                        return DRM_ERR(EINVAL);
                }
                break;
        default:
                DRM_ERROR("invalid primitive type %u\n", prim);
                return DRM_ERR(EINVAL);
        }

        if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
                if (skip != 0) {
                        DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
                                  skip);
                        return DRM_ERR(EINVAL);
                }
        } else {
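                /* The low 8 bits of 'skip' flag vertex components omitted
                 * from the vertex format; each set bit shrinks the full
                 * 10-dword Savage4 vertex by one dword.  Vertices in a DMA
                 * buffer are 8 dwords (32 bytes), hence the size != 8 check
                 * below. */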
                unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
                        (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
                        (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
                if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
                        DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
                                  skip);
                        return DRM_ERR(EINVAL);
                }
                if (reorder) {
                        DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
                        return DRM_ERR(EINVAL);
                }
        }

        if (start + n > dmabuf->total/32) {
                DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
                          start, start + n - 1, dmabuf->total/32);
                return DRM_ERR(EINVAL);
        }

        /* Vertex DMA doesn't work with command DMA at the same time,
         * so we use BCI_... to submit commands here. Flush buffered
         * faked DMA first. */
        DMA_FLUSH();

        if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
                BEGIN_BCI(2);
                BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
                BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
                dev_priv->state.common.vbaddr = dmabuf->bus_address;
        }
        if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
                /* Workaround for what looks like a hardware bug. If a
                 * WAIT_3D_IDLE was emitted some time before the
                 * indexed drawing command then the engine will lock
                 * up. There are two known workarounds:
                 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
                BEGIN_BCI(63);
                for (i = 0; i < 63; ++i)
                        BCI_WRITE(BCI_CMD_WAIT);
                dev_priv->waiting = 0;
        }

        prim <<= 25;
        while (n != 0) {
                /* Can emit up to 255 indices (85 triangles) at once. */
                unsigned int count = n > 255 ? 255 : n;
                if (reorder) {
                        /* Need to reorder indices for correct flat
                         * shading while preserving the clock sense
                         * for correct culling. Only on Savage3D. */
                        int reorder[3] = {-1, -1, -1};
                        reorder[start%3] = 2;
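                        /* Net effect: each triangle (v, v+1, v+2) is sent
                         * as (v+2, v, v+1), a cyclic rotation that keeps
                         * the winding (clock sense) intact while changing
                         * the vertex used for flat shading. */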

                        BEGIN_BCI((count+1+1)/2);
                        BCI_DRAW_INDICES_S3D(count, prim, start+2);

                        for (i = start+1; i+1 < start+count; i += 2)
                                BCI_WRITE((i + reorder[i % 3]) |
                                          ((i+1 + reorder[(i+1) % 3]) << 16));
                        if (i < start+count)
                                BCI_WRITE(i + reorder[i%3]);
                } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
                        BEGIN_BCI((count+1+1)/2);
                        BCI_DRAW_INDICES_S3D(count, prim, start);

                        for (i = start+1; i+1 < start+count; i += 2)
                                BCI_WRITE(i | ((i+1) << 16));
                        if (i < start+count)
                                BCI_WRITE(i);
                } else {
                        BEGIN_BCI((count+2+1)/2);
                        BCI_DRAW_INDICES_S4(count, prim, skip);

                        for (i = start; i+1 < start+count; i += 2)
                                BCI_WRITE(i | ((i+1) << 16));
                        if (i < start+count)
                                BCI_WRITE(i);
                }

                start += count;
                n -= count;

                prim |= BCI_CMD_DRAW_CONT;
        }

        return 0;
}

static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
                                   const drm_savage_cmd_header_t *cmd_header,
                                   const uint32_t __user *vtxbuf,
                                   unsigned int vb_size,
                                   unsigned int vb_stride)
{
        unsigned char reorder = 0;
        unsigned int prim = cmd_header->prim.prim;
        unsigned int skip = cmd_header->prim.skip;
        unsigned int n = cmd_header->prim.count;
        unsigned int start = cmd_header->prim.start;
        unsigned int vtx_size;
        unsigned int i;
        DMA_LOCALS;

        if (!n)
                return 0;

        switch (prim) {
        case SAVAGE_PRIM_TRILIST_201:
                reorder = 1;
                prim = SAVAGE_PRIM_TRILIST;
                /* fall through */
        case SAVAGE_PRIM_TRILIST:
                if (n % 3 != 0) {
                        DRM_ERROR("wrong number of vertices %u in TRILIST\n",
                                  n);
                        return DRM_ERR(EINVAL);
                }
                break;
        case SAVAGE_PRIM_TRISTRIP:
        case SAVAGE_PRIM_TRIFAN:
                if (n < 3) {
                        DRM_ERROR("wrong number of vertices %u in TRIFAN/STRIP\n",
                                  n);
                        return DRM_ERR(EINVAL);
                }
                break;
        default:
                DRM_ERROR("invalid primitive type %u\n", prim);
                return DRM_ERR(EINVAL);
        }

        if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
                if (skip > SAVAGE_SKIP_ALL_S3D) {
                        DRM_ERROR("invalid skip flags 0x%04x\n", skip);
                        return DRM_ERR(EINVAL);
                }
                vtx_size = 8; /* full vertex */
        } else {
                if (skip > SAVAGE_SKIP_ALL_S4) {
                        DRM_ERROR("invalid skip flags 0x%04x\n", skip);
                        return DRM_ERR(EINVAL);
                }
                vtx_size = 10; /* full vertex */
        }

        vtx_size -= (skip & 1) + (skip >> 1 & 1) +
                (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
                (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
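        /* vtx_size is now the number of dwords actually present per vertex
         * in the client vertex buffer (full vertex minus one dword per skip
         * bit); it may not exceed the buffer stride checked below. */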

        if (vtx_size > vb_stride) {
                DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
                          vtx_size, vb_stride);
                return DRM_ERR(EINVAL);
        }

        if (start + n > vb_size / (vb_stride*4)) {
                DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
                          start, start + n - 1, vb_size / (vb_stride*4));
                return DRM_ERR(EINVAL);
        }

        prim <<= 25;
        while (n != 0) {
                /* Can emit up to 255 vertices (85 triangles) at once. */
                unsigned int count = n > 255 ? 255 : n;
                if (reorder) {
                        /* Need to reorder vertices for correct flat
                         * shading while preserving the clock sense
                         * for correct culling. Only on Savage3D. */
                        int reorder[3] = {-1, -1, -1};
                        reorder[start%3] = 2;

                        BEGIN_DMA(count*vtx_size+1);
                        DMA_DRAW_PRIMITIVE(count, prim, skip);

                        for (i = start; i < start+count; ++i) {
                                unsigned int j = i + reorder[i % 3];
                                DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j],
                                                   vtx_size);
                        }

                        DMA_COMMIT();
                } else {
                        BEGIN_DMA(count*vtx_size+1);
                        DMA_DRAW_PRIMITIVE(count, prim, skip);

                        if (vb_stride == vtx_size) {
                                DMA_COPY_FROM_USER(&vtxbuf[vb_stride*start],
                                                   vtx_size*count);
                        } else {
                                for (i = start; i < start+count; ++i) {
                                        DMA_COPY_FROM_USER(
                                                &vtxbuf[vb_stride*i],
                                                vtx_size);
                                }
                        }

                        DMA_COMMIT();
                }

                start += count;
                n -= count;

                prim |= BCI_CMD_DRAW_CONT;
        }

        return 0;
}

static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
                                   const drm_savage_cmd_header_t *cmd_header,
                                   const uint16_t __user *usr_idx,
                                   const drm_buf_t *dmabuf)
{
        unsigned char reorder = 0;
        unsigned int prim = cmd_header->idx.prim;
        unsigned int skip = cmd_header->idx.skip;
        unsigned int n = cmd_header->idx.count;
        unsigned int i;
        BCI_LOCALS;

        if (!dmabuf) {
                DRM_ERROR("called without dma buffers!\n");
                return DRM_ERR(EINVAL);
        }

        if (!n)
                return 0;

        switch (prim) {
        case SAVAGE_PRIM_TRILIST_201:
                reorder = 1;
                prim = SAVAGE_PRIM_TRILIST;
                /* fall through */
        case SAVAGE_PRIM_TRILIST:
                if (n % 3 != 0) {
                        DRM_ERROR("wrong number of indices %u in TRILIST\n",
                                  n);
                        return DRM_ERR(EINVAL);
                }
                break;
        case SAVAGE_PRIM_TRISTRIP:
        case SAVAGE_PRIM_TRIFAN:
                if (n < 3) {
                        DRM_ERROR("wrong number of indices %u in TRIFAN/STRIP\n",
                                  n);
                        return DRM_ERR(EINVAL);
                }
                break;
        default:
                DRM_ERROR("invalid primitive type %u\n", prim);
                return DRM_ERR(EINVAL);
        }

        if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
                if (skip != 0) {
                        DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
                                  skip);
                        return DRM_ERR(EINVAL);
                }
        } else {
                unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
                        (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
                        (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
                if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
                        DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
                                  skip);
                        return DRM_ERR(EINVAL);
                }
                if (reorder) {
                        DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
                        return DRM_ERR(EINVAL);
                }
        }

        /* Vertex DMA doesn't work with command DMA at the same time,
         * so we use BCI_... to submit commands here. Flush buffered
         * faked DMA first. */
        DMA_FLUSH();

        if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
                BEGIN_BCI(2);
                BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
                BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
                dev_priv->state.common.vbaddr = dmabuf->bus_address;
        }
        if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
                /* Workaround for what looks like a hardware bug. If a
                 * WAIT_3D_IDLE was emitted some time before the
                 * indexed drawing command then the engine will lock
                 * up. There are two known workarounds:
                 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
                BEGIN_BCI(63);
                for (i = 0; i < 63; ++i)
                        BCI_WRITE(BCI_CMD_WAIT);
                dev_priv->waiting = 0;
        }

        prim <<= 25;
        while (n != 0) {
                /* Can emit up to 255 indices (85 triangles) at once. */
                unsigned int count = n > 255 ? 255 : n;
                /* Is it ok to allocate 510 bytes on the stack in an ioctl? */
                uint16_t idx[255];

                /* Copy and check indices */
                DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count*2);
                for (i = 0; i < count; ++i) {
                        if (idx[i] > dmabuf->total/32) {
                                DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
                                          i, idx[i], dmabuf->total/32);
                                return DRM_ERR(EINVAL);
                        }
                }

                if (reorder) {
                        /* Need to reorder indices for correct flat
                         * shading while preserving the clock sense
                         * for correct culling. Only on Savage3D. */
                        int reorder[3] = {2, -1, -1};
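                        /* As in the DMA path above: each triangle
                         * (idx[3k], idx[3k+1], idx[3k+2]) is sent in the
                         * rotated order (idx[3k+2], idx[3k], idx[3k+1]). */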

                        BEGIN_BCI((count+1+1)/2);
                        BCI_DRAW_INDICES_S3D(count, prim, idx[2]);

                        for (i = 1; i+1 < count; i += 2)
                                BCI_WRITE(idx[i + reorder[i % 3]] |
                                          (idx[i+1 + reorder[(i+1) % 3]] << 16));
                        if (i < count)
                                BCI_WRITE(idx[i + reorder[i%3]]);
                } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
                        BEGIN_BCI((count+1+1)/2);
                        BCI_DRAW_INDICES_S3D(count, prim, idx[0]);

                        for (i = 1; i+1 < count; i += 2)
                                BCI_WRITE(idx[i] | (idx[i+1] << 16));
                        if (i < count)
                                BCI_WRITE(idx[i]);
                } else {
                        BEGIN_BCI((count+2+1)/2);
                        BCI_DRAW_INDICES_S4(count, prim, skip);

                        for (i = 0; i+1 < count; i += 2)
                                BCI_WRITE(idx[i] | (idx[i+1] << 16));
                        if (i < count)
                                BCI_WRITE(idx[i]);
                }

                usr_idx += count;
                n -= count;

                prim |= BCI_CMD_DRAW_CONT;
        }

        return 0;
}

static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
                                  const drm_savage_cmd_header_t *cmd_header,
                                  const uint16_t __user *usr_idx,
                                  const uint32_t __user *vtxbuf,
                                  unsigned int vb_size,
                                  unsigned int vb_stride)
{
        unsigned char reorder = 0;
        unsigned int prim = cmd_header->idx.prim;
        unsigned int skip = cmd_header->idx.skip;
        unsigned int n = cmd_header->idx.count;
        unsigned int vtx_size;
        unsigned int i;
        DMA_LOCALS;

        if (!n)
                return 0;

        switch (prim) {
        case SAVAGE_PRIM_TRILIST_201:
                reorder = 1;
                prim = SAVAGE_PRIM_TRILIST;
                /* fall through */
        case SAVAGE_PRIM_TRILIST:
                if (n % 3 != 0) {
                        DRM_ERROR("wrong number of indices %u in TRILIST\n",
                                  n);
                        return DRM_ERR(EINVAL);
                }
                break;
        case SAVAGE_PRIM_TRISTRIP:
        case SAVAGE_PRIM_TRIFAN:
                if (n < 3) {
                        DRM_ERROR("wrong number of indices %u in TRIFAN/STRIP\n",
                                  n);
                        return DRM_ERR(EINVAL);
                }
                break;
        default:
                DRM_ERROR("invalid primitive type %u\n", prim);
                return DRM_ERR(EINVAL);
        }

        if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
                if (skip > SAVAGE_SKIP_ALL_S3D) {
                        DRM_ERROR("invalid skip flags 0x%04x\n", skip);
                        return DRM_ERR(EINVAL);
                }
                vtx_size = 8; /* full vertex */
        } else {
                if (skip > SAVAGE_SKIP_ALL_S4) {
                        DRM_ERROR("invalid skip flags 0x%04x\n", skip);
                        return DRM_ERR(EINVAL);
                }
                vtx_size = 10; /* full vertex */
        }

        vtx_size -= (skip & 1) + (skip >> 1 & 1) +
                (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
                (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);

        if (vtx_size > vb_stride) {
                DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
                          vtx_size, vb_stride);
                return DRM_ERR(EINVAL);
        }

        prim <<= 25;
        while (n != 0) {
                /* Can emit up to 255 vertices (85 triangles) at once. */
                unsigned int count = n > 255 ? 255 : n;
                /* Is it ok to allocate 510 bytes on the stack in an ioctl? */
                uint16_t idx[255];

                /* Copy and check indices */
                DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count*2);
                for (i = 0; i < count; ++i) {
                        if (idx[i] > vb_size / (vb_stride*4)) {
                                DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
                                          i, idx[i], vb_size / (vb_stride*4));
                                return DRM_ERR(EINVAL);
                        }
                }

                if (reorder) {
                        /* Need to reorder vertices for correct flat
                         * shading while preserving the clock sense
                         * for correct culling. Only on Savage3D. */
                        int reorder[3] = {2, -1, -1};

                        BEGIN_DMA(count*vtx_size+1);
                        DMA_DRAW_PRIMITIVE(count, prim, skip);

                        for (i = 0; i < count; ++i) {
                                unsigned int j = idx[i + reorder[i % 3]];
                                DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j],
                                                   vtx_size);
                        }

                        DMA_COMMIT();
                } else {
                        BEGIN_DMA(count*vtx_size+1);
                        DMA_DRAW_PRIMITIVE(count, prim, skip);

                        for (i = 0; i < count; ++i) {
                                unsigned int j = idx[i];
                                DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j],
                                                   vtx_size);
                        }

                        DMA_COMMIT();
                }

                usr_idx += count;
                n -= count;

                prim |= BCI_CMD_DRAW_CONT;
        }

        return 0;
}

static int savage_dispatch_clear(drm_savage_private_t *dev_priv,
                                 const drm_savage_cmd_header_t *cmd_header,
                                 const drm_savage_cmd_header_t __user *data,
                                 unsigned int nbox,
                                 const drm_clip_rect_t __user *usr_boxes)
{
        unsigned int flags = cmd_header->clear0.flags, mask, value;
        unsigned int clear_cmd;
        unsigned int i, nbufs;
        DMA_LOCALS;

        if (nbox == 0)
                return 0;

        DRM_GET_USER_UNCHECKED(mask, &((const drm_savage_cmd_header_t*)data)
                               ->clear1.mask);
        DRM_GET_USER_UNCHECKED(value, &((const drm_savage_cmd_header_t*)data)
                               ->clear1.value);

        clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
                BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW;
        BCI_CMD_SET_ROP(clear_cmd, 0xCC); /* ROP 0xCC: plain source copy */

        nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) +
                ((flags & SAVAGE_BACK) ? 1 : 0) +
                ((flags & SAVAGE_DEPTH) ? 1 : 0);
        if (nbufs == 0)
                return 0;

        if (mask != 0xffffffff) {
                /* set mask */
                BEGIN_DMA(2);
                DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
                DMA_WRITE(mask);
                DMA_COMMIT();
        }
        for (i = 0; i < nbox; ++i) {
                drm_clip_rect_t box;
                unsigned int x, y, w, h;
                unsigned int buf;
                DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
                x = box.x1, y = box.y1;
                w = box.x2 - box.x1;
                h = box.y2 - box.y1;
                BEGIN_DMA(nbufs*6);
                for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
                        if (!(flags & buf))
                                continue;
                        DMA_WRITE(clear_cmd);
                        switch (buf) {
                        case SAVAGE_FRONT:
                                DMA_WRITE(dev_priv->front_offset);
                                DMA_WRITE(dev_priv->front_bd);
                                break;
                        case SAVAGE_BACK:
                                DMA_WRITE(dev_priv->back_offset);
                                DMA_WRITE(dev_priv->back_bd);
                                break;
                        case SAVAGE_DEPTH:
                                DMA_WRITE(dev_priv->depth_offset);
                                DMA_WRITE(dev_priv->depth_bd);
                                break;
                        }
                        DMA_WRITE(value);
                        DMA_WRITE(BCI_X_Y(x, y));
                        DMA_WRITE(BCI_W_H(w, h));
                }
                DMA_COMMIT();
        }
        if (mask != 0xffffffff) {
                /* reset mask */
                BEGIN_DMA(2);
                DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
                DMA_WRITE(0xffffffff);
                DMA_COMMIT();
        }

        return 0;
}

static int savage_dispatch_swap(drm_savage_private_t *dev_priv,
                                unsigned int nbox,
                                const drm_clip_rect_t __user *usr_boxes)
{
        unsigned int swap_cmd;
        unsigned int i;
        DMA_LOCALS;

        if (nbox == 0)
                return 0;

        swap_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
                BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD;
        BCI_CMD_SET_ROP(swap_cmd, 0xCC);

        for (i = 0; i < nbox; ++i) {
                drm_clip_rect_t box;
                DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));

                BEGIN_DMA(6);
                DMA_WRITE(swap_cmd);
                DMA_WRITE(dev_priv->back_offset);
                DMA_WRITE(dev_priv->back_bd);
                DMA_WRITE(BCI_X_Y(box.x1, box.y1));
                DMA_WRITE(BCI_X_Y(box.x1, box.y1));
                DMA_WRITE(BCI_W_H(box.x2-box.x1, box.y2-box.y1));
                DMA_COMMIT();
        }

        return 0;
}

static int savage_dispatch_draw(drm_savage_private_t *dev_priv,
                                const drm_savage_cmd_header_t __user *start,
                                const drm_savage_cmd_header_t __user *end,
                                const drm_buf_t *dmabuf,
                                const unsigned int __user *usr_vtxbuf,
                                unsigned int vb_size, unsigned int vb_stride,
                                unsigned int nbox,
                                const drm_clip_rect_t __user *usr_boxes)
{
        unsigned int i, j;
        int ret;

        for (i = 0; i < nbox; ++i) {
                drm_clip_rect_t box;
                const drm_savage_cmd_header_t __user *usr_cmdbuf;
                DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
                dev_priv->emit_clip_rect(dev_priv, &box);

                usr_cmdbuf = start;
                while (usr_cmdbuf < end) {
                        drm_savage_cmd_header_t cmd_header;
                        DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf,
                                                     sizeof(cmd_header));
                        usr_cmdbuf++;
                        switch (cmd_header.cmd.cmd) {
                        case SAVAGE_CMD_DMA_PRIM:
                                ret = savage_dispatch_dma_prim(
                                        dev_priv, &cmd_header, dmabuf);
                                break;
                        case SAVAGE_CMD_VB_PRIM:
                                ret = savage_dispatch_vb_prim(
                                        dev_priv, &cmd_header,
                                        (const uint32_t __user *)usr_vtxbuf,
                                        vb_size, vb_stride);
                                break;
                        case SAVAGE_CMD_DMA_IDX:
                                j = (cmd_header.idx.count + 3) / 4;
                                /* j was checked in savage_bci_cmdbuf */
                                ret = savage_dispatch_dma_idx(
                                        dev_priv, &cmd_header,
                                        (const uint16_t __user *)usr_cmdbuf,
                                        dmabuf);
                                usr_cmdbuf += j;
                                break;
                        case SAVAGE_CMD_VB_IDX:
                                j = (cmd_header.idx.count + 3) / 4;
                                /* j was checked in savage_bci_cmdbuf */
                                ret = savage_dispatch_vb_idx(
                                        dev_priv, &cmd_header,
                                        (const uint16_t __user *)usr_cmdbuf,
                                        (const uint32_t __user *)usr_vtxbuf,
                                        vb_size, vb_stride);
                                usr_cmdbuf += j;
                                break;
                        default:
                                /* What's the best return code? EFAULT? */
                                DRM_ERROR("IMPLEMENTATION ERROR: "
                                          "non-drawing-command %d\n",
                                          cmd_header.cmd.cmd);
                                return DRM_ERR(EINVAL);
                        }

                        if (ret != 0)
                                return ret;
                }
        }

        return 0;
}

int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_savage_private_t *dev_priv = dev->dev_private;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_t *dmabuf;
        drm_savage_cmdbuf_t cmdbuf;
        drm_savage_cmd_header_t __user *usr_cmdbuf;
        drm_savage_cmd_header_t __user *first_draw_cmd;
        unsigned int __user *usr_vtxbuf;
        drm_clip_rect_t __user *usr_boxes;
        unsigned int i, j;
        int ret = 0;

        DRM_DEBUG("\n");

        LOCK_TEST_WITH_RETURN(dev, filp);

        DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_savage_cmdbuf_t __user *)data,
                                 sizeof(cmdbuf));

        if (dma && dma->buflist) {
                if (cmdbuf.dma_idx >= dma->buf_count) {
                        DRM_ERROR("vertex buffer index %u out of range (0-%u)\n",
                                  cmdbuf.dma_idx, dma->buf_count-1);
                        return DRM_ERR(EINVAL);
                }
                dmabuf = dma->buflist[cmdbuf.dma_idx];
        } else {
                dmabuf = NULL;
        }

        usr_cmdbuf = (drm_savage_cmd_header_t __user *)cmdbuf.cmd_addr;
        usr_vtxbuf = (unsigned int __user *)cmdbuf.vb_addr;
        usr_boxes = (drm_clip_rect_t __user *)cmdbuf.box_addr;
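        /* cmdbuf.size and the per-command counts handled below are in
         * 64-bit (qword) units, one drm_savage_cmd_header_t each, hence
         * the *8 in the readability check. */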
        if ((cmdbuf.size && DRM_VERIFYAREA_READ(usr_cmdbuf, cmdbuf.size*8)) ||
            (cmdbuf.vb_size && DRM_VERIFYAREA_READ(
                    usr_vtxbuf, cmdbuf.vb_size)) ||
            (cmdbuf.nbox && DRM_VERIFYAREA_READ(
                    usr_boxes, cmdbuf.nbox*sizeof(drm_clip_rect_t))))
                return DRM_ERR(EFAULT);

        /* Make sure writes to DMA buffers are finished before sending
         * DMA commands to the graphics hardware. */
        DRM_MEMORYBARRIER();

        /* Coming from user space. Don't know if the Xserver has
         * emitted wait commands. Assuming the worst. */
        dev_priv->waiting = 1;

        i = 0;
        first_draw_cmd = NULL;
        while (i < cmdbuf.size) {
                drm_savage_cmd_header_t cmd_header;
                DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf,
                                             sizeof(cmd_header));
                usr_cmdbuf++;
                i++;

                /* Group drawing commands with same state to minimize
                 * iterations over clip rects. */
                j = 0;
                switch (cmd_header.cmd.cmd) {
                case SAVAGE_CMD_DMA_IDX:
                case SAVAGE_CMD_VB_IDX:
                        j = (cmd_header.idx.count + 3) / 4;
                        if (i + j > cmdbuf.size) {
                                DRM_ERROR("indexed drawing command extends "
                                          "beyond end of command buffer\n");
                                DMA_FLUSH();
                                return DRM_ERR(EINVAL);
                        }
                        /* fall through */
                case SAVAGE_CMD_DMA_PRIM:
                case SAVAGE_CMD_VB_PRIM:
                        if (!first_draw_cmd)
                                first_draw_cmd = usr_cmdbuf-1;
                        usr_cmdbuf += j;
                        i += j;
                        break;
                default:
                        if (first_draw_cmd) {
                                ret = savage_dispatch_draw(
                                        dev_priv, first_draw_cmd, usr_cmdbuf-1,
                                        dmabuf, usr_vtxbuf, cmdbuf.vb_size,
                                        cmdbuf.vb_stride,
                                        cmdbuf.nbox, usr_boxes);
                                if (ret != 0)
                                        return ret;
                                first_draw_cmd = NULL;
                        }
                }
                if (first_draw_cmd)
                        continue;

                switch (cmd_header.cmd.cmd) {
                case SAVAGE_CMD_STATE:
                        j = (cmd_header.state.count + 1) / 2;
                        if (i + j > cmdbuf.size) {
                                DRM_ERROR("command SAVAGE_CMD_STATE extends "
                                          "beyond end of command buffer\n");
                                DMA_FLUSH();
                                return DRM_ERR(EINVAL);
                        }
                        ret = savage_dispatch_state(
                                dev_priv, &cmd_header,
                                (uint32_t __user *)usr_cmdbuf);
                        usr_cmdbuf += j;
                        i += j;
                        break;
                case SAVAGE_CMD_CLEAR:
                        if (i + 1 > cmdbuf.size) {
                                DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
                                          "beyond end of command buffer\n");
                                DMA_FLUSH();
                                return DRM_ERR(EINVAL);
                        }
                        ret = savage_dispatch_clear(dev_priv, &cmd_header,
                                                    usr_cmdbuf,
                                                    cmdbuf.nbox, usr_boxes);
                        usr_cmdbuf++;
                        i++;
                        break;
                case SAVAGE_CMD_SWAP:
                        ret = savage_dispatch_swap(dev_priv,
                                                   cmdbuf.nbox, usr_boxes);
                        break;
                default:
                        DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd);
                        DMA_FLUSH();
                        return DRM_ERR(EINVAL);
                }

                if (ret != 0) {
                        DMA_FLUSH();
                        return ret;
                }
        }

        if (first_draw_cmd) {
                ret = savage_dispatch_draw(
                        dev_priv, first_draw_cmd, usr_cmdbuf, dmabuf,
                        usr_vtxbuf, cmdbuf.vb_size, cmdbuf.vb_stride,
                        cmdbuf.nbox, usr_boxes);
                if (ret != 0) {
                        DMA_FLUSH();
                        return ret;
                }
        }

        DMA_FLUSH();

        if (dmabuf && cmdbuf.discard) {
                drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private;
                uint16_t event;
                event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
                SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
                savage_freelist_put(dev, dmabuf);
        }

        return 0;
}