/* r300_cmdbuf.c -- Command buffer emission for R300 -*- linux-c -*-
 *
 * Copyright (C) The Weather Channel, Inc.  2002.
 * Copyright (C) 2004 Nicolai Haehnle.
 * All Rights Reserved.
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Nicolai Haehnle <prefect_@gmx.net>
 */

#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
#include "r300_reg.h"

#define R300_SIMULTANEOUS_CLIPRECTS             4

/* Values for R300_RE_CLIPRECT_CNTL depending on the number of cliprects
 */
static const int r300_cliprect_cntl[4] = {
        0xAAAA,
        0xEEEE,
        0xFEFE,
        0xFFFE
};
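/* The table above is indexed by (number of cliprects - 1), matching the
 * r300_cliprect_cntl[nr - 1] lookup in r300_emit_cliprects() below.  The bit
 * patterns themselves are taken as-is from the original driver; we assume
 * they configure R300_RE_CLIPRECT_CNTL to accept fragments inside the union
 * of the emitted cliprects.
 */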

/**
 * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
 * buffer, starting with index n.
 */
static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
                               drm_radeon_kcmd_buffer_t *cmdbuf, int n)
{
        struct drm_clip_rect box;
        int nr;
        int i;
        RING_LOCALS;

        nr = cmdbuf->nbox - n;
        if (nr > R300_SIMULTANEOUS_CLIPRECTS)
                nr = R300_SIMULTANEOUS_CLIPRECTS;

        DRM_DEBUG("%i cliprects\n", nr);

        if (nr) {
                BEGIN_RING(6 + nr * 2);
                OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));

                for (i = 0; i < nr; ++i) {
                        if (DRM_COPY_FROM_USER_UNCHECKED
                            (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
                                DRM_ERROR("copy cliprect faulted\n");
                                return -EFAULT;
                        }

                        box.x2--; /* Hardware expects inclusive bottom-right corner */
                        box.y2--;

                        if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
                                box.x1 = (box.x1) &
                                        R300_CLIPRECT_MASK;
                                box.y1 = (box.y1) &
                                        R300_CLIPRECT_MASK;
                                box.x2 = (box.x2) &
                                        R300_CLIPRECT_MASK;
                                box.y2 = (box.y2) &
                                        R300_CLIPRECT_MASK;
                        } else {
                                box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) &
                                        R300_CLIPRECT_MASK;
                                box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) &
                                        R300_CLIPRECT_MASK;
                                box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) &
                                        R300_CLIPRECT_MASK;
                                box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) &
                                        R300_CLIPRECT_MASK;
                        }

                        OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
                                 (box.y1 << R300_CLIPRECT_Y_SHIFT));
                        OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
                                 (box.y2 << R300_CLIPRECT_Y_SHIFT));

                }

                OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]);

                /* TODO/SECURITY: Force scissors to a safe value, otherwise the
                 * client might be able to trample over memory.
                 * The impact should be very limited, but I'd rather be safe than
                 * sorry.
                 */
                OUT_RING(CP_PACKET0(R300_RE_SCISSORS_TL, 1));
                OUT_RING(0);
                OUT_RING(R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK);
                ADVANCE_RING();
        } else {
                /* Why we allow zero cliprect rendering:
                 * There are some commands in a command buffer that must be submitted
                 * even when there are no cliprects, e.g. DMA buffer discard
                 * or state setting (though state setting could be avoided by
                 * simulating a loss of context).
                 *
                 * Now since the cmdbuf interface is so chaotic right now (and is
                 * bound to remain that way for a bit until things settle down),
                 * it is basically impossible to filter out the commands that are
                 * necessary and those that aren't.
                 *
                 * So I choose the safe way and don't do any filtering at all;
                 * instead, I simply set up the engine so that all rendering
                 * can't produce any fragments.
                 */
                BEGIN_RING(2);
                OUT_RING_REG(R300_RE_CLIPRECT_CNTL, 0);
                ADVANCE_RING();
        }

        /* flush cache and wait idle clean after cliprect change */
        BEGIN_RING(2);
        OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        OUT_RING(R300_RB3D_DC_FLUSH);
        ADVANCE_RING();
        BEGIN_RING(2);
        OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
        OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
        ADVANCE_RING();
        /* set flush flag */
        dev_priv->track_flush |= RADEON_FLUSH_EMITED;

        return 0;
}

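/* One flag byte per 32-bit register: the 64 KiB register space holds
 * 0x10000 >> 2 dwords.  The flags are filled in by r300_init_reg_flags()
 * and consulted when validating packet0 writes from userspace.
 */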
static u8 r300_reg_flags[0x10000 >> 2];

void r300_init_reg_flags(struct drm_device *dev)
{
        int i;
        drm_radeon_private_t *dev_priv = dev->dev_private;

        memset(r300_reg_flags, 0, 0x10000 >> 2);
#define ADD_RANGE_MARK(reg, count,mark) \
                for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
                        r300_reg_flags[i]|=(mark);

#define MARK_SAFE               1
#define MARK_CHECK_OFFSET       2

#define ADD_RANGE(reg, count)   ADD_RANGE_MARK(reg, count, MARK_SAFE)

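        /* Ranges added with plain ADD_RANGE() are MARK_SAFE and may be written
         * with arbitrary values; ranges added with MARK_CHECK_OFFSET carry a
         * framebuffer/GART offset that must additionally pass
         * radeon_check_offset(), see r300_emit_carefully_checked_packet0().
         */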
        /* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
        ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
        ADD_RANGE(R300_VAP_CNTL, 1);
        ADD_RANGE(R300_SE_VTE_CNTL, 2);
        ADD_RANGE(0x2134, 2);
        ADD_RANGE(R300_VAP_CNTL_STATUS, 1);
        ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
        ADD_RANGE(0x21DC, 1);
        ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
        ADD_RANGE(R300_VAP_CLIP_X_0, 4);
        ADD_RANGE(R300_VAP_PVS_STATE_FLUSH_REG, 1);
        ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
        ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
        ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
        ADD_RANGE(R300_GB_ENABLE, 1);
        ADD_RANGE(R300_GB_MSPOS0, 5);
        ADD_RANGE(R300_TX_INVALTAGS, 1);
        ADD_RANGE(R300_TX_ENABLE, 1);
        ADD_RANGE(0x4200, 4);
        ADD_RANGE(0x4214, 1);
        ADD_RANGE(R300_RE_POINTSIZE, 1);
        ADD_RANGE(0x4230, 3);
        ADD_RANGE(R300_RE_LINE_CNT, 1);
        ADD_RANGE(R300_RE_UNK4238, 1);
        ADD_RANGE(0x4260, 3);
        ADD_RANGE(R300_RE_SHADE, 4);
        ADD_RANGE(R300_RE_POLYGON_MODE, 5);
        ADD_RANGE(R300_RE_ZBIAS_CNTL, 1);
        ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
        ADD_RANGE(R300_RE_OCCLUSION_CNTL, 1);
        ADD_RANGE(R300_RE_CULL_CNTL, 1);
        ADD_RANGE(0x42C0, 2);
        ADD_RANGE(R300_RS_CNTL_0, 2);

        ADD_RANGE(R300_SC_HYPERZ, 2);
        ADD_RANGE(0x43E8, 1);

        ADD_RANGE(0x46A4, 5);

        ADD_RANGE(R300_RE_FOG_STATE, 1);
        ADD_RANGE(R300_FOG_COLOR_R, 3);
        ADD_RANGE(R300_PP_ALPHA_TEST, 2);
        ADD_RANGE(0x4BD8, 1);
        ADD_RANGE(R300_PFS_PARAM_0_X, 64);
        ADD_RANGE(0x4E00, 1);
        ADD_RANGE(R300_RB3D_CBLEND, 2);
        ADD_RANGE(R300_RB3D_COLORMASK, 1);
        ADD_RANGE(R300_RB3D_BLEND_COLOR, 3);
        ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET);   /* check offset */
        ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
        ADD_RANGE(0x4E50, 9);
        ADD_RANGE(0x4E88, 1);
        ADD_RANGE(0x4EA0, 2);
        ADD_RANGE(R300_ZB_CNTL, 3);
        ADD_RANGE(R300_ZB_FORMAT, 4);
        ADD_RANGE_MARK(R300_ZB_DEPTHOFFSET, 1, MARK_CHECK_OFFSET);      /* check offset */
        ADD_RANGE(R300_ZB_DEPTHPITCH, 1);
        ADD_RANGE(R300_ZB_DEPTHCLEARVALUE, 1);
        ADD_RANGE(R300_ZB_ZMASK_OFFSET, 13);

        ADD_RANGE(R300_TX_FILTER_0, 16);
        ADD_RANGE(R300_TX_FILTER1_0, 16);
        ADD_RANGE(R300_TX_SIZE_0, 16);
        ADD_RANGE(R300_TX_FORMAT_0, 16);
        ADD_RANGE(R300_TX_PITCH_0, 16);
        /* Texture offset is dangerous and needs more checking */
        ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
        ADD_RANGE(R300_TX_CHROMA_KEY_0, 16);
        ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);

        /* Sporadic registers used as primitives are emitted */
        ADD_RANGE(R300_ZB_ZCACHE_CTLSTAT, 1);
        ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
        ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
        ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);

        if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
                ADD_RANGE(R500_VAP_INDEX_OFFSET, 1);
                ADD_RANGE(R500_US_CONFIG, 2);
                ADD_RANGE(R500_US_CODE_ADDR, 3);
                ADD_RANGE(R500_US_FC_CTRL, 1);
                ADD_RANGE(R500_RS_IP_0, 16);
                ADD_RANGE(R500_RS_INST_0, 16);
                ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2);
                ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2);
                ADD_RANGE(R500_ZB_FIFO_SIZE, 2);
        } else {
                ADD_RANGE(R300_PFS_CNTL_0, 3);
                ADD_RANGE(R300_PFS_NODE_0, 4);
                ADD_RANGE(R300_PFS_TEXI_0, 64);
                ADD_RANGE(R300_PFS_INSTR0_0, 64);
                ADD_RANGE(R300_PFS_INSTR1_0, 64);
                ADD_RANGE(R300_PFS_INSTR2_0, 64);
                ADD_RANGE(R300_PFS_INSTR3_0, 64);
                ADD_RANGE(R300_RS_INTERP_0, 8);
                ADD_RANGE(R300_RS_ROUTE_0, 8);

        }
}

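/* Returns -1 if the range lies outside the 64 KiB register space, a non-zero
 * positive value if any register in [reg, reg + count * 4) is not marked
 * MARK_SAFE (the caller must then fall back to the carefully checked path),
 * and 0 if the whole range may be emitted unchecked.
 */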
static __inline__ int r300_check_range(unsigned reg, int count)
{
        int i;
        if (reg & ~0xffff)
                return -1;
        for (i = (reg >> 2); i < (reg >> 2) + count; i++)
                if (r300_reg_flags[i] != MARK_SAFE)
                        return 1;
        return 0;
}

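/* Emits a packet0 write after validating every value individually: registers
 * marked MARK_CHECK_OFFSET must contain an offset that passes
 * radeon_check_offset(); any register without a MARK_* flag is rejected.
 */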
static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
                                                          dev_priv,
                                                          drm_radeon_kcmd_buffer_t
                                                          * cmdbuf,
                                                          drm_r300_cmd_header_t
                                                          header)
{
        int reg;
        int sz;
        int i;
        int values[64];
        RING_LOCALS;

        sz = header.packet0.count;
        reg = (header.packet0.reghi << 8) | header.packet0.reglo;

        if ((sz > 64) || (sz < 0)) {
                DRM_ERROR
                    ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
                     reg, sz);
                return -EINVAL;
        }
        for (i = 0; i < sz; i++) {
                values[i] = ((int *)cmdbuf->buf)[i];
                switch (r300_reg_flags[(reg >> 2) + i]) {
                case MARK_SAFE:
                        break;
                case MARK_CHECK_OFFSET:
                        if (!radeon_check_offset(dev_priv, (u32) values[i])) {
                                DRM_ERROR
                                    ("Offset failed range check (reg=%04x sz=%d)\n",
                                     reg, sz);
                                return -EINVAL;
                        }
                        break;
                default:
                        DRM_ERROR("Register %04x failed check as flag=%02x\n",
                                  reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
                        return -EINVAL;
                }
        }

        BEGIN_RING(1 + sz);
        OUT_RING(CP_PACKET0(reg, sz - 1));
        OUT_RING_TABLE(values, sz);
        ADVANCE_RING();

        cmdbuf->buf += sz * 4;
        cmdbuf->bufsz -= sz * 4;

        return 0;
}

/**
 * Emits a packet0 setting arbitrary registers.
 * Called by r300_do_cp_cmdbuf.
 *
 * Note that checks are performed on contents and addresses of the registers
 */
static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
                                        drm_radeon_kcmd_buffer_t *cmdbuf,
                                        drm_r300_cmd_header_t header)
{
        int reg;
        int sz;
        RING_LOCALS;

        sz = header.packet0.count;
        reg = (header.packet0.reghi << 8) | header.packet0.reglo;

        if (!sz)
                return 0;

        if (sz * 4 > cmdbuf->bufsz)
                return -EINVAL;

        if (reg + sz * 4 >= 0x10000) {
                DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg,
                          sz);
                return -EINVAL;
        }

        if (r300_check_range(reg, sz)) {
                /* go and check everything */
                return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf,
                                                           header);
        }
        /* the rest of the data is safe to emit, whatever the values the user passed */

        BEGIN_RING(1 + sz);
        OUT_RING(CP_PACKET0(reg, sz - 1));
        OUT_RING_TABLE((int *)cmdbuf->buf, sz);
        ADVANCE_RING();

        cmdbuf->buf += sz * 4;
        cmdbuf->bufsz -= sz * 4;

        return 0;
}

/**
 * Uploads user-supplied vertex program instructions or parameters onto
 * the graphics card.
 * Called by r300_do_cp_cmdbuf.
 */
static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
                                    drm_radeon_kcmd_buffer_t *cmdbuf,
                                    drm_r300_cmd_header_t header)
{
        int sz;
        int addr;
        RING_LOCALS;

        sz = header.vpu.count;
        addr = (header.vpu.adrhi << 8) | header.vpu.adrlo;

        if (!sz)
                return 0;
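        /* Each VPU vector is four dwords (16 bytes) of instruction or
         * parameter data, hence the sz * 16 size check here and the sz * 4
         * dword upload below.
         */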
        if (sz * 16 > cmdbuf->bufsz)
                return -EINVAL;

        /* VAP is very sensitive so we purge cache before we program it
         * and we also flush its state before & after */
        BEGIN_RING(6);
        OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        OUT_RING(R300_RB3D_DC_FLUSH);
        OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
        OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
        OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
        OUT_RING(0);
        ADVANCE_RING();
        /* set flush flag */
        dev_priv->track_flush |= RADEON_FLUSH_EMITED;

        BEGIN_RING(3 + sz * 4);
        OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
        OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
        OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4);
        ADVANCE_RING();

        BEGIN_RING(2);
        OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
        OUT_RING(0);
        ADVANCE_RING();

        cmdbuf->buf += sz * 16;
        cmdbuf->bufsz -= sz * 16;

        return 0;
}

/**
 * Emit a clear packet from userspace.
 * Called by r300_emit_packet3.
 */
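/* The userspace payload is eight dwords that are passed straight into a
 * one-vertex 3D_DRAW_IMMD_2 point primitive; presumably position and colour
 * data, but the kernel only checks the payload size, not its contents.
 */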
static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
                                      drm_radeon_kcmd_buffer_t *cmdbuf)
{
        RING_LOCALS;

        if (8 * 4 > cmdbuf->bufsz)
                return -EINVAL;

        BEGIN_RING(10);
        OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
        OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
                 (1 << R300_PRIM_NUM_VERTICES_SHIFT));
        OUT_RING_TABLE((int *)cmdbuf->buf, 8);
        ADVANCE_RING();

        BEGIN_RING(4);
        OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        OUT_RING(R300_RB3D_DC_FLUSH);
        OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
        OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
        ADVANCE_RING();
        /* set flush flag */
        dev_priv->track_flush |= RADEON_FLUSH_EMITED;

        cmdbuf->buf += 8 * 4;
        cmdbuf->bufsz -= 8 * 4;

        return 0;
}

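/* Validates a 3D_LOAD_VBPNTR packet: the first payload dword is the number of
 * vertex arrays, followed by groups of one attribute dword and up to two
 * array offsets; every offset must pass radeon_check_offset() before the
 * packet is emitted unchanged.
 */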
static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
                                               drm_radeon_kcmd_buffer_t *cmdbuf,
                                               u32 header)
{
        int count, i, k;
#define MAX_ARRAY_PACKET  64
        u32 payload[MAX_ARRAY_PACKET];
        u32 narrays;
        RING_LOCALS;

        count = (header >> 16) & 0x3fff;

        if ((count + 1) > MAX_ARRAY_PACKET) {
                DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
                          count);
                return -EINVAL;
        }
        memset(payload, 0, MAX_ARRAY_PACKET * 4);
        memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);

        /* carefully check packet contents */

        narrays = payload[0];
        k = 0;
        i = 1;
        while ((k < narrays) && (i < (count + 1))) {
                i++;            /* skip attribute field */
                if (!radeon_check_offset(dev_priv, payload[i])) {
                        DRM_ERROR
                            ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
                             k, i);
                        return -EINVAL;
                }
                k++;
                i++;
                if (k == narrays)
                        break;
                /* have one more to process, they come in pairs */
                if (!radeon_check_offset(dev_priv, payload[i])) {
                        DRM_ERROR
                            ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
                             k, i);
                        return -EINVAL;
                }
                k++;
                i++;
        }
        /* do the counts match what we expect ? */
        if ((k != narrays) || (i != (count + 1))) {
                DRM_ERROR
                    ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
                     k, i, narrays, count + 1);
                return -EINVAL;
        }

        /* all clear, output packet */

        BEGIN_RING(count + 2);
        OUT_RING(header);
        OUT_RING_TABLE(payload, count + 1);
        ADVANCE_RING();

        cmdbuf->buf += (count + 2) * 4;
        cmdbuf->bufsz -= (count + 2) * 4;

        return 0;
}

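/* Validates a CNTL_BITBLT_MULTI packet: when GMC pitch/offset control bits are
 * set in the second dword, the corresponding source/destination offsets
 * (stored in units of 1 KiB, hence the << 10) are range checked before the
 * packet is emitted unchanged.
 */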
static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
                                             drm_radeon_kcmd_buffer_t *cmdbuf)
{
        u32 *cmd = (u32 *) cmdbuf->buf;
        int count, ret;
        RING_LOCALS;

        count = (cmd[0] >> 16) & 0x3fff;

        if (cmd[0] & 0x8000) {
                u32 offset;

                if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
                              | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
                        offset = cmd[2] << 10;
                        ret = !radeon_check_offset(dev_priv, offset);
                        if (ret) {
                                DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
                                return -EINVAL;
                        }
                }

                if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
                    (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
                        offset = cmd[3] << 10;
                        ret = !radeon_check_offset(dev_priv, offset);
                        if (ret) {
                                DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
                                return -EINVAL;
                        }

                }
        }

        BEGIN_RING(count + 2);
        OUT_RING(cmd[0]);
        OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
        ADVANCE_RING();

        cmdbuf->buf += (count + 2) * 4;
        cmdbuf->bufsz -= (count + 2) * 4;

        return 0;
}

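/* Validates a 3D_DRAW_INDX_2 packet.  With 16-bit indices two of them are
 * packed per dword, so the in-packet dword count must match the vertex count
 * from the VAP_VF_CNTL dword; a zero-length packet must be followed by an
 * INDX_BUFFER packet whose offset and size are checked as well.
 */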
static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
                                            drm_radeon_kcmd_buffer_t *cmdbuf)
{
        u32 *cmd;
        int count;
        int expected_count;
        RING_LOCALS;

        cmd = (u32 *) cmdbuf->buf;
        count = (cmd[0] >> 16) & 0x3fff;
        expected_count = cmd[1] >> 16;
        if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
                expected_count = (expected_count + 1) / 2;

        if (count && count != expected_count) {
                DRM_ERROR("3D_DRAW_INDX_2: packet size %i, expected %i\n",
                        count, expected_count);
                return -EINVAL;
        }

        BEGIN_RING(count + 2);
        OUT_RING(cmd[0]);
        OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
        ADVANCE_RING();

        cmdbuf->buf += (count + 2) * 4;
        cmdbuf->bufsz -= (count + 2) * 4;

        if (!count) {
                drm_r300_cmd_header_t header;

                if (cmdbuf->bufsz < 4 * 4 + sizeof(header)) {
                        DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
                        return -EINVAL;
                }

                header.u = *(unsigned int *)cmdbuf->buf;

                cmdbuf->buf += sizeof(header);
                cmdbuf->bufsz -= sizeof(header);
                cmd = (u32 *) cmdbuf->buf;

                if (header.header.cmd_type != R300_CMD_PACKET3 ||
                    header.packet3.packet != R300_CMD_PACKET3_RAW ||
                    cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
                        DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
                        return -EINVAL;
                }

                if ((cmd[1] & 0x8000ffff) != 0x80000810) {
                        DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
                        return -EINVAL;
                }
                if (!radeon_check_offset(dev_priv, cmd[2])) {
                        DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
                        return -EINVAL;
                }
                if (cmd[3] != expected_count) {
                        DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
                                cmd[3], expected_count);
                        return -EINVAL;
                }

                BEGIN_RING(4);
                OUT_RING(cmd[0]);
                OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3);
                ADVANCE_RING();

                cmdbuf->buf += 4 * 4;
                cmdbuf->bufsz -= 4 * 4;
        }

        return 0;
}

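/* Dispatches a raw packet3 from the command stream: known packet types are
 * routed to the dedicated validators above, a small whitelist is emitted
 * as-is, and everything else is rejected.
 */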
static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
                                            drm_radeon_kcmd_buffer_t *cmdbuf)
{
        u32 header;
        int count;
        RING_LOCALS;

        if (4 > cmdbuf->bufsz)
                return -EINVAL;

        /* Fixme !! This simply emits a packet without much checking.
           We need to be smarter. */

        /* obtain first word - actual packet3 header */
        header = *(u32 *) cmdbuf->buf;

        /* Is it packet 3 ? */
        if ((header >> 30) != 0x3) {
                DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
                return -EINVAL;
        }

        count = (header >> 16) & 0x3fff;

        /* Check again now that we know how much data to expect */
        if ((count + 2) * 4 > cmdbuf->bufsz) {
                DRM_ERROR
                    ("Expected packet3 of length %d but have only %d bytes left\n",
                     (count + 2) * 4, cmdbuf->bufsz);
                return -EINVAL;
        }

        /* Is it a packet type we know about ? */
        switch (header & 0xff00) {
        case RADEON_3D_LOAD_VBPNTR:     /* load vertex array pointers */
                return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);

        case RADEON_CNTL_BITBLT_MULTI:
                return r300_emit_bitblt_multi(dev_priv, cmdbuf);

        case RADEON_CP_INDX_BUFFER:
                DRM_ERROR("packet3 INDX_BUFFER without preceding 3D_DRAW_INDX_2 is illegal.\n");
                return -EINVAL;
        case RADEON_CP_3D_DRAW_IMMD_2:
                /* triggers drawing using in-packet vertex data */
        case RADEON_CP_3D_DRAW_VBUF_2:
                /* triggers drawing of vertex buffers setup elsewhere */
                dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
                                           RADEON_PURGE_EMITED);
                break;
        case RADEON_CP_3D_DRAW_INDX_2:
                /* triggers drawing using indices to vertex buffer */
                /* whenever we send vertex we clear flush & purge */
                dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
                                           RADEON_PURGE_EMITED);
                return r300_emit_draw_indx_2(dev_priv, cmdbuf);
        case RADEON_WAIT_FOR_IDLE:
        case RADEON_CP_NOP:
                /* these packets are safe */
                break;
        default:
                DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
                return -EINVAL;
        }

        BEGIN_RING(count + 2);
        OUT_RING(header);
        OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
        ADVANCE_RING();

        cmdbuf->buf += (count + 2) * 4;
        cmdbuf->bufsz -= (count + 2) * 4;

        return 0;
}

/**
 * Emit a rendering packet3 from userspace.
 * Called by r300_do_cp_cmdbuf.
 */
static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
                                        drm_radeon_kcmd_buffer_t *cmdbuf,
                                        drm_r300_cmd_header_t header)
{
        int n;
        int ret;
        char *orig_buf = cmdbuf->buf;
        int orig_bufsz = cmdbuf->bufsz;

        /* This is a do-while-loop so that we run the interior at least once,
         * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
         */
        n = 0;
        do {
                if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS) {
                        ret = r300_emit_cliprects(dev_priv, cmdbuf, n);
                        if (ret)
                                return ret;

                        cmdbuf->buf = orig_buf;
                        cmdbuf->bufsz = orig_bufsz;
                }

                switch (header.packet3.packet) {
                case R300_CMD_PACKET3_CLEAR:
                        DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
                        ret = r300_emit_clear(dev_priv, cmdbuf);
                        if (ret) {
                                DRM_ERROR("r300_emit_clear failed\n");
                                return ret;
                        }
                        break;

                case R300_CMD_PACKET3_RAW:
                        DRM_DEBUG("R300_CMD_PACKET3_RAW\n");
                        ret = r300_emit_raw_packet3(dev_priv, cmdbuf);
                        if (ret) {
                                DRM_ERROR("r300_emit_raw_packet3 failed\n");
                                return ret;
                        }
                        break;

                default:
                        DRM_ERROR("bad packet3 type %i at %p\n",
                                  header.packet3.packet,
                                  cmdbuf->buf - sizeof(header));
                        return -EINVAL;
                }

                n += R300_SIMULTANEOUS_CLIPRECTS;
        } while (n < cmdbuf->nbox);

        return 0;
}

/* Some of the R300 chips seem to be extremely touchy about the two registers
 * that are configured in r300_pacify.
 * Among the worst offenders seems to be the R300 ND (0x4E44): When userspace
 * sends a command buffer that contains only state setting commands and a
 * vertex program/parameter upload sequence, this will eventually lead to a
 * lockup, unless the sequence is bracketed by calls to r300_pacify.
 * So we should take great care to *always* call r300_pacify before
 * *anything* 3D related, and again afterwards. This is what the
 * call bracket in r300_do_cp_cmdbuf is for.
 */

/**
 * Emit the sequence to pacify R300.
 */
static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
{
        uint32_t cache_z, cache_3d, cache_2d;
        RING_LOCALS;

        cache_z = R300_ZC_FLUSH;
        cache_2d = R300_RB2D_DC_FLUSH;
        cache_3d = R300_RB3D_DC_FLUSH;
        if (!(dev_priv->track_flush & RADEON_PURGE_EMITED)) {
                /* we can purge; primitives were drawn since the last purge */
                cache_z |= R300_ZC_FREE;
                cache_2d |= R300_RB2D_DC_FREE;
                cache_3d |= R300_RB3D_DC_FREE;
        }

        /* flush & purge zbuffer */
        BEGIN_RING(2);
        OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));
        OUT_RING(cache_z);
        ADVANCE_RING();
        /* flush & purge 3d */
        BEGIN_RING(2);
        OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        OUT_RING(cache_3d);
        ADVANCE_RING();
        /* flush & purge texture */
        BEGIN_RING(2);
        OUT_RING(CP_PACKET0(R300_TX_INVALTAGS, 0));
        OUT_RING(0);
        ADVANCE_RING();
        /* FIXME: is this one really needed ? */
        BEGIN_RING(2);
        OUT_RING(CP_PACKET0(R300_RB3D_AARESOLVE_CTL, 0));
        OUT_RING(0);
        ADVANCE_RING();
        BEGIN_RING(2);
        OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
        OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
        ADVANCE_RING();
        /* flush & purge 2d through E2 as RB2D will trigger lockup */
        BEGIN_RING(4);
        OUT_RING(CP_PACKET0(R300_DSTCACHE_CTLSTAT, 0));
        OUT_RING(cache_2d);
        OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
        OUT_RING(RADEON_WAIT_2D_IDLECLEAN |
                 RADEON_WAIT_HOST_IDLECLEAN);
        ADVANCE_RING();
        /* set flush & purge flags */
        dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
}

/**
 * Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
 * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
 * be careful about how this function is called.
 */
static void r300_discard_buffer(struct drm_device * dev, struct drm_buf * buf)
{
        drm_radeon_private_t *dev_priv = dev->dev_private;
        drm_radeon_buf_priv_t *buf_priv = buf->dev_private;

        buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;
        buf->pending = 1;
        buf->used = 0;
}

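/* Translates the R300_WAIT_* flags from the command header into a
 * RADEON_WAIT_UNTIL packet; unknown flag combinations are silently ignored.
 */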
static void r300_cmd_wait(drm_radeon_private_t * dev_priv,
                          drm_r300_cmd_header_t header)
{
        u32 wait_until;
        RING_LOCALS;

        if (!header.wait.flags)
                return;

        wait_until = 0;

        switch (header.wait.flags) {
        case R300_WAIT_2D:
                wait_until = RADEON_WAIT_2D_IDLE;
                break;
        case R300_WAIT_3D:
                wait_until = RADEON_WAIT_3D_IDLE;
                break;
        case R300_NEW_WAIT_2D_3D:
                wait_until = RADEON_WAIT_2D_IDLE | RADEON_WAIT_3D_IDLE;
                break;
        case R300_NEW_WAIT_2D_2D_CLEAN:
                wait_until = RADEON_WAIT_2D_IDLE | RADEON_WAIT_2D_IDLECLEAN;
                break;
        case R300_NEW_WAIT_3D_3D_CLEAN:
                wait_until = RADEON_WAIT_3D_IDLE | RADEON_WAIT_3D_IDLECLEAN;
                break;
        case R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN:
                wait_until = RADEON_WAIT_2D_IDLE | RADEON_WAIT_2D_IDLECLEAN;
                wait_until |= RADEON_WAIT_3D_IDLE | RADEON_WAIT_3D_IDLECLEAN;
                break;
        default:
                return;
        }

        BEGIN_RING(2);
        OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
        OUT_RING(wait_until);
        ADVANCE_RING();
}

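/* Handles R300_CMD_SCRATCH: bumps the age of the selected scratch register,
 * emits the new age through the CP, and mirrors it into the user-space
 * ref/pending table whose base pointer is passed at the start of the payload
 * (two dwords per buffer: the age, then a pending count that is decremented).
 */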
static int r300_scratch(drm_radeon_private_t *dev_priv,
                        drm_radeon_kcmd_buffer_t *cmdbuf,
                        drm_r300_cmd_header_t header)
{
        u32 *ref_age_base;
        u32 i, buf_idx, h_pending;
        RING_LOCALS;

        if (cmdbuf->bufsz <
            (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) {
                return -EINVAL;
        }

        if (header.scratch.reg >= 5) {
                return -EINVAL;
        }

        dev_priv->scratch_ages[header.scratch.reg]++;

        ref_age_base = (u32 *)(unsigned long)*((uint64_t *)cmdbuf->buf);

        cmdbuf->buf += sizeof(u64);
        cmdbuf->bufsz -= sizeof(u64);

        for (i = 0; i < header.scratch.n_bufs; i++) {
                buf_idx = *(u32 *)cmdbuf->buf;
                buf_idx *= 2; /* 8 bytes per buf */

                if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
                        return -EINVAL;
                }

                if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
                        return -EINVAL;
                }

                if (h_pending == 0) {
                        return -EINVAL;
                }

                h_pending--;

                if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
                        return -EINVAL;
                }

                cmdbuf->buf += sizeof(buf_idx);
                cmdbuf->bufsz -= sizeof(buf_idx);
        }

        BEGIN_RING(2);
        OUT_RING(CP_PACKET0(RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0));
        OUT_RING(dev_priv->scratch_ages[header.scratch.reg]);
        ADVANCE_RING();

        return 0;
}

/**
 * Uploads user-supplied R500 fragment program instructions or constants onto
 * the graphics card (R300_CMD_R500FP).
 * Called by r300_do_cp_cmdbuf.
 */
static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
                                       drm_radeon_kcmd_buffer_t *cmdbuf,
                                       drm_r300_cmd_header_t header)
{
        int sz;
        int addr;
        int type;
        int clamp;
        int stride;
        RING_LOCALS;

        sz = header.r500fp.count;
        /* the address is 9 bits (0 - 8); bit 0 of adrhi_flags supplies the high bit */
        addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo;

        type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE);
        clamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);

        addr |= (type << 16);
        addr |= (clamp << 17);

        stride = type ? 4 : 6;

        DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
        if (!sz)
                return 0;
        if (sz * stride * 4 > cmdbuf->bufsz)
                return -EINVAL;

        BEGIN_RING(3 + sz * stride);
        OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
        OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
        OUT_RING_TABLE((int *)cmdbuf->buf, sz * stride);

        ADVANCE_RING();

        cmdbuf->buf += sz * stride * 4;
        cmdbuf->bufsz -= sz * stride * 4;

        return 0;
}


/**
 * Parses and validates a user-supplied command buffer and emits appropriate
 * commands on the DMA ring buffer.
 * Called by the ioctl handler function radeon_cp_cmdbuf.
 */
int r300_do_cp_cmdbuf(struct drm_device *dev,
                      struct drm_file *file_priv,
                      drm_radeon_kcmd_buffer_t *cmdbuf)
{
        drm_radeon_private_t *dev_priv = dev->dev_private;
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf *buf = NULL;
        int emit_dispatch_age = 0;
        int ret = 0;

        DRM_DEBUG("\n");

        /* pacify */
        r300_pacify(dev_priv);

        if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
                ret = r300_emit_cliprects(dev_priv, cmdbuf, 0);
                if (ret)
                        goto cleanup;
        }

        while (cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
                int idx;
                drm_r300_cmd_header_t header;

                header.u = *(unsigned int *)cmdbuf->buf;

                cmdbuf->buf += sizeof(header);
                cmdbuf->bufsz -= sizeof(header);

                switch (header.header.cmd_type) {
                case R300_CMD_PACKET0:
                        DRM_DEBUG("R300_CMD_PACKET0\n");
                        ret = r300_emit_packet0(dev_priv, cmdbuf, header);
                        if (ret) {
                                DRM_ERROR("r300_emit_packet0 failed\n");
                                goto cleanup;
                        }
                        break;

                case R300_CMD_VPU:
                        DRM_DEBUG("R300_CMD_VPU\n");
                        ret = r300_emit_vpu(dev_priv, cmdbuf, header);
                        if (ret) {
                                DRM_ERROR("r300_emit_vpu failed\n");
                                goto cleanup;
                        }
                        break;

                case R300_CMD_PACKET3:
                        DRM_DEBUG("R300_CMD_PACKET3\n");
                        ret = r300_emit_packet3(dev_priv, cmdbuf, header);
                        if (ret) {
                                DRM_ERROR("r300_emit_packet3 failed\n");
                                goto cleanup;
                        }
                        break;

                case R300_CMD_END3D:
                        DRM_DEBUG("R300_CMD_END3D\n");
                        /* TODO:
                           Ideally the userspace driver should not need to issue this call,
                           i.e. the drm driver should issue it automatically and prevent
                           lockups.

                           In practice, we do not understand why this call is needed and what
                           it does (except for some vague guesses that it has to do with cache
                           coherence), so the userspace driver issues it for now.

                           Once we know exactly which uses prevent lockups, the call could be
                           moved into the kernel and the userspace driver would no longer
                           need to use this command.

                           Note that issuing this command does not hurt anything
                           except, possibly, performance */
                        r300_pacify(dev_priv);
                        break;

                case R300_CMD_CP_DELAY:
                        /* simple enough, we can do it here */
                        DRM_DEBUG("R300_CMD_CP_DELAY\n");
                        {
                                int i;
                                RING_LOCALS;

                                BEGIN_RING(header.delay.count);
                                for (i = 0; i < header.delay.count; i++)
                                        OUT_RING(RADEON_CP_PACKET2);
                                ADVANCE_RING();
                        }
                        break;

                case R300_CMD_DMA_DISCARD:
                        DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
                        idx = header.dma.buf_idx;
                        if (idx < 0 || idx >= dma->buf_count) {
                                DRM_ERROR("buffer index %d (of %d max)\n",
                                          idx, dma->buf_count - 1);
                                ret = -EINVAL;
                                goto cleanup;
                        }

                        buf = dma->buflist[idx];
                        if (buf->file_priv != file_priv || buf->pending) {
                                DRM_ERROR("bad buffer %p %p %d\n",
                                          buf->file_priv, file_priv,
                                          buf->pending);
                                ret = -EINVAL;
                                goto cleanup;
                        }

                        emit_dispatch_age = 1;
                        r300_discard_buffer(dev, buf);
                        break;

                case R300_CMD_WAIT:
                        DRM_DEBUG("R300_CMD_WAIT\n");
                        r300_cmd_wait(dev_priv, header);
                        break;

                case R300_CMD_SCRATCH:
                        DRM_DEBUG("R300_CMD_SCRATCH\n");
                        ret = r300_scratch(dev_priv, cmdbuf, header);
                        if (ret) {
                                DRM_ERROR("r300_scratch failed\n");
                                goto cleanup;
                        }
                        break;

                case R300_CMD_R500FP:
                        if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV515) {
                                DRM_ERROR("Calling r500 command on r300 card\n");
                                ret = -EINVAL;
                                goto cleanup;
                        }
                        DRM_DEBUG("R300_CMD_R500FP\n");
                        ret = r300_emit_r500fp(dev_priv, cmdbuf, header);
                        if (ret) {
                                DRM_ERROR("r300_emit_r500fp failed\n");
                                goto cleanup;
                        }
                        break;
                default:
                        DRM_ERROR("bad cmd_type %i at %p\n",
                                  header.header.cmd_type,
                                  cmdbuf->buf - sizeof(header));
                        ret = -EINVAL;
                        goto cleanup;
                }
        }

        DRM_DEBUG("END\n");

      cleanup:
        r300_pacify(dev_priv);

        /* We emit the vertex buffer age here, outside the pacifier "brackets"
         * for two reasons:
         *  (1) This may coalesce multiple age emissions into a single one and
         *  (2) more importantly, some chips lock up hard when scratch registers
         *      are written inside the pacifier bracket.
         */
        if (emit_dispatch_age) {
                RING_LOCALS;

                /* Emit the vertex buffer age */
                BEGIN_RING(2);
                RADEON_DISPATCH_AGE(dev_priv->sarea_priv->last_dispatch);
                ADVANCE_RING();
        }

        COMMIT_RING();

        return ret;
}