/* radeon_state.c -- State support for Radeon -*- linux-c -*- */
/*
 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Gareth Hughes <gareth@valinux.com>
 *    Kevin E. Martin <martin@valinux.com>
 */

#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
/* ================================================================
 * Helper functions for client state checking and fixup
 */

static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
						    dev_priv,
						    drm_file_t * filp_priv,
						    u32 *offset)
{
	u32 off = *offset;
	struct drm_radeon_driver_file_fields *radeon_priv;

	if (off >= dev_priv->fb_location &&
	    off < (dev_priv->gart_vm_start + dev_priv->gart_size))
		return 0;

	radeon_priv = filp_priv->driver_priv;
	off += radeon_priv->radeon_fb_delta;

	DRM_DEBUG("offset fixed up to 0x%x\n", off);

	if (off < dev_priv->fb_location ||
	    off >= (dev_priv->gart_vm_start + dev_priv->gart_size))
		return DRM_ERR(EINVAL);

	*offset = off;
	return 0;
}
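/*
 * Usage sketch: callers hand in a pointer to a client-supplied GPU
 * address.  If it already falls inside the aperture
 * [fb_location, gart_vm_start + gart_size) it is accepted as-is;
 * otherwise the per-client relocation delta (radeon_fb_delta) is
 * applied and the result re-checked, e.g.:
 *
 *	u32 off = tex->offset;
 *	if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &off))
 *		return DRM_ERR(EINVAL);	* still outside the aperture *
 */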
static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
						     dev_priv,
						     drm_file_t * filp_priv,
						     int id, u32 *data)
{
	switch (id) {

	case RADEON_EMIT_PP_MISC:
		if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
		    &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
			DRM_ERROR("Invalid depth buffer offset\n");
			return DRM_ERR(EINVAL);
		}
		break;

	case RADEON_EMIT_PP_CNTL:
		if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
		    &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
			DRM_ERROR("Invalid colour buffer offset\n");
			return DRM_ERR(EINVAL);
		}
		break;
	case R200_EMIT_PP_TXOFFSET_0:
	case R200_EMIT_PP_TXOFFSET_1:
	case R200_EMIT_PP_TXOFFSET_2:
	case R200_EMIT_PP_TXOFFSET_3:
	case R200_EMIT_PP_TXOFFSET_4:
	case R200_EMIT_PP_TXOFFSET_5:
		if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
						  &data[0])) {
			DRM_ERROR("Invalid R200 texture offset\n");
			return DRM_ERR(EINVAL);
		}
		break;
	case RADEON_EMIT_PP_TXFILTER_0:
	case RADEON_EMIT_PP_TXFILTER_1:
	case RADEON_EMIT_PP_TXFILTER_2:
		if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
		    &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
			DRM_ERROR("Invalid R100 texture offset\n");
			return DRM_ERR(EINVAL);
		}
		break;
	case R200_EMIT_PP_CUBIC_OFFSETS_0:
	case R200_EMIT_PP_CUBIC_OFFSETS_1:
	case R200_EMIT_PP_CUBIC_OFFSETS_2:
	case R200_EMIT_PP_CUBIC_OFFSETS_3:
	case R200_EMIT_PP_CUBIC_OFFSETS_4:
	case R200_EMIT_PP_CUBIC_OFFSETS_5:{
			int i;
			for (i = 0; i < 5; i++) {
				if (radeon_check_and_fixup_offset(dev_priv,
								  filp_priv,
								  &data[i])) {
					DRM_ERROR
					    ("Invalid R200 cubic texture offset\n");
					return DRM_ERR(EINVAL);
				}
			}
			break;
		}
	case RADEON_EMIT_PP_CUBIC_OFFSETS_T0:
	case RADEON_EMIT_PP_CUBIC_OFFSETS_T1:
	case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{
			int i;
			for (i = 0; i < 5; i++) {
				if (radeon_check_and_fixup_offset(dev_priv,
								  filp_priv,
								  &data[i])) {
					DRM_ERROR
					    ("Invalid R100 cubic texture offset\n");
					return DRM_ERR(EINVAL);
				}
			}
			break;
		}
	case RADEON_EMIT_RB3D_COLORPITCH:
	case RADEON_EMIT_RE_LINE_PATTERN:
	case RADEON_EMIT_SE_LINE_WIDTH:
	case RADEON_EMIT_PP_LUM_MATRIX:
	case RADEON_EMIT_PP_ROT_MATRIX_0:
	case RADEON_EMIT_RB3D_STENCILREFMASK:
	case RADEON_EMIT_SE_VPORT_XSCALE:
	case RADEON_EMIT_SE_CNTL:
	case RADEON_EMIT_SE_CNTL_STATUS:
	case RADEON_EMIT_RE_MISC:
	case RADEON_EMIT_PP_BORDER_COLOR_0:
	case RADEON_EMIT_PP_BORDER_COLOR_1:
	case RADEON_EMIT_PP_BORDER_COLOR_2:
	case RADEON_EMIT_SE_ZBIAS_FACTOR:
	case RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT:
	case RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED:
	case R200_EMIT_PP_TXCBLEND_0:
	case R200_EMIT_PP_TXCBLEND_1:
	case R200_EMIT_PP_TXCBLEND_2:
	case R200_EMIT_PP_TXCBLEND_3:
	case R200_EMIT_PP_TXCBLEND_4:
	case R200_EMIT_PP_TXCBLEND_5:
	case R200_EMIT_PP_TXCBLEND_6:
	case R200_EMIT_PP_TXCBLEND_7:
	case R200_EMIT_TCL_LIGHT_MODEL_CTL_0:
	case R200_EMIT_TFACTOR_0:
	case R200_EMIT_VTX_FMT_0:
	case R200_EMIT_VAP_CTL:
	case R200_EMIT_MATRIX_SELECT_0:
	case R200_EMIT_TEX_PROC_CTL_2:
	case R200_EMIT_TCL_UCP_VERT_BLEND_CTL:
	case R200_EMIT_PP_TXFILTER_0:
	case R200_EMIT_PP_TXFILTER_1:
	case R200_EMIT_PP_TXFILTER_2:
	case R200_EMIT_PP_TXFILTER_3:
	case R200_EMIT_PP_TXFILTER_4:
	case R200_EMIT_PP_TXFILTER_5:
	case R200_EMIT_VTE_CNTL:
	case R200_EMIT_OUTPUT_VTX_COMP_SEL:
	case R200_EMIT_PP_TAM_DEBUG3:
	case R200_EMIT_PP_CNTL_X:
	case R200_EMIT_RB3D_DEPTHXY_OFFSET:
	case R200_EMIT_RE_AUX_SCISSOR_CNTL:
	case R200_EMIT_RE_SCISSOR_TL_0:
	case R200_EMIT_RE_SCISSOR_TL_1:
	case R200_EMIT_RE_SCISSOR_TL_2:
	case R200_EMIT_SE_VAP_CNTL_STATUS:
	case R200_EMIT_SE_VTX_STATE_CNTL:
	case R200_EMIT_RE_POINTSIZE:
	case R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0:
	case R200_EMIT_PP_CUBIC_FACES_0:
	case R200_EMIT_PP_CUBIC_FACES_1:
	case R200_EMIT_PP_CUBIC_FACES_2:
	case R200_EMIT_PP_CUBIC_FACES_3:
	case R200_EMIT_PP_CUBIC_FACES_4:
	case R200_EMIT_PP_CUBIC_FACES_5:
	case RADEON_EMIT_PP_TEX_SIZE_0:
	case RADEON_EMIT_PP_TEX_SIZE_1:
	case RADEON_EMIT_PP_TEX_SIZE_2:
	case R200_EMIT_RB3D_BLENDCOLOR:
	case R200_EMIT_TCL_POINT_SPRITE_CNTL:
	case RADEON_EMIT_PP_CUBIC_FACES_0:
	case RADEON_EMIT_PP_CUBIC_FACES_1:
	case RADEON_EMIT_PP_CUBIC_FACES_2:
	case R200_EMIT_PP_TRI_PERF_CNTL:
	case R200_EMIT_PP_AFS_0:
	case R200_EMIT_PP_AFS_1:
	case R200_EMIT_ATF_TFACTOR:
	case R200_EMIT_PP_TXCTLALL_0:
	case R200_EMIT_PP_TXCTLALL_1:
	case R200_EMIT_PP_TXCTLALL_2:
	case R200_EMIT_PP_TXCTLALL_3:
	case R200_EMIT_PP_TXCTLALL_4:
	case R200_EMIT_PP_TXCTLALL_5:
		/* These packets don't contain memory offsets */
		break;

	default:
		DRM_ERROR("Unknown state packet ID %d\n", id);
		return DRM_ERR(EINVAL);
	}

	return 0;
}
static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
						     dev_priv,
						     drm_file_t *filp_priv,
						     drm_radeon_kcmd_buffer_t *
						     cmdbuf,
						     unsigned int *cmdsz)
{
	u32 *cmd = (u32 *) cmdbuf->buf;

	*cmdsz = 2 + ((cmd[0] & RADEON_CP_PACKET_COUNT_MASK) >> 16);
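	/*
	 * Type-3 CP packets carry a 14-bit count in header bits 29:16,
	 * equal to the total packet length in dwords minus two, so the
	 * full size is recovered as count + 2.  E.g. a header of
	 * 0xC0012D00 has count = 1, giving *cmdsz = 3.
	 */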
	if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) {
		DRM_ERROR("Not a type 3 packet\n");
		return DRM_ERR(EINVAL);
	}

	if (4 * *cmdsz > cmdbuf->bufsz) {
		DRM_ERROR("Packet size larger than size of data provided\n");
		return DRM_ERR(EINVAL);
	}
	/* Check client state and fix it up if necessary */
	if (cmd[0] & 0x8000) {	/* MSB of opcode: next DWORD GUI_CNTL */
		u32 offset;

		if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
			      | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
			offset = cmd[2] << 10;
			if (radeon_check_and_fixup_offset
			    (dev_priv, filp_priv, &offset)) {
				DRM_ERROR("Invalid first packet offset\n");
				return DRM_ERR(EINVAL);
			}
			cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10;
		}

		if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
		    (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
			offset = cmd[3] << 10;
			if (radeon_check_and_fixup_offset
			    (dev_priv, filp_priv, &offset)) {
				DRM_ERROR("Invalid second packet offset\n");
				return DRM_ERR(EINVAL);
			}
			cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10;
		}
	}

	return 0;
}
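/*
 * Note on the fixup above: the BLT pitch/offset dwords pack the surface
 * offset in their low 22 bits in 1KB units (hence the << 10 / >> 10
 * round trip) and the pitch in bits 31:22 (the 0xffc00000 mask), so
 * relocation only rewrites the offset field.
 */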
/* ================================================================
 * CP hardware state programming functions
 */

static __inline__ void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv,
					     drm_clip_rect_t * box)
{
	RING_LOCALS;

	DRM_DEBUG("   box:  x1=%d y1=%d  x2=%d y2=%d\n",
		  box->x1, box->y1, box->x2, box->y2);

	BEGIN_RING(4);
	OUT_RING(CP_PACKET0(RADEON_RE_TOP_LEFT, 0));
	OUT_RING((box->y1 << 16) | box->x1);
	OUT_RING(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0));
	OUT_RING(((box->y2 - 1) << 16) | (box->x2 - 1));
	ADVANCE_RING();
}
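/*
 * DRM cliprects use exclusive x2/y2; RE_WIDTH_HEIGHT is fed
 * (x2 - 1, y2 - 1) above, i.e. it appears to take an inclusive
 * bottom-right corner despite its name.
 */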
static int radeon_emit_state(drm_radeon_private_t * dev_priv,
			     drm_file_t * filp_priv,
			     drm_radeon_context_regs_t * ctx,
			     drm_radeon_texture_regs_t * tex,
			     unsigned int dirty)
{
	RING_LOCALS;
	DRM_DEBUG("dirty=0x%08x\n", dirty);

	if (dirty & RADEON_UPLOAD_CONTEXT) {
		if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
						  &ctx->rb3d_depthoffset)) {
			DRM_ERROR("Invalid depth buffer offset\n");
			return DRM_ERR(EINVAL);
		}

		if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
						  &ctx->rb3d_coloroffset)) {
			DRM_ERROR("Invalid colour buffer offset\n");
			return DRM_ERR(EINVAL);
		}
		OUT_RING(CP_PACKET0(RADEON_PP_MISC, 6));
		OUT_RING(ctx->pp_misc);
		OUT_RING(ctx->pp_fog_color);
		OUT_RING(ctx->re_solid_color);
		OUT_RING(ctx->rb3d_blendcntl);
		OUT_RING(ctx->rb3d_depthoffset);
		OUT_RING(ctx->rb3d_depthpitch);
		OUT_RING(ctx->rb3d_zstencilcntl);
		OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 2));
		OUT_RING(ctx->pp_cntl);
		OUT_RING(ctx->rb3d_cntl);
		OUT_RING(ctx->rb3d_coloroffset);
		OUT_RING(CP_PACKET0(RADEON_RB3D_COLORPITCH, 0));
		OUT_RING(ctx->rb3d_colorpitch);
	}

	if (dirty & RADEON_UPLOAD_VERTFMT) {
		OUT_RING(CP_PACKET0(RADEON_SE_COORD_FMT, 0));
		OUT_RING(ctx->se_coord_fmt);
	}

	if (dirty & RADEON_UPLOAD_LINE) {
		OUT_RING(CP_PACKET0(RADEON_RE_LINE_PATTERN, 1));
		OUT_RING(ctx->re_line_pattern);
		OUT_RING(ctx->re_line_state);
		OUT_RING(CP_PACKET0(RADEON_SE_LINE_WIDTH, 0));
		OUT_RING(ctx->se_line_width);
	}

	if (dirty & RADEON_UPLOAD_BUMPMAP) {
		OUT_RING(CP_PACKET0(RADEON_PP_LUM_MATRIX, 0));
		OUT_RING(ctx->pp_lum_matrix);
		OUT_RING(CP_PACKET0(RADEON_PP_ROT_MATRIX_0, 1));
		OUT_RING(ctx->pp_rot_matrix_0);
		OUT_RING(ctx->pp_rot_matrix_1);
	}

	if (dirty & RADEON_UPLOAD_MASKS) {
		OUT_RING(CP_PACKET0(RADEON_RB3D_STENCILREFMASK, 2));
		OUT_RING(ctx->rb3d_stencilrefmask);
		OUT_RING(ctx->rb3d_ropcntl);
		OUT_RING(ctx->rb3d_planemask);
	}

	if (dirty & RADEON_UPLOAD_VIEWPORT) {
		OUT_RING(CP_PACKET0(RADEON_SE_VPORT_XSCALE, 5));
		OUT_RING(ctx->se_vport_xscale);
		OUT_RING(ctx->se_vport_xoffset);
		OUT_RING(ctx->se_vport_yscale);
		OUT_RING(ctx->se_vport_yoffset);
		OUT_RING(ctx->se_vport_zscale);
		OUT_RING(ctx->se_vport_zoffset);
	}

	if (dirty & RADEON_UPLOAD_SETUP) {
		OUT_RING(CP_PACKET0(RADEON_SE_CNTL, 0));
		OUT_RING(ctx->se_cntl);
		OUT_RING(CP_PACKET0(RADEON_SE_CNTL_STATUS, 0));
		OUT_RING(ctx->se_cntl_status);
	}

	if (dirty & RADEON_UPLOAD_MISC) {
		OUT_RING(CP_PACKET0(RADEON_RE_MISC, 0));
		OUT_RING(ctx->re_misc);
	}

	if (dirty & RADEON_UPLOAD_TEX0) {
		if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
						  &tex[0].pp_txoffset)) {
			DRM_ERROR("Invalid texture offset for unit 0\n");
			return DRM_ERR(EINVAL);
		}

		OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_0, 5));
		OUT_RING(tex[0].pp_txfilter);
		OUT_RING(tex[0].pp_txformat);
		OUT_RING(tex[0].pp_txoffset);
		OUT_RING(tex[0].pp_txcblend);
		OUT_RING(tex[0].pp_txablend);
		OUT_RING(tex[0].pp_tfactor);
		OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_0, 0));
		OUT_RING(tex[0].pp_border_color);
	}

	if (dirty & RADEON_UPLOAD_TEX1) {
		if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
						  &tex[1].pp_txoffset)) {
			DRM_ERROR("Invalid texture offset for unit 1\n");
			return DRM_ERR(EINVAL);
		}

		OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_1, 5));
		OUT_RING(tex[1].pp_txfilter);
		OUT_RING(tex[1].pp_txformat);
		OUT_RING(tex[1].pp_txoffset);
		OUT_RING(tex[1].pp_txcblend);
		OUT_RING(tex[1].pp_txablend);
		OUT_RING(tex[1].pp_tfactor);
		OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_1, 0));
		OUT_RING(tex[1].pp_border_color);
	}

	if (dirty & RADEON_UPLOAD_TEX2) {
		if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
						  &tex[2].pp_txoffset)) {
			DRM_ERROR("Invalid texture offset for unit 2\n");
			return DRM_ERR(EINVAL);
		}

		OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_2, 5));
		OUT_RING(tex[2].pp_txfilter);
		OUT_RING(tex[2].pp_txformat);
		OUT_RING(tex[2].pp_txoffset);
		OUT_RING(tex[2].pp_txcblend);
		OUT_RING(tex[2].pp_txablend);
		OUT_RING(tex[2].pp_tfactor);
		OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_2, 0));
		OUT_RING(tex[2].pp_border_color);
	}

	return 0;
}
static int radeon_emit_state2(drm_radeon_private_t * dev_priv,
			      drm_file_t * filp_priv,
			      drm_radeon_state_t * state)
{
	RING_LOCALS;

	if (state->dirty & RADEON_UPLOAD_ZBIAS) {
		OUT_RING(CP_PACKET0(RADEON_SE_ZBIAS_FACTOR, 1));
		OUT_RING(state->context2.se_zbias_factor);
		OUT_RING(state->context2.se_zbias_constant);
	}

	return radeon_emit_state(dev_priv, filp_priv, &state->context,
				 state->tex, state->dirty);
}
/* New (1.3) state mechanism.  3 commands (packet, scalar, vector) in
 * 1.3 cmdbuffers allow all previous state to be updated as well as
 * the tcl scalar and vector areas.
 */
static struct {
	int start;
	int len;
	const char *name;
} packet[RADEON_MAX_STATE_PACKETS] = {
	{RADEON_PP_MISC, 7, "RADEON_PP_MISC"},
	{RADEON_PP_CNTL, 3, "RADEON_PP_CNTL"},
	{RADEON_RB3D_COLORPITCH, 1, "RADEON_RB3D_COLORPITCH"},
	{RADEON_RE_LINE_PATTERN, 2, "RADEON_RE_LINE_PATTERN"},
	{RADEON_SE_LINE_WIDTH, 1, "RADEON_SE_LINE_WIDTH"},
	{RADEON_PP_LUM_MATRIX, 1, "RADEON_PP_LUM_MATRIX"},
	{RADEON_PP_ROT_MATRIX_0, 2, "RADEON_PP_ROT_MATRIX_0"},
	{RADEON_RB3D_STENCILREFMASK, 3, "RADEON_RB3D_STENCILREFMASK"},
	{RADEON_SE_VPORT_XSCALE, 6, "RADEON_SE_VPORT_XSCALE"},
	{RADEON_SE_CNTL, 2, "RADEON_SE_CNTL"},
	{RADEON_SE_CNTL_STATUS, 1, "RADEON_SE_CNTL_STATUS"},
	{RADEON_RE_MISC, 1, "RADEON_RE_MISC"},
	{RADEON_PP_TXFILTER_0, 6, "RADEON_PP_TXFILTER_0"},
	{RADEON_PP_BORDER_COLOR_0, 1, "RADEON_PP_BORDER_COLOR_0"},
	{RADEON_PP_TXFILTER_1, 6, "RADEON_PP_TXFILTER_1"},
	{RADEON_PP_BORDER_COLOR_1, 1, "RADEON_PP_BORDER_COLOR_1"},
	{RADEON_PP_TXFILTER_2, 6, "RADEON_PP_TXFILTER_2"},
	{RADEON_PP_BORDER_COLOR_2, 1, "RADEON_PP_BORDER_COLOR_2"},
	{RADEON_SE_ZBIAS_FACTOR, 2, "RADEON_SE_ZBIAS_FACTOR"},
	{RADEON_SE_TCL_OUTPUT_VTX_FMT, 11, "RADEON_SE_TCL_OUTPUT_VTX_FMT"},
	{RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 17,
	 "RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED"},
	{R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0"},
	{R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1"},
	{R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2"},
	{R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3"},
	{R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4"},
	{R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5"},
	{R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6"},
	{R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7"},
	{R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0"},
	{R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0"},
	{R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0"},
	{R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL"},
	{R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0"},
	{R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2"},
	{R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL"},
	{R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0"},
	{R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1"},
	{R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2"},
	{R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3"},
	{R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4"},
	{R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5"},
	{R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0"},
	{R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1"},
	{R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2"},
	{R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3"},
	{R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"},
	{R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"},
	{R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"},
	{R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1,
	 "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
	{R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"},
	{R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"},
	{R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"},
	{R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL"},
	{R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0"},
	{R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1"},
	{R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2"},
	{R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS"},
	{R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL"},
	{R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE"},
	{R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4,
	 "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"},
	{R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"},	/* 61 */
	{R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"},	/* 62 */
	{R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"},
	{R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"},
	{R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"},
	{R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2"},
	{R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3"},
	{R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3"},
	{R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4"},
	{R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4"},
	{R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5"},
	{R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5"},
	{RADEON_PP_TEX_SIZE_0, 2, "RADEON_PP_TEX_SIZE_0"},
	{RADEON_PP_TEX_SIZE_1, 2, "RADEON_PP_TEX_SIZE_1"},
	{RADEON_PP_TEX_SIZE_2, 2, "RADEON_PP_TEX_SIZE_2"},
	{R200_RB3D_BLENDCOLOR, 3, "R200_RB3D_BLENDCOLOR"},
	{R200_SE_TCL_POINT_SPRITE_CNTL, 1, "R200_SE_TCL_POINT_SPRITE_CNTL"},
	{RADEON_PP_CUBIC_FACES_0, 1, "RADEON_PP_CUBIC_FACES_0"},
	{RADEON_PP_CUBIC_OFFSET_T0_0, 5, "RADEON_PP_CUBIC_OFFSET_T0_0"},
	{RADEON_PP_CUBIC_FACES_1, 1, "RADEON_PP_CUBIC_FACES_1"},
	{RADEON_PP_CUBIC_OFFSET_T1_0, 5, "RADEON_PP_CUBIC_OFFSET_T1_0"},
	{RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"},
	{RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"},
	{R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"},
	{R200_PP_AFS_0, 32, "R200_PP_AFS_0"},	/* 85 */
	{R200_PP_AFS_1, 32, "R200_PP_AFS_1"},
	{R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"},
	{R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"},
	{R200_PP_TXFILTER_1, 8, "R200_PP_TXCTLALL_1"},
	{R200_PP_TXFILTER_2, 8, "R200_PP_TXCTLALL_2"},
	{R200_PP_TXFILTER_3, 8, "R200_PP_TXCTLALL_3"},
	{R200_PP_TXFILTER_4, 8, "R200_PP_TXCTLALL_4"},
	{R200_PP_TXFILTER_5, 8, "R200_PP_TXCTLALL_5"},
};
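/*
 * Each entry above gives the first register of a state block, the
 * number of dwords written, and a name used only for error reporting.
 * The table index matches the RADEON_EMIT_* / R200_EMIT_* packet IDs
 * validated in radeon_check_and_fixup_packets() above.
 */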
/* ================================================================
 * Performance monitoring functions
 */

static void radeon_clear_box(drm_radeon_private_t * dev_priv,
			     int x, int y, int w, int h, int r, int g, int b)
{
	u32 color;

	x += dev_priv->sarea_priv->boxes[0].x1;
	y += dev_priv->sarea_priv->boxes[0].y1;
	switch (dev_priv->color_fmt) {
	case RADEON_COLOR_FORMAT_RGB565:
		color = (((r & 0xf8) << 8) |
			 ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
		break;
	case RADEON_COLOR_FORMAT_ARGB8888:
	default:
		color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
		break;
	}
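	/*
	 * RGB565 packing above: r[7:3] -> bits 15:11, g[7:2] -> bits 10:5,
	 * b[7:3] -> bits 4:0.  ARGB8888 keeps full 8-bit channels under an
	 * opaque alpha byte.
	 */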
	RADEON_WAIT_UNTIL_3D_IDLE();
	OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0));
	OUT_RING(0xffffffff);

	OUT_RING(CP_PACKET3(RADEON_CNTL_PAINT_MULTI, 4));
	OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
		 RADEON_GMC_BRUSH_SOLID_COLOR |
		 (dev_priv->color_fmt << 8) |
		 RADEON_GMC_SRC_DATATYPE_COLOR |
		 RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS);

	if (dev_priv->page_flipping && dev_priv->current_page == 1) {
		OUT_RING(dev_priv->front_pitch_offset);
	} else {
		OUT_RING(dev_priv->back_pitch_offset);
	}

	OUT_RING(color);

	OUT_RING((x << 16) | y);
	OUT_RING((w << 16) | h);
}
static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv)
{
	/* Collapse various things into a wait flag -- trying to
	 * guess if userspace slept -- better just to have them tell us.
	 */
	if (dev_priv->stats.last_frame_reads > 1 ||
	    dev_priv->stats.last_clear_reads > dev_priv->stats.clears) {
		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
	}

	if (dev_priv->stats.freelist_loops) {
		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
	}

	/* Purple box for page flipping
	 */
	if (dev_priv->stats.boxes & RADEON_BOX_FLIP)
		radeon_clear_box(dev_priv, 4, 4, 8, 8, 255, 0, 255);

	/* Red box if we have to wait for idle at any point
	 */
	if (dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE)
		radeon_clear_box(dev_priv, 16, 4, 8, 8, 255, 0, 0);

	/* Blue box: lost context?
	 */

	/* Yellow box for texture swaps
	 */
	if (dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD)
		radeon_clear_box(dev_priv, 40, 4, 8, 8, 255, 255, 0);

	/* Green box if hardware never idles (as far as we can tell)
	 */
	if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE))
		radeon_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);

	/* Draw bars indicating number of buffers allocated
	 * (not a great measure, easily confused)
	 */
	if (dev_priv->stats.requested_bufs) {
		if (dev_priv->stats.requested_bufs > 100)
			dev_priv->stats.requested_bufs = 100;

		radeon_clear_box(dev_priv, 4, 16,
				 dev_priv->stats.requested_bufs, 4,
				 196, 128, 128);
	}

	memset(&dev_priv->stats, 0, sizeof(dev_priv->stats));
}
/* ================================================================
 * CP command dispatch functions
 */

static void radeon_cp_dispatch_clear(drm_device_t * dev,
				     drm_radeon_clear_t * clear,
				     drm_radeon_clear_rect_t * depth_boxes)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
	int nbox = sarea_priv->nbox;
	drm_clip_rect_t *pbox = sarea_priv->boxes;
	unsigned int flags = clear->flags;
	u32 rb3d_cntl = 0, rb3d_stencilrefmask = 0;
	int i;
	RING_LOCALS;
	DRM_DEBUG("flags = 0x%x\n", flags);
	dev_priv->stats.clears++;

	if (dev_priv->page_flipping && dev_priv->current_page == 1) {
		unsigned int tmp = flags;

		flags &= ~(RADEON_FRONT | RADEON_BACK);
		if (tmp & RADEON_FRONT)
			flags |= RADEON_BACK;
		if (tmp & RADEON_BACK)
			flags |= RADEON_FRONT;
	}
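	/*
	 * (Above: when the page is flipped, the client's notion of "front"
	 * is physically the back buffer and vice versa, so the FRONT/BACK
	 * request bits are exchanged before the fills below.)
	 */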
	if (flags & (RADEON_FRONT | RADEON_BACK)) {
		/* Ensure the 3D stream is idle before doing a
		 * 2D fill to clear the front or back buffer.
		 */
		RADEON_WAIT_UNTIL_3D_IDLE();

		OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0));
		OUT_RING(clear->color_mask);

		/* Make sure we restore the 3D state next time.
		 */
		dev_priv->sarea_priv->ctx_owner = 0;

		for (i = 0; i < nbox; i++) {
			int x = pbox[i].x1;
			int y = pbox[i].y1;
			int w = pbox[i].x2 - x;
			int h = pbox[i].y2 - y;

			DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n",
				  x, y, w, h, flags);

			if (flags & RADEON_FRONT) {
				OUT_RING(CP_PACKET3
					 (RADEON_CNTL_PAINT_MULTI, 4));
				OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
					 RADEON_GMC_BRUSH_SOLID_COLOR |
					 (dev_priv->color_fmt << 8) |
					 RADEON_GMC_SRC_DATATYPE_COLOR |
					 RADEON_ROP3_P |
					 RADEON_GMC_CLR_CMP_CNTL_DIS);

				OUT_RING(dev_priv->front_pitch_offset);
				OUT_RING(clear->clear_color);

				OUT_RING((x << 16) | y);
				OUT_RING((w << 16) | h);
			}

			if (flags & RADEON_BACK) {
				OUT_RING(CP_PACKET3
					 (RADEON_CNTL_PAINT_MULTI, 4));
				OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
					 RADEON_GMC_BRUSH_SOLID_COLOR |
					 (dev_priv->color_fmt << 8) |
					 RADEON_GMC_SRC_DATATYPE_COLOR |
					 RADEON_ROP3_P |
					 RADEON_GMC_CLR_CMP_CNTL_DIS);

				OUT_RING(dev_priv->back_pitch_offset);
				OUT_RING(clear->clear_color);

				OUT_RING((x << 16) | y);
				OUT_RING((w << 16) | h);
			}
		}
	}
	/* no docs available, based on reverse engineering by Stephane Marchesin */
	if ((flags & (RADEON_DEPTH | RADEON_STENCIL))
	    && (flags & RADEON_CLEAR_FASTZ)) {
		int depthpixperline =
		    dev_priv->depth_fmt ==
		    RADEON_DEPTH_FORMAT_16BIT_INT_Z ?
		    (dev_priv->depth_pitch / 2) : (dev_priv->depth_pitch / 4);
		u32 clearmask;

		u32 tempRB3D_DEPTHCLEARVALUE = clear->clear_depth |
		    ((clear->depth_mask & 0xff) << 24);

		/* Make sure we restore the 3D state next time.
		 * we haven't touched any "normal" state - still need this?
		 */
		dev_priv->sarea_priv->ctx_owner = 0;

		if ((dev_priv->flags & CHIP_HAS_HIERZ)
		    && (flags & RADEON_USE_HIERZ)) {
			/* FIXME : reverse engineer that for Rx00 cards */
			/* FIXME : the mask supposedly contains low-res z values. So can't set
			   just to the max (0xff? or actually 0x3fff?), need to take z clear
			   value into account? */
			/* pattern seems to work for r100, though get slight
			   rendering errors with glxgears. If hierz is not enabled for r100,
			   only 4 bits which indicate clear (15,16,31,32, all zero) matter, the
			   other ones are ignored, and the same clear mask can be used. That's
			   very different behaviour than R200 which needs different clear mask
			   and different number of tiles to clear if hierz is enabled or not !?!
			 */
			clearmask = (0xff << 22) | (0xff << 6) | 0x003f003f;
		} else {
			/* clear mask : chooses the clearing pattern.
			   rv250: could be used to clear only parts of macrotiles
			   (but that would get really complicated...)?
			   bit 0 and 1 (either or both of them ?!?!) are used to
			   not clear tile (or maybe one of the bits indicates if the tile is
			   compressed or not), bit 2 and 3 to not clear tile 1,...,.
			   Pattern is as follows:
			   | 0,1 | 4,5 | 8,9 |12,13|16,17|20,21|24,25|28,29|
			   bits -------------------------------------------------
			   | 2,3 | 6,7 |10,11|14,15|18,19|22,23|26,27|30,31|
			   rv100: clearmask covers 2x8 4x1 tiles, but one clear still
			   covers 256 pixels ?!?
			 */
			clearmask = 0x0;
		}
		RADEON_WAIT_UNTIL_2D_IDLE();
		OUT_RING_REG(RADEON_RB3D_DEPTHCLEARVALUE,
			     tempRB3D_DEPTHCLEARVALUE);
		/* what offset is this exactly ? */
		OUT_RING_REG(RADEON_RB3D_ZMASKOFFSET, 0);
		/* need ctlstat, otherwise get some strange black flickering */
		OUT_RING_REG(RADEON_RB3D_ZCACHE_CTLSTAT,
			     RADEON_RB3D_ZC_FLUSH_ALL);
		for (i = 0; i < nbox; i++) {
			int tileoffset, nrtilesx, nrtilesy, j;
			/* it looks like r200 needs rv-style clears, at least if hierz is not enabled? */
			if ((dev_priv->flags & CHIP_HAS_HIERZ)
			    && !(dev_priv->microcode_version == UCODE_R200)) {
				/* FIXME : figure this out for r200 (when hierz is enabled). Or
				   maybe r200 actually doesn't need to put the low-res z value into
				   the tile cache like r100, but just needs to clear the hi-level z-buffer?
				   Works for R100, both with hierz and without.
				   R100 seems to operate on 2x1 8x8 tiles, but...
				   odd: offset/nrtiles need to be 64 pix (4 block) aligned? Potentially
				   problematic with resolutions which are not 64 pix aligned? */
				tileoffset =
				    ((pbox[i].y1 >> 3) * depthpixperline +
				     pbox[i].x1) >> 6;
				nrtilesx =
				    ((pbox[i].x2 & ~63) -
				     (pbox[i].x1 & ~63)) >> 4;
				nrtilesy =
				    (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
				for (j = 0; j <= nrtilesy; j++) {
					BEGIN_RING(4);
					OUT_RING(CP_PACKET3
						 (RADEON_3D_CLEAR_ZMASK, 2));
					/* first tile */
					OUT_RING(tileoffset * 8);
					/* the number of tiles to clear */
					OUT_RING(nrtilesx + 4);
					/* clear mask : chooses the clearing pattern. */
					OUT_RING(clearmask);
					ADVANCE_RING();
					tileoffset += depthpixperline >> 6;
				}
			} else if (dev_priv->microcode_version == UCODE_R200) {
				/* works for rv250. */
				/* find first macro tile (8x2 4x4 z-pixels on rv250) */
				tileoffset =
				    ((pbox[i].y1 >> 3) * depthpixperline +
				     pbox[i].x1) >> 5;
				nrtilesx =
				    (pbox[i].x2 >> 5) - (pbox[i].x1 >> 5);
				nrtilesy =
				    (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
				for (j = 0; j <= nrtilesy; j++) {
					BEGIN_RING(4);
					OUT_RING(CP_PACKET3
						 (RADEON_3D_CLEAR_ZMASK, 2));
					/* first tile */
					/* judging by the first tile offset needed, could possibly
					   directly address/clear 4x4 tiles instead of 8x2 * 4x4
					   macro tiles, though would still need clear mask for
					   right/bottom if truly 4x4 granularity is desired ? */
					OUT_RING(tileoffset * 16);
					/* the number of tiles to clear */
					OUT_RING(nrtilesx + 1);
					/* clear mask : chooses the clearing pattern. */
					OUT_RING(clearmask);
					ADVANCE_RING();
					tileoffset += depthpixperline >> 5;
				}
			} else {	/* rv 100 */
				/* rv100 might not need 64 pix alignment, who knows */
				/* offsets are, hmm, weird */
				tileoffset =
				    ((pbox[i].y1 >> 4) * depthpixperline +
				     pbox[i].x1) >> 6;
				nrtilesx =
				    ((pbox[i].x2 & ~63) -
				     (pbox[i].x1 & ~63)) >> 4;
				nrtilesy =
				    (pbox[i].y2 >> 4) - (pbox[i].y1 >> 4);
				for (j = 0; j <= nrtilesy; j++) {
					BEGIN_RING(4);
					OUT_RING(CP_PACKET3
						 (RADEON_3D_CLEAR_ZMASK, 2));
					OUT_RING(tileoffset * 128);
					/* the number of tiles to clear */
					OUT_RING(nrtilesx + 4);
					/* clear mask : chooses the clearing pattern. */
					OUT_RING(clearmask);
					ADVANCE_RING();
					tileoffset += depthpixperline >> 6;
				}
			}
		}
		/* TODO don't always clear all hi-level z tiles */
		if ((dev_priv->flags & CHIP_HAS_HIERZ)
		    && (dev_priv->microcode_version == UCODE_R200)
		    && (flags & RADEON_USE_HIERZ))
			/* r100 and cards without hierarchical z-buffer have no high-level z-buffer */
			/* FIXME : the mask supposedly contains low-res z values. So can't set
			   just to the max (0xff? or actually 0x3fff?), need to take z clear
			   value into account? */
		{
			OUT_RING(CP_PACKET3(RADEON_3D_CLEAR_HIZ, 2));
			OUT_RING(0x0);	/* First tile */
			OUT_RING((0xff << 22) | (0xff << 6) | 0x003f003f);
		}
	}
	/* We have to clear the depth and/or stencil buffers by
	 * rendering a quad into just those buffers.  Thus, we have to
	 * make sure the 3D engine is configured correctly.
	 */
	else if ((dev_priv->microcode_version == UCODE_R200) &&
		 (flags & (RADEON_DEPTH | RADEON_STENCIL))) {
		int tempPP_CNTL;
		int tempRE_CNTL;
		int tempRB3D_CNTL;
		int tempRB3D_ZSTENCILCNTL;
		int tempRB3D_STENCILREFMASK;
		int tempRB3D_PLANEMASK;
		int tempSE_CNTL;
		int tempSE_VTE_CNTL;
		int tempSE_VTX_FMT_0;
		int tempSE_VTX_FMT_1;
		int tempSE_VAP_CNTL;
		int tempRE_AUX_SCISSOR_CNTL;

		tempPP_CNTL = 0;
		tempRE_CNTL = 0;

		tempRB3D_CNTL = depth_clear->rb3d_cntl;

		tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
		tempRB3D_STENCILREFMASK = 0x0;

		tempSE_CNTL = depth_clear->se_cntl;

		tempSE_VAP_CNTL = (	/* SE_VAP_CNTL__FORCE_W_TO_ONE_MASK | */
					  (0x9 <<
					   SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT));

		tempRB3D_PLANEMASK = 0x0;

		tempRE_AUX_SCISSOR_CNTL = 0x0;

		tempSE_VTE_CNTL =
		    SE_VTE_CNTL__VTX_XY_FMT_MASK | SE_VTE_CNTL__VTX_Z_FMT_MASK;

		/* Vertex format (X, Y, Z, W) */
		tempSE_VTX_FMT_0 =
		    SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK |
		    SE_VTX_FMT_0__VTX_W0_PRESENT_MASK;
		tempSE_VTX_FMT_1 = 0x0;
		/*
		 * Depth buffer specific enables
		 */
		if (flags & RADEON_DEPTH) {
			/* Enable depth buffer */
			tempRB3D_CNTL |= RADEON_Z_ENABLE;
		} else {
			/* Disable depth buffer */
			tempRB3D_CNTL &= ~RADEON_Z_ENABLE;
		}

		/*
		 * Stencil buffer specific enables
		 */
		if (flags & RADEON_STENCIL) {
			tempRB3D_CNTL |= RADEON_STENCIL_ENABLE;
			tempRB3D_STENCILREFMASK = clear->depth_mask;
		} else {
			tempRB3D_CNTL &= ~RADEON_STENCIL_ENABLE;
			tempRB3D_STENCILREFMASK = 0x00000000;
		}

		if (flags & RADEON_USE_COMP_ZBUF) {
			tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
			    RADEON_Z_DECOMPRESSION_ENABLE;
		}
		if (flags & RADEON_USE_HIERZ) {
			tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
		}
		RADEON_WAIT_UNTIL_2D_IDLE();

		OUT_RING_REG(RADEON_PP_CNTL, tempPP_CNTL);
		OUT_RING_REG(R200_RE_CNTL, tempRE_CNTL);
		OUT_RING_REG(RADEON_RB3D_CNTL, tempRB3D_CNTL);
		OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL);
		OUT_RING_REG(RADEON_RB3D_STENCILREFMASK,
			     tempRB3D_STENCILREFMASK);
		OUT_RING_REG(RADEON_RB3D_PLANEMASK, tempRB3D_PLANEMASK);
		OUT_RING_REG(RADEON_SE_CNTL, tempSE_CNTL);
		OUT_RING_REG(R200_SE_VTE_CNTL, tempSE_VTE_CNTL);
		OUT_RING_REG(R200_SE_VTX_FMT_0, tempSE_VTX_FMT_0);
		OUT_RING_REG(R200_SE_VTX_FMT_1, tempSE_VTX_FMT_1);
		OUT_RING_REG(R200_SE_VAP_CNTL, tempSE_VAP_CNTL);
		OUT_RING_REG(R200_RE_AUX_SCISSOR_CNTL, tempRE_AUX_SCISSOR_CNTL);

		/* Make sure we restore the 3D state next time.
		 */
		dev_priv->sarea_priv->ctx_owner = 0;
		for (i = 0; i < nbox; i++) {
			/* Funny that this should be required --
			 */
			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);

			OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 12));
			OUT_RING((RADEON_PRIM_TYPE_RECT_LIST |
				  RADEON_PRIM_WALK_RING |
				  (3 << RADEON_NUM_VERTICES_SHIFT)));
			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
			OUT_RING(depth_boxes[i].ui[CLEAR_Y1]);
			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
			OUT_RING(0x3f800000);
			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
			OUT_RING(0x3f800000);
			OUT_RING(depth_boxes[i].ui[CLEAR_X2]);
			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
			OUT_RING(0x3f800000);
		}
	} else if ((flags & (RADEON_DEPTH | RADEON_STENCIL))) {

		int tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;

		rb3d_cntl = depth_clear->rb3d_cntl;

		if (flags & RADEON_DEPTH) {
			rb3d_cntl |= RADEON_Z_ENABLE;
		} else {
			rb3d_cntl &= ~RADEON_Z_ENABLE;
		}

		if (flags & RADEON_STENCIL) {
			rb3d_cntl |= RADEON_STENCIL_ENABLE;
			rb3d_stencilrefmask = clear->depth_mask;	/* misnamed field */
		} else {
			rb3d_cntl &= ~RADEON_STENCIL_ENABLE;
			rb3d_stencilrefmask = 0x00000000;
		}

		if (flags & RADEON_USE_COMP_ZBUF) {
			tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
			    RADEON_Z_DECOMPRESSION_ENABLE;
		}
		if (flags & RADEON_USE_HIERZ) {
			tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
		}
		RADEON_WAIT_UNTIL_2D_IDLE();

		OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 1));
		OUT_RING(0x00000000);
		OUT_RING(rb3d_cntl);

		OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL);
		OUT_RING_REG(RADEON_RB3D_STENCILREFMASK, rb3d_stencilrefmask);
		OUT_RING_REG(RADEON_RB3D_PLANEMASK, 0x00000000);
		OUT_RING_REG(RADEON_SE_CNTL, depth_clear->se_cntl);

		/* Make sure we restore the 3D state next time.
		 */
		dev_priv->sarea_priv->ctx_owner = 0;
		for (i = 0; i < nbox; i++) {
			/* Funny that this should be required --
			 */
			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);

			OUT_RING(CP_PACKET3(RADEON_3D_DRAW_IMMD, 13));
			OUT_RING(RADEON_VTX_Z_PRESENT |
				 RADEON_VTX_PKCOLOR_PRESENT);
			OUT_RING((RADEON_PRIM_TYPE_RECT_LIST |
				  RADEON_PRIM_WALK_RING |
				  RADEON_MAOS_ENABLE |
				  RADEON_VTX_FMT_RADEON_MODE |
				  (3 << RADEON_NUM_VERTICES_SHIFT)));

			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
			OUT_RING(depth_boxes[i].ui[CLEAR_Y1]);
			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
			OUT_RING(0x0);

			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
			OUT_RING(0x0);

			OUT_RING(depth_boxes[i].ui[CLEAR_X2]);
			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
			OUT_RING(0x0);
		}
	}
	/* Increment the clear counter.  The client-side 3D driver must
	 * wait on this value before performing the clear ioctl.  We
	 * need this because the card's so damned fast...
	 */
	dev_priv->sarea_priv->last_clear++;

	RADEON_CLEAR_AGE(dev_priv->sarea_priv->last_clear);
	RADEON_WAIT_UNTIL_IDLE();
}
static void radeon_cp_dispatch_swap(drm_device_t * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	drm_clip_rect_t *pbox = sarea_priv->boxes;
	int i;
	RING_LOCALS;

	/* Do some trivial performance monitoring...
	 */
	if (dev_priv->do_boxes)
		radeon_cp_performance_boxes(dev_priv);

	/* Wait for the 3D stream to idle before dispatching the bitblt.
	 * This will prevent data corruption between the two streams.
	 */
	RADEON_WAIT_UNTIL_3D_IDLE();
	for (i = 0; i < nbox; i++) {
		int x = pbox[i].x1;
		int y = pbox[i].y1;
		int w = pbox[i].x2 - x;
		int h = pbox[i].y2 - y;

		DRM_DEBUG("dispatch swap %d,%d-%d,%d\n", x, y, w, h);

		OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
		OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
			 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
			 RADEON_GMC_BRUSH_NONE |
			 (dev_priv->color_fmt << 8) |
			 RADEON_GMC_SRC_DATATYPE_COLOR |
			 RADEON_ROP3_S |
			 RADEON_DP_SRC_SOURCE_MEMORY |
			 RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);

		/* Make this work even if front & back are flipped:
		 */
		if (dev_priv->current_page == 0) {
			OUT_RING(dev_priv->back_pitch_offset);
			OUT_RING(dev_priv->front_pitch_offset);
		} else {
			OUT_RING(dev_priv->front_pitch_offset);
			OUT_RING(dev_priv->back_pitch_offset);
		}

		OUT_RING((x << 16) | y);
		OUT_RING((x << 16) | y);
		OUT_RING((w << 16) | h);
	}
	/* Increment the frame counter.  The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->last_frame++;

	RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame);
	RADEON_WAIT_UNTIL_2D_IDLE();
}
static void radeon_cp_dispatch_flip(drm_device_t * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_sarea_t *sarea = (drm_sarea_t *) dev_priv->sarea->handle;
	int offset = (dev_priv->current_page == 1)
	    ? dev_priv->front_offset : dev_priv->back_offset;
	RING_LOCALS;

	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __FUNCTION__,
		  dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);

	/* Do some trivial performance monitoring...
	 */
	if (dev_priv->do_boxes) {
		dev_priv->stats.boxes |= RADEON_BOX_FLIP;
		radeon_cp_performance_boxes(dev_priv);
	}

	/* Update the frame offsets for both CRTCs
	 */
	RADEON_WAIT_UNTIL_3D_IDLE();
	OUT_RING_REG(RADEON_CRTC_OFFSET,
		     ((sarea->frame.y * dev_priv->front_pitch +
		       sarea->frame.x * (dev_priv->color_fmt - 2)) & ~7)
		     + offset);
	OUT_RING_REG(RADEON_CRTC2_OFFSET, dev_priv->sarea_priv->crtc2_base
		     + offset);
	/* Increment the frame counter.  The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->last_frame++;
	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
	    1 - dev_priv->current_page;

	RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame);
}
static int bad_prim_vertex_nr(int primitive, int nr)
{
	switch (primitive & RADEON_PRIM_TYPE_MASK) {
	case RADEON_PRIM_TYPE_NONE:
	case RADEON_PRIM_TYPE_POINT:
		return nr < 1;
	case RADEON_PRIM_TYPE_LINE:
		return (nr & 1) || nr == 0;
	case RADEON_PRIM_TYPE_LINE_STRIP:
		return nr < 2;
	case RADEON_PRIM_TYPE_TRI_LIST:
	case RADEON_PRIM_TYPE_3VRT_POINT_LIST:
	case RADEON_PRIM_TYPE_3VRT_LINE_LIST:
	case RADEON_PRIM_TYPE_RECT_LIST:
		return nr % 3 || nr == 0;
	case RADEON_PRIM_TYPE_TRI_FAN:
	case RADEON_PRIM_TYPE_TRI_STRIP:
		return nr < 3;
	default:
		return 1;
	}
}
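/*
 * E.g. a TRI_LIST with nr = 5 is rejected (not a multiple of 3), while
 * a TRI_STRIP only needs nr >= 3.  Rejecting malformed counts here keeps
 * the CP from being handed a truncated primitive.
 */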
typedef struct {
	unsigned int start;
	unsigned int finish;
	unsigned int prim;
	unsigned int numverts;
	unsigned int offset;
	unsigned int vc_format;
} drm_radeon_tcl_prim_t;
static void radeon_cp_dispatch_vertex(drm_device_t * dev,
				      drm_buf_t * buf,
				      drm_radeon_tcl_prim_t * prim)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start;
	int numverts = (int)prim->numverts;
	int nbox = sarea_priv->nbox;
	int i = 0;
	RING_LOCALS;

	DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n",
		  prim->prim,
		  prim->vc_format, prim->start, prim->finish, prim->numverts);

	if (bad_prim_vertex_nr(prim->prim, prim->numverts)) {
		DRM_ERROR("bad prim %x numverts %d\n",
			  prim->prim, prim->numverts);
		return;
	}

	do {
		/* Emit the next cliprect */
		if (i < nbox) {
			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
		}

		/* Emit the vertex buffer rendering commands */
		OUT_RING(CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, 3));
		OUT_RING(offset);
		OUT_RING(numverts);
		OUT_RING(prim->vc_format);
		OUT_RING(prim->prim | RADEON_PRIM_WALK_LIST |
			 RADEON_COLOR_ORDER_RGBA |
			 RADEON_VTX_FMT_RADEON_MODE |
			 (numverts << RADEON_NUM_VERTICES_SHIFT));

		i++;
	} while (i < nbox);
}
static void radeon_cp_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
	RING_LOCALS;

	buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;

	/* Emit the vertex buffer age */
	BEGIN_RING(2);
	RADEON_DISPATCH_AGE(buf_priv->age);
	ADVANCE_RING();

	buf->pending = 1;
	buf->used = 0;
}
static void radeon_cp_dispatch_indirect(drm_device_t * dev,
					drm_buf_t * buf, int start, int end)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);

	if (start != end) {
		int offset = (dev_priv->gart_buffers_offset
			      + buf->offset + start);
		int dwords = (end - start + 3) / sizeof(u32);

		/* Indirect buffer data must be an even number of
		 * dwords, so if we've been given an odd number we must
		 * pad the data with a Type-2 CP packet.
		 */
		if (dwords & 1) {
			u32 *data = (u32 *)
			    ((char *)dev->agp_buffer_map->handle
			     + buf->offset + start);
			data[dwords++] = RADEON_CP_PACKET2;
		}
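		/*
		 * (RADEON_CP_PACKET2 above is a type-2 no-op dword; the pad
		 * rounds an odd-length buffer up to the even dword count the
		 * CP fetch requires.)
		 */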
		/* Fire off the indirect buffer */
		BEGIN_RING(3);
		OUT_RING(CP_PACKET0(RADEON_CP_IB_BASE, 1));
		OUT_RING(offset);
		OUT_RING(dwords);
		ADVANCE_RING();
	}
}
static void radeon_cp_dispatch_indices(drm_device_t * dev,
				       drm_buf_t * elt_buf,
				       drm_radeon_tcl_prim_t * prim)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int offset = dev_priv->gart_buffers_offset + prim->offset;
	u32 *data;
	int dwords;
	int i = 0;
	int start = prim->start + RADEON_INDEX_PRIM_OFFSET;
	int count = (prim->finish - start) / sizeof(u16);
	int nbox = sarea_priv->nbox;

	DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n",
		  prim->prim, prim->vc_format,
		  prim->start, prim->finish, prim->offset, prim->numverts);

	if (bad_prim_vertex_nr(prim->prim, count)) {
		DRM_ERROR("bad prim %x count %d\n", prim->prim, count);
		return;
	}

	if (start >= prim->finish || (prim->start & 0x7)) {
		DRM_ERROR("buffer prim %d\n", prim->prim);
		return;
	}

	dwords = (prim->finish - prim->start + 3) / sizeof(u32);

	data = (u32 *) ((char *)dev->agp_buffer_map->handle +
			elt_buf->offset + prim->start);

	data[0] = CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, dwords - 2);
	data[1] = offset;
	data[2] = prim->numverts;
	data[3] = prim->vc_format;
	data[4] = (prim->prim |
		   RADEON_PRIM_WALK_IND |
		   RADEON_COLOR_ORDER_RGBA |
		   RADEON_VTX_FMT_RADEON_MODE |
		   (count << RADEON_NUM_VERTICES_SHIFT));
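	/*
	 * The remaining dwords of this packet are the packed 16-bit vertex
	 * indices themselves, already written by the client; only the
	 * header dwords are (re)built here before the buffer is fired off.
	 */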
	do {
		if (i < nbox)
			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);

		radeon_cp_dispatch_indirect(dev, elt_buf,
					    prim->start, prim->finish);

		i++;
	} while (i < nbox);
}
#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE

static int radeon_cp_dispatch_texture(DRMFILE filp,
				      drm_device_t * dev,
				      drm_radeon_texture_t * tex,
				      drm_radeon_tex_image_t * image)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_file_t *filp_priv;
	drm_buf_t *buf;
	u32 format;
	u32 *buffer;
	const u8 __user *data;
	int size, dwords, tex_width, blit_width, spitch;
	u32 height;
	int i;
	u32 texpitch, microtile;
	u32 offset;
	RING_LOCALS;

	DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);

	if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &tex->offset)) {
		DRM_ERROR("Invalid destination offset\n");
		return DRM_ERR(EINVAL);
	}
	dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;

	/* Flush the pixel cache.  This ensures no pixel data gets mixed
	 * up with the texture data from the host data blit, otherwise
	 * part of the texture image may be corrupted.
	 */
	RADEON_FLUSH_CACHE();
	RADEON_WAIT_UNTIL_IDLE();
	/* The compiler won't optimize away a division by a variable,
	 * even if the only legal values are powers of two.  Thus, we'll
	 * use a shift instead.
	 */
	switch (tex->format) {
	case RADEON_TXFORMAT_ARGB8888:
	case RADEON_TXFORMAT_RGBA8888:
		format = RADEON_COLOR_FORMAT_ARGB8888;
		tex_width = tex->width * 4;
		blit_width = image->width * 4;
		break;
	case RADEON_TXFORMAT_AI88:
	case RADEON_TXFORMAT_ARGB1555:
	case RADEON_TXFORMAT_RGB565:
	case RADEON_TXFORMAT_ARGB4444:
	case RADEON_TXFORMAT_VYUY422:
	case RADEON_TXFORMAT_YVYU422:
		format = RADEON_COLOR_FORMAT_RGB565;
		tex_width = tex->width * 2;
		blit_width = image->width * 2;
		break;
	case RADEON_TXFORMAT_I8:
	case RADEON_TXFORMAT_RGB332:
		format = RADEON_COLOR_FORMAT_CI8;
		tex_width = tex->width * 1;
		blit_width = image->width * 1;
		break;
	default:
		DRM_ERROR("invalid texture format %d\n", tex->format);
		return DRM_ERR(EINVAL);
	}
	spitch = blit_width >> 6;
	if (spitch == 0 && image->height > 1)
		return DRM_ERR(EINVAL);
	texpitch = tex->pitch;
	if ((texpitch << 22) & RADEON_DST_TILE_MICRO) {
		microtile = 1;
		if (tex_width < 64) {
			texpitch &= ~(RADEON_DST_TILE_MICRO >> 22);
			/* we got tiled coordinates, untile them */
			image->x *= 2;
		}
	} else
		microtile = 0;

	DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width);

	do {
		DRM_DEBUG("tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
			  tex->offset >> 10, tex->pitch, tex->format,
			  image->x, image->y, image->width, image->height);
		/* Make a copy of some parameters in case we have to
		 * update them for a multi-pass texture blit.
		 */
		height = image->height;
		data = (const u8 __user *)image->data;

		size = height * blit_width;

		if (size > RADEON_MAX_TEXTURE_SIZE) {
			height = RADEON_MAX_TEXTURE_SIZE / blit_width;
			size = height * blit_width;
		} else if (size < 4 && size > 0) {
			size = 4;
		} else if (size == 0) {
			return 0;
		}

		buf = radeon_freelist_get(dev);
		if (!buf) {
			radeon_do_cp_idle(dev_priv);
			buf = radeon_freelist_get(dev);
		}
		if (!buf) {
			DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n");
			if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
				return DRM_ERR(EFAULT);
			return DRM_ERR(EAGAIN);
		}
		/* Dispatch the indirect buffer.
		 */
		buffer =
		    (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
		dwords = size / 4;

#define RADEON_COPY_MT(_buf, _data, _width) \
	do { \
		if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
			DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
			return DRM_ERR(EFAULT); \
		} \
	} while (0)
		if (microtile) {
			/* texture micro tiling in use, minimum texture width is thus 16 bytes.
			   however, we cannot use blitter directly for texture width < 64 bytes,
			   since minimum tex pitch is 64 bytes and we need this to match
			   the texture width, otherwise the blitter will tile it wrong.
			   Thus, tiling manually in this case. Additionally, need to special
			   case tex height = 1, since our actual image will have height 2
			   and we need to ensure we don't read beyond the texture size
			   from user space. */
			if (tex->height == 1) {
				if (tex_width >= 64 || tex_width <= 16) {
					RADEON_COPY_MT(buffer, data,
						       (int)(tex_width * sizeof(u32)));
				} else if (tex_width == 32) {
					RADEON_COPY_MT(buffer, data, 16);
					RADEON_COPY_MT(buffer + 8,
						       data + 16, 16);
				}
			} else if (tex_width >= 64 || tex_width == 16) {
				RADEON_COPY_MT(buffer, data,
					       (int)(dwords * sizeof(u32)));
			} else if (tex_width < 16) {
				for (i = 0; i < tex->height; i++) {
					RADEON_COPY_MT(buffer, data, tex_width);
					buffer += 4;
					data += tex_width;
				}
			} else if (tex_width == 32) {
				/* TODO: make sure this works when not fitting in one buffer
				   (i.e. 32bytes x 2048...) */
				for (i = 0; i < tex->height; i += 2) {
					RADEON_COPY_MT(buffer, data, 16);
					data += 16;
					RADEON_COPY_MT(buffer + 8, data, 16);
					data += 16;
					RADEON_COPY_MT(buffer + 4, data, 16);
					data += 16;
					RADEON_COPY_MT(buffer + 12, data, 16);
					data += 16;
					buffer += 16;
				}
			}
		} else {
			if (tex_width >= 32) {
				/* Texture image width is larger than the minimum, so we
				 * can upload it directly.
				 */
				RADEON_COPY_MT(buffer, data,
					       (int)(dwords * sizeof(u32)));
			} else {
				/* Texture image width is less than the minimum, so we
				 * need to pad out each image scanline to the minimum
				 * width.
				 */
				for (i = 0; i < tex->height; i++) {
					RADEON_COPY_MT(buffer, data, tex_width);
					buffer += 8;
					data += tex_width;
				}
			}
		}

#undef RADEON_COPY_MT
		buf->filp = filp;
		buf->used = size;
		offset = dev_priv->gart_buffers_offset + buf->offset;

		OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
		OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
			 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
			 RADEON_GMC_BRUSH_NONE |
			 (format << 8) |
			 RADEON_GMC_SRC_DATATYPE_COLOR |
			 RADEON_ROP3_S |
			 RADEON_DP_SRC_SOURCE_MEMORY |
			 RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
		OUT_RING((spitch << 22) | (offset >> 10));
		OUT_RING((texpitch << 22) | (tex->offset >> 10));
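		/*
		 * Both pitch/offset dwords above use the usual 2D-engine
		 * packing: pitch in 64-byte units in bits 31:22, offset in
		 * 1KB units in the low bits.
		 */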
		OUT_RING(0);
		OUT_RING((image->x << 16) | image->y);
		OUT_RING((image->width << 16) | height);
		RADEON_WAIT_UNTIL_2D_IDLE();
		radeon_cp_discard_buffer(dev, buf);

		/* Update the input parameters for next time */
		image->y += height;
		image->height -= height;
		image->data = (const u8 __user *)image->data + size;
	} while (image->height > 0);

	/* Flush the pixel cache after the blit completes.  This ensures
	 * the texture data is written out to memory before rendering
	 * continues.
	 */
	RADEON_FLUSH_CACHE();
	RADEON_WAIT_UNTIL_2D_IDLE();

	return 0;
}
static void radeon_cp_dispatch_stipple(drm_device_t * dev, u32 * stipple)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	BEGIN_RING(35);

	OUT_RING(CP_PACKET0(RADEON_RE_STIPPLE_ADDR, 0));
	OUT_RING(0x00000000);

	OUT_RING(CP_PACKET0_TABLE(RADEON_RE_STIPPLE_DATA, 31));
	for (i = 0; i < 32; i++) {
		OUT_RING(stipple[i]);
	}

	ADVANCE_RING();
}
static void radeon_apply_surface_regs(int surf_index,
				      drm_radeon_private_t *dev_priv)
{
	if (!dev_priv->mmio)
		return;

	radeon_do_cp_idle(dev_priv);

	RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * surf_index,
		     dev_priv->surfaces[surf_index].flags);
	RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * surf_index,
		     dev_priv->surfaces[surf_index].lower);
	RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * surf_index,
		     dev_priv->surfaces[surf_index].upper);
}
/* Allocates a virtual surface
 * doesn't always allocate a real surface, will stretch an existing
 * surface when possible.
 *
 * Note that refcount can be at most 2, since during a free refcount=3
 * might mean we have to allocate a new surface which might not always
 * be available.
 * For example : we allocate three contiguous surfaces ABC. If B is
 * freed, we suddenly need two surfaces to store A and C, which might
 * not always be available.
 */
static int alloc_surface(drm_radeon_surface_alloc_t *new,
			 drm_radeon_private_t *dev_priv, DRMFILE filp)
{
	struct radeon_virt_surface *s;
	int i;
	int virt_surface_index;
	uint32_t new_upper, new_lower;

	new_lower = new->address;
	new_upper = new_lower + new->size - 1;

	/* sanity check */
	if ((new_lower >= new_upper) || (new->flags == 0) || (new->size == 0) ||
	    ((new_upper & RADEON_SURF_ADDRESS_FIXED_MASK) !=
	     RADEON_SURF_ADDRESS_FIXED_MASK)
	    || ((new_lower & RADEON_SURF_ADDRESS_FIXED_MASK) != 0))
		return -1;
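	/*
	 * i.e. a surface must be aligned to the hardware's fixed surface
	 * granularity: the low bits of the start address must be clear and
	 * the low bits of the inclusive end address must all be set, per
	 * RADEON_SURF_ADDRESS_FIXED_MASK.
	 */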
	/* make sure there is no overlap with existing surfaces */
	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
		if ((dev_priv->surfaces[i].refcount != 0) &&
		    (((new_lower >= dev_priv->surfaces[i].lower) &&
		      (new_lower < dev_priv->surfaces[i].upper)) ||
		     ((new_lower < dev_priv->surfaces[i].lower) &&
		      (new_upper > dev_priv->surfaces[i].lower)))) {
			return -1;
		}
	}

	/* find a virtual surface */
	for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++)
		if (dev_priv->virt_surfaces[i].filp == 0)
			break;
	if (i == 2 * RADEON_MAX_SURFACES) {
		return -1;
	}
	virt_surface_index = i;
	/* try to reuse an existing surface */
	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
		/* extend before */
		if ((dev_priv->surfaces[i].refcount == 1) &&
		    (new->flags == dev_priv->surfaces[i].flags) &&
		    (new_upper + 1 == dev_priv->surfaces[i].lower)) {
			s = &(dev_priv->virt_surfaces[virt_surface_index]);
			s->surface_index = i;
			s->lower = new_lower;
			s->upper = new_upper;
			s->flags = new->flags;
			s->filp = filp;
			dev_priv->surfaces[i].refcount++;
			dev_priv->surfaces[i].lower = s->lower;
			radeon_apply_surface_regs(s->surface_index, dev_priv);
			return virt_surface_index;
		}

		/* extend after */
		if ((dev_priv->surfaces[i].refcount == 1) &&
		    (new->flags == dev_priv->surfaces[i].flags) &&
		    (new_lower == dev_priv->surfaces[i].upper + 1)) {
			s = &(dev_priv->virt_surfaces[virt_surface_index]);
			s->surface_index = i;
			s->lower = new_lower;
			s->upper = new_upper;
			s->flags = new->flags;
			s->filp = filp;
			dev_priv->surfaces[i].refcount++;
			dev_priv->surfaces[i].upper = s->upper;
			radeon_apply_surface_regs(s->surface_index, dev_priv);
			return virt_surface_index;
		}
	}
	/* okay, we need a new one */
	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
		if (dev_priv->surfaces[i].refcount == 0) {
			s = &(dev_priv->virt_surfaces[virt_surface_index]);
			s->surface_index = i;
			s->lower = new_lower;
			s->upper = new_upper;
			s->flags = new->flags;
			s->filp = filp;
			dev_priv->surfaces[i].refcount = 1;
			dev_priv->surfaces[i].lower = s->lower;
			dev_priv->surfaces[i].upper = s->upper;
			dev_priv->surfaces[i].flags = s->flags;
			radeon_apply_surface_regs(s->surface_index, dev_priv);
			return virt_surface_index;
		}
	}

	/* we didn't find anything */
	return -1;
}
static int free_surface(DRMFILE filp, drm_radeon_private_t * dev_priv,
			int lower)
{
	struct radeon_virt_surface *s;
	int i;

	/* find the virtual surface */
	for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
		s = &(dev_priv->virt_surfaces[i]);
		if (s->filp) {
			if ((lower == s->lower) && (filp == s->filp)) {
				if (dev_priv->surfaces[s->surface_index].
				    lower == s->lower)
					dev_priv->surfaces[s->surface_index].
					    lower = s->upper;

				if (dev_priv->surfaces[s->surface_index].
				    upper == s->upper)
					dev_priv->surfaces[s->surface_index].
					    upper = s->lower;

				dev_priv->surfaces[s->surface_index].refcount--;
				if (dev_priv->surfaces[s->surface_index].
				    refcount == 0)
					dev_priv->surfaces[s->surface_index].
					    flags = 0;
				s->filp = NULL;
				radeon_apply_surface_regs(s->surface_index,
							  dev_priv);
				return 0;
			}
		}
	}

	return 1;
}
static void radeon_surfaces_release(DRMFILE filp,
				    drm_radeon_private_t * dev_priv)
{
	int i;

	for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
		if (dev_priv->virt_surfaces[i].filp == filp)
			free_surface(filp, dev_priv,
				     dev_priv->virt_surfaces[i].lower);
	}
}
/* ================================================================
 * IOCTL functions
 */

static int radeon_surface_alloc(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_surface_alloc_t alloc;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(alloc,
				 (drm_radeon_surface_alloc_t __user *) data,
				 sizeof(alloc));

	if (alloc_surface(&alloc, dev_priv, filp) == -1)
		return DRM_ERR(EINVAL);
	else
		return 0;
}
static int radeon_surface_free(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_surface_free_t memfree;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(memfree,
				 (drm_radeon_surface_free_t __user *) data,
				 sizeof(memfree));

	if (free_surface(filp, dev_priv, memfree.address))
		return DRM_ERR(EINVAL);
	else
		return 0;
}
static int radeon_cp_clear(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_radeon_clear_t clear;
	drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(clear, (drm_radeon_clear_t __user *) data,
				 sizeof(clear));

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;

	if (DRM_COPY_FROM_USER(&depth_boxes, clear.depth_boxes,
			       sarea_priv->nbox * sizeof(depth_boxes[0])))
		return DRM_ERR(EFAULT);

	radeon_cp_dispatch_clear(dev, &clear, depth_boxes);

	COMMIT_RING();
	return 0;
}
/* Not sure why this isn't set all the time:
 */
static int radeon_do_init_pageflip(drm_device_t * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	BEGIN_RING(6);
	RADEON_WAIT_UNTIL_3D_IDLE();
	OUT_RING(CP_PACKET0(RADEON_CRTC_OFFSET_CNTL, 0));
	OUT_RING(RADEON_READ(RADEON_CRTC_OFFSET_CNTL) |
		 RADEON_CRTC_OFFSET_FLIP_CNTL);
	OUT_RING(CP_PACKET0(RADEON_CRTC2_OFFSET_CNTL, 0));
	OUT_RING(RADEON_READ(RADEON_CRTC2_OFFSET_CNTL) |
		 RADEON_CRTC_OFFSET_FLIP_CNTL);
	ADVANCE_RING();

	dev_priv->page_flipping = 1;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;

	return 0;
}
/* Called whenever a client dies, from drm_release.
 * NOTE: Lock isn't necessarily held when this is called!
 */
static int radeon_do_cleanup_pageflip(drm_device_t * dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	if (dev_priv->current_page != 0)
		radeon_cp_dispatch_flip(dev);

	dev_priv->page_flipping = 0;
	return 0;
}
/* Swapping and flipping are different operations, need different ioctls.
 * They can & should be intermixed to support multiple 3d windows.
 */
static int radeon_cp_flip(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;

	LOCK_TEST_WITH_RETURN(dev, filp);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	if (!dev_priv->page_flipping)
		radeon_do_init_pageflip(dev);

	radeon_cp_dispatch_flip(dev);

	COMMIT_RING();
	return 0;
}
static int radeon_cp_swap(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;

	LOCK_TEST_WITH_RETURN(dev, filp);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;

	radeon_cp_dispatch_swap(dev);
	dev_priv->sarea_priv->ctx_owner = 0;

	COMMIT_RING();
	return 0;
}
static int radeon_cp_vertex(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_file_t *filp_priv;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_radeon_vertex_t vertex;
	drm_radeon_tcl_prim_t prim;

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);

	DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex_t __user *) data,
				 sizeof(vertex));

	DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
		  DRM_CURRENTPID, vertex.idx, vertex.count, vertex.discard);

	if (vertex.idx < 0 || vertex.idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  vertex.idx, dma->buf_count - 1);
		return DRM_ERR(EINVAL);
	}
	if (vertex.prim < 0 || vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
		DRM_ERROR("buffer prim %d\n", vertex.prim);
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[vertex.idx];

	if (buf->filp != filp) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->filp);
		return DRM_ERR(EINVAL);
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", vertex.idx);
		return DRM_ERR(EINVAL);
	}
2141 /* Build up a prim_t record:
2144 buf->used = vertex.count; /* not used? */
2146 if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
2147 if (radeon_emit_state(dev_priv, filp_priv,
2148 &sarea_priv->context_state,
2149 sarea_priv->tex_state,
2150 sarea_priv->dirty)) {
2151 DRM_ERROR("radeon_emit_state failed\n");
2152 return DRM_ERR(EINVAL);
2155 sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
2156 RADEON_UPLOAD_TEX1IMAGES |
2157 RADEON_UPLOAD_TEX2IMAGES |
2158 RADEON_REQUIRE_QUIESCENCE);
2162 prim.finish = vertex.count; /* unused */
2163 prim.prim = vertex.prim;
2164 prim.numverts = vertex.count;
2165 prim.vc_format = dev_priv->sarea_priv->vc_format;
2167 radeon_cp_dispatch_vertex(dev, buf, &prim);
2170 if (vertex.discard) {
2171 radeon_cp_discard_buffer(dev, buf);
static int radeon_cp_indices(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_file_t *filp_priv;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_radeon_indices_t elts;
	drm_radeon_tcl_prim_t prim;
	int count;

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);

	DRM_COPY_FROM_USER_IOCTL(elts, (drm_radeon_indices_t __user *) data,
				 sizeof(elts));

	DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n",
		  DRM_CURRENTPID, elts.idx, elts.start, elts.end, elts.discard);

	if (elts.idx < 0 || elts.idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  elts.idx, dma->buf_count - 1);
		return DRM_ERR(EINVAL);
	}
	if (elts.prim < 0 || elts.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
		DRM_ERROR("buffer prim %d\n", elts.prim);
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[elts.idx];

	if (buf->filp != filp) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->filp);
		return DRM_ERR(EINVAL);
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", elts.idx);
		return DRM_ERR(EINVAL);
	}

	count = (elts.end - elts.start) / sizeof(u16);
	elts.start -= RADEON_INDEX_PRIM_OFFSET;

	if (elts.start & 0x7) {
		DRM_ERROR("misaligned buffer 0x%x\n", elts.start);
		return DRM_ERR(EINVAL);
	}
	if (elts.start < buf->used) {
		DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used);
		return DRM_ERR(EINVAL);
	}

	buf->used = elts.end;

	if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
		if (radeon_emit_state(dev_priv, filp_priv,
				      &sarea_priv->context_state,
				      sarea_priv->tex_state,
				      sarea_priv->dirty)) {
			DRM_ERROR("radeon_emit_state failed\n");
			return DRM_ERR(EINVAL);
		}

		sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
				       RADEON_UPLOAD_TEX1IMAGES |
				       RADEON_UPLOAD_TEX2IMAGES |
				       RADEON_REQUIRE_QUIESCENCE);
	}

	/* Build up a prim_t record:
	 */
	prim.start = elts.start;
	prim.finish = elts.end;
	prim.prim = elts.prim;
	prim.offset = 0;	/* offset from start of dma buffers */
	prim.numverts = RADEON_MAX_VB_VERTS;	/* duh */
	prim.vc_format = dev_priv->sarea_priv->vc_format;

	radeon_cp_dispatch_indices(dev, buf, &prim);
	if (elts.discard) {
		radeon_cp_discard_buffer(dev, buf);
	}

	COMMIT_RING();
	return 0;
}
static int radeon_cp_texture(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_texture_t tex;
	drm_radeon_tex_image_t image;
	int ret;

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(tex, (drm_radeon_texture_t __user *) data,
				 sizeof(tex));

	if (tex.image == NULL) {
		DRM_ERROR("null texture image!\n");
		return DRM_ERR(EINVAL);
	}

	if (DRM_COPY_FROM_USER(&image,
			       (drm_radeon_tex_image_t __user *) tex.image,
			       sizeof(image)))
		return DRM_ERR(EFAULT);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	ret = radeon_cp_dispatch_texture(filp, dev, &tex, &image);

	COMMIT_RING();
	return ret;
}
static int radeon_cp_stipple(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_stipple_t stipple;
	u32 mask[32];

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(stipple, (drm_radeon_stipple_t __user *) data,
				 sizeof(stipple));

	if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32)))
		return DRM_ERR(EFAULT);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	radeon_cp_dispatch_stipple(dev, mask);

	COMMIT_RING();
	return 0;
}
static int radeon_cp_indirect(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_radeon_indirect_t indirect;
	RING_LOCALS;

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(indirect,
				 (drm_radeon_indirect_t __user *) data,
				 sizeof(indirect));

	DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n",
		  indirect.idx, indirect.start, indirect.end, indirect.discard);

	if (indirect.idx < 0 || indirect.idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  indirect.idx, dma->buf_count - 1);
		return DRM_ERR(EINVAL);
	}

	buf = dma->buflist[indirect.idx];

	if (buf->filp != filp) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->filp);
		return DRM_ERR(EINVAL);
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", indirect.idx);
		return DRM_ERR(EINVAL);
	}

	if (indirect.start < buf->used) {
		DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
			  indirect.start, buf->used);
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf->used = indirect.end;

	/* Wait for the 3D stream to idle before the indirect buffer
	 * containing 2D acceleration commands is processed.
	 */
	BEGIN_RING(2);

	RADEON_WAIT_UNTIL_3D_IDLE();

	ADVANCE_RING();

	/* Dispatch the indirect buffer full of commands from the
	 * X server.  This is insecure and is thus only available to
	 * privileged clients.
	 */
	radeon_cp_dispatch_indirect(dev, buf, indirect.start, indirect.end);
	if (indirect.discard) {
		radeon_cp_discard_buffer(dev, buf);
	}

	COMMIT_RING();
	return 0;
}
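
/* Illustrative sketch (not part of the original driver): the privileged
 * client referred to above fills a DMA buffer with raw CP packets and
 * fires it through DRM_RADEON_INDIRECT.  The helper name and the `used`
 * byte count are assumptions for this example; the struct fields come
 * from radeon_drm.h.
 */
#if 0
#include <xf86drm.h>

static int fire_indirect(int fd, int idx, int used)
{
	drm_radeon_indirect_t ind;

	ind.idx = idx;		/* DMA buffer previously handed to us */
	ind.start = 0;		/* byte range of valid commands */
	ind.end = used;
	ind.discard = 1;	/* let the kernel reclaim the buffer */

	return drmCommandWriteRead(fd, DRM_RADEON_INDIRECT,
				   &ind, sizeof(ind));
}
#endif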
static int radeon_cp_vertex2(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_file_t *filp_priv;
	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_radeon_vertex2_t vertex;
	int i;
	unsigned char laststate;

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);

	DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex2_t __user *) data,
				 sizeof(vertex));

	DRM_DEBUG("pid=%d index=%d discard=%d\n",
		  DRM_CURRENTPID, vertex.idx, vertex.discard);

	if (vertex.idx < 0 || vertex.idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  vertex.idx, dma->buf_count - 1);
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[vertex.idx];

	if (buf->filp != filp) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->filp);
		return DRM_ERR(EINVAL);
	}

	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", vertex.idx);
		return DRM_ERR(EINVAL);
	}

	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
		return DRM_ERR(EINVAL);

	for (laststate = 0xff, i = 0; i < vertex.nr_prims; i++) {
		drm_radeon_prim_t prim;
		drm_radeon_tcl_prim_t tclprim;

		if (DRM_COPY_FROM_USER(&prim, &vertex.prim[i], sizeof(prim)))
			return DRM_ERR(EFAULT);

		if (prim.stateidx != laststate) {
			drm_radeon_state_t state;

			if (DRM_COPY_FROM_USER(&state,
					       &vertex.state[prim.stateidx],
					       sizeof(state)))
				return DRM_ERR(EFAULT);

			if (radeon_emit_state2(dev_priv, filp_priv, &state)) {
				DRM_ERROR("radeon_emit_state2 failed\n");
				return DRM_ERR(EINVAL);
			}

			laststate = prim.stateidx;
		}

		tclprim.start = prim.start;
		tclprim.finish = prim.finish;
		tclprim.prim = prim.prim;
		tclprim.vc_format = prim.vc_format;

		if (prim.prim & RADEON_PRIM_WALK_IND) {
			tclprim.offset = prim.numverts * 64;
			tclprim.numverts = RADEON_MAX_VB_VERTS;	/* duh */

			radeon_cp_dispatch_indices(dev, buf, &tclprim);
		} else {
			tclprim.numverts = prim.numverts;
			tclprim.offset = 0;	/* not used */

			radeon_cp_dispatch_vertex(dev, buf, &tclprim);
		}

		if (sarea_priv->nbox == 1)
			sarea_priv->nbox = 0;
	}

	if (vertex.discard) {
		radeon_cp_discard_buffer(dev, buf);
	}

	COMMIT_RING();
	return 0;
}
static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
			       drm_file_t * filp_priv,
			       drm_radeon_cmd_header_t header,
			       drm_radeon_kcmd_buffer_t *cmdbuf)
{
	int id = (int)header.packet.packet_id;
	int sz, reg;
	int *data = (int *)cmdbuf->buf;
	RING_LOCALS;

	if (id >= RADEON_MAX_STATE_PACKETS)
		return DRM_ERR(EINVAL);

	sz = packet[id].len;
	reg = packet[id].start;

	if (sz * sizeof(int) > cmdbuf->bufsz) {
		DRM_ERROR("Packet size provided larger than data provided\n");
		return DRM_ERR(EINVAL);
	}

	if (radeon_check_and_fixup_packets(dev_priv, filp_priv, id, data)) {
		DRM_ERROR("Packet verification failed\n");
		return DRM_ERR(EINVAL);
	}

	BEGIN_RING(sz + 1);
	OUT_RING(CP_PACKET0(reg, (sz - 1)));
	OUT_RING_TABLE(data, sz);
	ADVANCE_RING();

	cmdbuf->buf += sz * sizeof(int);
	cmdbuf->bufsz -= sz * sizeof(int);
	return 0;
}
static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
					  drm_radeon_cmd_header_t header,
					  drm_radeon_kcmd_buffer_t *cmdbuf)
{
	int sz = header.scalars.count;
	int start = header.scalars.offset;
	int stride = header.scalars.stride;
	RING_LOCALS;

	BEGIN_RING(3 + sz);
	OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
	OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
	OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
	OUT_RING_TABLE(cmdbuf->buf, sz);
	ADVANCE_RING();
	cmdbuf->buf += sz * sizeof(int);
	cmdbuf->bufsz -= sz * sizeof(int);
	return 0;
}
/* God this is ugly
 */
static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
					   drm_radeon_cmd_header_t header,
					   drm_radeon_kcmd_buffer_t *cmdbuf)
{
	int sz = header.scalars.count;
	int start = ((unsigned int)header.scalars.offset) + 0x100;
	int stride = header.scalars.stride;
	RING_LOCALS;

	BEGIN_RING(3 + sz);
	OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
	OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
	OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
	OUT_RING_TABLE(cmdbuf->buf, sz);
	ADVANCE_RING();
	cmdbuf->buf += sz * sizeof(int);
	cmdbuf->bufsz -= sz * sizeof(int);
	return 0;
}
static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
					  drm_radeon_cmd_header_t header,
					  drm_radeon_kcmd_buffer_t *cmdbuf)
{
	int sz = header.vectors.count;
	int start = header.vectors.offset;
	int stride = header.vectors.stride;
	RING_LOCALS;

	BEGIN_RING(3 + sz);
	OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
	OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
	OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
	OUT_RING_TABLE(cmdbuf->buf, sz);
	ADVANCE_RING();

	cmdbuf->buf += sz * sizeof(int);
	cmdbuf->bufsz -= sz * sizeof(int);
	return 0;
}
static int radeon_emit_packet3(drm_device_t * dev,
			       drm_file_t * filp_priv,
			       drm_radeon_kcmd_buffer_t *cmdbuf)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	unsigned int cmdsz;
	int ret;
	RING_LOCALS;

	DRM_DEBUG("\n");

	if ((ret = radeon_check_and_fixup_packet3(dev_priv, filp_priv,
						  cmdbuf, &cmdsz))) {
		DRM_ERROR("Packet verification failed\n");
		return ret;
	}

	BEGIN_RING(cmdsz);
	OUT_RING_TABLE(cmdbuf->buf, cmdsz);
	ADVANCE_RING();

	cmdbuf->buf += cmdsz * 4;
	cmdbuf->bufsz -= cmdsz * 4;
	return 0;
}
static int radeon_emit_packet3_cliprect(drm_device_t *dev,
					drm_file_t *filp_priv,
					drm_radeon_kcmd_buffer_t *cmdbuf,
					int orig_nbox)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t box;
	unsigned int cmdsz;
	int ret;
	drm_clip_rect_t __user *boxes = cmdbuf->boxes;
	int i = 0;
	RING_LOCALS;

	DRM_DEBUG("\n");

	if ((ret = radeon_check_and_fixup_packet3(dev_priv, filp_priv,
						  cmdbuf, &cmdsz))) {
		DRM_ERROR("Packet verification failed\n");
		return ret;
	}

	if (!orig_nbox)
		goto out;

	do {
		if (i < cmdbuf->nbox) {
			if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box)))
				return DRM_ERR(EFAULT);
			/* FIXME The second and subsequent times round
			 * this loop, send a WAIT_UNTIL_3D_IDLE before
			 * calling emit_clip_rect(). This fixes a
			 * lockup on fast machines when sending
			 * several cliprects with a cmdbuf, as when
			 * waving a 2D window over a 3D
			 * window. Something in the commands from user
			 * space seems to hang the card when they're
			 * sent several times in a row. That would be
			 * the correct place to fix it but this works
			 * around it until I can figure that out - Tim
			 * Smith */
			if (i) {
				BEGIN_RING(2);
				RADEON_WAIT_UNTIL_3D_IDLE();
				ADVANCE_RING();
			}
			radeon_emit_clip_rect(dev_priv, &box);
		}

		BEGIN_RING(cmdsz);
		OUT_RING_TABLE(cmdbuf->buf, cmdsz);
		ADVANCE_RING();

	} while (++i < cmdbuf->nbox);
	if (cmdbuf->nbox == 1)
		cmdbuf->nbox = 0;

      out:
	cmdbuf->buf += cmdsz * 4;
	cmdbuf->bufsz -= cmdsz * 4;
	return 0;
}
static int radeon_emit_wait(drm_device_t * dev, int flags)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG("%s: %x\n", __FUNCTION__, flags);
	switch (flags) {
	case RADEON_WAIT_2D:
		BEGIN_RING(2);
		RADEON_WAIT_UNTIL_2D_IDLE();
		ADVANCE_RING();
		break;
	case RADEON_WAIT_3D:
		BEGIN_RING(2);
		RADEON_WAIT_UNTIL_3D_IDLE();
		ADVANCE_RING();
		break;
	case RADEON_WAIT_2D | RADEON_WAIT_3D:
		BEGIN_RING(2);
		RADEON_WAIT_UNTIL_IDLE();
		ADVANCE_RING();
		break;
	default:
		return DRM_ERR(EINVAL);
	}

	return 0;
}
static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_file_t *filp_priv;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf = NULL;
	int idx;
	drm_radeon_kcmd_buffer_t cmdbuf;
	drm_radeon_cmd_header_t header;
	int orig_nbox, orig_bufsz;
	char *kbuf = NULL;

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);

	DRM_COPY_FROM_USER_IOCTL(cmdbuf,
				 (drm_radeon_cmd_buffer_t __user *) data,
				 sizeof(cmdbuf));

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	if (cmdbuf.bufsz > 64 * 1024 || cmdbuf.bufsz < 0) {
		return DRM_ERR(EINVAL);
	}

	/* Allocate an in-kernel area and copy in the cmdbuf.  Do this to avoid
	 * races between checking values and using those values in other code,
	 * and simply to avoid a lot of function calls to copy in data.
	 */
	orig_bufsz = cmdbuf.bufsz;
	if (orig_bufsz != 0) {
		kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER);
		if (kbuf == NULL)
			return DRM_ERR(ENOMEM);
		if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf,
				       cmdbuf.bufsz)) {
			drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
			return DRM_ERR(EFAULT);
		}
		cmdbuf.buf = kbuf;
	}

	orig_nbox = cmdbuf.nbox;

	if (dev_priv->microcode_version == UCODE_R300) {
		int temp;
		temp = r300_do_cp_cmdbuf(dev, filp, filp_priv, &cmdbuf);

		if (orig_bufsz != 0)
			drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);

		return temp;
	}

	/* microcode_version != r300 */
	while (cmdbuf.bufsz >= sizeof(header)) {

		header.i = *(int *)cmdbuf.buf;
		cmdbuf.buf += sizeof(header);
		cmdbuf.bufsz -= sizeof(header);

		switch (header.header.cmd_type) {
		case RADEON_CMD_PACKET:
			DRM_DEBUG("RADEON_CMD_PACKET\n");
			if (radeon_emit_packets
			    (dev_priv, filp_priv, header, &cmdbuf)) {
				DRM_ERROR("radeon_emit_packets failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_SCALARS:
			DRM_DEBUG("RADEON_CMD_SCALARS\n");
			if (radeon_emit_scalars(dev_priv, header, &cmdbuf)) {
				DRM_ERROR("radeon_emit_scalars failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_VECTORS:
			DRM_DEBUG("RADEON_CMD_VECTORS\n");
			if (radeon_emit_vectors(dev_priv, header, &cmdbuf)) {
				DRM_ERROR("radeon_emit_vectors failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_DMA_DISCARD:
			DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
			idx = header.dma.buf_idx;
			if (idx < 0 || idx >= dma->buf_count) {
				DRM_ERROR("buffer index %d (of %d max)\n",
					  idx, dma->buf_count - 1);
				goto err;
			}

			buf = dma->buflist[idx];
			if (buf->filp != filp || buf->pending) {
				DRM_ERROR("bad buffer %p %p %d\n",
					  buf->filp, filp, buf->pending);
				goto err;
			}

			radeon_cp_discard_buffer(dev, buf);
			break;

		case RADEON_CMD_PACKET3:
			DRM_DEBUG("RADEON_CMD_PACKET3\n");
			if (radeon_emit_packet3(dev, filp_priv, &cmdbuf)) {
				DRM_ERROR("radeon_emit_packet3 failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_PACKET3_CLIP:
			DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
			if (radeon_emit_packet3_cliprect
			    (dev, filp_priv, &cmdbuf, orig_nbox)) {
				DRM_ERROR("radeon_emit_packet3_clip failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_SCALARS2:
			DRM_DEBUG("RADEON_CMD_SCALARS2\n");
			if (radeon_emit_scalars2(dev_priv, header, &cmdbuf)) {
				DRM_ERROR("radeon_emit_scalars2 failed\n");
				goto err;
			}
			break;

		case RADEON_CMD_WAIT:
			DRM_DEBUG("RADEON_CMD_WAIT\n");
			if (radeon_emit_wait(dev, header.wait.flags)) {
				DRM_ERROR("radeon_emit_wait failed\n");
				goto err;
			}
			break;
		default:
			DRM_ERROR("bad cmd_type %d at %p\n",
				  header.header.cmd_type,
				  cmdbuf.buf - sizeof(header));
			goto err;
		}
	}

	if (orig_bufsz != 0)
		drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);

	DRM_DEBUG("DONE\n");
	COMMIT_RING();
	return 0;

      err:
	if (orig_bufsz != 0)
		drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
	return DRM_ERR(EINVAL);
}
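
/* Illustrative sketch (not part of the original driver): the stream
 * parsed by the loop in radeon_cp_cmdbuf() above is a packed sequence
 * of 32-bit drm_radeon_cmd_header_t words, each followed by its
 * payload.  A minimal client-side submission, assuming an
 * authenticated fd; the helper name is hypothetical.
 */
#if 0
#include <xf86drm.h>

static int emit_wait_3d_idle(int fd)
{
	drm_radeon_cmd_header_t header;
	drm_radeon_cmd_buffer_t cmdbuf;

	header.i = 0;
	header.header.cmd_type = RADEON_CMD_WAIT;
	header.wait.flags = RADEON_WAIT_3D;

	cmdbuf.buf = (char *)&header;
	cmdbuf.bufsz = sizeof(header);	/* RADEON_CMD_WAIT has no payload */
	cmdbuf.nbox = 0;
	cmdbuf.boxes = NULL;

	return drmCommandWrite(fd, DRM_RADEON_CMDBUF,
			       &cmdbuf, sizeof(cmdbuf));
}
#endif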
static int radeon_cp_getparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_radeon_getparam_t param;
	int value;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(param, (drm_radeon_getparam_t __user *) data,
				 sizeof(param));

	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);

	switch (param.param) {
	case RADEON_PARAM_GART_BUFFER_OFFSET:
		value = dev_priv->gart_buffers_offset;
		break;
	case RADEON_PARAM_LAST_FRAME:
		dev_priv->stats.last_frame_reads++;
		value = GET_SCRATCH(0);
		break;
	case RADEON_PARAM_LAST_DISPATCH:
		value = GET_SCRATCH(1);
		break;
	case RADEON_PARAM_LAST_CLEAR:
		dev_priv->stats.last_clear_reads++;
		value = GET_SCRATCH(2);
		break;
	case RADEON_PARAM_IRQ_NR:
		value = dev->irq;
		break;
	case RADEON_PARAM_GART_BASE:
		value = dev_priv->gart_vm_start;
		break;
	case RADEON_PARAM_REGISTER_HANDLE:
		value = dev_priv->mmio->offset;
		break;
	case RADEON_PARAM_STATUS_HANDLE:
		value = dev_priv->ring_rptr_offset;
		break;
#if BITS_PER_LONG == 32
		/*
		 * This ioctl() doesn't work on 64-bit platforms because hw_lock is a
		 * pointer which can't fit into an int-sized variable.  According to
		 * Michel Dänzer, the ioctl() is only used on embedded platforms, so
		 * not supporting it shouldn't be a problem.  If the same functionality
		 * is needed on 64-bit platforms, a new ioctl() would have to be added,
		 * so backwards-compatibility for the embedded platforms can be
		 * maintained.  --davidm 4-Feb-2004.
		 */
	case RADEON_PARAM_SAREA_HANDLE:
		/* The lock is the first dword in the sarea. */
		value = (long)dev->lock.hw_lock;
		break;
#endif
	case RADEON_PARAM_GART_TEX_HANDLE:
		value = dev_priv->gart_textures_offset;
		break;

	case RADEON_PARAM_CARD_TYPE:
		if (dev_priv->flags & CHIP_IS_PCIE)
			value = RADEON_CARD_PCIE;
		else if (dev_priv->flags & CHIP_IS_AGP)
			value = RADEON_CARD_AGP;
		else
			value = RADEON_CARD_PCI;
		break;
	default:
		return DRM_ERR(EINVAL);
	}

	if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return DRM_ERR(EFAULT);
	}

	return 0;
}
static int radeon_cp_setparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	drm_file_t *filp_priv;
	drm_radeon_setparam_t sp;
	struct drm_radeon_driver_file_fields *radeon_priv;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);

	DRM_COPY_FROM_USER_IOCTL(sp, (drm_radeon_setparam_t __user *) data,
				 sizeof(sp));

	switch (sp.param) {
	case RADEON_SETPARAM_FB_LOCATION:
		radeon_priv = filp_priv->driver_priv;
		radeon_priv->radeon_fb_delta = dev_priv->fb_location - sp.value;
		break;
	case RADEON_SETPARAM_SWITCH_TILING:
		if (sp.value == 0) {
			DRM_DEBUG("color tiling disabled\n");
			dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO;
			dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO;
			dev_priv->sarea_priv->tiling_enabled = 0;
		} else if (sp.value == 1) {
			DRM_DEBUG("color tiling enabled\n");
			dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO;
			dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO;
			dev_priv->sarea_priv->tiling_enabled = 1;
		}
		break;
	case RADEON_SETPARAM_PCIGART_LOCATION:
		dev_priv->pcigart_offset = sp.value;
		break;
	default:
		DRM_DEBUG("Invalid parameter %d\n", sp.param);
		return DRM_ERR(EINVAL);
	}

	return 0;
}
/* When a client dies:
 *    - Check for and clean up flipped page state
 *    - Free any allocated GART memory.
 *    - Free any allocated radeon surfaces.
 *
 * DRM infrastructure takes care of reclaiming dma buffers.
 */
void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp)
{
	if (dev->dev_private) {
		drm_radeon_private_t *dev_priv = dev->dev_private;
		if (dev_priv->page_flipping) {
			radeon_do_cleanup_pageflip(dev);
		}
		radeon_mem_release(filp, dev_priv->gart_heap);
		radeon_mem_release(filp, dev_priv->fb_heap);
		radeon_surfaces_release(filp, dev_priv);
	}
}
void radeon_driver_lastclose(drm_device_t * dev)
{
	radeon_do_release(dev);
}
int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_driver_file_fields *radeon_priv;

	radeon_priv =
	    (struct drm_radeon_driver_file_fields *)
	    drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES);

	if (!radeon_priv)
		return -ENOMEM;

	filp_priv->driver_priv = radeon_priv;

	if (dev_priv)
		radeon_priv->radeon_fb_delta = dev_priv->fb_location;
	else
		radeon_priv->radeon_fb_delta = 0;
	return 0;
}
void radeon_driver_postclose(drm_device_t * dev, drm_file_t * filp_priv)
{
	struct drm_radeon_driver_file_fields *radeon_priv =
	    filp_priv->driver_priv;

	drm_free(radeon_priv, sizeof(*radeon_priv), DRM_MEM_FILES);
}
drm_ioctl_desc_t radeon_ioctls[] = {
	[DRM_IOCTL_NR(DRM_RADEON_CP_INIT)] = {radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_RADEON_CP_START)] = {radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_RADEON_CP_STOP)] = {radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_RADEON_CP_RESET)] = {radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_RADEON_CP_IDLE)] = {radeon_cp_idle, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_RADEON_CP_RESUME)] = {radeon_cp_resume, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_RADEON_RESET)] = {radeon_engine_reset, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_RADEON_FULLSCREEN)] = {radeon_fullscreen, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_RADEON_SWAP)] = {radeon_cp_swap, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_RADEON_CLEAR)] = {radeon_cp_clear, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_RADEON_VERTEX)] = {radeon_cp_vertex, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_RADEON_INDICES)] = {radeon_cp_indices, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_RADEON_TEXTURE)] = {radeon_cp_texture, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_RADEON_STIPPLE)] = {radeon_cp_stipple, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_RADEON_INDIRECT)] = {radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_RADEON_VERTEX2)] = {radeon_cp_vertex2, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_RADEON_CMDBUF)] = {radeon_cp_cmdbuf, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_RADEON_GETPARAM)] = {radeon_cp_getparam, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_RADEON_FLIP)] = {radeon_cp_flip, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_RADEON_ALLOC)] = {radeon_mem_alloc, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_RADEON_FREE)] = {radeon_mem_free, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_RADEON_INIT_HEAP)] = {radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_RADEON_IRQ_EMIT)] = {radeon_irq_emit, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_RADEON_IRQ_WAIT)] = {radeon_irq_wait, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_RADEON_SETPARAM)] = {radeon_cp_setparam, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_RADEON_SURF_ALLOC)] = {radeon_surface_alloc, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_RADEON_SURF_FREE)] = {radeon_surface_free, DRM_AUTH}
};

int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
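
/* Illustrative sketch (not part of the original driver): how a client
 * would reach one of the DRM_AUTH entries in the table above, here
 * radeon_cp_getparam().  Assumes an authenticated DRM fd; the helper
 * name is hypothetical.
 */
#if 0
#include <xf86drm.h>

static int query_gart_base(int fd, int *gart_base)
{
	drm_radeon_getparam_t gp;

	gp.param = RADEON_PARAM_GART_BASE;
	gp.value = gart_base;	/* the handler copies the result here */

	return drmCommandWriteRead(fd, DRM_RADEON_GETPARAM,
				   &gp, sizeof(gp));
}
#endif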