/* r128_state.c -- State support for r128 -*- linux-c -*-
 * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com
 *
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Gareth Hughes <gareth@valinux.com>
 */

#include "drmP.h"
#include "drm.h"
#include "r128_drm.h"
#include "r128_drv.h"
/* ================================================================
 * CCE hardware state programming functions
 */
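
/* Each of the r128_emit_*() helpers below queues register writes to the
 * CCE command ring via the BEGIN_RING()/OUT_RING()/ADVANCE_RING() macros;
 * most of them read the values to emit from the context state kept in the
 * shared SAREA.
 */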
static void r128_emit_clip_rects(drm_r128_private_t * dev_priv,
				 drm_clip_rect_t * boxes, int count)
{
	u32 aux_sc_cntl = 0x00000000;
	RING_LOCALS;
	DRM_DEBUG(" %s\n", __FUNCTION__);

	BEGIN_RING((count < 3 ? count : 3) * 5 + 2);

	if (count >= 1) {
		OUT_RING(CCE_PACKET0(R128_AUX1_SC_LEFT, 3));
		OUT_RING(boxes[0].x1);
		OUT_RING(boxes[0].x2 - 1);
		OUT_RING(boxes[0].y1);
		OUT_RING(boxes[0].y2 - 1);

		aux_sc_cntl |= (R128_AUX1_SC_EN | R128_AUX1_SC_MODE_OR);
	}
	if (count >= 2) {
		OUT_RING(CCE_PACKET0(R128_AUX2_SC_LEFT, 3));
		OUT_RING(boxes[1].x1);
		OUT_RING(boxes[1].x2 - 1);
		OUT_RING(boxes[1].y1);
		OUT_RING(boxes[1].y2 - 1);

		aux_sc_cntl |= (R128_AUX2_SC_EN | R128_AUX2_SC_MODE_OR);
	}
	if (count >= 3) {
		OUT_RING(CCE_PACKET0(R128_AUX3_SC_LEFT, 3));
		OUT_RING(boxes[2].x1);
		OUT_RING(boxes[2].x2 - 1);
		OUT_RING(boxes[2].y1);
		OUT_RING(boxes[2].y2 - 1);

		aux_sc_cntl |= (R128_AUX3_SC_EN | R128_AUX3_SC_MODE_OR);
	}

	OUT_RING(CCE_PACKET0(R128_AUX_SC_CNTL, 0));
	OUT_RING(aux_sc_cntl);

	ADVANCE_RING();
}
static __inline__ void r128_emit_core(drm_r128_private_t * dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG(" %s\n", __FUNCTION__);

	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_SCALE_3D_CNTL, 0));
	OUT_RING(ctx->scale_3d_cntl);

	ADVANCE_RING();
}
static __inline__ void r128_emit_context(drm_r128_private_t * dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG(" %s\n", __FUNCTION__);

	BEGIN_RING(13);

	OUT_RING(CCE_PACKET0(R128_DST_PITCH_OFFSET_C, 11));
	OUT_RING(ctx->dst_pitch_offset_c);
	OUT_RING(ctx->dp_gui_master_cntl_c);
	OUT_RING(ctx->sc_top_left_c);
	OUT_RING(ctx->sc_bottom_right_c);
	OUT_RING(ctx->z_offset_c);
	OUT_RING(ctx->z_pitch_c);
	OUT_RING(ctx->z_sten_cntl_c);
	OUT_RING(ctx->tex_cntl_c);
	OUT_RING(ctx->misc_3d_state_cntl_reg);
	OUT_RING(ctx->texture_clr_cmp_clr_c);
	OUT_RING(ctx->texture_clr_cmp_msk_c);
	OUT_RING(ctx->fog_color_c);

	ADVANCE_RING();
}
static __inline__ void r128_emit_setup(drm_r128_private_t * dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG(" %s\n", __FUNCTION__);

	BEGIN_RING(3);

	OUT_RING(CCE_PACKET1(R128_SETUP_CNTL, R128_PM4_VC_FPU_SETUP));
	OUT_RING(ctx->setup_cntl);
	OUT_RING(ctx->pm4_vc_fpu_setup);

	ADVANCE_RING();
}
static __inline__ void r128_emit_masks(drm_r128_private_t * dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG(" %s\n", __FUNCTION__);

	BEGIN_RING(5);

	OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
	OUT_RING(ctx->dp_write_mask);

	OUT_RING(CCE_PACKET0(R128_STEN_REF_MASK_C, 1));
	OUT_RING(ctx->sten_ref_mask_c);
	OUT_RING(ctx->plane_3d_mask_c);

	ADVANCE_RING();
}
static __inline__ void r128_emit_window(drm_r128_private_t * dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG(" %s\n", __FUNCTION__);

	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_WINDOW_XY_OFFSET, 0));
	OUT_RING(ctx->window_xy_offset);

	ADVANCE_RING();
}
static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0];
	int i;
	RING_LOCALS;
	DRM_DEBUG(" %s\n", __FUNCTION__);

	BEGIN_RING(7 + R128_MAX_TEXTURE_LEVELS);

	OUT_RING(CCE_PACKET0(R128_PRIM_TEX_CNTL_C,
			     2 + R128_MAX_TEXTURE_LEVELS));
	OUT_RING(tex->tex_cntl);
	OUT_RING(tex->tex_combine_cntl);
	OUT_RING(ctx->tex_size_pitch_c);
	for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) {
		OUT_RING(tex->tex_offset[i]);
	}

	OUT_RING(CCE_PACKET0(R128_CONSTANT_COLOR_C, 1));
	OUT_RING(ctx->constant_color_c);
	OUT_RING(tex->tex_border_color);

	ADVANCE_RING();
}
static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];
	int i;
	RING_LOCALS;
	DRM_DEBUG(" %s\n", __FUNCTION__);

	BEGIN_RING(5 + R128_MAX_TEXTURE_LEVELS);

	OUT_RING(CCE_PACKET0(R128_SEC_TEX_CNTL_C, 1 + R128_MAX_TEXTURE_LEVELS));
	OUT_RING(tex->tex_cntl);
	OUT_RING(tex->tex_combine_cntl);
	for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) {
		OUT_RING(tex->tex_offset[i]);
	}

	OUT_RING(CCE_PACKET0(R128_SEC_TEXTURE_BORDER_COLOR_C, 0));
	OUT_RING(tex->tex_border_color);

	ADVANCE_RING();
}
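
/* Emit whatever state the SAREA dirty mask says is stale.  Each
 * R128_UPLOAD_* bit is cleared once the corresponding registers have been
 * written to the ring.
 */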
static void r128_emit_state(drm_r128_private_t * dev_priv)
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int dirty = sarea_priv->dirty;

	DRM_DEBUG("%s: dirty=0x%08x\n", __FUNCTION__, dirty);

	if (dirty & R128_UPLOAD_CORE) {
		r128_emit_core(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_CORE;
	}

	if (dirty & R128_UPLOAD_CONTEXT) {
		r128_emit_context(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_CONTEXT;
	}

	if (dirty & R128_UPLOAD_SETUP) {
		r128_emit_setup(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_SETUP;
	}

	if (dirty & R128_UPLOAD_MASKS) {
		r128_emit_masks(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_MASKS;
	}

	if (dirty & R128_UPLOAD_WINDOW) {
		r128_emit_window(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_WINDOW;
	}

	if (dirty & R128_UPLOAD_TEX0) {
		r128_emit_tex0(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_TEX0;
	}

	if (dirty & R128_UPLOAD_TEX1) {
		r128_emit_tex1(dev_priv);
		sarea_priv->dirty &= ~R128_UPLOAD_TEX1;
	}

	/* Turn off the texture cache flushing */
	sarea_priv->context_state.tex_cntl_c &= ~R128_TEX_CACHE_FLUSH;

	sarea_priv->dirty &= ~R128_REQUIRE_QUIESCENCE;
}
#if R128_PERFORMANCE_BOXES
/* ================================================================
 * Performance monitoring functions
 */

static void r128_clear_box(drm_r128_private_t * dev_priv,
			   int x, int y, int w, int h, int r, int g, int b)
{
	u32 pitch, offset;
	u32 fb_bpp, color;
	RING_LOCALS;

	switch (dev_priv->fb_bpp) {
	case 16:
		fb_bpp = R128_GMC_DST_16BPP;
		color = (((r & 0xf8) << 8) |
			 ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
		break;
	case 24:
		fb_bpp = R128_GMC_DST_24BPP;
		color = ((r << 16) | (g << 8) | b);
		break;
	case 32:
		fb_bpp = R128_GMC_DST_32BPP;
		color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
		break;
	default:
		return;
	}

	offset = dev_priv->back_offset;
	pitch = dev_priv->back_pitch >> 3;

	BEGIN_RING(6);

	OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
	OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
		 R128_GMC_BRUSH_SOLID_COLOR |
		 fb_bpp |
		 R128_GMC_SRC_DATATYPE_COLOR |
		 R128_ROP3_P |
		 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_AUX_CLIP_DIS);

	OUT_RING((pitch << 21) | (offset >> 5));
	OUT_RING(color);

	OUT_RING((x << 16) | y);
	OUT_RING((w << 16) | h);

	ADVANCE_RING();
}
static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
{
	if (atomic_read(&dev_priv->idle_count) == 0) {
		r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
	} else {
		atomic_set(&dev_priv->idle_count, 0);
	}
}

#endif
/* ================================================================
 * CCE command dispatch functions
 */
static void r128_print_dirty(const char *msg, unsigned int flags)
{
	DRM_INFO("%s: (0x%x) %s%s%s%s%s%s%s%s%s\n",
		 msg,
		 flags,
		 (flags & R128_UPLOAD_CORE) ? "core, " : "",
		 (flags & R128_UPLOAD_CONTEXT) ? "context, " : "",
		 (flags & R128_UPLOAD_SETUP) ? "setup, " : "",
		 (flags & R128_UPLOAD_TEX0) ? "tex0, " : "",
		 (flags & R128_UPLOAD_TEX1) ? "tex1, " : "",
		 (flags & R128_UPLOAD_MASKS) ? "masks, " : "",
		 (flags & R128_UPLOAD_WINDOW) ? "window, " : "",
		 (flags & R128_UPLOAD_CLIPRECTS) ? "cliprects, " : "",
		 (flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : "");
}
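
/* Paint the requested clear color/depth into every cliprect of the selected
 * buffers.  When page flipping has the front and back buffers reversed, the
 * R128_FRONT/R128_BACK flags are swapped first.
 */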
static void r128_cce_dispatch_clear(drm_device_t * dev,
				    drm_r128_clear_t * clear)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	drm_clip_rect_t *pbox = sarea_priv->boxes;
	unsigned int flags = clear->flags;
	int i;
	RING_LOCALS;
	DRM_DEBUG("%s\n", __FUNCTION__);

	if (dev_priv->page_flipping && dev_priv->current_page == 1) {
		unsigned int tmp = flags;

		flags &= ~(R128_FRONT | R128_BACK);
		if (tmp & R128_FRONT)
			flags |= R128_BACK;
		if (tmp & R128_BACK)
			flags |= R128_FRONT;
	}

	for (i = 0; i < nbox; i++) {
		int x = pbox[i].x1;
		int y = pbox[i].y1;
		int w = pbox[i].x2 - x;
		int h = pbox[i].y2 - y;

		DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n",
			  pbox[i].x1, pbox[i].y1, pbox[i].x2,
			  pbox[i].y2, flags);

		if (flags & (R128_FRONT | R128_BACK)) {
			BEGIN_RING(2);

			OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
			OUT_RING(clear->color_mask);

			ADVANCE_RING();
		}

		if (flags & R128_FRONT) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->color_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_AUX_CLIP_DIS);

			OUT_RING(dev_priv->front_pitch_offset_c);
			OUT_RING(clear->clear_color);

			OUT_RING((x << 16) | y);
			OUT_RING((w << 16) | h);

			ADVANCE_RING();
		}

		if (flags & R128_BACK) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->color_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_AUX_CLIP_DIS);

			OUT_RING(dev_priv->back_pitch_offset_c);
			OUT_RING(clear->clear_color);

			OUT_RING((x << 16) | y);
			OUT_RING((w << 16) | h);

			ADVANCE_RING();
		}

		if (flags & R128_DEPTH) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->depth_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);

			OUT_RING(dev_priv->depth_pitch_offset_c);
			OUT_RING(clear->clear_depth);

			OUT_RING((x << 16) | y);
			OUT_RING((w << 16) | h);

			ADVANCE_RING();
		}
	}
}
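
/* Copy the back buffer to the front buffer, one blit per cliprect, then
 * bump last_frame so clients can throttle on it.
 */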
static void r128_cce_dispatch_swap(drm_device_t * dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	drm_clip_rect_t *pbox = sarea_priv->boxes;
	int i;
	RING_LOCALS;
	DRM_DEBUG("%s\n", __FUNCTION__);

#if R128_PERFORMANCE_BOXES
	/* Do some trivial performance monitoring...
	 */
	r128_cce_performance_boxes(dev_priv);
#endif

	for (i = 0; i < nbox; i++) {
		int x = pbox[i].x1;
		int y = pbox[i].y1;
		int w = pbox[i].x2 - x;
		int h = pbox[i].y2 - y;

		BEGIN_RING(7);

		OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
		OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
			 R128_GMC_DST_PITCH_OFFSET_CNTL |
			 R128_GMC_BRUSH_NONE |
			 (dev_priv->color_fmt << 8) |
			 R128_GMC_SRC_DATATYPE_COLOR |
			 R128_ROP3_S |
			 R128_DP_SRC_SOURCE_MEMORY |
			 R128_GMC_CLR_CMP_CNTL_DIS |
			 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);

		/* Make this work even if front & back are flipped:
		 */
		if (dev_priv->current_page == 0) {
			OUT_RING(dev_priv->back_pitch_offset_c);
			OUT_RING(dev_priv->front_pitch_offset_c);
		} else {
			OUT_RING(dev_priv->front_pitch_offset_c);
			OUT_RING(dev_priv->back_pitch_offset_c);
		}

		OUT_RING((x << 16) | y);
		OUT_RING((x << 16) | y);
		OUT_RING((w << 16) | h);

		ADVANCE_RING();
	}

	/* Increment the frame counter.  The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->last_frame++;

	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
	OUT_RING(dev_priv->sarea_priv->last_frame);

	ADVANCE_RING();
}
static void r128_cce_dispatch_flip(drm_device_t * dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;
	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __FUNCTION__,
		  dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);

#if R128_PERFORMANCE_BOXES
	/* Do some trivial performance monitoring...
	 */
	r128_cce_performance_boxes(dev_priv);
#endif

	BEGIN_RING(4);

	R128_WAIT_UNTIL_PAGE_FLIPPED();
	OUT_RING(CCE_PACKET0(R128_CRTC_OFFSET, 0));

	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
	} else {
		OUT_RING(dev_priv->front_offset);
	}

	ADVANCE_RING();

	/* Increment the frame counter.  The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->last_frame++;
	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
	    1 - dev_priv->current_page;

	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
	OUT_RING(dev_priv->sarea_priv->last_frame);

	ADVANCE_RING();
}
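
/* Render a vertex buffer.  The hardware only honours up to three auxiliary
 * scissors, so the rendering packet is re-emitted for each group of up to
 * three cliprects.
 */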
static void r128_cce_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int format = sarea_priv->vc_format;
	int offset = buf->bus_address;
	int size = buf->used;
	int prim = buf_priv->prim;
	int i = 0;
	RING_LOCALS;
	DRM_DEBUG("buf=%d nbox=%d\n", buf->idx, sarea_priv->nbox);

	if (0)
		r128_print_dirty("dispatch_vertex", sarea_priv->dirty);

	if (buf->used) {
		buf_priv->dispatched = 1;

		if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) {
			r128_emit_state(dev_priv);
		}

		do {
			/* Emit the next set of up to three cliprects */
			if (i < sarea_priv->nbox) {
				r128_emit_clip_rects(dev_priv,
						     &sarea_priv->boxes[i],
						     sarea_priv->nbox - i);
			}

			/* Emit the vertex buffer rendering commands */
			BEGIN_RING(5);

			OUT_RING(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM, 3));
			OUT_RING(offset);
			OUT_RING(size);
			OUT_RING(format);
			OUT_RING(prim | R128_CCE_VC_CNTL_PRIM_WALK_LIST |
				 (size << R128_CCE_VC_CNTL_NUM_SHIFT));

			ADVANCE_RING();

			i += 3;
		} while (i < sarea_priv->nbox);
	}

	if (buf_priv->discard) {
		buf_priv->age = dev_priv->sarea_priv->last_dispatch;

		/* Emit the vertex buffer age */
		BEGIN_RING(2);

		OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
		OUT_RING(buf_priv->age);

		ADVANCE_RING();

		buf->pending = 1;
		buf->used = 0;
		/* FIXME: Check dispatched field */
		buf_priv->dispatched = 0;
	}

	dev_priv->sarea_priv->last_dispatch++;

	sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
	sarea_priv->nbox = 0;
}
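
/* Hand an indirect buffer to the CCE.  The buffer is padded to an even
 * number of dwords with a Type-2 packet if necessary before being pointed
 * at through R128_PM4_IW_INDOFF.
 */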
static void r128_cce_dispatch_indirect(drm_device_t * dev,
				       drm_buf_t * buf, int start, int end)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
	RING_LOCALS;
	DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);

	if (start != end) {
		int offset = buf->bus_address + start;
		int dwords = (end - start + 3) / sizeof(u32);

		/* Indirect buffer data must be an even number of
		 * dwords, so if we've been given an odd number we must
		 * pad the data with a Type-2 CCE packet.
		 */
		if (dwords & 1) {
			u32 *data = (u32 *)
			    ((char *)dev->agp_buffer_map->handle
			     + buf->offset + start);
			data[dwords++] = cpu_to_le32(R128_CCE_PACKET2);
		}

		buf_priv->dispatched = 1;

		/* Fire off the indirect buffer */
		BEGIN_RING(3);

		OUT_RING(CCE_PACKET0(R128_PM4_IW_INDOFF, 1));
		OUT_RING(offset);
		OUT_RING(dwords);

		ADVANCE_RING();
	}

	if (buf_priv->discard) {
		buf_priv->age = dev_priv->sarea_priv->last_dispatch;

		/* Emit the indirect buffer age */
		BEGIN_RING(2);

		OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
		OUT_RING(buf_priv->age);

		ADVANCE_RING();

		buf->pending = 1;
		buf->used = 0;
		/* FIXME: Check dispatched field */
		buf_priv->dispatched = 0;
	}

	dev_priv->sarea_priv->last_dispatch++;
}
static void r128_cce_dispatch_indices(drm_device_t * dev,
				      drm_buf_t * buf,
				      int start, int end, int count)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int format = sarea_priv->vc_format;
	int offset = dev->agp_buffer_map->offset - dev_priv->cce_buffers_offset;
	int prim = buf_priv->prim;
	u32 *data;
	int dwords;
	int i = 0;
	RING_LOCALS;
	DRM_DEBUG("indices: s=%d e=%d c=%d\n", start, end, count);

	if (0)
		r128_print_dirty("dispatch_indices", sarea_priv->dirty);

	if (start != end) {
		buf_priv->dispatched = 1;

		if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) {
			r128_emit_state(dev_priv);
		}

		dwords = (end - start + 3) / sizeof(u32);

		data = (u32 *) ((char *)dev->agp_buffer_map->handle
				+ buf->offset + start);

		data[0] = cpu_to_le32(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM,
						  dwords - 2));

		data[1] = cpu_to_le32(offset);
		data[2] = cpu_to_le32(R128_MAX_VB_VERTS);
		data[3] = cpu_to_le32(format);
		data[4] = cpu_to_le32((prim | R128_CCE_VC_CNTL_PRIM_WALK_IND |
				       (count << R128_CCE_VC_CNTL_NUM_SHIFT)));

		if (count & 0x1) {
#ifdef __LITTLE_ENDIAN
			data[dwords - 1] &= 0x0000ffff;
#else
			data[dwords - 1] &= 0xffff0000;
#endif
		}

		do {
			/* Emit the next set of up to three cliprects */
			if (i < sarea_priv->nbox) {
				r128_emit_clip_rects(dev_priv,
						     &sarea_priv->boxes[i],
						     sarea_priv->nbox - i);
			}

			r128_cce_dispatch_indirect(dev, buf, start, end);

			i += 3;
		} while (i < sarea_priv->nbox);
	}

	if (buf_priv->discard) {
		buf_priv->age = dev_priv->sarea_priv->last_dispatch;

		/* Emit the vertex buffer age */
		BEGIN_RING(2);

		OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
		OUT_RING(buf_priv->age);

		ADVANCE_RING();

		buf->pending = 1;
		/* FIXME: Check dispatched field */
		buf_priv->dispatched = 0;
	}

	dev_priv->sarea_priv->last_dispatch++;

	sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
	sarea_priv->nbox = 0;
}
static int r128_cce_dispatch_blit(DRMFILE filp,
				  drm_device_t * dev, drm_r128_blit_t * blit)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_r128_buf_priv_t *buf_priv;
	u32 *data;
	int dword_shift, dwords;
	RING_LOCALS;
	DRM_DEBUG("\n");

	/* The compiler won't optimize away a division by a variable,
	 * even if the only legal values are powers of two.  Thus, we'll
	 * use a shift instead.
	 */
	switch (blit->format) {
	case R128_DATATYPE_ARGB8888:
		dword_shift = 0;
		break;
	case R128_DATATYPE_ARGB1555:
	case R128_DATATYPE_RGB565:
	case R128_DATATYPE_ARGB4444:
	case R128_DATATYPE_YVYU422:
	case R128_DATATYPE_VYUY422:
		dword_shift = 1;
		break;
	case R128_DATATYPE_CI8:
	case R128_DATATYPE_RGB8:
		dword_shift = 2;
		break;
	default:
		DRM_ERROR("invalid blit format %d\n", blit->format);
		return DRM_ERR(EINVAL);
	}

	/* Flush the pixel cache, and mark the contents as Read Invalid.
	 * This ensures no pixel data gets mixed up with the texture
	 * data from the host data blit, otherwise part of the texture
	 * image may be corrupted.
	 */
	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
	OUT_RING(R128_PC_RI_GUI | R128_PC_FLUSH_GUI);

	ADVANCE_RING();

	/* Dispatch the indirect buffer.
	 */
	buf = dma->buflist[blit->idx];
	buf_priv = buf->dev_private;

	if (buf->filp != filp) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->filp);
		return DRM_ERR(EINVAL);
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", blit->idx);
		return DRM_ERR(EINVAL);
	}

	buf_priv->discard = 1;

	dwords = (blit->width * blit->height) >> dword_shift;

	data = (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);

	data[0] = cpu_to_le32(CCE_PACKET3(R128_CNTL_HOSTDATA_BLT, dwords + 6));
	data[1] = cpu_to_le32((R128_GMC_DST_PITCH_OFFSET_CNTL |
			       R128_GMC_BRUSH_NONE |
			       (blit->format << 8) |
			       R128_GMC_SRC_DATATYPE_COLOR |
			       R128_ROP3_S |
			       R128_DP_SRC_SOURCE_HOST_DATA |
			       R128_GMC_CLR_CMP_CNTL_DIS |
			       R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS));

	data[2] = cpu_to_le32((blit->pitch << 21) | (blit->offset >> 5));
	data[3] = cpu_to_le32(0xffffffff);
	data[4] = cpu_to_le32(0xffffffff);
	data[5] = cpu_to_le32((blit->y << 16) | blit->x);
	data[6] = cpu_to_le32((blit->height << 16) | blit->width);
	data[7] = cpu_to_le32(dwords);

	buf->used = (dwords + 8) * sizeof(u32);

	r128_cce_dispatch_indirect(dev, buf, 0, buf->used);

	/* Flush the pixel cache after the blit completes.  This ensures
	 * the texture data is written out to memory before rendering
	 * continues.
	 */
	BEGIN_RING(2);

	OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
	OUT_RING(R128_PC_FLUSH_GUI);

	ADVANCE_RING();

	return 0;
}
/* ================================================================
 * Tiled depth buffer management
 *
 * FIXME: These should all set the destination write mask for when we
 * have hardware stencil support.
 */
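
/* The depth span/pixel routines below drive the 2D engine directly, issuing
 * one paint or blit per depth value, and copy their arguments in from user
 * space before touching the ring.
 */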
static int r128_cce_dispatch_write_span(drm_device_t * dev,
					drm_r128_depth_t * depth)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, x, y;
	u32 *buffer;
	u8 *mask;
	int i, buffer_size, mask_size;
	RING_LOCALS;
	DRM_DEBUG("%s\n", __FUNCTION__);

	count = depth->n;
	if (count > 4096 || count <= 0)
		return DRM_ERR(EMSGSIZE);

	if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
		return DRM_ERR(EFAULT);
	}
	if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
		return DRM_ERR(EFAULT);
	}

	buffer_size = depth->n * sizeof(u32);
	buffer = drm_alloc(buffer_size, DRM_MEM_BUFS);
	if (buffer == NULL)
		return DRM_ERR(ENOMEM);
	if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
		drm_free(buffer, buffer_size, DRM_MEM_BUFS);
		return DRM_ERR(EFAULT);
	}

	mask_size = depth->n * sizeof(u8);
	if (depth->mask) {
		mask = drm_alloc(mask_size, DRM_MEM_BUFS);
		if (mask == NULL) {
			drm_free(buffer, buffer_size, DRM_MEM_BUFS);
			return DRM_ERR(ENOMEM);
		}
		if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
			drm_free(buffer, buffer_size, DRM_MEM_BUFS);
			drm_free(mask, mask_size, DRM_MEM_BUFS);
			return DRM_ERR(EFAULT);
		}

		for (i = 0; i < count; i++, x++) {
			if (mask[i]) {
				BEGIN_RING(6);

				OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
				OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
					 R128_GMC_BRUSH_SOLID_COLOR |
					 (dev_priv->depth_fmt << 8) |
					 R128_GMC_SRC_DATATYPE_COLOR |
					 R128_ROP3_P |
					 R128_GMC_CLR_CMP_CNTL_DIS |
					 R128_GMC_WR_MSK_DIS);

				OUT_RING(dev_priv->depth_pitch_offset_c);
				OUT_RING(buffer[i]);

				OUT_RING((x << 16) | y);
				OUT_RING((1 << 16) | 1);

				ADVANCE_RING();
			}
		}

		drm_free(mask, mask_size, DRM_MEM_BUFS);
	} else {
		for (i = 0; i < count; i++, x++) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->depth_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_WR_MSK_DIS);

			OUT_RING(dev_priv->depth_pitch_offset_c);
			OUT_RING(buffer[i]);

			OUT_RING((x << 16) | y);
			OUT_RING((1 << 16) | 1);

			ADVANCE_RING();
		}
	}

	drm_free(buffer, buffer_size, DRM_MEM_BUFS);

	return 0;
}
static int r128_cce_dispatch_write_pixels(drm_device_t * dev,
					  drm_r128_depth_t * depth)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, *x, *y;
	u32 *buffer;
	u8 *mask;
	int i, xbuf_size, ybuf_size, buffer_size, mask_size;
	RING_LOCALS;
	DRM_DEBUG("%s\n", __FUNCTION__);

	count = depth->n;
	if (count > 4096 || count <= 0)
		return DRM_ERR(EMSGSIZE);

	xbuf_size = count * sizeof(*x);
	ybuf_size = count * sizeof(*y);
	x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
	if (x == NULL)
		return DRM_ERR(ENOMEM);
	y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
	if (y == NULL) {
		drm_free(x, xbuf_size, DRM_MEM_BUFS);
		return DRM_ERR(ENOMEM);
	}
	if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
		drm_free(x, xbuf_size, DRM_MEM_BUFS);
		drm_free(y, ybuf_size, DRM_MEM_BUFS);
		return DRM_ERR(EFAULT);
	}
	if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) {
		drm_free(x, xbuf_size, DRM_MEM_BUFS);
		drm_free(y, ybuf_size, DRM_MEM_BUFS);
		return DRM_ERR(EFAULT);
	}

	buffer_size = depth->n * sizeof(u32);
	buffer = drm_alloc(buffer_size, DRM_MEM_BUFS);
	if (buffer == NULL) {
		drm_free(x, xbuf_size, DRM_MEM_BUFS);
		drm_free(y, ybuf_size, DRM_MEM_BUFS);
		return DRM_ERR(ENOMEM);
	}
	if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
		drm_free(x, xbuf_size, DRM_MEM_BUFS);
		drm_free(y, ybuf_size, DRM_MEM_BUFS);
		drm_free(buffer, buffer_size, DRM_MEM_BUFS);
		return DRM_ERR(EFAULT);
	}

	if (depth->mask) {
		mask_size = depth->n * sizeof(u8);
		mask = drm_alloc(mask_size, DRM_MEM_BUFS);
		if (mask == NULL) {
			drm_free(x, xbuf_size, DRM_MEM_BUFS);
			drm_free(y, ybuf_size, DRM_MEM_BUFS);
			drm_free(buffer, buffer_size, DRM_MEM_BUFS);
			return DRM_ERR(ENOMEM);
		}
		if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
			drm_free(x, xbuf_size, DRM_MEM_BUFS);
			drm_free(y, ybuf_size, DRM_MEM_BUFS);
			drm_free(buffer, buffer_size, DRM_MEM_BUFS);
			drm_free(mask, mask_size, DRM_MEM_BUFS);
			return DRM_ERR(EFAULT);
		}

		for (i = 0; i < count; i++) {
			if (mask[i]) {
				BEGIN_RING(6);

				OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
				OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
					 R128_GMC_BRUSH_SOLID_COLOR |
					 (dev_priv->depth_fmt << 8) |
					 R128_GMC_SRC_DATATYPE_COLOR |
					 R128_ROP3_P |
					 R128_GMC_CLR_CMP_CNTL_DIS |
					 R128_GMC_WR_MSK_DIS);

				OUT_RING(dev_priv->depth_pitch_offset_c);
				OUT_RING(buffer[i]);

				OUT_RING((x[i] << 16) | y[i]);
				OUT_RING((1 << 16) | 1);

				ADVANCE_RING();
			}
		}

		drm_free(mask, mask_size, DRM_MEM_BUFS);
	} else {
		for (i = 0; i < count; i++) {
			BEGIN_RING(6);

			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
				 R128_GMC_BRUSH_SOLID_COLOR |
				 (dev_priv->depth_fmt << 8) |
				 R128_GMC_SRC_DATATYPE_COLOR |
				 R128_ROP3_P |
				 R128_GMC_CLR_CMP_CNTL_DIS |
				 R128_GMC_WR_MSK_DIS);

			OUT_RING(dev_priv->depth_pitch_offset_c);
			OUT_RING(buffer[i]);

			OUT_RING((x[i] << 16) | y[i]);
			OUT_RING((1 << 16) | 1);

			ADVANCE_RING();
		}
	}

	drm_free(x, xbuf_size, DRM_MEM_BUFS);
	drm_free(y, ybuf_size, DRM_MEM_BUFS);
	drm_free(buffer, buffer_size, DRM_MEM_BUFS);

	return 0;
}
static int r128_cce_dispatch_read_span(drm_device_t * dev,
				       drm_r128_depth_t * depth)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, x, y;
	RING_LOCALS;
	DRM_DEBUG("%s\n", __FUNCTION__);

	count = depth->n;
	if (count > 4096 || count <= 0)
		return DRM_ERR(EMSGSIZE);

	if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
		return DRM_ERR(EFAULT);
	}
	if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
		return DRM_ERR(EFAULT);
	}

	BEGIN_RING(7);

	OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
	OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
		 R128_GMC_DST_PITCH_OFFSET_CNTL |
		 R128_GMC_BRUSH_NONE |
		 (dev_priv->depth_fmt << 8) |
		 R128_GMC_SRC_DATATYPE_COLOR |
		 R128_ROP3_S |
		 R128_DP_SRC_SOURCE_MEMORY |
		 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);

	OUT_RING(dev_priv->depth_pitch_offset_c);
	OUT_RING(dev_priv->span_pitch_offset_c);

	OUT_RING((x << 16) | y);
	OUT_RING((0 << 16) | 0);
	OUT_RING((count << 16) | 1);

	ADVANCE_RING();

	return 0;
}
static int r128_cce_dispatch_read_pixels(drm_device_t * dev,
					 drm_r128_depth_t * depth)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, *x, *y;
	int i, xbuf_size, ybuf_size;
	RING_LOCALS;
	DRM_DEBUG("%s\n", __FUNCTION__);

	count = depth->n;
	if (count > 4096 || count <= 0)
		return DRM_ERR(EMSGSIZE);

	if (count > dev_priv->depth_pitch) {
		count = dev_priv->depth_pitch;
	}

	xbuf_size = count * sizeof(*x);
	ybuf_size = count * sizeof(*y);
	x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
	if (x == NULL)
		return DRM_ERR(ENOMEM);
	y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
	if (y == NULL) {
		drm_free(x, xbuf_size, DRM_MEM_BUFS);
		return DRM_ERR(ENOMEM);
	}
	if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
		drm_free(x, xbuf_size, DRM_MEM_BUFS);
		drm_free(y, ybuf_size, DRM_MEM_BUFS);
		return DRM_ERR(EFAULT);
	}
	if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
		drm_free(x, xbuf_size, DRM_MEM_BUFS);
		drm_free(y, ybuf_size, DRM_MEM_BUFS);
		return DRM_ERR(EFAULT);
	}

	for (i = 0; i < count; i++) {
		BEGIN_RING(7);

		OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
		OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
			 R128_GMC_DST_PITCH_OFFSET_CNTL |
			 R128_GMC_BRUSH_NONE |
			 (dev_priv->depth_fmt << 8) |
			 R128_GMC_SRC_DATATYPE_COLOR |
			 R128_ROP3_S |
			 R128_DP_SRC_SOURCE_MEMORY |
			 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);

		OUT_RING(dev_priv->depth_pitch_offset_c);
		OUT_RING(dev_priv->span_pitch_offset_c);

		OUT_RING((x[i] << 16) | y[i]);
		OUT_RING((i << 16) | 0);
		OUT_RING((1 << 16) | 1);

		ADVANCE_RING();
	}

	drm_free(x, xbuf_size, DRM_MEM_BUFS);
	drm_free(y, ybuf_size, DRM_MEM_BUFS);

	return 0;
}
/* ================================================================
 * Polygon stipple
 */

static void r128_cce_dispatch_stipple(drm_device_t * dev, u32 * stipple)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;
	DRM_DEBUG("%s\n", __FUNCTION__);

	BEGIN_RING(33);

	OUT_RING(CCE_PACKET0(R128_BRUSH_DATA0, 31));
	for (i = 0; i < 32; i++) {
		OUT_RING(stipple[i]);
	}

	ADVANCE_RING();
}
/* ================================================================
 * IOCTL functions
 */

static int r128_cce_clear(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_clear_t clear;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(clear, (drm_r128_clear_t __user *) data,
				 sizeof(clear));

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;

	r128_cce_dispatch_clear(dev, &clear);
	COMMIT_RING();

	/* Make sure we restore the 3D state next time.
	 */
	dev_priv->sarea_priv->dirty |= R128_UPLOAD_CONTEXT | R128_UPLOAD_MASKS;

	return 0;
}
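
/* Page flipping helpers: save the CRTC offset registers when flipping is
 * enabled and restore them (flipping back to page 0 first if needed) on
 * cleanup.
 */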
static int r128_do_init_pageflip(drm_device_t * dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("%s\n", __FUNCTION__);

	dev_priv->crtc_offset = R128_READ(R128_CRTC_OFFSET);
	dev_priv->crtc_offset_cntl = R128_READ(R128_CRTC_OFFSET_CNTL);

	R128_WRITE(R128_CRTC_OFFSET, dev_priv->front_offset);
	R128_WRITE(R128_CRTC_OFFSET_CNTL,
		   dev_priv->crtc_offset_cntl | R128_CRTC_OFFSET_FLIP_CNTL);

	dev_priv->page_flipping = 1;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;

	return 0;
}

static int r128_do_cleanup_pageflip(drm_device_t * dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("%s\n", __FUNCTION__);

	R128_WRITE(R128_CRTC_OFFSET, dev_priv->crtc_offset);
	R128_WRITE(R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl);

	if (dev_priv->current_page != 0) {
		r128_cce_dispatch_flip(dev);
		COMMIT_RING();
	}

	dev_priv->page_flipping = 0;
	return 0;
}
/* Swapping and flipping are different operations, need different ioctls.
 * They can & should be intermixed to support multiple 3d windows.
 */

static int r128_cce_flip(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG("%s\n", __FUNCTION__);

	LOCK_TEST_WITH_RETURN(dev, filp);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	if (!dev_priv->page_flipping)
		r128_do_init_pageflip(dev);

	r128_cce_dispatch_flip(dev);

	COMMIT_RING();
	return 0;
}

static int r128_cce_swap(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	DRM_DEBUG("%s\n", __FUNCTION__);

	LOCK_TEST_WITH_RETURN(dev, filp);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;

	r128_cce_dispatch_swap(dev);
	dev_priv->sarea_priv->dirty |= (R128_UPLOAD_CONTEXT |
					R128_UPLOAD_MASKS);

	COMMIT_RING();
	return 0;
}
static int r128_cce_vertex(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_r128_buf_priv_t *buf_priv;
	drm_r128_vertex_t vertex;

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(vertex, (drm_r128_vertex_t __user *) data,
				 sizeof(vertex));

	DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
		  DRM_CURRENTPID, vertex.idx, vertex.count, vertex.discard);

	if (vertex.idx < 0 || vertex.idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  vertex.idx, dma->buf_count - 1);
		return DRM_ERR(EINVAL);
	}
	if (vertex.prim < 0 ||
	    vertex.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
		DRM_ERROR("buffer prim %d\n", vertex.prim);
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[vertex.idx];
	buf_priv = buf->dev_private;

	if (buf->filp != filp) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->filp);
		return DRM_ERR(EINVAL);
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", vertex.idx);
		return DRM_ERR(EINVAL);
	}

	buf->used = vertex.count;
	buf_priv->prim = vertex.prim;
	buf_priv->discard = vertex.discard;

	r128_cce_dispatch_vertex(dev, buf);

	COMMIT_RING();
	return 0;
}
static int r128_cce_indices(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_r128_buf_priv_t *buf_priv;
	drm_r128_indices_t elts;
	int count;

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(elts, (drm_r128_indices_t __user *) data,
				 sizeof(elts));

	DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID,
		  elts.idx, elts.start, elts.end, elts.discard);

	if (elts.idx < 0 || elts.idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  elts.idx, dma->buf_count - 1);
		return DRM_ERR(EINVAL);
	}
	if (elts.prim < 0 || elts.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
		DRM_ERROR("buffer prim %d\n", elts.prim);
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf = dma->buflist[elts.idx];
	buf_priv = buf->dev_private;

	if (buf->filp != filp) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->filp);
		return DRM_ERR(EINVAL);
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", elts.idx);
		return DRM_ERR(EINVAL);
	}

	count = (elts.end - elts.start) / sizeof(u16);
	elts.start -= R128_INDEX_PRIM_OFFSET;

	if (elts.start & 0x7) {
		DRM_ERROR("misaligned buffer 0x%x\n", elts.start);
		return DRM_ERR(EINVAL);
	}
	if (elts.start < buf->used) {
		DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used);
		return DRM_ERR(EINVAL);
	}

	buf->used = elts.end;
	buf_priv->prim = elts.prim;
	buf_priv->discard = elts.discard;

	r128_cce_dispatch_indices(dev, buf, elts.start, elts.end, count);

	COMMIT_RING();
	return 0;
}
static int r128_cce_blit(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_blit_t blit;
	int ret;

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(blit, (drm_r128_blit_t __user *) data,
				 sizeof(blit));

	DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit.idx);

	if (blit.idx < 0 || blit.idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  blit.idx, dma->buf_count - 1);
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	ret = r128_cce_dispatch_blit(filp, dev, &blit);

	COMMIT_RING();
	return ret;
}
static int r128_cce_depth(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_depth_t depth;
	int ret;

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(depth, (drm_r128_depth_t __user *) data,
				 sizeof(depth));

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	ret = DRM_ERR(EINVAL);
	switch (depth.func) {
	case R128_WRITE_SPAN:
		ret = r128_cce_dispatch_write_span(dev, &depth);
		break;
	case R128_WRITE_PIXELS:
		ret = r128_cce_dispatch_write_pixels(dev, &depth);
		break;
	case R128_READ_SPAN:
		ret = r128_cce_dispatch_read_span(dev, &depth);
		break;
	case R128_READ_PIXELS:
		ret = r128_cce_dispatch_read_pixels(dev, &depth);
		break;
	}

	COMMIT_RING();
	return ret;
}
static int r128_cce_stipple(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_stipple_t stipple;
	u32 mask[32];

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(stipple, (drm_r128_stipple_t __user *) data,
				 sizeof(stipple));

	if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32)))
		return DRM_ERR(EFAULT);

	RING_SPACE_TEST_WITH_RETURN(dev_priv);

	r128_cce_dispatch_stipple(dev, mask);

	COMMIT_RING();
	return 0;
}
static int r128_cce_indirect(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_r128_buf_priv_t *buf_priv;
	drm_r128_indirect_t indirect;
#if 0
	RING_LOCALS;
#endif

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(indirect, (drm_r128_indirect_t __user *) data,
				 sizeof(indirect));

	DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n",
		  indirect.idx, indirect.start, indirect.end, indirect.discard);

	if (indirect.idx < 0 || indirect.idx >= dma->buf_count) {
		DRM_ERROR("buffer index %d (of %d max)\n",
			  indirect.idx, dma->buf_count - 1);
		return DRM_ERR(EINVAL);
	}

	buf = dma->buflist[indirect.idx];
	buf_priv = buf->dev_private;

	if (buf->filp != filp) {
		DRM_ERROR("process %d using buffer owned by %p\n",
			  DRM_CURRENTPID, buf->filp);
		return DRM_ERR(EINVAL);
	}
	if (buf->pending) {
		DRM_ERROR("sending pending buffer %d\n", indirect.idx);
		return DRM_ERR(EINVAL);
	}

	if (indirect.start < buf->used) {
		DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
			  indirect.start, buf->used);
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN(dev_priv);
	VB_AGE_TEST_WITH_RETURN(dev_priv);

	buf->used = indirect.end;
	buf_priv->discard = indirect.discard;

#if 0
	/* Wait for the 3D stream to idle before the indirect buffer
	 * containing 2D acceleration commands is processed.
	 */
	BEGIN_RING(2);
	RADEON_WAIT_UNTIL_3D_IDLE();
	ADVANCE_RING();
#endif

	/* Dispatch the indirect buffer full of commands from the
	 * X server.  This is insecure and is thus only available to
	 * privileged clients.
	 */
	r128_cce_dispatch_indirect(dev, buf, indirect.start, indirect.end);

	COMMIT_RING();
	return 0;
}
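
/* R128_PARAM_IRQ_NR is the only query the getparam ioctl currently
 * understands.
 */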
static int r128_getparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_getparam_t param;
	int value;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(param, (drm_r128_getparam_t __user *) data,
				 sizeof(param));

	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);

	switch (param.param) {
	case R128_PARAM_IRQ_NR:
		value = dev->irq;
		break;
	default:
		return DRM_ERR(EINVAL);
	}

	if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return DRM_ERR(EFAULT);
	}

	return 0;
}
void r128_driver_preclose(drm_device_t * dev, DRMFILE filp)
{
	if (dev->dev_private) {
		drm_r128_private_t *dev_priv = dev->dev_private;
		if (dev_priv->page_flipping) {
			r128_do_cleanup_pageflip(dev);
		}
	}
}

void r128_driver_lastclose(drm_device_t * dev)
{
	r128_do_cleanup_cce(dev);
}
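
/* Ioctl dispatch table.  The CCE init/start/stop/reset and indirect entries
 * are restricted to the master/root; everything else only requires DRM
 * authentication.
 */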
drm_ioctl_desc_t r128_ioctls[] = {
	[DRM_IOCTL_NR(DRM_R128_INIT)] = {r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_R128_CCE_START)] = {r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_R128_CCE_STOP)] = {r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_R128_CCE_RESET)] = {r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_R128_CCE_IDLE)] = {r128_cce_idle, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_R128_RESET)] = {r128_engine_reset, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_R128_FULLSCREEN)] = {r128_fullscreen, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_R128_SWAP)] = {r128_cce_swap, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_R128_FLIP)] = {r128_cce_flip, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_R128_CLEAR)] = {r128_cce_clear, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_R128_VERTEX)] = {r128_cce_vertex, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_R128_INDICES)] = {r128_cce_indices, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_R128_BLIT)] = {r128_cce_blit, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_R128_DEPTH)] = {r128_cce_depth, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_R128_STIPPLE)] = {r128_cce_stipple, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_R128_INDIRECT)] = {r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_R128_GETPARAM)] = {r128_getparam, DRM_AUTH},
};

int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);