/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
 * Created: Wed Apr  5 19:24:19 2000 by kevin@precisioninsight.com
 */
/*
 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Gareth Hughes <gareth@valinux.com>
 */

#include "drmP.h"
#include "drm.h"
#include "r128_drm.h"
#include "r128_drv.h"

#define R128_FIFO_DEBUG		0

/* CCE microcode (from ATI) */
static u32 r128_cce_microcode[] = {
	0, 276838400, 0, 268449792, 2, 142, 2, 145, 0, 1076765731, 0,
	1617039951, 0, 774592877, 0, 1987540286, 0, 2307490946U, 0,
	599558925, 0, 589505315, 0, 596487092, 0, 589505315, 1,
	11544576, 1, 206848, 1, 311296, 1, 198656, 2, 912273422, 11,
	262144, 0, 0, 1, 33559837, 1, 7438, 1, 14809, 1, 6615, 12, 28,
	1, 6614, 12, 28, 2, 23, 11, 18874368, 0, 16790922, 1, 409600, 9,
	30, 1, 147854772, 16, 420483072, 3, 8192, 0, 10240, 1, 198656,
	1, 15630, 1, 51200, 10, 34858, 9, 42, 1, 33559823, 2, 10276, 1,
	15717, 1, 15718, 2, 43, 1, 15936948, 1, 570480831, 1, 14715071,
	12, 322123831, 1, 33953125, 12, 55, 1, 33559908, 1, 15718, 2,
	46, 4, 2099258, 1, 526336, 1, 442623, 4, 4194365, 1, 509952, 1,
	459007, 3, 0, 12, 92, 2, 46, 12, 176, 1, 15734, 1, 206848, 1,
	18432, 1, 133120, 1, 100670734, 1, 149504, 1, 165888, 1,
	15975928, 1, 1048576, 6, 3145806, 1, 15715, 16, 2150645232U, 2,
	268449859, 2, 10307, 12, 176, 1, 15734, 1, 15735, 1, 15630, 1,
	15631, 1, 5253120, 6, 3145810, 16, 2150645232U, 1, 15864, 2, 82,
	1, 343310, 1, 1064207, 2, 3145813, 1, 15728, 1, 7817, 1, 15729,
	3, 15730, 12, 92, 2, 98, 1, 16168, 1, 16167, 1, 16002, 1, 16008,
	1, 15974, 1, 15975, 1, 15990, 1, 15976, 1, 15977, 1, 15980, 0,
	15981, 1, 10240, 1, 5253120, 1, 15720, 1, 198656, 6, 110, 1,
	180224, 1, 103824738, 2, 112, 2, 3145839, 0, 536885440, 1,
	114880, 14, 125, 12, 206975, 1, 33559995, 12, 198784, 0,
	33570236, 1, 15803, 0, 15804, 3, 294912, 1, 294912, 3, 442370,
	1, 11544576, 0, 811612160, 1, 12593152, 1, 11536384, 1,
	14024704, 7, 310382726, 0, 10240, 1, 14796, 1, 14797, 1, 14793,
	1, 14794, 0, 14795, 1, 268679168, 1, 9437184, 1, 268449792, 1,
	198656, 1, 9452827, 1, 1075854602, 1, 1075854603, 1, 557056, 1,
	114880, 14, 159, 12, 198784, 1, 1109409213, 12, 198783, 1,
	1107312059, 12, 198784, 1, 1109409212, 2, 162, 1, 1075854781, 1,
	1073757627, 1, 1075854780, 1, 540672, 1, 10485760, 6, 3145894,
	16, 274741248, 9, 168, 3, 4194304, 3, 4209949, 0, 0, 0, 256, 14,
	174, 1, 114857, 1, 33560007, 12, 176, 0, 10240, 1, 114858, 1,
	33560018, 1, 114857, 3, 33560007, 1, 16008, 1, 114874, 1,
	33560360, 1, 114875, 1, 33560154, 0, 15963, 0, 256, 0, 4096, 1,
	409611, 9, 188, 0, 10240, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
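
/* Read a PLL register indirectly through the CLOCK_CNTL_INDEX/DATA pair. */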
static int R128_READ_PLL(struct drm_device * dev, int addr)
{
	drm_r128_private_t *dev_priv = dev->dev_private;

	R128_WRITE8(R128_CLOCK_CNTL_INDEX, addr & 0x1f);
	return R128_READ(R128_CLOCK_CNTL_DATA);
}
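
/* Dump the CCE/GUI status registers to the kernel log; used below when a
 * wait for engine idle times out.
 */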
static void r128_status(drm_r128_private_t * dev_priv)
{
	printk("GUI_STAT           = 0x%08x\n",
	       (unsigned int)R128_READ(R128_GUI_STAT));
	printk("PM4_STAT           = 0x%08x\n",
	       (unsigned int)R128_READ(R128_PM4_STAT));
	printk("PM4_BUFFER_DL_WPTR = 0x%08x\n",
	       (unsigned int)R128_READ(R128_PM4_BUFFER_DL_WPTR));
	printk("PM4_BUFFER_DL_RPTR = 0x%08x\n",
	       (unsigned int)R128_READ(R128_PM4_BUFFER_DL_RPTR));
	printk("PM4_MICRO_CNTL     = 0x%08x\n",
	       (unsigned int)R128_READ(R128_PM4_MICRO_CNTL));
	printk("PM4_BUFFER_CNTL    = 0x%08x\n",
	       (unsigned int)R128_READ(R128_PM4_BUFFER_CNTL));
}

/* ================================================================
 * Engine, FIFO control
 */

static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv)
{
	u32 tmp;
	int i;

	tmp = R128_READ(R128_PC_NGUI_CTLSTAT) | R128_PC_FLUSH_ALL;
	R128_WRITE(R128_PC_NGUI_CTLSTAT, tmp);

	for (i = 0; i < dev_priv->usec_timeout; i++) {
		if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY)) {
			return 0;
		}
		DRM_UDELAY(1);
	}

	DRM_ERROR("failed!\n");
	return DRM_ERR(EBUSY);
}
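
/* Wait until at least 'entries' command slots are free in the GUI FIFO. */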
static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries)
{
	int i;

	for (i = 0; i < dev_priv->usec_timeout; i++) {
		int slots = R128_READ(R128_GUI_STAT) & R128_GUI_FIFOCNT_MASK;
		if (slots >= entries)
			return 0;
		DRM_UDELAY(1);
	}

	DRM_ERROR("failed!\n");
	return DRM_ERR(EBUSY);
}
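
/* Wait for the FIFO to drain and the engine to go idle, then flush the
 * pixel cache.
 */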
static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv)
{
	int i, ret;

	ret = r128_do_wait_for_fifo(dev_priv, 64);
	if (ret)
		return ret;

	for (i = 0; i < dev_priv->usec_timeout; i++) {
		if (!(R128_READ(R128_GUI_STAT) & R128_GUI_ACTIVE)) {
			r128_do_pixcache_flush(dev_priv);
			return 0;
		}
		DRM_UDELAY(1);
	}

	DRM_ERROR("failed!\n");
	return DRM_ERR(EBUSY);
}

/* ================================================================
 * CCE control, initialization
 */

/* Load the microcode for the CCE */
static void r128_cce_load_microcode(drm_r128_private_t * dev_priv)
{
	int i;

	r128_do_wait_for_idle(dev_priv);

	R128_WRITE(R128_PM4_MICROCODE_ADDR, 0);
	for (i = 0; i < 256; i++) {
		R128_WRITE(R128_PM4_MICROCODE_DATAH, r128_cce_microcode[i * 2]);
		R128_WRITE(R128_PM4_MICROCODE_DATAL,
			   r128_cce_microcode[i * 2 + 1]);
	}
}

/* Flush any pending commands to the CCE.  This should only be used just
 * prior to a wait for idle, as it informs the engine that the command
 * stream is ending.
 */
static void r128_do_cce_flush(drm_r128_private_t * dev_priv)
{
	u32 tmp;

	tmp = R128_READ(R128_PM4_BUFFER_DL_WPTR) | R128_PM4_BUFFER_DL_DONE;
	R128_WRITE(R128_PM4_BUFFER_DL_WPTR, tmp);
}

/* Wait for the CCE to go idle.
 */
int r128_do_cce_idle(drm_r128_private_t * dev_priv)
{
	int i;

	for (i = 0; i < dev_priv->usec_timeout; i++) {
		if (GET_RING_HEAD(dev_priv) == dev_priv->ring.tail) {
			int pm4stat = R128_READ(R128_PM4_STAT);
			if (((pm4stat & R128_PM4_FIFOCNT_MASK) >=
			     dev_priv->cce_fifo_size) &&
			    !(pm4stat & (R128_PM4_BUSY |
					 R128_PM4_GUI_ACTIVE))) {
				return r128_do_pixcache_flush(dev_priv);
			}
		}
		DRM_UDELAY(1);
	}

	DRM_ERROR("failed!\n");
	r128_status(dev_priv);
	return DRM_ERR(EBUSY);
}

/* Start the Concurrent Command Engine.
 */
static void r128_do_cce_start(drm_r128_private_t * dev_priv)
{
	r128_do_wait_for_idle(dev_priv);

	R128_WRITE(R128_PM4_BUFFER_CNTL,
		   dev_priv->cce_mode | dev_priv->ring.size_l2qw
		   | R128_PM4_BUFFER_CNTL_NOUPDATE);
	R128_READ(R128_PM4_BUFFER_ADDR);	/* as per the sample code */
	R128_WRITE(R128_PM4_MICRO_CNTL, R128_PM4_MICRO_FREERUN);

	dev_priv->cce_running = 1;
}

/* Reset the Concurrent Command Engine.  This will not flush any pending
 * commands, so you must wait for the CCE command stream to complete
 * before calling this routine.
 */
static void r128_do_cce_reset(drm_r128_private_t * dev_priv)
{
	R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0);
	R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0);
	dev_priv->ring.tail = 0;
}

/* Stop the Concurrent Command Engine.  This will not flush any pending
 * commands, so you must flush the command stream and wait for the CCE
 * to go idle before calling this routine.
 */
static void r128_do_cce_stop(drm_r128_private_t * dev_priv)
{
	R128_WRITE(R128_PM4_MICRO_CNTL, 0);
	R128_WRITE(R128_PM4_BUFFER_CNTL,
		   R128_PM4_NONPM4 | R128_PM4_BUFFER_CNTL_NOUPDATE);

	dev_priv->cce_running = 0;
}

/* Reset the engine.  This will stop the CCE if it is running.
 */
static int r128_do_engine_reset(struct drm_device * dev)
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	u32 clock_cntl_index, mclk_cntl, gen_reset_cntl;

	r128_do_pixcache_flush(dev_priv);

	clock_cntl_index = R128_READ(R128_CLOCK_CNTL_INDEX);
	mclk_cntl = R128_READ_PLL(dev, R128_MCLK_CNTL);

	R128_WRITE_PLL(R128_MCLK_CNTL,
		       mclk_cntl | R128_FORCE_GCP | R128_FORCE_PIPE3D_CP);

	gen_reset_cntl = R128_READ(R128_GEN_RESET_CNTL);

	/* Taken from the sample code - do not change */
	R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl | R128_SOFT_RESET_GUI);
	R128_READ(R128_GEN_RESET_CNTL);
	R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl & ~R128_SOFT_RESET_GUI);
	R128_READ(R128_GEN_RESET_CNTL);

	R128_WRITE_PLL(R128_MCLK_CNTL, mclk_cntl);
	R128_WRITE(R128_CLOCK_CNTL_INDEX, clock_cntl_index);
	R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl);

	/* Reset the CCE ring */
	r128_do_cce_reset(dev_priv);

	/* The CCE is no longer running after an engine reset */
	dev_priv->cce_running = 0;

	/* Reset any pending vertex, indirect buffers */
	r128_freelist_reset(dev);

	return 0;
}
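
/* Program the ring buffer registers: base offset, read and write pointers,
 * watermark levels, and finally enable bus mastering.
 */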
static void r128_cce_init_ring_buffer(struct drm_device * dev,
				      drm_r128_private_t * dev_priv)
{
	u32 ring_start;
	u32 tmp;

	/* The manual (p. 2) says this address is in "VM space".  This
	 * means it's an offset from the start of AGP space.
	 */
	if (!dev_priv->is_pci)
		ring_start = dev_priv->cce_ring->offset - dev->agp->base;
	else
		ring_start = dev_priv->cce_ring->offset -
		    (unsigned long)dev->sg->virtual;

	R128_WRITE(R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET);

	R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0);
	R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0);

	/* Set watermark control */
	R128_WRITE(R128_PM4_BUFFER_WM_CNTL,
		   ((R128_WATERMARK_L / 4) << R128_WMA_SHIFT)
		   | ((R128_WATERMARK_M / 4) << R128_WMB_SHIFT)
		   | ((R128_WATERMARK_N / 4) << R128_WMC_SHIFT)
		   | ((R128_WATERMARK_K / 64) << R128_WB_WM_SHIFT));

	/* Force read.  Why?  Because it's in the examples... */
	R128_READ(R128_PM4_BUFFER_ADDR);

	/* Turn on bus mastering */
	tmp = R128_READ(R128_BUS_CNTL) & ~R128_BUS_MASTER_DIS;
	R128_WRITE(R128_BUS_CNTL, tmp);
}
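
/* Per-device CCE initialization, driven by the init ioctl below: validate
 * the parameters handed in from userspace, look up the shared memory maps,
 * set up the ring and (for PCI cards) the PCI GART, then load the microcode
 * and reset the engine.
 */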
static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
{
	drm_r128_private_t *dev_priv;

	dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER);
	if (dev_priv == NULL)
		return DRM_ERR(ENOMEM);

	memset(dev_priv, 0, sizeof(drm_r128_private_t));

	dev_priv->is_pci = init->is_pci;

	if (dev_priv->is_pci && !dev->sg) {
		DRM_ERROR("PCI GART memory not allocated!\n");
		dev->dev_private = (void *)dev_priv;
		r128_do_cleanup_cce(dev);
		return DRM_ERR(EINVAL);
	}

	dev_priv->usec_timeout = init->usec_timeout;
	if (dev_priv->usec_timeout < 1 ||
	    dev_priv->usec_timeout > R128_MAX_USEC_TIMEOUT) {
		DRM_DEBUG("TIMEOUT problem!\n");
		dev->dev_private = (void *)dev_priv;
		r128_do_cleanup_cce(dev);
		return DRM_ERR(EINVAL);
	}

	dev_priv->cce_mode = init->cce_mode;

	/* GH: Simple idle check.
	 */
	atomic_set(&dev_priv->idle_count, 0);

	/* We don't support anything other than bus-mastering ring mode,
	 * but the ring can be in either AGP or PCI space for the ring
	 * read pointer.
	 */
	if ((init->cce_mode != R128_PM4_192BM) &&
	    (init->cce_mode != R128_PM4_128BM_64INDBM) &&
	    (init->cce_mode != R128_PM4_64BM_128INDBM) &&
	    (init->cce_mode != R128_PM4_64BM_64VCBM_64INDBM)) {
		DRM_DEBUG("Bad cce_mode!\n");
		dev->dev_private = (void *)dev_priv;
		r128_do_cleanup_cce(dev);
		return DRM_ERR(EINVAL);
	}

	switch (init->cce_mode) {
	case R128_PM4_NONPM4:
		dev_priv->cce_fifo_size = 0;
		break;
	case R128_PM4_192PIO:
	case R128_PM4_192BM:
		dev_priv->cce_fifo_size = 192;
		break;
	case R128_PM4_128PIO_64INDBM:
	case R128_PM4_128BM_64INDBM:
		dev_priv->cce_fifo_size = 128;
		break;
	case R128_PM4_64PIO_128INDBM:
	case R128_PM4_64BM_128INDBM:
	case R128_PM4_64PIO_64VCBM_64INDBM:
	case R128_PM4_64BM_64VCBM_64INDBM:
	case R128_PM4_64PIO_64VCPIO_64INDPIO:
		dev_priv->cce_fifo_size = 64;
		break;
	}

	switch (init->fb_bpp) {
	case 16:
		dev_priv->color_fmt = R128_DATATYPE_RGB565;
		break;
	case 32:
	default:
		dev_priv->color_fmt = R128_DATATYPE_ARGB8888;
		break;
	}
	dev_priv->front_offset = init->front_offset;
	dev_priv->front_pitch = init->front_pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->back_pitch = init->back_pitch;

	switch (init->depth_bpp) {
	case 16:
		dev_priv->depth_fmt = R128_DATATYPE_RGB565;
		break;
	case 24:
	case 32:
	default:
		dev_priv->depth_fmt = R128_DATATYPE_ARGB8888;
		break;
	}
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->depth_pitch = init->depth_pitch;
	dev_priv->span_offset = init->span_offset;

	dev_priv->front_pitch_offset_c = (((dev_priv->front_pitch / 8) << 21) |
					  (dev_priv->front_offset >> 5));
	dev_priv->back_pitch_offset_c = (((dev_priv->back_pitch / 8) << 21) |
					 (dev_priv->back_offset >> 5));
	dev_priv->depth_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) |
					  (dev_priv->depth_offset >> 5) |
					  R128_DST_TILE);
	dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) |
					 (dev_priv->span_offset >> 5));
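
	/* Look up the maps userspace registered for the SAREA, the MMIO
	 * registers, the ring, the ring read pointer and the DMA buffers.
	 */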
	dev_priv->sarea = drm_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("could not find sarea!\n");
		dev->dev_private = (void *)dev_priv;
		r128_do_cleanup_cce(dev);
		return DRM_ERR(EINVAL);
	}

	dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio) {
		DRM_ERROR("could not find mmio region!\n");
		dev->dev_private = (void *)dev_priv;
		r128_do_cleanup_cce(dev);
		return DRM_ERR(EINVAL);
	}
	dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset);
	if (!dev_priv->cce_ring) {
		DRM_ERROR("could not find cce ring region!\n");
		dev->dev_private = (void *)dev_priv;
		r128_do_cleanup_cce(dev);
		return DRM_ERR(EINVAL);
	}
	dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
	if (!dev_priv->ring_rptr) {
		DRM_ERROR("could not find ring read pointer!\n");
		dev->dev_private = (void *)dev_priv;
		r128_do_cleanup_cce(dev);
		return DRM_ERR(EINVAL);
	}
	dev->agp_buffer_token = init->buffers_offset;
	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
	if (!dev->agp_buffer_map) {
		DRM_ERROR("could not find dma buffer region!\n");
		dev->dev_private = (void *)dev_priv;
		r128_do_cleanup_cce(dev);
		return DRM_ERR(EINVAL);
	}

	if (!dev_priv->is_pci) {
		dev_priv->agp_textures =
		    drm_core_findmap(dev, init->agp_textures_offset);
		if (!dev_priv->agp_textures) {
			DRM_ERROR("could not find agp texture region!\n");
			dev->dev_private = (void *)dev_priv;
			r128_do_cleanup_cce(dev);
			return DRM_ERR(EINVAL);
		}
	}

	dev_priv->sarea_priv =
	    (drm_r128_sarea_t *) ((u8 *) dev_priv->sarea->handle +
				  init->sarea_priv_offset);
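
	/* With AGP the ring, read pointer and DMA buffers sit in the AGP
	 * aperture and must be ioremapped; in the PCI (PCI GART) case the
	 * map offsets are used directly as the CPU handles.
	 */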
	if (!dev_priv->is_pci) {
		drm_core_ioremap(dev_priv->cce_ring, dev);
		drm_core_ioremap(dev_priv->ring_rptr, dev);
		drm_core_ioremap(dev->agp_buffer_map, dev);
		if (!dev_priv->cce_ring->handle ||
		    !dev_priv->ring_rptr->handle ||
		    !dev->agp_buffer_map->handle) {
			DRM_ERROR("Could not ioremap agp regions!\n");
			dev->dev_private = (void *)dev_priv;
			r128_do_cleanup_cce(dev);
			return DRM_ERR(ENOMEM);
		}
	} else {
		dev_priv->cce_ring->handle = (void *)dev_priv->cce_ring->offset;
		dev_priv->ring_rptr->handle =
		    (void *)dev_priv->ring_rptr->offset;
		dev->agp_buffer_map->handle =
		    (void *)dev->agp_buffer_map->offset;
	}

	if (!dev_priv->is_pci)
		dev_priv->cce_buffers_offset = dev->agp->base;
	else
		dev_priv->cce_buffers_offset = (unsigned long)dev->sg->virtual;

	dev_priv->ring.start = (u32 *) dev_priv->cce_ring->handle;
	dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle
			      + init->ring_size / sizeof(u32));
	dev_priv->ring.size = init->ring_size;
	dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);

	dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;

	dev_priv->ring.high_mark = 128;

	dev_priv->sarea_priv->last_frame = 0;
	R128_WRITE(R128_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);

	dev_priv->sarea_priv->last_dispatch = 0;
	R128_WRITE(R128_LAST_DISPATCH_REG, dev_priv->sarea_priv->last_dispatch);

	if (dev_priv->is_pci) {
		dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
		dev_priv->gart_info.table_size = R128_PCIGART_TABLE_SIZE;
		dev_priv->gart_info.addr = NULL;
		dev_priv->gart_info.bus_addr = 0;
		dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
		if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
			DRM_ERROR("failed to init PCI GART!\n");
			dev->dev_private = (void *)dev_priv;
			r128_do_cleanup_cce(dev);
			return DRM_ERR(ENOMEM);
		}
		R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr);
	}

	r128_cce_init_ring_buffer(dev, dev_priv);
	r128_cce_load_microcode(dev_priv);

	dev->dev_private = (void *)dev_priv;

	r128_do_engine_reset(dev);

	return 0;
}
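
/* Undo r128_do_init_cce(): disable interrupts, release the ioremapped
 * regions or the PCI GART, and free the driver-private structure.  Also
 * used by the error paths above.
 */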
int r128_do_cleanup_cce(struct drm_device * dev)
{
	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	if (dev->dev_private) {
		drm_r128_private_t *dev_priv = dev->dev_private;

		if (!dev_priv->is_pci) {
			if (dev_priv->cce_ring != NULL)
				drm_core_ioremapfree(dev_priv->cce_ring, dev);
			if (dev_priv->ring_rptr != NULL)
				drm_core_ioremapfree(dev_priv->ring_rptr, dev);
			if (dev->agp_buffer_map != NULL) {
				drm_core_ioremapfree(dev->agp_buffer_map, dev);
				dev->agp_buffer_map = NULL;
			}
		} else {
			if (dev_priv->gart_info.bus_addr)
				if (!drm_ati_pcigart_cleanup(dev,
							     &dev_priv->gart_info))
					DRM_ERROR("failed to cleanup PCI GART!\n");
		}

		drm_free(dev->dev_private, sizeof(drm_r128_private_t),
			 DRM_MEM_DRIVER);
		dev->dev_private = NULL;
	}

	return 0;
}

int r128_cce_init(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_r128_init_t init;

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(init, (drm_r128_init_t __user *) data,
				 sizeof(init));

	switch (init.func) {
	case R128_INIT_CCE:
		return r128_do_init_cce(dev, &init);
	case R128_CLEANUP_CCE:
		return r128_do_cleanup_cce(dev);
	}

	return DRM_ERR(EINVAL);
}

int r128_cce_start(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4) {
		DRM_DEBUG("%s while CCE running\n", __FUNCTION__);
		return 0;
	}

	r128_do_cce_start(dev_priv);

	return 0;
}

/* Stop the CCE.  The engine must have been idled before calling this
 * routine.
 */
int r128_cce_stop(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_cce_stop_t stop;
	int ret;

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(stop, (drm_r128_cce_stop_t __user *) data,
				 sizeof(stop));

	/* Flush any pending CCE commands.  This ensures any outstanding
	 * commands are executed by the engine before we turn it off.
	 */
	if (stop.flush) {
		r128_do_cce_flush(dev_priv);
	}

	/* If we fail to make the engine go idle, we return an error
	 * code so that the DRM ioctl wrapper can try again.
	 */
	if (stop.idle) {
		ret = r128_do_cce_idle(dev_priv);
		if (ret)
			return ret;
	}

	/* Finally, we can turn off the CCE.  If the engine isn't idle,
	 * we will get some dropped triangles as they won't be fully
	 * rendered before the CCE is shut down.
	 */
	r128_do_cce_stop(dev_priv);

	/* Reset the engine */
	r128_do_engine_reset(dev);

	return 0;
}

/* Just reset the CCE ring.  Called as part of an X Server engine reset.
 */
int r128_cce_reset(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (!dev_priv) {
		DRM_DEBUG("%s called before init done\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	r128_do_cce_reset(dev_priv);

	/* The CCE is no longer running after an engine reset */
	dev_priv->cce_running = 0;

	return 0;
}

int r128_cce_idle(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (dev_priv->cce_running) {
		r128_do_cce_flush(dev_priv);
	}

	return r128_do_cce_idle(dev_priv);
}

int r128_engine_reset(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	LOCK_TEST_WITH_RETURN(dev, filp);

	return r128_do_engine_reset(dev);
}

int r128_fullscreen(DRM_IOCTL_ARGS)
{
	return DRM_ERR(EINVAL);
}

/* ================================================================
 * Freelist management
 */
#define R128_BUFFER_USED	0xffffffff
#define R128_BUFFER_FREE	0
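
/* Build the buffer freelist: one entry per DMA buffer, linked behind a
 * dummy head node that is permanently marked R128_BUFFER_USED.
 */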
static int r128_freelist_init(struct drm_device * dev)
{
	struct drm_device_dma *dma = dev->dma;
	drm_r128_private_t *dev_priv = dev->dev_private;
	struct drm_buf *buf;
	drm_r128_buf_priv_t *buf_priv;
	drm_r128_freelist_t *entry;
	int i;

	dev_priv->head = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER);
	if (dev_priv->head == NULL)
		return DRM_ERR(ENOMEM);

	memset(dev_priv->head, 0, sizeof(drm_r128_freelist_t));
	dev_priv->head->age = R128_BUFFER_USED;

	for (i = 0; i < dma->buf_count; i++) {
		buf = dma->buflist[i];
		buf_priv = buf->dev_private;

		entry = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER);
		if (!entry)
			return DRM_ERR(ENOMEM);

		entry->age = R128_BUFFER_FREE;
		entry->buf = buf;
		entry->prev = dev_priv->head;
		entry->next = dev_priv->head->next;
		if (!entry->next)
			dev_priv->tail = entry;

		buf_priv->discard = 0;
		buf_priv->dispatched = 0;
		buf_priv->list_entry = entry;

		dev_priv->head->next = entry;

		if (dev_priv->head->next)
			dev_priv->head->next->prev = entry;
	}

	return 0;
}
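
/* Return a DMA buffer that is free for reuse: first look for one not yet
 * claimed by any client, then poll R128_LAST_DISPATCH_REG and reclaim a
 * pending buffer whose dispatch age has passed.
 */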
static struct drm_buf *r128_freelist_get(struct drm_device * dev)
{
	struct drm_device_dma *dma = dev->dma;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv;
	struct drm_buf *buf;
	int i, t;

	/* FIXME: Optimize -- use freelist code */

	for (i = 0; i < dma->buf_count; i++) {
		buf = dma->buflist[i];
		buf_priv = buf->dev_private;
		if (buf->filp == 0)
			return buf;
	}

	for (t = 0; t < dev_priv->usec_timeout; t++) {
		u32 done_age = R128_READ(R128_LAST_DISPATCH_REG);

		for (i = 0; i < dma->buf_count; i++) {
			buf = dma->buflist[i];
			buf_priv = buf->dev_private;
			if (buf->pending && buf_priv->age <= done_age) {
				/* The buffer has been processed, so it
				 * can now be used.
				 */
				buf->pending = 0;
				return buf;
			}
		}
		DRM_UDELAY(1);
	}

	DRM_DEBUG("returning NULL!\n");
	return NULL;
}

void r128_freelist_reset(struct drm_device * dev)
{
	struct drm_device_dma *dma = dev->dma;
	int i;

	for (i = 0; i < dma->buf_count; i++) {
		struct drm_buf *buf = dma->buflist[i];
		drm_r128_buf_priv_t *buf_priv = buf->dev_private;
		buf_priv->age = 0;
	}
}

/* ================================================================
 * CCE command submission
 */

int r128_wait_ring(drm_r128_private_t * dev_priv, int n)
{
	drm_r128_ring_buffer_t *ring = &dev_priv->ring;
	int i;

	for (i = 0; i < dev_priv->usec_timeout; i++) {
		r128_update_ring_snapshot(dev_priv);
		if (ring->space >= n)
			return 0;
		DRM_UDELAY(1);
	}

	/* FIXME: This is being ignored... */
	DRM_ERROR("failed!\n");
	return DRM_ERR(EBUSY);
}
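
/* Hand out up to request_count free buffers, copying each buffer's index
 * and size back to user space.
 */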
static int r128_cce_get_buffers(DRMFILE filp, struct drm_device * dev,
				struct drm_dma * d)
{
	int i;
	struct drm_buf *buf;

	for (i = d->granted_count; i < d->request_count; i++) {
		buf = r128_freelist_get(dev);
		if (!buf)
			return DRM_ERR(EAGAIN);

		buf->filp = filp;

		if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
				     sizeof(buf->idx)))
			return DRM_ERR(EFAULT);
		if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
				     sizeof(buf->total)))
			return DRM_ERR(EFAULT);

		d->granted_count++;
	}
	return 0;
}

int r128_cce_buffers(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_device_dma *dma = dev->dma;
	int ret = 0;
	struct drm_dma __user *argp = (void __user *)data;
	struct drm_dma d;

	LOCK_TEST_WITH_RETURN(dev, filp);

	DRM_COPY_FROM_USER_IOCTL(d, argp, sizeof(d));

	/* Please don't send us buffers.
	 */
	if (d.send_count != 0) {
		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
			  DRM_CURRENTPID, d.send_count);
		return DRM_ERR(EINVAL);
	}

	/* We'll send you buffers.
	 */
	if (d.request_count < 0 || d.request_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
			  DRM_CURRENTPID, d.request_count, dma->buf_count);
		return DRM_ERR(EINVAL);
	}

	d.granted_count = 0;

	if (d.request_count) {
		ret = r128_cce_get_buffers(filp, dev, &d);
	}

	DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d));

	return ret;
}