2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
28 #include <linux/seq_file.h>
31 #include "radeon_drm.h"
32 #include "radeon_microcode.h"
33 #include "radeon_reg.h"
36 /* This file gathers functions specific to:
37 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
39 * Some of these functions might be used by newer ASICs.
41 void r100_hdp_reset(struct radeon_device *rdev);
42 void r100_gpu_init(struct radeon_device *rdev);
43 int r100_gui_wait_for_idle(struct radeon_device *rdev);
44 int r100_mc_wait_for_idle(struct radeon_device *rdev);
45 void r100_gpu_wait_for_vsync(struct radeon_device *rdev);
46 void r100_gpu_wait_for_vsync2(struct radeon_device *rdev);
47 int r100_debugfs_mc_info_init(struct radeon_device *rdev);
53 void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
55 /* TODO: can we do something here ? */
56 /* It seems the hw only caches one entry, so we should discard this
57 * entry; otherwise, if the first GPU GART read hits it, it
58 * could end up at the wrong address. */
61 int r100_pci_gart_enable(struct radeon_device *rdev)
66 /* Initialize common gart structure */
67 r = radeon_gart_init(rdev);
71 if (rdev->gart.table.ram.ptr == NULL) {
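/* Each PCI GART entry is a single 32-bit DMA address (see r100_pci_gart_set_page), hence 4 bytes per GPU page. */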
72 rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
73 r = radeon_gart_table_ram_alloc(rdev);
78 /* discard memory request outside of configured range */
79 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
80 WREG32(RADEON_AIC_CNTL, tmp);
81 /* set address range for PCI address translate */
82 WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
83 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
84 WREG32(RADEON_AIC_HI_ADDR, tmp);
85 /* Enable bus mastering */
86 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
87 WREG32(RADEON_BUS_CNTL, tmp);
88 /* set PCI GART page-table base address */
89 WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
90 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
91 WREG32(RADEON_AIC_CNTL, tmp);
92 r100_pci_gart_tlb_flush(rdev);
93 rdev->gart.ready = true;
97 void r100_pci_gart_disable(struct radeon_device *rdev)
101 /* discard memory request outside of configured range */
102 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
103 WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
104 WREG32(RADEON_AIC_LO_ADDR, 0);
105 WREG32(RADEON_AIC_HI_ADDR, 0);
108 int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
110 if (i < 0 || i >= rdev->gart.num_gpu_pages) {
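/* Table entries are little-endian 32-bit words, so only the lower 32 bits of the DMA address can be stored. */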
113 rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr));
117 int r100_gart_enable(struct radeon_device *rdev)
119 if (rdev->flags & RADEON_IS_AGP) {
120 r100_pci_gart_disable(rdev);
123 return r100_pci_gart_enable(rdev);
130 void r100_mc_disable_clients(struct radeon_device *rdev)
132 uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl;
134 /* FIXME: is this function correct for rs100,rs200,rs300 ? */
135 if (r100_gui_wait_for_idle(rdev)) {
136 printk(KERN_WARNING "Failed to wait GUI idle while "
137 "programming pipes. Bad things might happen.\n");
140 /* stop display and memory access */
141 ov0_scale_cntl = RREG32(RADEON_OV0_SCALE_CNTL);
142 WREG32(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE);
143 crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
144 WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS);
145 crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
147 r100_gpu_wait_for_vsync(rdev);
149 WREG32(RADEON_CRTC_GEN_CNTL,
150 (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) |
151 RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN);
153 if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
154 crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
156 r100_gpu_wait_for_vsync2(rdev);
157 WREG32(RADEON_CRTC2_GEN_CNTL,
159 ~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) |
160 RADEON_CRTC2_DISP_REQ_EN_B);
166 void r100_mc_setup(struct radeon_device *rdev)
171 r = r100_debugfs_mc_info_init(rdev);
173 DRM_ERROR("Failed to register debugfs file for R100 MC !\n");
175 /* Write VRAM size in case we are limiting it */
176 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
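/* MC_FB_LOCATION packs the framebuffer start and top addresses in 64KB units, hence the >> 16 shifts below. */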
177 tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
178 tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
179 tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
180 WREG32(RADEON_MC_FB_LOCATION, tmp);
182 /* Enable bus mastering */
183 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
184 WREG32(RADEON_BUS_CNTL, tmp);
186 if (rdev->flags & RADEON_IS_AGP) {
187 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
188 tmp = REG_SET(RADEON_MC_AGP_TOP, tmp >> 16);
189 tmp |= REG_SET(RADEON_MC_AGP_START, rdev->mc.gtt_location >> 16);
190 WREG32(RADEON_MC_AGP_LOCATION, tmp);
191 WREG32(RADEON_AGP_BASE, rdev->mc.agp_base);
193 WREG32(RADEON_MC_AGP_LOCATION, 0x0FFFFFFF);
194 WREG32(RADEON_AGP_BASE, 0);
197 tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
199 WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
200 (void)RREG32(RADEON_HOST_PATH_CNTL);
201 WREG32(RADEON_HOST_PATH_CNTL, tmp);
202 (void)RREG32(RADEON_HOST_PATH_CNTL);
205 int r100_mc_init(struct radeon_device *rdev)
209 if (r100_debugfs_rbbm_init(rdev)) {
210 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
214 /* Disable GART, which also disables out-of-GART access */
215 r100_pci_gart_disable(rdev);
217 /* Setup GPU memory space */
218 rdev->mc.gtt_location = 0xFFFFFFFFUL;
219 if (rdev->flags & RADEON_IS_AGP) {
220 r = radeon_agp_init(rdev);
222 printk(KERN_WARNING "[drm] Disabling AGP\n");
223 rdev->flags &= ~RADEON_IS_AGP;
224 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
226 rdev->mc.gtt_location = rdev->mc.agp_base;
229 r = radeon_mc_setup(rdev);
234 r100_mc_disable_clients(rdev);
235 if (r100_mc_wait_for_idle(rdev)) {
236 printk(KERN_WARNING "Failed to wait MC idle while "
237 "programming pipes. Bad things might happen.\n");
244 void r100_mc_fini(struct radeon_device *rdev)
246 r100_pci_gart_disable(rdev);
247 radeon_gart_table_ram_free(rdev);
248 radeon_gart_fini(rdev);
255 void r100_fence_ring_emit(struct radeon_device *rdev,
256 struct radeon_fence *fence)
258 /* Whoever calls radeon_fence_emit should call ring_lock and ask
259 * for enough space (today the callers are ib schedule and buffer move) */
260 /* Wait until IDLE & CLEAN */
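/* 0x1720 is the WAIT_UNTIL register; bits 16 and 17 request 2D and 3D idle & clean before the fence value is written. */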
261 radeon_ring_write(rdev, PACKET0(0x1720, 0));
262 radeon_ring_write(rdev, (1 << 16) | (1 << 17));
263 /* Emit fence sequence & fire IRQ */
264 radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
265 radeon_ring_write(rdev, fence->seq);
266 radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
267 radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
274 int r100_wb_init(struct radeon_device *rdev)
278 if (rdev->wb.wb_obj == NULL) {
279 r = radeon_object_create(rdev, NULL, 4096,
281 RADEON_GEM_DOMAIN_GTT,
282 false, &rdev->wb.wb_obj);
284 DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
287 r = radeon_object_pin(rdev->wb.wb_obj,
288 RADEON_GEM_DOMAIN_GTT,
291 DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
294 r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
296 DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
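/* Raw register offsets: 0x774 and 0x70C appear to be the scratch and CP_RB_RPTR write-back address registers, pointed at the WB buffer. */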
300 WREG32(0x774, rdev->wb.gpu_addr);
301 WREG32(0x70C, rdev->wb.gpu_addr + 1024);
306 void r100_wb_fini(struct radeon_device *rdev)
308 if (rdev->wb.wb_obj) {
309 radeon_object_kunmap(rdev->wb.wb_obj);
310 radeon_object_unpin(rdev->wb.wb_obj);
311 radeon_object_unref(&rdev->wb.wb_obj);
313 rdev->wb.wb_obj = NULL;
317 int r100_copy_blit(struct radeon_device *rdev,
321 struct radeon_fence *fence)
324 uint32_t stride_bytes = PAGE_SIZE;
326 uint32_t stride_pixels;
331 /* radeon limited to 16k stride */
332 stride_bytes &= 0x3fff;
333 /* radeon pitch is /64 */
334 pitch = stride_bytes / 64;
335 stride_pixels = stride_bytes / 4;
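/* Each BITBLT_MULTI packet copies at most 8191 pages (one page per blit row), so larger copies are split across num_loops packets. */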
336 num_loops = DIV_ROUND_UP(num_pages, 8191);
338 /* Ask for enough room for blit + flush + fence */
339 ndw = 64 + (10 * num_loops);
340 r = radeon_ring_lock(rdev, ndw);
342 DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
345 while (num_pages > 0) {
346 cur_pages = num_pages;
347 if (cur_pages > 8191) {
350 num_pages -= cur_pages;
352 /* pages are in Y direction - height;
353 * page width is in X direction - width */
354 radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
355 radeon_ring_write(rdev,
356 RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
357 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
358 RADEON_GMC_SRC_CLIPPING |
359 RADEON_GMC_DST_CLIPPING |
360 RADEON_GMC_BRUSH_NONE |
361 (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
362 RADEON_GMC_SRC_DATATYPE_COLOR |
364 RADEON_DP_SRC_SOURCE_MEMORY |
365 RADEON_GMC_CLR_CMP_CNTL_DIS |
366 RADEON_GMC_WR_MSK_DIS);
367 radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
368 radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
369 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
370 radeon_ring_write(rdev, 0);
371 radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
372 radeon_ring_write(rdev, num_pages);
373 radeon_ring_write(rdev, num_pages);
374 radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
376 radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
377 radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
378 radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
379 radeon_ring_write(rdev,
380 RADEON_WAIT_2D_IDLECLEAN |
381 RADEON_WAIT_HOST_IDLECLEAN |
382 RADEON_WAIT_DMA_GUI_IDLE);
384 r = radeon_fence_emit(rdev, fence);
386 radeon_ring_unlock_commit(rdev);
394 void r100_ring_start(struct radeon_device *rdev)
398 r = radeon_ring_lock(rdev, 2);
402 radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
403 radeon_ring_write(rdev,
404 RADEON_ISYNC_ANY2D_IDLE3D |
405 RADEON_ISYNC_ANY3D_IDLE2D |
406 RADEON_ISYNC_WAIT_IDLEGUI |
407 RADEON_ISYNC_CPSCRATCH_IDLEGUI);
408 radeon_ring_unlock_commit(rdev);
411 static void r100_cp_load_microcode(struct radeon_device *rdev)
415 if (r100_gui_wait_for_idle(rdev)) {
416 printk(KERN_WARNING "Failed to wait GUI idle while "
417 "programming pipes. Bad things might happen.\n");
420 WREG32(RADEON_CP_ME_RAM_ADDR, 0);
421 if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
422 (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
423 (rdev->family == CHIP_RS200)) {
424 DRM_INFO("Loading R100 Microcode\n");
425 for (i = 0; i < 256; i++) {
426 WREG32(RADEON_CP_ME_RAM_DATAH, R100_cp_microcode[i][1]);
427 WREG32(RADEON_CP_ME_RAM_DATAL, R100_cp_microcode[i][0]);
429 } else if ((rdev->family == CHIP_R200) ||
430 (rdev->family == CHIP_RV250) ||
431 (rdev->family == CHIP_RV280) ||
432 (rdev->family == CHIP_RS300)) {
433 DRM_INFO("Loading R200 Microcode\n");
434 for (i = 0; i < 256; i++) {
435 WREG32(RADEON_CP_ME_RAM_DATAH, R200_cp_microcode[i][1]);
436 WREG32(RADEON_CP_ME_RAM_DATAL, R200_cp_microcode[i][0]);
438 } else if ((rdev->family == CHIP_R300) ||
439 (rdev->family == CHIP_R350) ||
440 (rdev->family == CHIP_RV350) ||
441 (rdev->family == CHIP_RV380) ||
442 (rdev->family == CHIP_RS400) ||
443 (rdev->family == CHIP_RS480)) {
444 DRM_INFO("Loading R300 Microcode\n");
445 for (i = 0; i < 256; i++) {
446 WREG32(RADEON_CP_ME_RAM_DATAH, R300_cp_microcode[i][1]);
447 WREG32(RADEON_CP_ME_RAM_DATAL, R300_cp_microcode[i][0]);
449 } else if ((rdev->family == CHIP_R420) ||
450 (rdev->family == CHIP_R423) ||
451 (rdev->family == CHIP_RV410)) {
452 DRM_INFO("Loading R400 Microcode\n");
453 for (i = 0; i < 256; i++) {
454 WREG32(RADEON_CP_ME_RAM_DATAH, R420_cp_microcode[i][1]);
455 WREG32(RADEON_CP_ME_RAM_DATAL, R420_cp_microcode[i][0]);
457 } else if ((rdev->family == CHIP_RS690) ||
458 (rdev->family == CHIP_RS740)) {
459 DRM_INFO("Loading RS690/RS740 Microcode\n");
460 for (i = 0; i < 256; i++) {
461 WREG32(RADEON_CP_ME_RAM_DATAH, RS690_cp_microcode[i][1]);
462 WREG32(RADEON_CP_ME_RAM_DATAL, RS690_cp_microcode[i][0]);
464 } else if (rdev->family == CHIP_RS600) {
465 DRM_INFO("Loading RS600 Microcode\n");
466 for (i = 0; i < 256; i++) {
467 WREG32(RADEON_CP_ME_RAM_DATAH, RS600_cp_microcode[i][1]);
468 WREG32(RADEON_CP_ME_RAM_DATAL, RS600_cp_microcode[i][0]);
470 } else if ((rdev->family == CHIP_RV515) ||
471 (rdev->family == CHIP_R520) ||
472 (rdev->family == CHIP_RV530) ||
473 (rdev->family == CHIP_R580) ||
474 (rdev->family == CHIP_RV560) ||
475 (rdev->family == CHIP_RV570)) {
476 DRM_INFO("Loading R500 Microcode\n");
477 for (i = 0; i < 256; i++) {
478 WREG32(RADEON_CP_ME_RAM_DATAH, R520_cp_microcode[i][1]);
479 WREG32(RADEON_CP_ME_RAM_DATAL, R520_cp_microcode[i][0]);
484 int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
489 unsigned pre_write_timer;
490 unsigned pre_write_limit;
491 unsigned indirect2_start;
492 unsigned indirect1_start;
496 if (r100_debugfs_cp_init(rdev)) {
497 DRM_ERROR("Failed to register debugfs file for CP !\n");
500 tmp = RREG32(RADEON_CP_CSQ_STAT);
501 if ((tmp & (1 << 31))) {
502 DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
503 WREG32(RADEON_CP_CSQ_MODE, 0);
504 WREG32(RADEON_CP_CSQ_CNTL, 0);
505 WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
506 tmp = RREG32(RADEON_RBBM_SOFT_RESET);
508 WREG32(RADEON_RBBM_SOFT_RESET, 0);
509 tmp = RREG32(RADEON_RBBM_SOFT_RESET);
511 tmp = RREG32(RADEON_CP_CSQ_STAT);
512 if ((tmp & (1 << 31))) {
513 DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
516 DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
518 /* Align ring size */
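/* rb_bufsz is log2 of the ring size in 8-byte units; recompute ring_size in bytes from it so the two stay consistent (power-of-two sized ring). */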
519 rb_bufsz = drm_order(ring_size / 8);
520 ring_size = (1 << (rb_bufsz + 1)) * 4;
521 r100_cp_load_microcode(rdev);
522 r = radeon_ring_init(rdev, ring_size);
526 /* Each time the cp reads 1024 bytes (16 dword/quadword), update
527 * the rptr copy in system ram */
529 /* cp will read 128 bytes at a time (4 dwords) */
531 rdev->cp.align_mask = 16 - 1;
532 /* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
533 pre_write_timer = 64;
534 /* Force CP_RB_WPTR write if written more than one time before the
538 /* Setup the cp cache like this (cache size is 96 dwords) :
542 * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
543 * indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords))
544 * indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
545 * The idea is that most of the gpu commands will go through the indirect1 buffer,
546 * so it gets the bigger cache.
548 indirect2_start = 80;
549 indirect1_start = 16;
551 WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
552 WREG32(RADEON_CP_RB_CNTL,
554 RADEON_BUF_SWAP_32BIT |
556 REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
557 REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
558 REG_SET(RADEON_MAX_FETCH, max_fetch) |
559 RADEON_RB_NO_UPDATE);
560 /* Set ring address */
561 DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
562 WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
563 /* Force read & write ptr to 0 */
564 tmp = RREG32(RADEON_CP_RB_CNTL);
565 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
566 WREG32(RADEON_CP_RB_RPTR_WR, 0);
567 WREG32(RADEON_CP_RB_WPTR, 0);
568 WREG32(RADEON_CP_RB_CNTL, tmp);
570 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
571 rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
572 /* Set cp mode to bus mastering & enable cp */
573 WREG32(RADEON_CP_CSQ_MODE,
574 REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
575 REG_SET(RADEON_INDIRECT1_START, indirect1_start));
577 WREG32(0x744, 0x00004D4D);
578 WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
579 radeon_ring_start(rdev);
580 r = radeon_ring_test(rdev);
582 DRM_ERROR("radeon: cp isn't working (%d).\n", r);
585 rdev->cp.ready = true;
589 void r100_cp_fini(struct radeon_device *rdev)
592 rdev->cp.ready = false;
593 WREG32(RADEON_CP_CSQ_CNTL, 0);
594 radeon_ring_fini(rdev);
595 DRM_INFO("radeon: cp finalized\n");
598 void r100_cp_disable(struct radeon_device *rdev)
601 rdev->cp.ready = false;
602 WREG32(RADEON_CP_CSQ_MODE, 0);
603 WREG32(RADEON_CP_CSQ_CNTL, 0);
604 if (r100_gui_wait_for_idle(rdev)) {
605 printk(KERN_WARNING "Failed to wait GUI idle while "
606 "programming pipes. Bad things might happen.\n");
610 int r100_cp_reset(struct radeon_device *rdev)
616 reinit_cp = rdev->cp.ready;
617 rdev->cp.ready = false;
618 WREG32(RADEON_CP_CSQ_MODE, 0);
619 WREG32(RADEON_CP_CSQ_CNTL, 0);
620 WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
621 (void)RREG32(RADEON_RBBM_SOFT_RESET);
623 WREG32(RADEON_RBBM_SOFT_RESET, 0);
624 /* Wait to prevent race in RBBM_STATUS */
626 for (i = 0; i < rdev->usec_timeout; i++) {
627 tmp = RREG32(RADEON_RBBM_STATUS);
628 if (!(tmp & (1 << 16))) {
629 DRM_INFO("CP reset succeeded (RBBM_STATUS=0x%08X)\n",
632 return r100_cp_init(rdev, rdev->cp.ring_size);
638 tmp = RREG32(RADEON_RBBM_STATUS);
639 DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
647 int r100_cs_parse_packet0(struct radeon_cs_parser *p,
648 struct radeon_cs_packet *pkt,
649 const unsigned *auth, unsigned n,
650 radeon_packet0_check_t check)
659 /* Check that the register falls into the register range
660 * determined by the number of entries (n) in the
661 * safe register bitmap.
663 if (pkt->one_reg_wr) {
664 if ((reg >> 7) > n) {
668 if (((reg + (pkt->count << 2)) >> 7) > n) {
672 for (i = 0; i <= pkt->count; i++, idx++) {
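/* auth[] is a register bitmap: one bit per dword register and 32 registers per word, so reg >> 7 picks the word and (reg >> 2) & 31 the bit. */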
674 m = 1 << ((reg >> 2) & 31);
676 r = check(p, pkt, idx, reg);
681 if (pkt->one_reg_wr) {
682 if (!(auth[j] & m)) {
692 void r100_cs_dump_packet(struct radeon_cs_parser *p,
693 struct radeon_cs_packet *pkt)
695 struct radeon_cs_chunk *ib_chunk;
696 volatile uint32_t *ib;
701 ib_chunk = &p->chunks[p->chunk_ib_idx];
703 for (i = 0; i <= (pkt->count + 1); i++, idx++) {
704 DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
709 * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
710 * @parser: parser structure holding parsing context.
711 * @pkt: where to store packet information
713 * Assumes that chunk_ib_index is properly set. Will return -EINVAL
714 * if the packet is bigger than the remaining ib size, or if the packet is unknown.
716 int r100_cs_packet_parse(struct radeon_cs_parser *p,
717 struct radeon_cs_packet *pkt,
720 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
721 uint32_t header = ib_chunk->kdata[idx];
723 if (idx >= ib_chunk->length_dw) {
724 DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
725 idx, ib_chunk->length_dw);
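/* Packet header layout: bits 31:30 = type, 29:16 = dword count, and for PACKET0 bits 12:0 = starting register dword index (bit 15 = one-reg-write). */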
729 pkt->type = CP_PACKET_GET_TYPE(header);
730 pkt->count = CP_PACKET_GET_COUNT(header);
733 pkt->reg = CP_PACKET0_GET_REG(header);
734 pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
737 pkt->opcode = CP_PACKET3_GET_OPCODE(header);
743 DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
746 if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
747 DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
748 pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
755 * r100_cs_packet_next_vline() - parse userspace VLINE packet
756 * @parser: parser structure holding parsing context.
758 * Userspace sends a special sequence for VLINE waits.
759 * PACKET0 - VLINE_START_END + value
760 * PACKET0 - WAIT_UNTIL + value
761 * RELOC (P3) - crtc_id in reloc.
763 * This function parses this and relocates the VLINE START END
764 * and WAIT UNTIL packets to the correct crtc.
765 * It also detects a switched off crtc and nulls out the
768 int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
770 struct radeon_cs_chunk *ib_chunk;
771 struct drm_mode_object *obj;
772 struct drm_crtc *crtc;
773 struct radeon_crtc *radeon_crtc;
774 struct radeon_cs_packet p3reloc, waitreloc;
777 uint32_t header, h_idx, reg;
779 ib_chunk = &p->chunks[p->chunk_ib_idx];
781 /* parse the wait until */
782 r = r100_cs_packet_parse(p, &waitreloc, p->idx);
786 /* check it's a wait until with only 1 count */
787 if (waitreloc.reg != RADEON_WAIT_UNTIL ||
788 waitreloc.count != 0) {
789 DRM_ERROR("vline wait had illegal wait until segment\n");
794 if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) {
795 DRM_ERROR("vline wait had illegal wait until\n");
800 /* jump over the NOP */
801 r = r100_cs_packet_parse(p, &p3reloc, p->idx);
806 p->idx += waitreloc.count;
807 p->idx += p3reloc.count;
809 header = ib_chunk->kdata[h_idx];
810 crtc_id = ib_chunk->kdata[h_idx + 5];
811 reg = ib_chunk->kdata[h_idx] >> 2;
812 mutex_lock(&p->rdev->ddev->mode_config.mutex);
813 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
815 DRM_ERROR("cannot find crtc %d\n", crtc_id);
819 crtc = obj_to_crtc(obj);
820 radeon_crtc = to_radeon_crtc(crtc);
821 crtc_id = radeon_crtc->crtc_id;
823 if (!crtc->enabled) {
824 /* if the CRTC isn't enabled - we need to nop out the wait until */
825 ib_chunk->kdata[h_idx + 2] = PACKET2(0);
826 ib_chunk->kdata[h_idx + 3] = PACKET2(0);
827 } else if (crtc_id == 1) {
829 case AVIVO_D1MODE_VLINE_START_END:
830 header &= R300_CP_PACKET0_REG_MASK;
831 header |= AVIVO_D2MODE_VLINE_START_END >> 2;
833 case RADEON_CRTC_GUI_TRIG_VLINE:
834 header &= R300_CP_PACKET0_REG_MASK;
835 header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
838 DRM_ERROR("unknown crtc reloc\n");
842 ib_chunk->kdata[h_idx] = header;
843 ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
846 mutex_unlock(&p->rdev->ddev->mode_config.mutex);
851 * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
852 * @parser: parser structure holding parsing context.
853 * @data: pointer to relocation data
854 * @offset_start: starting offset
855 * @offset_mask: offset mask (to align start offset on)
856 * @reloc: reloc information
858 * Check that the next packet is a relocation packet3, do bo validation and compute
859 * the GPU offset using the provided start.
861 int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
862 struct radeon_cs_reloc **cs_reloc)
864 struct radeon_cs_chunk *ib_chunk;
865 struct radeon_cs_chunk *relocs_chunk;
866 struct radeon_cs_packet p3reloc;
870 if (p->chunk_relocs_idx == -1) {
871 DRM_ERROR("No relocation chunk !\n");
875 ib_chunk = &p->chunks[p->chunk_ib_idx];
876 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
877 r = r100_cs_packet_parse(p, &p3reloc, p->idx);
881 p->idx += p3reloc.count + 2;
882 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
883 DRM_ERROR("No packet3 for relocation for packet at %d.\n",
885 r100_cs_dump_packet(p, &p3reloc);
888 idx = ib_chunk->kdata[p3reloc.idx + 1];
889 if (idx >= relocs_chunk->length_dw) {
890 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
891 idx, relocs_chunk->length_dw);
892 r100_cs_dump_packet(p, &p3reloc);
895 /* FIXME: we assume reloc size is 4 dwords */
896 *cs_reloc = p->relocs_ptr[(idx / 4)];
900 static int r100_packet0_check(struct radeon_cs_parser *p,
901 struct radeon_cs_packet *pkt)
903 struct radeon_cs_chunk *ib_chunk;
904 struct radeon_cs_reloc *reloc;
905 volatile uint32_t *ib;
915 ib_chunk = &p->chunks[p->chunk_ib_idx];
919 if (CP_PACKET0_GET_ONE_REG_WR(ib_chunk->kdata[pkt->idx])) {
922 for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
924 case RADEON_CRTC_GUI_TRIG_VLINE:
925 r = r100_cs_packet_parse_vline(p);
927 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
929 r100_cs_dump_packet(p, pkt);
933 /* FIXME: only allow PACKET3 blit? easier to check for out of
935 case RADEON_DST_PITCH_OFFSET:
936 case RADEON_SRC_PITCH_OFFSET:
937 r = r100_cs_packet_next_reloc(p, &reloc);
939 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
941 r100_cs_dump_packet(p, pkt);
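/* The low 22 bits of *_PITCH_OFFSET hold the surface offset in 1KB units; the relocated BO offset is added there while the pitch bits above are kept. */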
944 tmp = ib_chunk->kdata[idx] & 0x003fffff;
945 tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
947 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
948 tile_flags |= RADEON_DST_TILE_MACRO;
949 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
950 if (reg == RADEON_SRC_PITCH_OFFSET) {
951 DRM_ERROR("Cannot src blit from microtiled surface\n");
952 r100_cs_dump_packet(p, pkt);
955 tile_flags |= RADEON_DST_TILE_MICRO;
959 ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp;
961 case RADEON_RB3D_DEPTHOFFSET:
962 case RADEON_RB3D_COLOROFFSET:
963 case R300_RB3D_COLOROFFSET0:
964 case R300_ZB_DEPTHOFFSET:
965 case R200_PP_TXOFFSET_0:
966 case R200_PP_TXOFFSET_1:
967 case R200_PP_TXOFFSET_2:
968 case R200_PP_TXOFFSET_3:
969 case R200_PP_TXOFFSET_4:
970 case R200_PP_TXOFFSET_5:
971 case RADEON_PP_TXOFFSET_0:
972 case RADEON_PP_TXOFFSET_1:
973 case RADEON_PP_TXOFFSET_2:
974 case R300_TX_OFFSET_0:
975 case R300_TX_OFFSET_0+4:
976 case R300_TX_OFFSET_0+8:
977 case R300_TX_OFFSET_0+12:
978 case R300_TX_OFFSET_0+16:
979 case R300_TX_OFFSET_0+20:
980 case R300_TX_OFFSET_0+24:
981 case R300_TX_OFFSET_0+28:
982 case R300_TX_OFFSET_0+32:
983 case R300_TX_OFFSET_0+36:
984 case R300_TX_OFFSET_0+40:
985 case R300_TX_OFFSET_0+44:
986 case R300_TX_OFFSET_0+48:
987 case R300_TX_OFFSET_0+52:
988 case R300_TX_OFFSET_0+56:
989 case R300_TX_OFFSET_0+60:
990 /* rn50 has no 3D engine so fail on any 3d setup */
991 if (ASIC_IS_RN50(p->rdev)) {
992 DRM_ERROR("attempt to use RN50 3D engine failed\n");
995 r = r100_cs_packet_next_reloc(p, &reloc);
997 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
999 r100_cs_dump_packet(p, pkt);
1002 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
1004 case R300_RB3D_COLORPITCH0:
1005 case RADEON_RB3D_COLORPITCH:
1006 r = r100_cs_packet_next_reloc(p, &reloc);
1008 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1010 r100_cs_dump_packet(p, pkt);
1014 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1015 tile_flags |= RADEON_COLOR_TILE_ENABLE;
1016 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1017 tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
1019 tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
1024 /* FIXME: we don't want to allow any other packets */
1028 /* FIXME: forbid onereg write to register on relocate */
1035 int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
1036 struct radeon_cs_packet *pkt,
1037 struct radeon_object *robj)
1039 struct radeon_cs_chunk *ib_chunk;
1042 ib_chunk = &p->chunks[p->chunk_ib_idx];
1044 if ((ib_chunk->kdata[idx+2] + 1) > radeon_object_size(robj)) {
1045 DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
1046 "(need %u have %lu) !\n",
1047 ib_chunk->kdata[idx+2] + 1,
1048 radeon_object_size(robj));
1054 static int r100_packet3_check(struct radeon_cs_parser *p,
1055 struct radeon_cs_packet *pkt)
1057 struct radeon_cs_chunk *ib_chunk;
1058 struct radeon_cs_reloc *reloc;
1061 volatile uint32_t *ib;
1065 ib_chunk = &p->chunks[p->chunk_ib_idx];
1067 switch (pkt->opcode) {
1068 case PACKET3_3D_LOAD_VBPNTR:
1069 c = ib_chunk->kdata[idx++];
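/* Vertex arrays seem to come in pairs: each 3-dword group holds a control dword followed by two buffer offsets, so two relocations are resolved per iteration (a trailing odd array is patched below). */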
1070 for (i = 0; i < (c - 1); i += 2, idx += 3) {
1071 r = r100_cs_packet_next_reloc(p, &reloc);
1073 DRM_ERROR("No reloc for packet3 %d\n",
1075 r100_cs_dump_packet(p, pkt);
1078 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1079 r = r100_cs_packet_next_reloc(p, &reloc);
1081 DRM_ERROR("No reloc for packet3 %d\n",
1083 r100_cs_dump_packet(p, pkt);
1086 ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
1089 r = r100_cs_packet_next_reloc(p, &reloc);
1091 DRM_ERROR("No reloc for packet3 %d\n",
1093 r100_cs_dump_packet(p, pkt);
1096 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1099 case PACKET3_INDX_BUFFER:
1100 r = r100_cs_packet_next_reloc(p, &reloc);
1102 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
1103 r100_cs_dump_packet(p, pkt);
1106 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1107 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
1113 /* FIXME: cleanup */
1114 /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
1115 r = r100_cs_packet_next_reloc(p, &reloc);
1117 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
1118 r100_cs_dump_packet(p, pkt);
1121 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
1123 case PACKET3_3D_DRAW_IMMD:
1124 /* triggers drawing using in-packet vertex data */
1125 case PACKET3_3D_DRAW_IMMD_2:
1126 /* triggers drawing using in-packet vertex data */
1127 case PACKET3_3D_DRAW_VBUF_2:
1128 /* triggers drawing of vertex buffers setup elsewhere */
1129 case PACKET3_3D_DRAW_INDX_2:
1130 /* triggers drawing using indices to vertex buffer */
1131 case PACKET3_3D_DRAW_VBUF:
1132 /* triggers drawing of vertex buffers setup elsewhere */
1133 case PACKET3_3D_DRAW_INDX:
1134 /* triggers drawing using indices to vertex buffer */
1138 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
1144 int r100_cs_parse(struct radeon_cs_parser *p)
1146 struct radeon_cs_packet pkt;
1150 r = r100_cs_packet_parse(p, &pkt, p->idx);
1154 p->idx += pkt.count + 2;
1157 r = r100_packet0_check(p, &pkt);
1162 r = r100_packet3_check(p, &pkt);
1165 DRM_ERROR("Unknown packet type %d !\n",
1172 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
1178 * Global GPU functions
1180 void r100_errata(struct radeon_device *rdev)
1182 rdev->pll_errata = 0;
1184 if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
1185 rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
1188 if (rdev->family == CHIP_RV100 ||
1189 rdev->family == CHIP_RS100 ||
1190 rdev->family == CHIP_RS200) {
1191 rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
1195 /* Wait for vertical sync on primary CRTC */
1196 void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
1198 uint32_t crtc_gen_cntl, tmp;
1201 crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
1202 if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
1203 !(crtc_gen_cntl & RADEON_CRTC_EN)) {
1206 /* Clear the CRTC_VBLANK_SAVE bit */
1207 WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
1208 for (i = 0; i < rdev->usec_timeout; i++) {
1209 tmp = RREG32(RADEON_CRTC_STATUS);
1210 if (tmp & RADEON_CRTC_VBLANK_SAVE) {
1217 /* Wait for vertical sync on secondary CRTC */
1218 void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
1220 uint32_t crtc2_gen_cntl, tmp;
1223 crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
1224 if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
1225 !(crtc2_gen_cntl & RADEON_CRTC2_EN))
1228 /* Clear the CRTC_VBLANK_SAVE bit */
1229 WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
1230 for (i = 0; i < rdev->usec_timeout; i++) {
1231 tmp = RREG32(RADEON_CRTC2_STATUS);
1232 if (tmp & RADEON_CRTC2_VBLANK_SAVE) {
1239 int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
1244 for (i = 0; i < rdev->usec_timeout; i++) {
1245 tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
1254 int r100_gui_wait_for_idle(struct radeon_device *rdev)
1259 if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
1260 printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
1261 " Bad things might happen.\n");
1263 for (i = 0; i < rdev->usec_timeout; i++) {
1264 tmp = RREG32(RADEON_RBBM_STATUS);
1265 if (!(tmp & (1 << 31))) {
1273 int r100_mc_wait_for_idle(struct radeon_device *rdev)
1278 for (i = 0; i < rdev->usec_timeout; i++) {
1279 /* read MC_STATUS */
1280 tmp = RREG32(0x0150);
1281 if (tmp & (1 << 2)) {
1289 void r100_gpu_init(struct radeon_device *rdev)
1291 /* TODO: anything to do here ? pipes ? */
1292 r100_hdp_reset(rdev);
1295 void r100_hdp_reset(struct radeon_device *rdev)
1299 tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
1301 WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
1302 (void)RREG32(RADEON_HOST_PATH_CNTL);
1304 WREG32(RADEON_RBBM_SOFT_RESET, 0);
1305 WREG32(RADEON_HOST_PATH_CNTL, tmp);
1306 (void)RREG32(RADEON_HOST_PATH_CNTL);
1309 int r100_rb2d_reset(struct radeon_device *rdev)
1314 WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
1315 (void)RREG32(RADEON_RBBM_SOFT_RESET);
1317 WREG32(RADEON_RBBM_SOFT_RESET, 0);
1318 /* Wait to prevent race in RBBM_STATUS */
1320 for (i = 0; i < rdev->usec_timeout; i++) {
1321 tmp = RREG32(RADEON_RBBM_STATUS);
1322 if (!(tmp & (1 << 26))) {
1323 DRM_INFO("RB2D reset succeeded (RBBM_STATUS=0x%08X)\n",
1329 tmp = RREG32(RADEON_RBBM_STATUS);
1330 DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
1334 int r100_gpu_reset(struct radeon_device *rdev)
1338 /* reset order likely matters */
1339 status = RREG32(RADEON_RBBM_STATUS);
1341 r100_hdp_reset(rdev);
1343 if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
1344 r100_rb2d_reset(rdev);
1346 /* TODO: reset 3D engine */
1348 status = RREG32(RADEON_RBBM_STATUS);
1349 if (status & (1 << 16)) {
1350 r100_cp_reset(rdev);
1352 /* Check if GPU is idle */
1353 status = RREG32(RADEON_RBBM_STATUS);
1354 if (status & (1 << 31)) {
1355 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
1358 DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
1366 static void r100_vram_get_type(struct radeon_device *rdev)
1370 rdev->mc.vram_is_ddr = false;
1371 if (rdev->flags & RADEON_IS_IGP)
1372 rdev->mc.vram_is_ddr = true;
1373 else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
1374 rdev->mc.vram_is_ddr = true;
1375 if ((rdev->family == CHIP_RV100) ||
1376 (rdev->family == CHIP_RS100) ||
1377 (rdev->family == CHIP_RS200)) {
1378 tmp = RREG32(RADEON_MEM_CNTL);
1379 if (tmp & RV100_HALF_MODE) {
1380 rdev->mc.vram_width = 32;
1382 rdev->mc.vram_width = 64;
1384 if (rdev->flags & RADEON_SINGLE_CRTC) {
1385 rdev->mc.vram_width /= 4;
1386 rdev->mc.vram_is_ddr = true;
1388 } else if (rdev->family <= CHIP_RV280) {
1389 tmp = RREG32(RADEON_MEM_CNTL);
1390 if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
1391 rdev->mc.vram_width = 128;
1393 rdev->mc.vram_width = 64;
1397 rdev->mc.vram_width = 128;
1401 static u32 r100_get_accessible_vram(struct radeon_device *rdev)
1406 aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
1408 /* Set HDP_APER_CNTL only on cards that are known not to be broken,
1409 * that is, those that have the 2nd generation multifunction PCI interface
1411 if (rdev->family == CHIP_RV280 ||
1412 rdev->family >= CHIP_RV350) {
1413 WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
1414 ~RADEON_HDP_APER_CNTL);
1415 DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
1416 return aper_size * 2;
1419 /* Older cards have all sorts of funny issues to deal with. First
1420 * check if it's a multifunction card by reading the PCI config
1421 * header type... Limit those to one aperture size
1423 pci_read_config_byte(rdev->pdev, 0xe, &byte);
1425 DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
1426 DRM_INFO("Limiting VRAM to one aperture\n");
1430 /* Single function older card. We read HDP_APER_CNTL to see how the BIOS
1431 * has set it up. We don't write this as it's broken on some ASICs but
1432 * we expect the BIOS to have done the right thing (might be too optimistic...)
1434 if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
1435 return aper_size * 2;
1439 void r100_vram_init_sizes(struct radeon_device *rdev)
1441 u64 config_aper_size;
1444 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
1446 if (rdev->flags & RADEON_IS_IGP) {
1448 /* read NB_TOM to get the amount of ram stolen for the GPU */
1449 tom = RREG32(RADEON_NB_TOM);
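/* NB_TOM describes the stolen-memory range in 64KB units: bits 31:16 hold the top page, bits 15:0 the bottom, so the size is (top - bottom + 1) << 16 bytes. */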
1450 rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
1451 /* for IGPs we need to keep VRAM where it was put by the BIOS */
1452 rdev->mc.vram_location = (tom & 0xffff) << 16;
1453 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
1455 rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
1456 /* Some production boards of m6 will report 0
1459 if (rdev->mc.vram_size == 0) {
1460 rdev->mc.vram_size = 8192 * 1024;
1461 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
1463 /* let driver place VRAM */
1464 rdev->mc.vram_location = 0xFFFFFFFFUL;
1465 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
1466 * Novell bug 204882, along with lots of Ubuntu ones */
1467 if (config_aper_size > rdev->mc.vram_size)
1468 rdev->mc.vram_size = config_aper_size;
1471 /* work out accessible VRAM */
1472 accessible = r100_get_accessible_vram(rdev);
1474 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
1475 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
1477 if (accessible > rdev->mc.aper_size)
1478 accessible = rdev->mc.aper_size;
1480 if (rdev->mc.vram_size > rdev->mc.aper_size)
1481 rdev->mc.vram_size = rdev->mc.aper_size;
1484 void r100_vram_info(struct radeon_device *rdev)
1486 r100_vram_get_type(rdev);
1488 r100_vram_init_sizes(rdev);
1493 * Indirect registers accessor
1495 void r100_pll_errata_after_index(struct radeon_device *rdev)
1497 if (!(rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) {
1500 (void)RREG32(RADEON_CLOCK_CNTL_DATA);
1501 (void)RREG32(RADEON_CRTC_GEN_CNTL);
1504 static void r100_pll_errata_after_data(struct radeon_device *rdev)
1506 /* This workaround is necessary on RV100, RS100 and RS200 chips
1507 * or the chip could hang on a subsequent access
1509 if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
1513 /* This function is required to work around a hardware bug in some (all?)
1514 * revisions of the R300. This workaround should be called after every
1515 * CLOCK_CNTL_INDEX register access. If not, register reads afterward
1516 * may not be correct.
1518 if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
1521 save = RREG32(RADEON_CLOCK_CNTL_INDEX);
1522 tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
1523 WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
1524 tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
1525 WREG32(RADEON_CLOCK_CNTL_INDEX, save);
1529 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
1533 WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
1534 r100_pll_errata_after_index(rdev);
1535 data = RREG32(RADEON_CLOCK_CNTL_DATA);
1536 r100_pll_errata_after_data(rdev);
1540 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1542 WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
1543 r100_pll_errata_after_index(rdev);
1544 WREG32(RADEON_CLOCK_CNTL_DATA, v);
1545 r100_pll_errata_after_data(rdev);
1548 uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
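/* Registers inside the directly mapped MMIO window are read with plain readl; anything beyond it goes through the MM_INDEX/MM_DATA indirect pair. */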
1551 return readl(((void __iomem *)rdev->rmmio) + reg);
1553 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
1554 return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
1558 void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1561 writel(v, ((void __iomem *)rdev->rmmio) + reg);
1563 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
1564 writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
1568 int r100_init(struct radeon_device *rdev)
1576 #if defined(CONFIG_DEBUG_FS)
1577 static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
1579 struct drm_info_node *node = (struct drm_info_node *) m->private;
1580 struct drm_device *dev = node->minor->dev;
1581 struct radeon_device *rdev = dev->dev_private;
1582 uint32_t reg, value;
1585 seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
1586 seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
1587 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
1588 for (i = 0; i < 64; i++) {
1589 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
1590 reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
1591 WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
1592 value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
1593 seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
1598 static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
1600 struct drm_info_node *node = (struct drm_info_node *) m->private;
1601 struct drm_device *dev = node->minor->dev;
1602 struct radeon_device *rdev = dev->dev_private;
1604 unsigned count, i, j;
1606 radeon_ring_free_size(rdev);
1607 rdp = RREG32(RADEON_CP_RB_RPTR);
1608 wdp = RREG32(RADEON_CP_RB_WPTR);
1609 count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
1610 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
1611 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
1612 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
1613 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
1614 seq_printf(m, "%u dwords in ring\n", count);
1615 for (j = 0; j <= count; j++) {
1616 i = (rdp + j) & rdev->cp.ptr_mask;
1617 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
1623 static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
1625 struct drm_info_node *node = (struct drm_info_node *) m->private;
1626 struct drm_device *dev = node->minor->dev;
1627 struct radeon_device *rdev = dev->dev_private;
1628 uint32_t csq_stat, csq2_stat, tmp;
1629 unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
1632 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
1633 seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
1634 csq_stat = RREG32(RADEON_CP_CSQ_STAT);
1635 csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
1636 r_rptr = (csq_stat >> 0) & 0x3ff;
1637 r_wptr = (csq_stat >> 10) & 0x3ff;
1638 ib1_rptr = (csq_stat >> 20) & 0x3ff;
1639 ib1_wptr = (csq2_stat >> 0) & 0x3ff;
1640 ib2_rptr = (csq2_stat >> 10) & 0x3ff;
1641 ib2_wptr = (csq2_stat >> 20) & 0x3ff;
1642 seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
1643 seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
1644 seq_printf(m, "Ring rptr %u\n", r_rptr);
1645 seq_printf(m, "Ring wptr %u\n", r_wptr);
1646 seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
1647 seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
1648 seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
1649 seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
1650 /* FIXME: 0, 128, 640 depend on the fifo setup; see cp_init_kms
1651 * 128 = indirect1_start * 8 & 640 = indirect2_start * 8 */
1652 seq_printf(m, "Ring fifo:\n");
1653 for (i = 0; i < 256; i++) {
1654 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
1655 tmp = RREG32(RADEON_CP_CSQ_DATA);
1656 seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
1658 seq_printf(m, "Indirect1 fifo:\n");
1659 for (i = 256; i <= 512; i++) {
1660 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
1661 tmp = RREG32(RADEON_CP_CSQ_DATA);
1662 seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
1664 seq_printf(m, "Indirect2 fifo:\n");
1665 for (i = 640; i < ib1_wptr; i++) {
1666 WREG32(RADEON_CP_CSQ_ADDR, i << 2);
1667 tmp = RREG32(RADEON_CP_CSQ_DATA);
1668 seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
1673 static int r100_debugfs_mc_info(struct seq_file *m, void *data)
1675 struct drm_info_node *node = (struct drm_info_node *) m->private;
1676 struct drm_device *dev = node->minor->dev;
1677 struct radeon_device *rdev = dev->dev_private;
1680 tmp = RREG32(RADEON_CONFIG_MEMSIZE);
1681 seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
1682 tmp = RREG32(RADEON_MC_FB_LOCATION);
1683 seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
1684 tmp = RREG32(RADEON_BUS_CNTL);
1685 seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
1686 tmp = RREG32(RADEON_MC_AGP_LOCATION);
1687 seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
1688 tmp = RREG32(RADEON_AGP_BASE);
1689 seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
1690 tmp = RREG32(RADEON_HOST_PATH_CNTL);
1691 seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
1692 tmp = RREG32(0x01D0);
1693 seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
1694 tmp = RREG32(RADEON_AIC_LO_ADDR);
1695 seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
1696 tmp = RREG32(RADEON_AIC_HI_ADDR);
1697 seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
1698 tmp = RREG32(0x01E4);
1699 seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
1703 static struct drm_info_list r100_debugfs_rbbm_list[] = {
1704 {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
1707 static struct drm_info_list r100_debugfs_cp_list[] = {
1708 {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
1709 {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
1712 static struct drm_info_list r100_debugfs_mc_info_list[] = {
1713 {"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
1717 int r100_debugfs_rbbm_init(struct radeon_device *rdev)
1719 #if defined(CONFIG_DEBUG_FS)
1720 return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
1726 int r100_debugfs_cp_init(struct radeon_device *rdev)
1728 #if defined(CONFIG_DEBUG_FS)
1729 return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
1735 int r100_debugfs_mc_info_init(struct radeon_device *rdev)
1737 #if defined(CONFIG_DEBUG_FS)
1738 return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
1744 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
1745 uint32_t tiling_flags, uint32_t pitch,
1746 uint32_t offset, uint32_t obj_size)
1748 int surf_index = reg * 16;
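/* Each surface has a 16-byte block of registers (INFO, LOWER_BOUND, UPPER_BOUND, ...), hence the reg * 16 stride. */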
1751 /* r100/r200 divide by 16 */
1752 if (rdev->family < CHIP_R300)
1757 if (rdev->family <= CHIP_RS200) {
1758 if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
1759 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
1760 flags |= RADEON_SURF_TILE_COLOR_BOTH;
1761 if (tiling_flags & RADEON_TILING_MACRO)
1762 flags |= RADEON_SURF_TILE_COLOR_MACRO;
1763 } else if (rdev->family <= CHIP_RV280) {
1764 if (tiling_flags & (RADEON_TILING_MACRO))
1765 flags |= R200_SURF_TILE_COLOR_MACRO;
1766 if (tiling_flags & RADEON_TILING_MICRO)
1767 flags |= R200_SURF_TILE_COLOR_MICRO;
1769 if (tiling_flags & RADEON_TILING_MACRO)
1770 flags |= R300_SURF_TILE_MACRO;
1771 if (tiling_flags & RADEON_TILING_MICRO)
1772 flags |= R300_SURF_TILE_MICRO;
1775 DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
1776 WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
1777 WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
1778 WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
1782 void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
1784 int surf_index = reg * 16;
1785 WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
1788 void r100_bandwidth_update(struct radeon_device *rdev)
1790 fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
1791 fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
1792 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
1793 uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
1794 fixed20_12 memtcas_ff[8] = {
1803 fixed20_12 memtcas_rs480_ff[8] = {
1813 fixed20_12 memtcas2_ff[8] = {
1823 fixed20_12 memtrbs[8] = {
1833 fixed20_12 memtrbs_r4xx[8] = {
1843 fixed20_12 min_mem_eff;
1844 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
1845 fixed20_12 cur_latency_mclk, cur_latency_sclk;
1846 fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
1847 disp_drain_rate2, read_return_rate;
1848 fixed20_12 time_disp1_drop_priority;
1850 int cur_size = 16; /* in octawords */
1851 int critical_point = 0, critical_point2;
1852 /* uint32_t read_return_rate, time_disp1_drop_priority; */
1853 int stop_req, max_stop_req;
1854 struct drm_display_mode *mode1 = NULL;
1855 struct drm_display_mode *mode2 = NULL;
1856 uint32_t pixel_bytes1 = 0;
1857 uint32_t pixel_bytes2 = 0;
1859 if (rdev->mode_info.crtcs[0]->base.enabled) {
1860 mode1 = &rdev->mode_info.crtcs[0]->base.mode;
1861 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
1863 if (rdev->mode_info.crtcs[1]->base.enabled) {
1864 mode2 = &rdev->mode_info.crtcs[1]->base.mode;
1865 pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
1868 min_mem_eff.full = rfixed_const_8(0);
1870 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
1871 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
1872 mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
1873 mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
1874 /* check crtc enables */
1876 mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
1878 mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
1879 WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
1883 * determine if there is enough bw for the current mode
1885 mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
1886 temp_ff.full = rfixed_const(100);
1887 mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
1888 sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
1889 sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
1891 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
1892 temp_ff.full = rfixed_const(temp);
1893 mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
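/* Peak memory bandwidth is roughly mclk (MHz) * bus width in bytes * 2 for DDR; the display bandwidth computed below must stay under mem_bw scaled by min_mem_eff. */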
1897 peak_disp_bw.full = 0;
1899 temp_ff.full = rfixed_const(1000);
1900 pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
1901 pix_clk.full = rfixed_div(pix_clk, temp_ff);
1902 temp_ff.full = rfixed_const(pixel_bytes1);
1903 peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
1906 temp_ff.full = rfixed_const(1000);
1907 pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
1908 pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
1909 temp_ff.full = rfixed_const(pixel_bytes2);
1910 peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
1913 mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
1914 if (peak_disp_bw.full >= mem_bw.full) {
1915 DRM_ERROR("You may not have enough display bandwidth for current mode\n"
1916 "If you have a flickering problem, try lowering the resolution, refresh rate, or color depth\n");
1919 /* Get values from the EXT_MEM_CNTL register...converting its contents. */
1920 temp = RREG32(RADEON_MEM_TIMING_CNTL);
1921 if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
1922 mem_trcd = ((temp >> 2) & 0x3) + 1;
1923 mem_trp = ((temp & 0x3)) + 1;
1924 mem_tras = ((temp & 0x70) >> 4) + 1;
1925 } else if (rdev->family == CHIP_R300 ||
1926 rdev->family == CHIP_R350) { /* r300, r350 */
1927 mem_trcd = (temp & 0x7) + 1;
1928 mem_trp = ((temp >> 8) & 0x7) + 1;
1929 mem_tras = ((temp >> 11) & 0xf) + 4;
1930 } else if (rdev->family == CHIP_RV350 ||
1931 rdev->family <= CHIP_RV380) {
1933 mem_trcd = (temp & 0x7) + 3;
1934 mem_trp = ((temp >> 8) & 0x7) + 3;
1935 mem_tras = ((temp >> 11) & 0xf) + 6;
1936 } else if (rdev->family == CHIP_R420 ||
1937 rdev->family == CHIP_R423 ||
1938 rdev->family == CHIP_RV410) {
1940 mem_trcd = (temp & 0xf) + 3;
1943 mem_trp = ((temp >> 8) & 0xf) + 3;
1946 mem_tras = ((temp >> 12) & 0x1f) + 6;
1949 } else { /* RV200, R200 */
1950 mem_trcd = (temp & 0x7) + 1;
1951 mem_trp = ((temp >> 8) & 0x7) + 1;
1952 mem_tras = ((temp >> 12) & 0xf) + 4;
1955 trcd_ff.full = rfixed_const(mem_trcd);
1956 trp_ff.full = rfixed_const(mem_trp);
1957 tras_ff.full = rfixed_const(mem_tras);
1959 /* Get values from the MEM_SDRAM_MODE_REG register...converting its */
1960 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
1961 data = (temp & (7 << 20)) >> 20;
1962 if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
1963 if (rdev->family == CHIP_RS480) /* don't think rs400 */
1964 tcas_ff = memtcas_rs480_ff[data];
1966 tcas_ff = memtcas_ff[data];
1968 tcas_ff = memtcas2_ff[data];
1970 if (rdev->family == CHIP_RS400 ||
1971 rdev->family == CHIP_RS480) {
1972 /* extra cas latency stored in bits 23-25 0-4 clocks */
1973 data = (temp >> 23) & 0x7;
1975 tcas_ff.full += rfixed_const(data);
1978 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
1979 /* on the R300, Tcas is included in Trbs.
1981 temp = RREG32(RADEON_MEM_CNTL);
1982 data = (R300_MEM_NUM_CHANNELS_MASK & temp);
1984 if (R300_MEM_USE_CD_CH_ONLY & temp) {
1985 temp = RREG32(R300_MC_IND_INDEX);
1986 temp &= ~R300_MC_IND_ADDR_MASK;
1987 temp |= R300_MC_READ_CNTL_CD_mcind;
1988 WREG32(R300_MC_IND_INDEX, temp);
1989 temp = RREG32(R300_MC_IND_DATA);
1990 data = (R300_MEM_RBS_POSITION_C_MASK & temp);
1992 temp = RREG32(R300_MC_READ_CNTL_AB);
1993 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
1996 temp = RREG32(R300_MC_READ_CNTL_AB);
1997 data = (R300_MEM_RBS_POSITION_A_MASK & temp);
1999 if (rdev->family == CHIP_RV410 ||
2000 rdev->family == CHIP_R420 ||
2001 rdev->family == CHIP_R423)
2002 trbs_ff = memtrbs_r4xx[data];
2004 trbs_ff = memtrbs[data];
2005 tcas_ff.full += trbs_ff.full;
2008 sclk_eff_ff.full = sclk_ff.full;
2010 if (rdev->flags & RADEON_IS_AGP) {
2011 fixed20_12 agpmode_ff;
2012 agpmode_ff.full = rfixed_const(radeon_agpmode);
2013 temp_ff.full = rfixed_const_666(16);
2014 sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
2016 /* TODO PCIE lanes may affect this - agpmode == 16?? */
2018 if (ASIC_IS_R300(rdev)) {
2019 sclk_delay_ff.full = rfixed_const(250);
2021 if ((rdev->family == CHIP_RV100) ||
2022 rdev->flags & RADEON_IS_IGP) {
2023 if (rdev->mc.vram_is_ddr)
2024 sclk_delay_ff.full = rfixed_const(41);
2026 sclk_delay_ff.full = rfixed_const(33);
2028 if (rdev->mc.vram_width == 128)
2029 sclk_delay_ff.full = rfixed_const(57);
2031 sclk_delay_ff.full = rfixed_const(41);
2035 mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);
2037 if (rdev->mc.vram_is_ddr) {
2038 if (rdev->mc.vram_width == 32) {
2039 k1.full = rfixed_const(40);
2042 k1.full = rfixed_const(20);
2046 k1.full = rfixed_const(40);
2050 temp_ff.full = rfixed_const(2);
2051 mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
2052 temp_ff.full = rfixed_const(c);
2053 mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
2054 temp_ff.full = rfixed_const(4);
2055 mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
2056 mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
2057 mc_latency_mclk.full += k1.full;
2059 mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
2060 mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);
2063 HW cursor time assuming worst case of full size colour cursor.
2065 temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
2066 temp_ff.full += trcd_ff.full;
2067 if (temp_ff.full < tras_ff.full)
2068 temp_ff.full = tras_ff.full;
2069 cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);
2071 temp_ff.full = rfixed_const(cur_size);
2072 cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
2074 Find the total latency for the display data.
2076 disp_latency_overhead.full = rfixed_const(80);
2077 disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
2078 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
2079 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
2081 if (mc_latency_mclk.full > mc_latency_sclk.full)
2082 disp_latency.full = mc_latency_mclk.full;
2084 disp_latency.full = mc_latency_sclk.full;
2086 /* setup Max GRPH_STOP_REQ default value */
2087 if (ASIC_IS_RV100(rdev))
2088 max_stop_req = 0x5c;
2090 max_stop_req = 0x7c;
2094 Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
2095 GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
2097 stop_req = mode1->hdisplay * pixel_bytes1 / 16;
2099 if (stop_req > max_stop_req)
2100 stop_req = max_stop_req;
2103 Find the drain rate of the display buffer.
2105 temp_ff.full = rfixed_const((16/pixel_bytes1));
2106 disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);
2109 Find the critical point of the display buffer.
2111 crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
2112 crit_point_ff.full += rfixed_const_half(0);
2114 critical_point = rfixed_trunc(crit_point_ff);
2116 if (rdev->disp_priority == 2) {
2121 The critical point should never be above max_stop_req-4. Setting
2122 GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
2124 if (max_stop_req - critical_point < 4)
2127 if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
2128 /* some R300 cards have a problem with this set to 0 when CRTC2 is enabled. */
2129 critical_point = 0x10;
2132 temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
2133 temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
2134 temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
2135 temp &= ~(RADEON_GRPH_START_REQ_MASK);
2136 if ((rdev->family == CHIP_R350) &&
2137 (stop_req > 0x15)) {
2140 temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
2141 temp |= RADEON_GRPH_BUFFER_SIZE;
2142 temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
2143 RADEON_GRPH_CRITICAL_AT_SOF |
2144 RADEON_GRPH_STOP_CNTL);
2146 Write the result into the register.
2148 WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
2149 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
2152 if ((rdev->family == CHIP_RS400) ||
2153 (rdev->family == CHIP_RS480)) {
2154 /* attempt to program RS400 disp regs correctly ??? */
2155 temp = RREG32(RS400_DISP1_REG_CNTL);
2156 temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
2157 RS400_DISP1_STOP_REQ_LEVEL_MASK);
2158 WREG32(RS400_DISP1_REQ_CNTL1, (temp |
2159 (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
2160 (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
2161 temp = RREG32(RS400_DMIF_MEM_CNTL1);
2162 temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
2163 RS400_DISP1_CRITICAL_POINT_STOP_MASK);
2164 WREG32(RS400_DMIF_MEM_CNTL1, (temp |
2165 (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
2166 (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
2170 DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n",
2171 /* (unsigned int)info->SavedReg->grph_buffer_cntl, */
2172 (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
2177 stop_req = mode2->hdisplay * pixel_bytes2 / 16;
2179 if (stop_req > max_stop_req)
2180 stop_req = max_stop_req;
2183 Find the drain rate of the display buffer.
2185 temp_ff.full = rfixed_const((16/pixel_bytes2));
2186 disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);
2188 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
2189 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
2190 grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
2191 grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
2192 if ((rdev->family == CHIP_R350) &&
2193 (stop_req > 0x15)) {
2196 grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
2197 grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
2198 grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
2199 RADEON_GRPH_CRITICAL_AT_SOF |
2200 RADEON_GRPH_STOP_CNTL);
2202 if ((rdev->family == CHIP_RS100) ||
2203 (rdev->family == CHIP_RS200))
2204 critical_point2 = 0;
2206 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
2207 temp_ff.full = rfixed_const(temp);
2208 temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
2209 if (sclk_ff.full < temp_ff.full)
2210 temp_ff.full = sclk_ff.full;
2212 read_return_rate.full = temp_ff.full;
2215 temp_ff.full = read_return_rate.full - disp_drain_rate.full;
2216 time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
2218 time_disp1_drop_priority.full = 0;
2220 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
2221 crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
2222 crit_point_ff.full += rfixed_const_half(0);
2224 critical_point2 = rfixed_trunc(crit_point_ff);
2226 if (rdev->disp_priority == 2) {
2227 critical_point2 = 0;
2230 if (max_stop_req - critical_point2 < 4)
2231 critical_point2 = 0;
2235 if (critical_point2 == 0 && rdev->family == CHIP_R300) {
2237 /* some R300 cards have a problem with this set to 0 */
2237 critical_point2 = 0x10;
2240 WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
2241 (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
2243 if ((rdev->family == CHIP_RS400) ||
2244 (rdev->family == CHIP_RS480)) {
2246 /* attempt to program RS400 disp2 regs correctly ??? */
2247 temp = RREG32(RS400_DISP2_REQ_CNTL1);
2248 temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
2249 RS400_DISP2_STOP_REQ_LEVEL_MASK);
2250 WREG32(RS400_DISP2_REQ_CNTL1, (temp |
2251 (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
2252 (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
2253 temp = RREG32(RS400_DISP2_REQ_CNTL2);
2254 temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
2255 RS400_DISP2_CRITICAL_POINT_STOP_MASK);
2256 WREG32(RS400_DISP2_REQ_CNTL2, (temp |
2257 (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
2258 (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
2260 WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
2261 WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
2262 WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
2263 WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
2266 DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n",
2267 (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));