/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"

/* r300,r350,rv350,rv370,rv380 depend on: */
void r100_hdp_reset(struct radeon_device *rdev);
int r100_cp_reset(struct radeon_device *rdev);
int r100_rb2d_reset(struct radeon_device *rdev);
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
int r100_pci_gart_enable(struct radeon_device *rdev);
void r100_pci_gart_disable(struct radeon_device *rdev);
void r100_mc_setup(struct radeon_device *rdev);
void r100_mc_disable_clients(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
int r100_cs_packet_parse(struct radeon_cs_parser *p,
                         struct radeon_cs_packet *pkt,
                         unsigned idx);
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
                              struct radeon_cs_reloc **cs_reloc);
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
                          struct radeon_cs_packet *pkt,
                          unsigned *auth, unsigned n,
                          radeon_packet0_check_t check);
int r100_cs_parse_packet3(struct radeon_cs_parser *p,
                          struct radeon_cs_packet *pkt,
                          unsigned *auth, unsigned n,
                          radeon_packet3_check_t check);
void r100_cs_dump_packet(struct radeon_cs_parser *p,
                         struct radeon_cs_packet *pkt);

/* This file gathers functions specific to:
 * r300,r350,rv350,rv370,rv380
 *
 * Some of these functions might be used by newer ASICs.
 */
void r300_gpu_init(struct radeon_device *rdev);
int r300_mc_wait_for_idle(struct radeon_device *rdev);
int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);


/*
 * rv370,rv380 PCIE GART
 */
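/* The GART TLB is invalidated by setting RADEON_PCIE_TX_GART_INVALIDATE_TLB
 * and then restoring the original control value; the dummy read between the
 * two writes is presumably there to post the first write before it is undone.
 */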
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
        uint32_t tmp;
        int i;

        /* Workaround for a HW bug: flush twice */
        for (i = 0; i < 2; i++) {
                tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
                WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
                (void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
                WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
                mb();
        }
}

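/*
 * Bring up the PCIE GART: allocate the page table in VRAM and program the
 * aperture registers so that GPU accesses inside [gtt_location,
 * gtt_location + gtt_size) are translated through the table, while
 * requests outside that range are discarded.
 */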
int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
        uint32_t table_addr;
        uint32_t tmp;
        int r;

        /* Initialize common gart structure */
        r = radeon_gart_init(rdev);
        if (r) {
                return r;
        }
        r = rv370_debugfs_pcie_gart_info_init(rdev);
        if (r) {
                DRM_ERROR("Failed to register debugfs file for PCIE GART!\n");
        }
        rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
        r = radeon_gart_table_vram_alloc(rdev);
        if (r) {
                return r;
        }
        /* Discard memory requests outside of the configured range */
        tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
        WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
        tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 4096;
        WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
        WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
        WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
        table_addr = rdev->gart.table_addr;
        WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
        /* FIXME: setup default page */
        WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
        WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
        /* Clear error; 0x18 appears to be the GART error register */
        WREG32_PCIE(0x18, 0);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        tmp |= RADEON_PCIE_TX_GART_EN;
        tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
        rv370_pcie_gart_tlb_flush(rdev);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
                 rdev->mc.gtt_size >> 20, table_addr);
        rdev->gart.ready = true;
        return 0;
}

void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
        uint32_t tmp;

        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
        if (rdev->gart.table.vram.robj) {
                radeon_object_kunmap(rdev->gart.table.vram.robj);
                radeon_object_unpin(rdev->gart.table.vram.robj);
        }
}

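/* Each GART entry is a 32-bit word: the bus address shifted right by 8
 * (which leaves the low nibble free for 4K-aligned pages), the 0xC flag
 * bits (assumed here to be enable/snoop bits) and, in the top byte,
 * bits 32..39 of a 40-bit bus address.
 */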
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
        void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;

        if (i < 0 || i >= rdev->gart.num_gpu_pages) {
                return -EINVAL;
        }
        addr = (((u32)addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 24) | 0xC;
        writel(cpu_to_le32(addr), ptr + (i * 4));
        return 0;
}

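/*
 * Pick the GART backend at runtime: AGP boards use the AGP aperture (both
 * on-chip GARTs are disabled), PCIE boards get the rv370 PCIE GART hooks,
 * and everything else falls back to the PCI GART inherited from r100.
 */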
int r300_gart_enable(struct radeon_device *rdev)
{
#if __OS_HAS_AGP
        if (rdev->flags & RADEON_IS_AGP) {
                if (rdev->family > CHIP_RV350) {
                        rv370_pcie_gart_disable(rdev);
                } else {
                        r100_pci_gart_disable(rdev);
                }
                return 0;
        }
#endif
        if (rdev->flags & RADEON_IS_PCIE) {
                rdev->asic->gart_disable = &rv370_pcie_gart_disable;
                rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
                rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
                return rv370_pcie_gart_enable(rdev);
        }
        return r100_pci_gart_enable(rdev);
}


/*
 * MC
 */
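/*
 * Memory controller initialization: decide where VRAM and the GTT live in
 * the GPU address space (reusing the AGP aperture when available), then
 * program the MC with its clients disabled and the controller idle.
 */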
int r300_mc_init(struct radeon_device *rdev)
{
        int r;

        if (r100_debugfs_rbbm_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for RBBM!\n");
        }

        r300_gpu_init(rdev);
        r100_pci_gart_disable(rdev);
        if (rdev->flags & RADEON_IS_PCIE) {
                rv370_pcie_gart_disable(rdev);
        }

        /* Setup GPU memory space */
        rdev->mc.vram_location = 0xFFFFFFFFUL;
        rdev->mc.gtt_location = 0xFFFFFFFFUL;
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
                if (r) {
                        printk(KERN_WARNING "[drm] Disabling AGP\n");
                        rdev->flags &= ~RADEON_IS_AGP;
                        rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
                } else {
                        rdev->mc.gtt_location = rdev->mc.agp_base;
                }
        }
        r = radeon_mc_setup(rdev);
        if (r) {
                return r;
        }

        /* Program GPU memory space */
        r100_mc_disable_clients(rdev);
        if (r300_mc_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait for MC idle while "
                       "programming pipes. Bad things might happen.\n");
        }
        r100_mc_setup(rdev);
        return 0;
}

void r300_mc_fini(struct radeon_device *rdev)
{
        if (rdev->flags & RADEON_IS_PCIE) {
                rv370_pcie_gart_disable(rdev);
                radeon_gart_table_vram_free(rdev);
        } else {
                r100_pci_gart_disable(rdev);
                radeon_gart_table_ram_free(rdev);
        }
        radeon_gart_fini(rdev);
}


/*
 * Fence emission
 */
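/*
 * A fence is emitted by flushing the 3D caches, waiting for the pipeline
 * to go idle & clean, writing the fence sequence number to the driver's
 * scratch register and firing a software interrupt so that waiters can be
 * woken up.
 */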
void r300_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
{
        /* Whoever calls radeon_fence_emit should call ring_lock and ask
         * for enough space (today the callers are ib schedule and buffer move) */
        /* Write SC registers so SC & US assert idle */
        radeon_ring_write(rdev, PACKET0(0x43E0, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(0x43E4, 0));
        radeon_ring_write(rdev, 0);
        /* Flush 3D cache */
        radeon_ring_write(rdev, PACKET0(0x4E4C, 0));
        radeon_ring_write(rdev, (2 << 0));
        radeon_ring_write(rdev, PACKET0(0x4F18, 0));
        radeon_ring_write(rdev, (1 << 0));
        /* Wait until IDLE & CLEAN */
        radeon_ring_write(rdev, PACKET0(0x1720, 0));
        radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
        /* Emit fence sequence & fire IRQ */
        radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
        radeon_ring_write(rdev, fence->seq);
        radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
        radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}


/*
 * Global GPU functions
 */
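/*
 * DMA copies are emitted in chunks: each DMA packet moves at most 0x1FFFFF
 * bytes, presumably the width of the packet's size field, with bits 30 and
 * 31 of that dword used as flags.
 */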
int r300_copy_dma(struct radeon_device *rdev,
                  uint64_t src_offset,
                  uint64_t dst_offset,
                  unsigned num_pages,
                  struct radeon_fence *fence)
{
        uint32_t size;
        uint32_t cur_size;
        int i, num_loops;
        int r = 0;

        /* radeon pitch is in units of 64 bytes */
        size = num_pages << PAGE_SHIFT;
        num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
        r = radeon_ring_lock(rdev, num_loops * 4 + 64);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                return r;
        }
        /* Must wait for 2D idle & clean before DMA or hangs might happen */
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev, (1 << 16));
        for (i = 0; i < num_loops; i++) {
                cur_size = size;
                if (cur_size > 0x1FFFFF) {
                        cur_size = 0x1FFFFF;
                }
                size -= cur_size;
                radeon_ring_write(rdev, PACKET0(0x720, 2));
                radeon_ring_write(rdev, src_offset);
                radeon_ring_write(rdev, dst_offset);
                radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
                src_offset += cur_size;
                dst_offset += cur_size;
        }
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
        if (fence) {
                r = radeon_fence_emit(rdev, fence);
        }
        radeon_ring_unlock_commit(rdev);
        return r;
}

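/*
 * Prime the CP ring with the initial 3D engine state: pipe configuration,
 * cache flushes and multisample positions, so that later command streams
 * start from a known state.
 */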
void r300_ring_start(struct radeon_device *rdev)
{
        unsigned gb_tile_config;
        int r;

        /* Sub-pixel precision of 1/12 so we can have 4K rendering according to the docs */
        gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
        switch (rdev->num_gb_pipes) {
        case 2:
                gb_tile_config |= R300_PIPE_COUNT_R300;
                break;
        case 3:
                gb_tile_config |= R300_PIPE_COUNT_R420_3P;
                break;
        case 4:
                gb_tile_config |= R300_PIPE_COUNT_R420;
                break;
        case 1:
        default:
                gb_tile_config |= R300_PIPE_COUNT_RV350;
                break;
        }

        r = radeon_ring_lock(rdev, 64);
        if (r) {
                return;
        }
        radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
        radeon_ring_write(rdev,
                          RADEON_ISYNC_ANY2D_IDLE3D |
                          RADEON_ISYNC_ANY3D_IDLE2D |
                          RADEON_ISYNC_WAIT_IDLEGUI |
                          RADEON_ISYNC_CPSCRATCH_IDLEGUI);
        radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
        radeon_ring_write(rdev, gb_tile_config);
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev,
                          RADEON_WAIT_2D_IDLECLEAN |
                          RADEON_WAIT_3D_IDLECLEAN);
        radeon_ring_write(rdev, PACKET0(0x170C, 0));
        radeon_ring_write(rdev, 1 << 31);
        radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev,
                          RADEON_WAIT_2D_IDLECLEAN |
                          RADEON_WAIT_3D_IDLECLEAN);
        radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
        radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
        radeon_ring_write(rdev,
                          ((6 << R300_MS_X0_SHIFT) |
                           (6 << R300_MS_Y0_SHIFT) |
                           (6 << R300_MS_X1_SHIFT) |
                           (6 << R300_MS_Y1_SHIFT) |
                           (6 << R300_MS_X2_SHIFT) |
                           (6 << R300_MS_Y2_SHIFT) |
                           (6 << R300_MSBD0_Y_SHIFT) |
                           (6 << R300_MSBD0_X_SHIFT)));
        radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
        radeon_ring_write(rdev,
                          ((6 << R300_MS_X3_SHIFT) |
                           (6 << R300_MS_Y3_SHIFT) |
                           (6 << R300_MS_X4_SHIFT) |
                           (6 << R300_MS_Y4_SHIFT) |
                           (6 << R300_MS_X5_SHIFT) |
                           (6 << R300_MS_Y5_SHIFT) |
                           (6 << R300_MSBD1_SHIFT)));
        radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
        radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
        radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
        radeon_ring_write(rdev,
                          R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
        radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
        radeon_ring_write(rdev,
                          R300_GEOMETRY_ROUND_NEAREST |
                          R300_COLOR_ROUND_NEAREST);
        radeon_ring_unlock_commit(rdev);
}

void r300_errata(struct radeon_device *rdev)
{
        rdev->pll_errata = 0;

        if (rdev->family == CHIP_R300 &&
            (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
                rdev->pll_errata |= CHIP_ERRATA_R300_CG;
        }
}

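/* Poll until the memory controller reports idle; returns 0 on success or
 * -1 if the MC is still busy after rdev->usec_timeout microseconds.
 */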
int r300_mc_wait_for_idle(struct radeon_device *rdev)
{
        unsigned i;
        uint32_t tmp;

        for (i = 0; i < rdev->usec_timeout; i++) {
                /* read MC_STATUS */
                tmp = RREG32(0x0150);
                if (tmp & (1 << 4)) {
                        return 0;
                }
                DRM_UDELAY(1);
        }
        return -1;
}

void r300_gpu_init(struct radeon_device *rdev)
{
        uint32_t gb_tile_config, tmp;

        r100_hdp_reset(rdev);
        /* FIXME: does rv380 have one pipe? */
        if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
                /* r300,r350 */
                rdev->num_gb_pipes = 2;
        } else {
                /* rv350,rv370,rv380 */
                rdev->num_gb_pipes = 1;
        }
        gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
        switch (rdev->num_gb_pipes) {
        case 2:
                gb_tile_config |= R300_PIPE_COUNT_R300;
                break;
        case 3:
                gb_tile_config |= R300_PIPE_COUNT_R420_3P;
                break;
        case 4:
                gb_tile_config |= R300_PIPE_COUNT_R420;
                break;
        case 1:
        default:
                gb_tile_config |= R300_PIPE_COUNT_RV350;
                break;
        }
        WREG32(R300_GB_TILE_CONFIG, gb_tile_config);

        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait for GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }

        tmp = RREG32(0x170C);
        WREG32(0x170C, tmp | (1 << 31));

        WREG32(R300_RB2D_DSTCACHE_MODE,
               R300_DC_AUTOFLUSH_ENABLE |
               R300_DC_DC_DISABLE_IGNORE_PE);

        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait for GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }
        if (r300_mc_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait for MC idle while "
                       "programming pipes. Bad things might happen.\n");
        }
        DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes);
}

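/*
 * Soft reset of the geometry engine (VAP/GA). The CP is stopped first and
 * reinitialized afterwards if it was running; RBBM_STATUS bits 20 and 26
 * (VAP and CP busy, per the error message below) are polled to decide
 * whether the reset took.
 */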
int r300_ga_reset(struct radeon_device *rdev)
{
        uint32_t tmp;
        bool reinit_cp;
        int i;

        reinit_cp = rdev->cp.ready;
        rdev->cp.ready = false;
        for (i = 0; i < rdev->usec_timeout; i++) {
                WREG32(RADEON_CP_CSQ_MODE, 0);
                WREG32(RADEON_CP_CSQ_CNTL, 0);
                WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
                (void)RREG32(RADEON_RBBM_SOFT_RESET);
                udelay(200);
                WREG32(RADEON_RBBM_SOFT_RESET, 0);
                /* Wait to prevent race in RBBM_STATUS */
                mdelay(1);
                tmp = RREG32(RADEON_RBBM_STATUS);
                if (tmp & ((1 << 20) | (1 << 26))) {
                        DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
                        /* GA still busy, soft reset it */
                        WREG32(0x429C, 0x200);
                        WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
                        WREG32(0x43E0, 0);
                        WREG32(0x43E4, 0);
                        WREG32(0x24AC, 0);
                }
                /* Wait to prevent race in RBBM_STATUS */
                mdelay(1);
                tmp = RREG32(RADEON_RBBM_STATUS);
                if (!(tmp & ((1 << 20) | (1 << 26)))) {
                        break;
                }
        }
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(RADEON_RBBM_STATUS);
                if (!(tmp & ((1 << 20) | (1 << 26)))) {
                        DRM_INFO("GA reset succeeded (RBBM_STATUS=0x%08X)\n",
                                 tmp);
                        if (reinit_cp) {
                                return r100_cp_init(rdev, rdev->cp.ring_size);
                        }
                        return 0;
                }
                DRM_UDELAY(1);
        }
        tmp = RREG32(RADEON_RBBM_STATUS);
        DRM_ERROR("Failed to reset GA! (RBBM_STATUS=0x%08X)\n", tmp);
        return -1;
}

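/*
 * Full GPU reset: HDP first, then the 2D engine, the geometry engine and
 * finally the CP, in an order that reportedly matters; RBBM_STATUS is
 * re-read between stages to decide which blocks still need resetting.
 */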
int r300_gpu_reset(struct radeon_device *rdev)
{
        uint32_t status;

        /* reset order likely matters */
        status = RREG32(RADEON_RBBM_STATUS);
        /* reset HDP */
        r100_hdp_reset(rdev);
        /* reset rb2d */
        if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
                r100_rb2d_reset(rdev);
        }
        /* reset GA */
        if (status & ((1 << 20) | (1 << 26))) {
                r300_ga_reset(rdev);
        }
        /* reset CP */
        status = RREG32(RADEON_RBBM_STATUS);
        if (status & (1 << 16)) {
                r100_cp_reset(rdev);
        }
        /* Check if GPU is idle */
        status = RREG32(RADEON_RBBM_STATUS);
        if (status & (1 << 31)) {
                DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
                return -1;
        }
        DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
        return 0;
}


/*
 * r300,r350,rv350,rv380 VRAM info
 */
void r300_vram_info(struct radeon_device *rdev)
{
        uint32_t tmp;

        /* DDR for all cards after R300 & IGP */
        rdev->mc.vram_is_ddr = true;
        tmp = RREG32(RADEON_MEM_CNTL);
        if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
                rdev->mc.vram_width = 128;
        } else {
                rdev->mc.vram_width = 64;
        }
        rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);

        rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
        rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
}


/*
 * Indirect registers accessor
 */
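/*
 * PCIE registers are not memory mapped directly; they are reached through
 * an index/data pair: write the register number to RADEON_PCIE_INDEX, then
 * access RADEON_PCIE_DATA. The extra index reads are presumably there to
 * flush the posted writes before the data access.
 */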
uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
{
        uint32_t r;

        WREG8(RADEON_PCIE_INDEX, ((reg) & 0xff));
        (void)RREG32(RADEON_PCIE_INDEX);
        r = RREG32(RADEON_PCIE_DATA);
        return r;
}

void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
        WREG8(RADEON_PCIE_INDEX, ((reg) & 0xff));
        (void)RREG32(RADEON_PCIE_INDEX);
        WREG32(RADEON_PCIE_DATA, (v));
        (void)RREG32(RADEON_PCIE_DATA);
}

/*
 * PCIE Lanes
 */

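/*
 * Reconfigure the PCIE link width at runtime. The requested lane count is
 * mapped onto a LC_LINK_WIDTH field value; if the link already runs at
 * that width nothing is done, otherwise RECONFIG_NOW triggers retraining
 * and the control register is polled until it stops reading back as all
 * ones (presumably what it returns while the link is down).
 */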
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
        uint32_t link_width_cntl, mask;

        if (rdev->flags & RADEON_IS_IGP)
                return;

        if (!(rdev->flags & RADEON_IS_PCIE))
                return;

        /* FIXME wait for idle */

        switch (lanes) {
        case 0:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
                break;
        case 1:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
                break;
        case 2:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
                break;
        case 4:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
                break;
        case 8:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
                break;
        case 12:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
                break;
        case 16:
        default:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
                break;
        }

        link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

        if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
            (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
                return;

        link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
                             RADEON_PCIE_LC_RECONFIG_NOW |
                             RADEON_PCIE_LC_RECONFIG_LATER |
                             RADEON_PCIE_LC_SHORT_RECONFIG_EN);
        link_width_cntl |= mask;
        WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
        WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
                                                     RADEON_PCIE_LC_RECONFIG_NOW));

        /* wait for lane set to complete */
        link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
        while (link_width_cntl == 0xffffffff)
                link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
}


/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        uint32_t tmp;

        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
        seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
        seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
        seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
        seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
        seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
        seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
        return 0;
}

static struct drm_info_list rv370_pcie_gart_info_list[] = {
        {"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
};
#endif

int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
#else
        return 0;
#endif
}


/*
 * CS functions
 */
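/*
 * Command stream checking: while a submission is parsed, the color buffers
 * and the depth buffer it binds are tracked (object, pitch, bytes per
 * pixel, offset) so that r300_cs_track_check() can verify that every draw
 * stays inside the backing buffer objects.
 */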
struct r300_cs_track_cb {
        struct radeon_object    *robj;
        unsigned                pitch;
        unsigned                cpp;
        unsigned                offset;
};

struct r300_cs_track {
        unsigned                num_cb;
        unsigned                maxy;
        struct r300_cs_track_cb cb[4];
        struct r300_cs_track_cb zb;
        bool                    z_enabled;
};

int r300_cs_track_check(struct radeon_device *rdev, struct r300_cs_track *track)
{
        unsigned i;
        unsigned long size;

        for (i = 0; i < track->num_cb; i++) {
                if (track->cb[i].robj == NULL) {
                        DRM_ERROR("[drm] No buffer for color buffer %d!\n", i);
                        return -EINVAL;
                }
                size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
                size += track->cb[i].offset;
                if (size > radeon_object_size(track->cb[i].robj)) {
                        DRM_ERROR("[drm] Buffer too small for color buffer %d "
                                  "(need %lu, have %lu)!\n", i, size,
                                  radeon_object_size(track->cb[i].robj));
                        DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
                                  i, track->cb[i].pitch, track->cb[i].cpp,
                                  track->cb[i].offset, track->maxy);
                        return -EINVAL;
                }
        }
        if (track->z_enabled) {
                if (track->zb.robj == NULL) {
                        DRM_ERROR("[drm] No buffer for z buffer!\n");
                        return -EINVAL;
                }
                size = track->zb.pitch * track->zb.cpp * track->maxy;
                size += track->zb.offset;
                if (size > radeon_object_size(track->zb.robj)) {
                        DRM_ERROR("[drm] Buffer too small for z buffer "
                                  "(need %lu, have %lu)!\n", size,
                                  radeon_object_size(track->zb.robj));
                        return -EINVAL;
                }
        }
        return 0;
}

static inline void r300_cs_track_clear(struct r300_cs_track *track)
{
        unsigned i;

        track->num_cb = 4;
        track->maxy = 4096;
        for (i = 0; i < track->num_cb; i++) {
                track->cb[i].robj = NULL;
                track->cb[i].pitch = 8192;
                track->cb[i].cpp = 16;
                track->cb[i].offset = 0;
        }
        track->z_enabled = true;
        track->zb.robj = NULL;
        track->zb.pitch = 8192;
        track->zb.cpp = 4;
        track->zb.offset = 0;
}

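/*
 * Register permission bitmap, one bit per register, handed to
 * r100_cs_parse_packet0() together with r300_packet0_check(); it controls
 * which PACKET0 writes from userspace are passed through the checker for
 * relocation patching and state tracking.
 */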
static unsigned r300_auth_reg[] = {
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
        0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFCFCC, 0xF00E9FFF, 0x007C0000,
        0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFC, 0xFFFFFFFF,
        0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF,
        0x00000000, 0x00000000, 0xFFFF0000, 0x00000000,
        0x00000000, 0x0000C100, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x0003FC01, 0xFFFFFFF8, 0xFE800B19,
};

static int r300_packet0_check(struct radeon_cs_parser *p,
                struct radeon_cs_packet *pkt,
                unsigned idx, unsigned reg)
{
        struct radeon_cs_chunk *ib_chunk;
        struct radeon_cs_reloc *reloc;
        struct r300_cs_track *track;
        volatile uint32_t *ib;
        uint32_t tmp;
        unsigned i;
        int r;

        ib = p->ib->ptr;
        ib_chunk = &p->chunks[p->chunk_ib_idx];
        track = (struct r300_cs_track *)p->track;
        switch (reg) {
        case RADEON_DST_PITCH_OFFSET:
        case RADEON_SRC_PITCH_OFFSET:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                tmp = ib_chunk->kdata[idx] & 0x003fffff;
                tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
                ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp;
                break;
        case R300_RB3D_COLOROFFSET0:
        case R300_RB3D_COLOROFFSET1:
        case R300_RB3D_COLOROFFSET2:
        case R300_RB3D_COLOROFFSET3:
                i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                track->cb[i].robj = reloc->robj;
                track->cb[i].offset = ib_chunk->kdata[idx];
                ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
                break;
        case R300_ZB_DEPTHOFFSET:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                track->zb.robj = reloc->robj;
                track->zb.offset = ib_chunk->kdata[idx];
                ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
                break;
        case R300_TX_OFFSET_0:
        case R300_TX_OFFSET_0+4:
        case R300_TX_OFFSET_0+8:
        case R300_TX_OFFSET_0+12:
        case R300_TX_OFFSET_0+16:
        case R300_TX_OFFSET_0+20:
        case R300_TX_OFFSET_0+24:
        case R300_TX_OFFSET_0+28:
        case R300_TX_OFFSET_0+32:
        case R300_TX_OFFSET_0+36:
        case R300_TX_OFFSET_0+40:
        case R300_TX_OFFSET_0+44:
        case R300_TX_OFFSET_0+48:
        case R300_TX_OFFSET_0+52:
        case R300_TX_OFFSET_0+56:
        case R300_TX_OFFSET_0+60:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
                break;
        /* Tracked registers */
        case 0x43E4:
                /* SC_SCISSOR1 */
                track->maxy = ((ib_chunk->kdata[idx] >> 13) & 0x1FFF) + 1;
                if (p->rdev->family < CHIP_RV515) {
                        track->maxy -= 1440;
                }
                break;
        case 0x4E00:
                /* RB3D_CCTL */
                track->num_cb = ((ib_chunk->kdata[idx] >> 5) & 0x3) + 1;
                break;
        case 0x4E38:
        case 0x4E3C:
        case 0x4E40:
        case 0x4E44:
                /* RB3D_COLORPITCH0 */
                /* RB3D_COLORPITCH1 */
                /* RB3D_COLORPITCH2 */
                /* RB3D_COLORPITCH3 */
                i = (reg - 0x4E38) >> 2;
                track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE;
                switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) {
                case 9:
                case 11:
                case 12:
                        track->cb[i].cpp = 1;
                        break;
                case 3:
                case 4:
                case 13:
                case 15:
                        track->cb[i].cpp = 2;
                        break;
                case 6:
                        track->cb[i].cpp = 4;
                        break;
                case 10:
                        track->cb[i].cpp = 8;
                        break;
                case 7:
                        track->cb[i].cpp = 16;
                        break;
                default:
                        DRM_ERROR("Invalid color buffer format (%d)!\n",
                                  ((ib_chunk->kdata[idx] >> 21) & 0xF));
                        return -EINVAL;
                }
                break;
        case 0x4F00:
                /* ZB_CNTL */
                if (ib_chunk->kdata[idx] & 2) {
                        track->z_enabled = true;
                } else {
                        track->z_enabled = false;
                }
                break;
        case 0x4F10:
                /* ZB_FORMAT */
                switch ((ib_chunk->kdata[idx] & 0xF)) {
                case 0:
                case 1:
                        track->zb.cpp = 2;
                        break;
                case 2:
                        track->zb.cpp = 4;
                        break;
                default:
                        DRM_ERROR("Invalid z buffer format (%d)!\n",
                                  (ib_chunk->kdata[idx] & 0xF));
                        return -EINVAL;
                }
                break;
        case 0x4F24:
                /* ZB_DEPTHPITCH */
                track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC;
                break;
        default:
                printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", reg, idx);
                return -EINVAL;
        }
        return 0;
}

static int r300_packet3_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt)
{
        struct radeon_cs_chunk *ib_chunk;
        struct radeon_cs_reloc *reloc;
        struct r300_cs_track *track;
        volatile uint32_t *ib;
        unsigned idx;
        unsigned i, c;
        int r;

        ib = p->ib->ptr;
        ib_chunk = &p->chunks[p->chunk_ib_idx];
        idx = pkt->idx + 1;
        track = (struct r300_cs_track *)p->track;
        switch (pkt->opcode) {
        case PACKET3_3D_LOAD_VBPNTR:
                c = ib_chunk->kdata[idx++];
                for (i = 0; i < (c - 1); i += 2, idx += 3) {
                        r = r100_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                DRM_ERROR("No reloc for packet3 %d\n",
                                          pkt->opcode);
                                r100_cs_dump_packet(p, pkt);
                                return r;
                        }
                        ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
                        r = r100_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                DRM_ERROR("No reloc for packet3 %d\n",
                                          pkt->opcode);
                                r100_cs_dump_packet(p, pkt);
                                return r;
                        }
                        ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
                }
                if (c & 1) {
                        r = r100_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                DRM_ERROR("No reloc for packet3 %d\n",
                                          pkt->opcode);
                                r100_cs_dump_packet(p, pkt);
                                return r;
                        }
                        ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
                }
                break;
        case PACKET3_INDX_BUFFER:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
                break;
        /* Draw packet */
        case PACKET3_3D_DRAW_VBUF:
        case PACKET3_3D_DRAW_IMMD:
        case PACKET3_3D_DRAW_INDX:
        case PACKET3_3D_DRAW_VBUF_2:
        case PACKET3_3D_DRAW_IMMD_2:
        case PACKET3_3D_DRAW_INDX_2:
                r = r300_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_NOP:
                break;
        default:
                DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
                return -EINVAL;
        }
        return 0;
}

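/*
 * Main CS parser entry point for r300: walk the indirect buffer packet by
 * packet, run PACKET0 register writes through the permission bitmap and
 * r300_packet0_check(), validate PACKET3 commands, and fail the whole
 * submission on the first invalid packet.
 */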
int r300_cs_parse(struct radeon_cs_parser *p)
{
        struct radeon_cs_packet pkt;
        struct r300_cs_track track;
        int r;

        r300_cs_track_clear(&track);
        p->track = &track;
        do {
                r = r100_cs_packet_parse(p, &pkt, p->idx);
                if (r) {
                        return r;
                }
                p->idx += pkt.count + 2;
                switch (pkt.type) {
                case PACKET_TYPE0:
                        r = r100_cs_parse_packet0(p, &pkt,
                                                  r300_auth_reg,
                                                  ARRAY_SIZE(r300_auth_reg),
                                                  &r300_packet0_check);
                        break;
                case PACKET_TYPE2:
                        break;
                case PACKET_TYPE3:
                        r = r300_packet3_check(p, &pkt);
                        break;
                default:
                        DRM_ERROR("Unknown packet type %d!\n", pkt.type);
                        return -EINVAL;
                }
                if (r) {
                        return r;
                }
        } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
        return 0;
}