/*
 * Copyright 2004 The Unichrome Project. All Rights Reserved.
 * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Thomas Hellstrom 2004, 2005.
 * This code was written using docs obtained under NDA from VIA Inc.
 *
 * Don't run this code directly on an AGP buffer. Due to cache problems it will
 * be very slow.
 */

#include "via_3d_reg.h"
#include "drmP.h"
#include "drm.h"
#include "via_drm.h"
#include "via_verifier.h"
#include "via_drv.h"

typedef enum {
        state_command,
        state_header2,
        state_header1,
        state_vheader5,
        state_vheader6,
        state_error
} verifier_state_t;

typedef enum {
        no_check = 0,
        check_for_header2,
        check_for_header1,
        check_for_header2_err,
        check_for_header1_err,
        check_for_fire,
        check_z_buffer_addr0,
        check_z_buffer_addr1,
        check_z_buffer_addr_mode,
        check_destination_addr0,
        check_destination_addr1,
        check_destination_addr_mode,
        check_for_dummy,
        check_for_dd,
        check_texture_addr0,
        check_texture_addr1,
        check_texture_addr2,
        check_texture_addr3,
        check_texture_addr4,
        check_texture_addr5,
        check_texture_addr6,
        check_texture_addr7,
        check_texture_addr8,
        check_texture_addr_mode,
        check_for_vertex_count,
        check_number_texunits,
        forbidden_command
} hazard_t;

/*
 * Associates each hazard above with a possible multi-command
 * sequence. For example, an address that is split over multiple
 * commands and that needs to be checked at the first command
 * that does not include any part of the address.
 */

static drm_via_sequence_t seqs[] = {
        no_sequence,
        no_sequence,
        no_sequence,
        no_sequence,
        no_sequence,
        no_sequence,
        z_address,
        z_address,
        z_address,
        dest_address,
        dest_address,
        dest_address,
        no_sequence,
        no_sequence,
        tex_address,
        tex_address,
        tex_address,
        tex_address,
        tex_address,
        tex_address,
        tex_address,
        tex_address,
        tex_address,
        tex_address,
        no_sequence
};

typedef struct {
        unsigned int code;
        hazard_t hz;
} hz_init_t;

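/*
 * The tables below map the high byte of a command dword (the register
 * index inside a HALCYON_HEADER2 parameter space) to the hazard check
 * it requires. init_table1 covers the non-texture 3D registers,
 * init_table2 the per-texture registers and init_table3 the general
 * texture settings. Codes that are not listed end up as
 * forbidden_command.
 */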
static hz_init_t init_table1[] = {
        {0xf2, check_for_header2_err},
        {0xf0, check_for_header1_err},
        {0xee, check_for_fire},
        {0xcc, check_for_dummy},
        {0xdd, check_for_dd},
        {0x00, no_check},
        {0x10, check_z_buffer_addr0},
        {0x11, check_z_buffer_addr1},
        {0x12, check_z_buffer_addr_mode},
        {0x13, no_check},
        {0x14, no_check},
        {0x15, no_check},
        {0x23, no_check},
        {0x24, no_check},
        {0x33, no_check},
        {0x34, no_check},
        {0x35, no_check},
        {0x36, no_check},
        {0x37, no_check},
        {0x38, no_check},
        {0x39, no_check},
        {0x3A, no_check},
        {0x3B, no_check},
        {0x3C, no_check},
        {0x3D, no_check},
        {0x3E, no_check},
        {0x40, check_destination_addr0},
        {0x41, check_destination_addr1},
        {0x42, check_destination_addr_mode},
        {0x43, no_check},
        {0x44, no_check},
        {0x50, no_check},
        {0x51, no_check},
        {0x52, no_check},
        {0x53, no_check},
        {0x54, no_check},
        {0x55, no_check},
        {0x56, no_check},
        {0x57, no_check},
        {0x58, no_check},
        {0x70, no_check},
        {0x71, no_check},
        {0x78, no_check},
        {0x79, no_check},
        {0x7A, no_check},
        {0x7B, no_check},
        {0x7C, no_check},
        {0x7D, check_for_vertex_count}
};

static hz_init_t init_table2[] = {
        {0xf2, check_for_header2_err},
        {0xf0, check_for_header1_err},
        {0xee, check_for_fire},
        {0xcc, check_for_dummy},
        {0x00, check_texture_addr0},
        {0x01, check_texture_addr0},
        {0x02, check_texture_addr0},
        {0x03, check_texture_addr0},
        {0x04, check_texture_addr0},
        {0x05, check_texture_addr0},
        {0x06, check_texture_addr0},
        {0x07, check_texture_addr0},
        {0x08, check_texture_addr0},
        {0x09, check_texture_addr0},
        {0x20, check_texture_addr1},
        {0x21, check_texture_addr1},
        {0x22, check_texture_addr1},
        {0x23, check_texture_addr4},
        {0x2B, check_texture_addr3},
        {0x2C, check_texture_addr3},
        {0x2D, check_texture_addr3},
        {0x2E, check_texture_addr3},
        {0x2F, check_texture_addr3},
        {0x30, check_texture_addr3},
        {0x31, check_texture_addr3},
        {0x32, check_texture_addr3},
        {0x33, check_texture_addr3},
        {0x34, check_texture_addr3},
        {0x4B, check_texture_addr5},
        {0x4C, check_texture_addr6},
        {0x51, check_texture_addr7},
        {0x52, check_texture_addr8},
        {0x77, check_texture_addr2},
        {0x78, no_check},
        {0x79, no_check},
        {0x7A, no_check},
        {0x7B, check_texture_addr_mode},
        {0x7C, no_check},
        {0x7D, no_check},
        {0x7E, no_check},
        {0x7F, no_check},
        {0x80, no_check},
        {0x81, no_check},
        {0x82, no_check},
        {0x83, no_check},
        {0x85, no_check},
        {0x86, no_check},
        {0x87, no_check},
        {0x88, no_check},
        {0x89, no_check},
        {0x8A, no_check},
        {0x90, no_check},
        {0x91, no_check},
        {0x92, no_check},
        {0x93, no_check}
};

static hz_init_t init_table3[] = {
        {0xf2, check_for_header2_err},
        {0xf0, check_for_header1_err},
        {0xcc, check_for_dummy},
        {0x00, check_number_texunits}
};

static hazard_t table1[256];
static hazard_t table2[256];
static hazard_t table3[256];

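/*
 * Advance *buf by num_words dwords, flagging an error if the command
 * buffer ends before that many words are available.
 */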
static __inline__ int
eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
{
        if ((buf_end - *buf) >= num_words) {
                *buf += num_words;
                return 0;
        }
        DRM_ERROR("Illegal termination of DMA command buffer\n");
        return 1;
}

/*
 * Partially stolen from drm_memory.h
 */

static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
                                                    unsigned long offset,
                                                    unsigned long size,
                                                    drm_device_t * dev)
{
        struct list_head *list;
        drm_map_list_t *r_list;
        drm_local_map_t *map = seq->map_cache;

        if (map && map->offset <= offset
            && (offset + size) <= (map->offset + map->size)) {
                return map;
        }

        list_for_each(list, &dev->maplist->head) {
                r_list = (drm_map_list_t *) list;
                map = r_list->map;
                if (!map)
                        continue;
                if (map->offset <= offset
                    && (offset + size) <= (map->offset + map->size)
                    && !(map->flags & _DRM_RESTRICTED)
                    && (map->type == _DRM_AGP)) {
                        seq->map_cache = map;
                        return map;
                }
        }
        return NULL;
}

/*
 * Require that all AGP texture levels reside in the same AGP map, which should
 * be mappable by the client. This is not a big restriction.
 * FIXME: To actually enforce this security policy strictly, drm_rmmap
 * would have to wait for dma quiescent before removing an AGP map.
 * In practice, the via_drm_lookup_agp_map call seems to take
 * very little CPU time.
 */

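/*
 * Called when a multi-command address sequence is complete. For AGP
 * textures, compute the address range spanned by the mipmap levels in
 * use and check it with via_drm_lookup_agp_map.
 */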
static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq)
{
        switch (cur_seq->unfinished) {
        case z_address:
                DRM_DEBUG("Z Buffer start address is 0x%x\n", cur_seq->z_addr);
                break;
        case dest_address:
                DRM_DEBUG("Destination start address is 0x%x\n",
                          cur_seq->d_addr);
                break;
        case tex_address:
                if (cur_seq->agp_texture) {
                        unsigned start =
                            cur_seq->tex_level_lo[cur_seq->texture];
                        unsigned end = cur_seq->tex_level_hi[cur_seq->texture];
                        unsigned long lo = ~0, hi = 0, tmp;
                        uint32_t *addr, *pitch, *height, tex;
                        unsigned i;

                        if (end > 9)
                                end = 9;
                        if (start > 9)
                                start = 9;

                        addr =
                            &(cur_seq->t_addr[tex = cur_seq->texture][start]);
                        pitch = &(cur_seq->pitch[tex][start]);
                        height = &(cur_seq->height[tex][start]);

                        for (i = start; i <= end; ++i) {
                                tmp = *addr++;
                                if (tmp < lo)
                                        lo = tmp;
                                tmp += (*height++ << *pitch++);
                                if (tmp > hi)
                                        hi = tmp;
                        }

                        if (!via_drm_lookup_agp_map
                            (cur_seq, lo, hi - lo, cur_seq->dev)) {
                                DRM_ERROR
                                    ("AGP texture is not in allowed map\n");
                                return 2;
                        }
                }
                break;
        default:
                break;
        }
        cur_seq->unfinished = no_sequence;
        return 0;
}

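/*
 * Check a single command dword against its hazard class. Returns 0 if
 * the dword is acceptable, 1 if it is a valid header or fire command
 * that ends the current register stream, and 2 if it must be rejected.
 */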
static __inline__ int
investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t * cur_seq)
{
        register uint32_t tmp, *tmp_addr;

        if (cur_seq->unfinished && (cur_seq->unfinished != seqs[hz])) {
                int ret;
                if ((ret = finish_current_sequence(cur_seq)))
                        return ret;
        }

        switch (hz) {
        case check_for_header2:
                if (cmd == HALCYON_HEADER2)
                        return 1;
                return 0;
        case check_for_header1:
                if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
                        return 1;
                return 0;
        case check_for_header2_err:
                if (cmd == HALCYON_HEADER2)
                        return 1;
                DRM_ERROR("Illegal DMA HALCYON_HEADER2 command\n");
                break;
        case check_for_header1_err:
                if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
                        return 1;
                DRM_ERROR("Illegal DMA HALCYON_HEADER1 command\n");
                break;
        case check_for_fire:
                if ((cmd & HALCYON_FIREMASK) == HALCYON_FIRECMD)
                        return 1;
                DRM_ERROR("Illegal DMA HALCYON_FIRECMD command\n");
                break;
        case check_for_dummy:
                if (HC_DUMMY == cmd)
                        return 0;
                DRM_ERROR("Illegal DMA HC_DUMMY command\n");
                break;
        case check_for_dd:
                if (0xdddddddd == cmd)
                        return 0;
                DRM_ERROR("Illegal DMA 0xdddddddd command\n");
                break;
        case check_z_buffer_addr0:
                cur_seq->unfinished = z_address;
                cur_seq->z_addr = (cur_seq->z_addr & 0xFF000000) |
                    (cmd & 0x00FFFFFF);
                return 0;
        case check_z_buffer_addr1:
                cur_seq->unfinished = z_address;
                cur_seq->z_addr = (cur_seq->z_addr & 0x00FFFFFF) |
                    ((cmd & 0xFF) << 24);
                return 0;
        case check_z_buffer_addr_mode:
                cur_seq->unfinished = z_address;
                if ((cmd & 0x0000C000) == 0)
                        return 0;
                DRM_ERROR("Attempt to place Z buffer in system memory\n");
                return 2;
        case check_destination_addr0:
                cur_seq->unfinished = dest_address;
                cur_seq->d_addr = (cur_seq->d_addr & 0xFF000000) |
                    (cmd & 0x00FFFFFF);
                return 0;
        case check_destination_addr1:
                cur_seq->unfinished = dest_address;
                cur_seq->d_addr = (cur_seq->d_addr & 0x00FFFFFF) |
                    ((cmd & 0xFF) << 24);
                return 0;
        case check_destination_addr_mode:
                cur_seq->unfinished = dest_address;
                if ((cmd & 0x0000C000) == 0)
                        return 0;
                DRM_ERROR
                    ("Attempt to place 3D drawing buffer in system memory\n");
                return 2;
        case check_texture_addr0:
                cur_seq->unfinished = tex_address;
                tmp = (cmd >> 24);
                tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
                *tmp_addr = (*tmp_addr & 0xFF000000) | (cmd & 0x00FFFFFF);
                return 0;
        case check_texture_addr1:
                cur_seq->unfinished = tex_address;
                tmp = ((cmd >> 24) - 0x20);
                tmp += tmp << 1;
                tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
                *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
                tmp_addr++;
                *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF00) << 16);
                tmp_addr++;
                *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF0000) << 8);
                return 0;
        case check_texture_addr2:
                cur_seq->unfinished = tex_address;
                cur_seq->tex_level_lo[tmp = cur_seq->texture] = cmd & 0x3F;
                cur_seq->tex_level_hi[tmp] = (cmd & 0xFC0) >> 6;
                return 0;
        case check_texture_addr3:
                cur_seq->unfinished = tex_address;
                tmp = ((cmd >> 24) - 0x2B);
                cur_seq->pitch[cur_seq->texture][tmp] =
                    (cmd & 0x00F00000) >> 20;
                if (!tmp && (cmd & 0x000FFFFF)) {
                        DRM_ERROR
                            ("Unimplemented texture level 0 pitch mode.\n");
                        return 2;
                }
                return 0;
        case check_texture_addr4:
                cur_seq->unfinished = tex_address;
                tmp_addr = &cur_seq->t_addr[cur_seq->texture][9];
                *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
                return 0;
        case check_texture_addr5:
        case check_texture_addr6:
                cur_seq->unfinished = tex_address;
                /*
                 * Texture width. We don't care since we have the pitch.
                 */
                return 0;
        case check_texture_addr7:
                cur_seq->unfinished = tex_address;
                tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
                tmp_addr[5] = 1 << ((cmd & 0x00F00000) >> 20);
                tmp_addr[4] = 1 << ((cmd & 0x000F0000) >> 16);
                tmp_addr[3] = 1 << ((cmd & 0x0000F000) >> 12);
                tmp_addr[2] = 1 << ((cmd & 0x00000F00) >> 8);
                tmp_addr[1] = 1 << ((cmd & 0x000000F0) >> 4);
                tmp_addr[0] = 1 << (cmd & 0x0000000F);
                return 0;
        case check_texture_addr8:
                cur_seq->unfinished = tex_address;
                tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
                tmp_addr[9] = 1 << ((cmd & 0x0000F000) >> 12);
                tmp_addr[8] = 1 << ((cmd & 0x00000F00) >> 8);
                tmp_addr[7] = 1 << ((cmd & 0x000000F0) >> 4);
                tmp_addr[6] = 1 << (cmd & 0x0000000F);
                return 0;
        case check_texture_addr_mode:
                cur_seq->unfinished = tex_address;
                if (2 == (tmp = cmd & 0x00000003)) {
                        DRM_ERROR
                            ("Attempt to fetch texture from system memory.\n");
                        return 2;
                }
                cur_seq->agp_texture = (tmp == 3);
                cur_seq->tex_palette_size[cur_seq->texture] =
                    (cmd >> 16) & 0x000000007;
                return 0;
        case check_for_vertex_count:
                cur_seq->vertex_count = cmd & 0x0000FFFF;
                return 0;
        case check_number_texunits:
                cur_seq->multitex = (cmd >> 3) & 1;
                return 0;
        default:
                DRM_ERROR("Illegal DMA data: 0x%x\n", cmd);
                return 2;
        }
        return 2;
}

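/*
 * Verify a HC_ParaType_CmdVdata primitive list: validate the B and A
 * command dwords, derive the number of dwords per vertex from the B
 * command, require each vertex burst to end with the expected fire
 * command, and record the fire offsets for later use by the parser.
 */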
static __inline__ int
via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end,
                    drm_via_state_t * cur_seq)
{
        drm_via_private_t *dev_priv =
            (drm_via_private_t *) cur_seq->dev->dev_private;
        uint32_t a_fire, bcmd, dw_count;
        int ret = 0;
        int have_fire;
        const uint32_t *buf = *buffer;

        while (buf < buf_end) {
                have_fire = 0;
                if ((buf_end - buf) < 2) {
                        DRM_ERROR
                            ("Unexpected termination of primitive list.\n");
                        ret = 1;
                        break;
                }
                if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdB)
                        break;
                bcmd = *buf++;
                if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdA) {
                        DRM_ERROR("Expected Vertex List A command, got 0x%x\n",
                                  *buf);
                        ret = 1;
                        break;
                }
                a_fire =
                    *buf++ | HC_HPLEND_MASK | HC_HPMValidN_MASK |
                    HC_HE3Fire_MASK;

                /*
                 * How many dwords per vertex?
                 */

                if (cur_seq->agp && ((bcmd & (0xF << 11)) == 0)) {
                        DRM_ERROR("Illegal B command vertex data for AGP.\n");
                        ret = 1;
                        break;
                }

                dw_count = 0;
                if (bcmd & (1 << 7))
                        dw_count += (cur_seq->multitex) ? 2 : 1;
                if (bcmd & (1 << 8))
                        dw_count += (cur_seq->multitex) ? 2 : 1;
                if (bcmd & (1 << 9))
                        dw_count++;
                if (bcmd & (1 << 10))
                        dw_count++;
                if (bcmd & (1 << 11))
                        dw_count++;
                if (bcmd & (1 << 12))
                        dw_count++;
                if (bcmd & (1 << 13))
                        dw_count++;
                if (bcmd & (1 << 14))
                        dw_count++;

                while (buf < buf_end) {
                        if (*buf == a_fire) {
                                if (dev_priv->num_fire_offsets >=
                                    VIA_FIRE_BUF_SIZE) {
                                        DRM_ERROR("Fire offset buffer full.\n");
                                        ret = 1;
                                        break;
                                }
                                dev_priv->fire_offsets[dev_priv->
                                                       num_fire_offsets++] =
                                    buf;
                                have_fire = 1;
                                buf++;
                                if (buf < buf_end && *buf == a_fire)
                                        buf++;
                                break;
                        }
                        if ((*buf == HALCYON_HEADER2) ||
                            ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) {
                                DRM_ERROR("Missing Vertex Fire command, "
                                          "stray Vertex Fire command, or verifier "
                                          "lost sync.\n");
                                ret = 1;
                                break;
                        }
                        if ((ret = eat_words(&buf, buf_end, dw_count)))
                                break;
                }
                if (buf >= buf_end && !have_fire) {
                        DRM_ERROR("Missing Vertex Fire command or verifier "
                                  "lost sync.\n");
                        ret = 1;
                        break;
                }
                if (cur_seq->agp && ((buf - cur_seq->buf_start) & 0x01)) {
                        DRM_ERROR("AGP Primitive list end misaligned.\n");
                        ret = 1;
                        break;
                }
        }
        *buffer = buf;
        return ret;
}

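/*
 * Verify a HALCYON_HEADER2 sequence. The parameter type selects which
 * hazard table is used to check the register writes that follow.
 */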
static __inline__ verifier_state_t
via_check_header2(uint32_t const **buffer, const uint32_t * buf_end,
                  drm_via_state_t * hc_state)
{
        uint32_t cmd;
        int hz_mode;
        hazard_t hz;
        const uint32_t *buf = *buffer;
        const hazard_t *hz_table;

        if ((buf_end - buf) < 2) {
                DRM_ERROR
                    ("Illegal termination of DMA HALCYON_HEADER2 sequence.\n");
                return state_error;
        }
        buf++;
        cmd = (*buf++ & 0xFFFF0000) >> 16;

        switch (cmd) {
        case HC_ParaType_CmdVdata:
                if (via_check_prim_list(&buf, buf_end, hc_state))
                        return state_error;
                *buffer = buf;
                return state_command;
        case HC_ParaType_NotTex:
                hz_table = table1;
                break;
        case HC_ParaType_Tex:
                hc_state->texture = 0;
                hz_table = table2;
                break;
        case (HC_ParaType_Tex | (HC_SubType_Tex1 << 8)):
                hc_state->texture = 1;
                hz_table = table2;
                break;
        case (HC_ParaType_Tex | (HC_SubType_TexGeneral << 8)):
                hz_table = table3;
                break;
        case HC_ParaType_Auto:
                if (eat_words(&buf, buf_end, 2))
                        return state_error;
                *buffer = buf;
                return state_command;
        case (HC_ParaType_Palette | (HC_SubType_Stipple << 8)):
                if (eat_words(&buf, buf_end, 32))
                        return state_error;
                *buffer = buf;
                return state_command;
        case (HC_ParaType_Palette | (HC_SubType_TexPalette0 << 8)):
        case (HC_ParaType_Palette | (HC_SubType_TexPalette1 << 8)):
                DRM_ERROR("Texture palettes are rejected because of "
                          "lack of information on how to determine their size.\n");
                return state_error;
        case (HC_ParaType_Palette | (HC_SubType_FogTable << 8)):
                DRM_ERROR("Fog factor palettes are rejected because of "
                          "lack of information on how to determine their size.\n");
                return state_error;
        default:

                /*
                 * There are some unimplemented HC_ParaTypes here that
                 * need to be implemented if the Mesa driver is extended.
                 */

                DRM_ERROR("Invalid or unimplemented HALCYON_HEADER2 "
                          "DMA subcommand: 0x%x. Previous dword: 0x%x\n",
                          cmd, *(buf - 2));
                *buffer = buf;
                return state_error;
        }

        while (buf < buf_end) {
                cmd = *buf++;
                if ((hz = hz_table[cmd >> 24])) {
                        if ((hz_mode = investigate_hazard(cmd, hz, hc_state))) {
                                if (hz_mode == 1) {
                                        buf--;
                                        break;
                                }
                                return state_error;
                        }
                } else if (hc_state->unfinished &&
                           finish_current_sequence(hc_state)) {
                        return state_error;
                }
        }
        if (hc_state->unfinished && finish_current_sequence(hc_state)) {
                return state_error;
        }
        *buffer = buf;
        return state_command;
}

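/*
 * Emit a previously verified HALCYON_HEADER2 sequence to the hardware
 * through the HC_REG_TRANS_SET / HC_REG_TRANS_SPACE MMIO registers.
 */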
static __inline__ verifier_state_t
via_parse_header2(drm_via_private_t * dev_priv, uint32_t const **buffer,
                  const uint32_t * buf_end, int *fire_count)
{
        uint32_t cmd;
        const uint32_t *buf = *buffer;
        const uint32_t *next_fire;
        int burst = 0;

        next_fire = dev_priv->fire_offsets[*fire_count];
        buf++;
        cmd = (*buf & 0xFFFF0000) >> 16;
        VIA_WRITE(HC_REG_TRANS_SET + HC_REG_BASE, *buf++);
        switch (cmd) {
        case HC_ParaType_CmdVdata:
                while ((buf < buf_end) &&
                       (*fire_count < dev_priv->num_fire_offsets) &&
                       (*buf & HC_ACMD_MASK) == HC_ACMD_HCmdB) {
                        while (buf <= next_fire) {
                                VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
                                          (burst & 63), *buf++);
                                burst += 4;
                        }
                        if ((buf < buf_end)
                            && ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD))
                                buf++;

                        if (++(*fire_count) < dev_priv->num_fire_offsets)
                                next_fire = dev_priv->fire_offsets[*fire_count];
                }
                break;
        default:
                while (buf < buf_end) {

                        if (*buf == HC_HEADER2 ||
                            (*buf & HALCYON_HEADER1MASK) == HALCYON_HEADER1 ||
                            (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5 ||
                            (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
                                break;

                        VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
                                  (burst & 63), *buf++);
                        burst += 4;
                }
        }
        *buffer = buf;
        return state_command;
}

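/*
 * Only allow video DMA to touch MMIO ranges that are known to be safe;
 * the 3D/command burst area, the PCI DMA area and the VGA registers
 * are off limits.
 */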
static __inline__ int verify_mmio_address(uint32_t address)
{
        if ((address > 0x3FF) && (address < 0xC00)) {
                DRM_ERROR("Invalid VIDEO DMA command. "
                          "Attempt to access 3D- or command burst area.\n");
                return 1;
        } else if ((address > 0xCFF) && (address < 0x1300)) {
                DRM_ERROR("Invalid VIDEO DMA command. "
                          "Attempt to access PCI DMA area.\n");
                return 1;
        } else if (address > 0x13FF) {
                DRM_ERROR("Invalid VIDEO DMA command. "
                          "Attempt to access VGA registers.\n");
                return 1;
        }
        return 0;
}

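/*
 * The padding dwords at the end of a video command must be present and
 * must all be zero.
 */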
static __inline__ int
verify_video_tail(uint32_t const **buffer, const uint32_t * buf_end,
                  uint32_t dwords)
{
        const uint32_t *buf = *buffer;

        if (buf_end - buf < dwords) {
                DRM_ERROR("Illegal termination of video command.\n");
                return 1;
        }
        while (dwords--) {
                if (*buf++) {
                        DRM_ERROR("Illegal video command tail.\n");
                        return 1;
                }
        }
        *buffer = buf;
        return 0;
}

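/*
 * Verify a stream of HALCYON_HEADER1 register writes. Writes to the
 * 3D/command burst area and to the VGA register range are rejected.
 */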
static __inline__ verifier_state_t
via_check_header1(uint32_t const **buffer, const uint32_t * buf_end)
{
        uint32_t cmd;
        const uint32_t *buf = *buffer;
        verifier_state_t ret = state_command;

        while (buf < buf_end) {
                cmd = *buf;
                if ((cmd > ((0x3FF >> 2) | HALCYON_HEADER1)) &&
                    (cmd < ((0xC00 >> 2) | HALCYON_HEADER1))) {
                        if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
                                break;
                        DRM_ERROR("Invalid HALCYON_HEADER1 command. "
                                  "Attempt to access 3D- or command burst area.\n");
                        ret = state_error;
                        break;
                } else if (cmd > ((0xCFF >> 2) | HALCYON_HEADER1)) {
                        if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
                                break;
                        DRM_ERROR("Invalid HALCYON_HEADER1 command. "
                                  "Attempt to access VGA registers.\n");
                        ret = state_error;
                        break;
                } else {
                        buf += 2;
                }
        }
        *buffer = buf;
        return ret;
}

static __inline__ verifier_state_t
via_parse_header1(drm_via_private_t * dev_priv, uint32_t const **buffer,
                  const uint32_t * buf_end)
{
        register uint32_t cmd;
        const uint32_t *buf = *buffer;

        while (buf < buf_end) {
                cmd = *buf;
                if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
                        break;
                VIA_WRITE((cmd & ~HALCYON_HEADER1MASK) << 2, *++buf);
                buf++;
        }
        *buffer = buf;
        return state_command;
}

static __inline__ verifier_state_t
via_check_vheader5(uint32_t const **buffer, const uint32_t * buf_end)
{
        uint32_t data;
        const uint32_t *buf = *buffer;

        if (buf_end - buf < 4) {
                DRM_ERROR("Illegal termination of video header5 command\n");
                return state_error;
        }

        data = *buf++ & ~VIA_VIDEOMASK;
        if (verify_mmio_address(data))
                return state_error;

        data = *buf++;
        if (*buf++ != 0x00F50000) {
                DRM_ERROR("Illegal header5 header data\n");
                return state_error;
        }
        if (*buf++ != 0x00000000) {
                DRM_ERROR("Illegal header5 header data\n");
                return state_error;
        }
        if (eat_words(&buf, buf_end, data))
                return state_error;
        if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
                return state_error;
        *buffer = buf;
        return state_command;
}

static __inline__ verifier_state_t
via_parse_vheader5(drm_via_private_t * dev_priv, uint32_t const **buffer,
                   const uint32_t * buf_end)
{
        uint32_t addr, count, i;
        const uint32_t *buf = *buffer;

        addr = *buf++ & ~VIA_VIDEOMASK;
        i = count = *buf;
        buf += 3;
        while (i--) {
                VIA_WRITE(addr, *buf++);
        }
        if (count & 3)
                buf += 4 - (count & 3);
        *buffer = buf;
        return state_command;
}

static __inline__ verifier_state_t
via_check_vheader6(uint32_t const **buffer, const uint32_t * buf_end)
{
        uint32_t data;
        const uint32_t *buf = *buffer;
        uint32_t i;

        if (buf_end - buf < 4) {
                DRM_ERROR("Illegal termination of video header6 command\n");
                return state_error;
        }
        buf++;
        data = *buf++;
        if (*buf++ != 0x00F60000) {
                DRM_ERROR("Illegal header6 header data\n");
                return state_error;
        }
        if (*buf++ != 0x00000000) {
                DRM_ERROR("Illegal header6 header data\n");
                return state_error;
        }
        if ((buf_end - buf) < (data << 1)) {
                DRM_ERROR("Illegal termination of video header6 command\n");
                return state_error;
        }
        for (i = 0; i < data; ++i) {
                if (verify_mmio_address(*buf++))
                        return state_error;
                buf++;
        }
        data <<= 1;
        if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
                return state_error;
        *buffer = buf;
        return state_command;
}

static __inline__ verifier_state_t
via_parse_vheader6(drm_via_private_t * dev_priv, uint32_t const **buffer,
                   const uint32_t * buf_end)
{
        uint32_t addr, count, i;
        const uint32_t *buf = *buffer;

        i = count = *++buf;
        buf += 3;
        while (i--) {
                addr = *buf++;
                VIA_WRITE(addr, *buf++);
        }
        count <<= 1;
        if (count & 3)
                buf += 4 - (count & 3);
        *buffer = buf;
        return state_command;
}

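/*
 * Walk a user-space command buffer and verify that it contains only
 * commands that are safe to hand to the hardware. The verifier state in
 * dev_priv->hc_state is restored to its saved value on error so that a
 * rejected buffer does not affect the verification of a later one.
 */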
int
via_verify_command_stream(const uint32_t * buf, unsigned int size,
                          drm_device_t * dev, int agp)
{

        drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
        drm_via_state_t *hc_state = &dev_priv->hc_state;
        drm_via_state_t saved_state = *hc_state;
        uint32_t cmd;
        const uint32_t *buf_end = buf + (size >> 2);
        verifier_state_t state = state_command;
        int pro_group_a = dev_priv->pro_group_a;

        hc_state->dev = dev;
        hc_state->unfinished = no_sequence;
        hc_state->map_cache = NULL;
        hc_state->agp = agp;
        hc_state->buf_start = buf;
        dev_priv->num_fire_offsets = 0;

        while (buf < buf_end) {

                switch (state) {
                case state_header2:
                        state = via_check_header2(&buf, buf_end, hc_state);
                        break;
                case state_header1:
                        state = via_check_header1(&buf, buf_end);
                        break;
                case state_vheader5:
                        state = via_check_vheader5(&buf, buf_end);
                        break;
                case state_vheader6:
                        state = via_check_vheader6(&buf, buf_end);
                        break;
                case state_command:
                        if (HALCYON_HEADER2 == (cmd = *buf))
                                state = state_header2;
                        else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
                                state = state_header1;
                        else if (pro_group_a
                                 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
                                state = state_vheader5;
                        else if (pro_group_a
                                 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
                                state = state_vheader6;
                        else {
                                DRM_ERROR
                                    ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
                                     cmd);
                                state = state_error;
                        }
                        break;
                case state_error:
                default:
                        *hc_state = saved_state;
                        return DRM_ERR(EINVAL);
                }
        }
        if (state == state_error) {
                *hc_state = saved_state;
                return DRM_ERR(EINVAL);
        }
        return 0;
}

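/*
 * Emit a command buffer that has already passed the verifier to the
 * hardware via MMIO, using the fire offsets recorded during
 * verification.
 */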
int
via_parse_command_stream(drm_device_t * dev, const uint32_t * buf,
                         unsigned int size)
{

        drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
        uint32_t cmd;
        const uint32_t *buf_end = buf + (size >> 2);
        verifier_state_t state = state_command;
        int fire_count = 0;

        while (buf < buf_end) {

                switch (state) {
                case state_header2:
                        state =
                            via_parse_header2(dev_priv, &buf, buf_end,
                                              &fire_count);
                        break;
                case state_header1:
                        state = via_parse_header1(dev_priv, &buf, buf_end);
                        break;
                case state_vheader5:
                        state = via_parse_vheader5(dev_priv, &buf, buf_end);
                        break;
                case state_vheader6:
                        state = via_parse_vheader6(dev_priv, &buf, buf_end);
                        break;
                case state_command:
                        if (HALCYON_HEADER2 == (cmd = *buf))
                                state = state_header2;
                        else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
                                state = state_header1;
                        else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
                                state = state_vheader5;
                        else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
                                state = state_vheader6;
                        else {
                                DRM_ERROR
                                    ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
                                     cmd);
                                state = state_error;
                        }
                        break;
                case state_error:
                default:
                        return DRM_ERR(EINVAL);
                }
        }
        if (state == state_error) {
                return DRM_ERR(EINVAL);
        }
        return 0;
}

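/*
 * Expand a compact init table into a full 256-entry lookup table,
 * marking every command code that is not listed as forbidden.
 */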
static void
setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size)
{
        int i;

        for (i = 0; i < 256; ++i) {
                table[i] = forbidden_command;
        }

        for (i = 0; i < size; ++i) {
                table[init_table[i].code] = init_table[i].hz;
        }
}

void via_init_command_verifier(void)
{
        setup_hazard_table(init_table1, table1,
                           sizeof(init_table1) / sizeof(hz_init_t));
        setup_hazard_table(init_table2, table2,
                           sizeof(init_table2) / sizeof(hz_init_t));
        setup_hazard_table(init_table3, table3,
                           sizeof(init_table3) / sizeof(hz_init_t));
}