/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

/* Really want an OS-independent resettable timer.  Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
int i915_wait_ring(drm_device_t * dev, int n, const char *caller)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
        u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
        int i;

        for (i = 0; i < 10000; i++) {
                ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->Size;
                if (ring->space >= n)
                        return 0;

                dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

                if (ring->head != last_head)
                        i = 0;

                last_head = ring->head;
        }

        return DRM_ERR(EBUSY);
}

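/* Resynchronize the software copy of the ring state with the hardware:
 * re-read head and tail and recompute the free space.
 */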
void i915_kernel_lost_context(drm_device_t * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

        ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
        ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
        ring->space = ring->head - (ring->tail + 8);
        if (ring->space < 0)
                ring->space += ring->Size;

        if (ring->head == ring->tail)
                dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

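/* Undo i915_initialize(): disable interrupts, unmap the ring buffer,
 * free the hardware status page and release dev_private.
 */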
static int i915_dma_cleanup(drm_device_t * dev)
{
        /* Make sure interrupts are disabled here because the uninstall ioctl
         * may not have been called from userspace and after dev_private
         * is freed, it's too late.
         */
        if (dev->irq)
                drm_irq_uninstall(dev);

        if (dev->dev_private) {
                drm_i915_private_t *dev_priv =
                    (drm_i915_private_t *) dev->dev_private;

                if (dev_priv->ring.virtual_start) {
                        drm_core_ioremapfree(&dev_priv->ring.map, dev);
                }

                if (dev_priv->status_page_dmah) {
                        drm_pci_free(dev, dev_priv->status_page_dmah);
                        /* Need to rewrite hardware status page */
                        I915_WRITE(0x02080, 0x1ffff000);
                }

                drm_free(dev->dev_private, sizeof(drm_i915_private_t),
                         DRM_MEM_DRIVER);

                dev->dev_private = NULL;
        }

        return 0;
}

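/* Set up DMA state from the parameters passed in by userspace: locate
 * the SAREA and MMIO map, ioremap the ring buffer and install a
 * hardware status page.
 */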
static int i915_initialize(drm_device_t * dev,
                           drm_i915_private_t * dev_priv,
                           drm_i915_init_t * init)
{
        memset(dev_priv, 0, sizeof(drm_i915_private_t));

        DRM_GETSAREA();
        if (!dev_priv->sarea) {
                DRM_ERROR("can not find sarea!\n");
                dev->dev_private = (void *)dev_priv;
                i915_dma_cleanup(dev);
                return DRM_ERR(EINVAL);
        }

        dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
        if (!dev_priv->mmio_map) {
                dev->dev_private = (void *)dev_priv;
                i915_dma_cleanup(dev);
                DRM_ERROR("can not find mmio map!\n");
                return DRM_ERR(EINVAL);
        }

        dev_priv->sarea_priv = (drm_i915_sarea_t *)
            ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);

        dev_priv->ring.Start = init->ring_start;
        dev_priv->ring.End = init->ring_end;
        dev_priv->ring.Size = init->ring_size;
        dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

        dev_priv->ring.map.offset = init->ring_start;
        dev_priv->ring.map.size = init->ring_size;
        dev_priv->ring.map.type = 0;
        dev_priv->ring.map.flags = 0;
        dev_priv->ring.map.mtrr = 0;

        drm_core_ioremap(&dev_priv->ring.map, dev);

        if (dev_priv->ring.map.handle == NULL) {
                dev->dev_private = (void *)dev_priv;
                i915_dma_cleanup(dev);
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return DRM_ERR(ENOMEM);
        }

        dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

        dev_priv->back_offset = init->back_offset;
        dev_priv->front_offset = init->front_offset;
        dev_priv->current_page = 0;
        dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;

        /* We are using separate values as placeholders for mechanisms for
         * private backbuffer/depthbuffer usage.
         */
        dev_priv->use_mi_batchbuffer_start = 0;

        /* Allow hardware batchbuffers unless told otherwise.
         */
        dev_priv->allow_batchbuffer = 1;

        /* Program Hardware Status Page */
        dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
                                                   0xffffffff);

        if (!dev_priv->status_page_dmah) {
                dev->dev_private = (void *)dev_priv;
                i915_dma_cleanup(dev);
                DRM_ERROR("Can not allocate hardware status page\n");
                return DRM_ERR(ENOMEM);
        }
        dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
        dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

        memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
        DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

        I915_WRITE(0x02080, dev_priv->dma_status_page);
        DRM_DEBUG("Enabled hardware status page\n");

        dev->dev_private = (void *)dev_priv;

        return 0;
}

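/* Verify that the state created by i915_initialize() is still intact
 * and re-point the hardware at the status page.
 */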
static int i915_dma_resume(drm_device_t * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        DRM_DEBUG("%s\n", __FUNCTION__);

        if (!dev_priv->sarea) {
                DRM_ERROR("can not find sarea!\n");
                return DRM_ERR(EINVAL);
        }

        if (!dev_priv->mmio_map) {
                DRM_ERROR("can not find mmio map!\n");
                return DRM_ERR(EINVAL);
        }

        if (dev_priv->ring.map.handle == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return DRM_ERR(ENOMEM);
        }

        /* Program Hardware Status Page */
        if (!dev_priv->hw_status_page) {
                DRM_ERROR("Can not find hardware status page\n");
                return DRM_ERR(EINVAL);
        }
        DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

        I915_WRITE(0x02080, dev_priv->dma_status_page);
        DRM_DEBUG("Enabled hardware status page\n");

        return 0;
}

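/* DRM_I915_INIT ioctl: dispatch to initialize, cleanup or resume
 * according to the requested function code.
 */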
static int i915_dma_init(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv;
        drm_i915_init_t init;
        int retcode = 0;

        DRM_COPY_FROM_USER_IOCTL(init, (drm_i915_init_t __user *) data,
                                 sizeof(init));

        switch (init.func) {
        case I915_INIT_DMA:
                dev_priv = drm_alloc(sizeof(drm_i915_private_t),
                                     DRM_MEM_DRIVER);
                if (dev_priv == NULL)
                        return DRM_ERR(ENOMEM);
                retcode = i915_initialize(dev, dev_priv, &init);
                break;
        case I915_CLEANUP_DMA:
                retcode = i915_dma_cleanup(dev);
                break;
        case I915_RESUME_DMA:
                retcode = i915_dma_resume(dev);
                break;
        default:
                retcode = DRM_ERR(EINVAL);
                break;
        }

        return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int do_validate_cmd(int cmd)
{
        switch (((cmd >> 29) & 0x7)) {
        case 0x0:
                switch ((cmd >> 23) & 0x3f) {
                case 0x0:
                        return 1;       /* MI_NOOP */
                case 0x4:
                        return 1;       /* MI_FLUSH */
                default:
                        return 0;       /* disallow everything else */
                }
                break;
        case 0x1:
                return 0;       /* reserved */
        case 0x2:
                return (cmd & 0xff) + 2;        /* 2d commands */
        case 0x3:
                if (((cmd >> 24) & 0x1f) <= 0x18)
                        return 1;

                switch ((cmd >> 24) & 0x1f) {
                case 0x1c:
                        return 1;
                case 0x1d:
                        switch ((cmd >> 16) & 0xff) {
                        case 0x3:
                                return (cmd & 0x1f) + 2;
                        case 0x4:
                                return (cmd & 0xf) + 2;
                        default:
                                return (cmd & 0xffff) + 2;
                        }
                case 0x1e:
                        if (cmd & (1 << 23))
                                return (cmd & 0xffff) + 1;
                        else
                                return 1;
                case 0x1f:
                        if ((cmd & (1 << 23)) == 0)     /* inline vertices */
                                return (cmd & 0x1ffff) + 2;
                        else if (cmd & (1 << 17))       /* indirect random */
                                if ((cmd & 0xffff) == 0)
                                        return 0;       /* unknown length, too hard */
                                else
                                        return (((cmd & 0xffff) + 1) / 2) + 1;
                        else
                                return 2;       /* indirect sequential */
                default:
                        return 0;
                }
        default:
                return 0;
        }

        return 0;
}

static int validate_cmd(int cmd)
{
        int ret = do_validate_cmd(cmd);

/*      printk("validate_cmd( %x ): %d\n", cmd, ret); */

        return ret;
}

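/* Copy a dword stream from userspace into the ring, validating each
 * command with validate_cmd() and padding to an even dword count.
 */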
static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;
        RING_LOCALS;

        if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
                return DRM_ERR(EINVAL);

        BEGIN_LP_RING(((dwords+1)&~1));

        for (i = 0; i < dwords;) {
                int cmd, sz;

                if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
                        return DRM_ERR(EINVAL);

                if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
                        return DRM_ERR(EINVAL);

                OUT_RING(cmd);

                while (++i, --sz) {
                        if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
                                                         sizeof(cmd))) {
                                return DRM_ERR(EINVAL);
                        }
                        OUT_RING(cmd);
                }
        }

        if (dwords & 1)
                OUT_RING(0);

        ADVANCE_LP_RING();

        return 0;
}

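/* Emit a GFX_OP_DRAWRECT_INFO packet for cliprect i from the user's
 * box array, after sanity-checking the rectangle.
 */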
static int i915_emit_box(drm_device_t * dev,
                         drm_clip_rect_t __user * boxes,
                         int i, int DR1, int DR4)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_clip_rect_t box;
        RING_LOCALS;

        if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
                return DRM_ERR(EFAULT);
        }

        if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
                DRM_ERROR("Bad box %d,%d..%d,%d\n",
                          box.x1, box.y1, box.x2, box.y2);
                return DRM_ERR(EINVAL);
        }

        BEGIN_LP_RING(6);
        OUT_RING(GFX_OP_DRAWRECT_INFO);
        OUT_RING(DR1);
        OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
        OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
        OUT_RING(DR4);
        OUT_RING(0);
        ADVANCE_LP_RING();

        return 0;
}

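/* Write an incrementing counter to dword index 5 (byte offset 20) of
 * the hardware status page; the ioctl handlers read it back as
 * hw_status[5] to report the last dispatched request.
 */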
static void i915_emit_breadcrumb(drm_device_t *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        RING_LOCALS;

        dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;

        BEGIN_LP_RING(4);
        OUT_RING(CMD_STORE_DWORD_IDX);
        OUT_RING(20);
        OUT_RING(dev_priv->counter);
        OUT_RING(0);
        ADVANCE_LP_RING();
}

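/* Emit the user command buffer once per cliprect (or once if there are
 * none), preceding each pass with the matching drawing rectangle.
 */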
static int i915_dispatch_cmdbuffer(drm_device_t * dev,
                                   drm_i915_cmdbuffer_t * cmd)
{
        int nbox = cmd->num_cliprects;
        int i = 0, count, ret;

        if (cmd->sz & 0x3) {
                DRM_ERROR("alignment");
                return DRM_ERR(EINVAL);
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        ret = i915_emit_box(dev, cmd->cliprects, i,
                                            cmd->DR1, cmd->DR4);
                        if (ret)
                                return ret;
                }

                ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
                if (ret)
                        return ret;
        }

        i915_emit_breadcrumb(dev);
        return 0;
}

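/* Start the user batchbuffer once per cliprect, either with
 * MI_BATCH_BUFFER_START or with an explicit MI_BATCH_BUFFER packet,
 * always as a non-secure batch.
 */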
static int i915_dispatch_batchbuffer(drm_device_t * dev,
                                     drm_i915_batchbuffer_t * batch)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_clip_rect_t __user *boxes = batch->cliprects;
        int nbox = batch->num_cliprects;
        int i = 0, count;
        RING_LOCALS;

        if ((batch->start | batch->used) & 0x7) {
                DRM_ERROR("alignment");
                return DRM_ERR(EINVAL);
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        int ret = i915_emit_box(dev, boxes, i,
                                                batch->DR1, batch->DR4);
                        if (ret)
                                return ret;
                }

                if (dev_priv->use_mi_batchbuffer_start) {
                        BEGIN_LP_RING(2);
                        OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
                        OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        ADVANCE_LP_RING();
                } else {
                        BEGIN_LP_RING(4);
                        OUT_RING(MI_BATCH_BUFFER);
                        OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        OUT_RING(batch->start + batch->used - 4);
                        OUT_RING(0);
                        ADVANCE_LP_RING();
                }
        }

        i915_emit_breadcrumb(dev);

        return 0;
}

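/* Queue an asynchronous display-buffer flip between the front and back
 * buffers, follow it with a ring wait for the flip event, and record
 * the new current page in the SAREA.
 */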
static int i915_dispatch_flip(drm_device_t * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        RING_LOCALS;

        DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
                  __FUNCTION__,
                  dev_priv->current_page,
                  dev_priv->sarea_priv->pf_current_page);

        i915_kernel_lost_context(dev);

        BEGIN_LP_RING(2);
        OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
        OUT_RING(0);
        ADVANCE_LP_RING();

        BEGIN_LP_RING(6);
        OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
        OUT_RING(0);
        if (dev_priv->current_page == 0) {
                OUT_RING(dev_priv->back_offset);
                dev_priv->current_page = 1;
        } else {
                OUT_RING(dev_priv->front_offset);
                dev_priv->current_page = 0;
        }
        OUT_RING(0);
        ADVANCE_LP_RING();

        BEGIN_LP_RING(2);
        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
        OUT_RING(0);
        ADVANCE_LP_RING();

        dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;

        BEGIN_LP_RING(4);
        OUT_RING(CMD_STORE_DWORD_IDX);
        OUT_RING(20);
        OUT_RING(dev_priv->counter);
        OUT_RING(0);
        ADVANCE_LP_RING();

        dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
        return 0;
}

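/* Wait for the ring to drain: succeeds once all but the last 8 bytes
 * of the ring are free.
 */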
static int i915_quiescent(drm_device_t * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        i915_kernel_lost_context(dev);
        return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
}

static int i915_flush_ioctl(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;

        LOCK_TEST_WITH_RETURN(dev, filp);

        return i915_quiescent(dev);
}

static int i915_batchbuffer(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 *hw_status = dev_priv->hw_status_page;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
            dev_priv->sarea_priv;
        drm_i915_batchbuffer_t batch;
        int ret;

        if (!dev_priv->allow_batchbuffer) {
                DRM_ERROR("Batchbuffer ioctl disabled\n");
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(batch, (drm_i915_batchbuffer_t __user *) data,
                                 sizeof(batch));

        DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
                  batch.start, batch.used, batch.num_cliprects);

        LOCK_TEST_WITH_RETURN(dev, filp);

        if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects,
                                                       batch.num_cliprects *
                                                       sizeof(drm_clip_rect_t)))
                return DRM_ERR(EFAULT);

        ret = i915_dispatch_batchbuffer(dev, &batch);

        sarea_priv->last_dispatch = (int)hw_status[5];
        return ret;
}

static int i915_cmdbuffer(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 *hw_status = dev_priv->hw_status_page;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
            dev_priv->sarea_priv;
        drm_i915_cmdbuffer_t cmdbuf;
        int ret;

        DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_i915_cmdbuffer_t __user *) data,
                                 sizeof(cmdbuf));

        DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
                  cmdbuf.buf, cmdbuf.sz, cmdbuf.num_cliprects);

        LOCK_TEST_WITH_RETURN(dev, filp);

        if (cmdbuf.num_cliprects &&
            DRM_VERIFYAREA_READ(cmdbuf.cliprects,
                                cmdbuf.num_cliprects *
                                sizeof(drm_clip_rect_t))) {
                DRM_ERROR("Fault accessing cliprects\n");
                return DRM_ERR(EFAULT);
        }

        ret = i915_dispatch_cmdbuffer(dev, &cmdbuf);
        if (ret) {
                DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
                return ret;
        }

        sarea_priv->last_dispatch = (int)hw_status[5];
        return 0;
}

static int i915_flip_bufs(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;

        DRM_DEBUG("%s\n", __FUNCTION__);

        LOCK_TEST_WITH_RETURN(dev, filp);

        return i915_dispatch_flip(dev);
}

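/* DRM_I915_GETPARAM ioctl: report IRQ availability, batchbuffer
 * permission or the last breadcrumb value back to userspace.
 */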
static int i915_getparam(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_getparam_t param;
        int value;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_getparam_t __user *) data,
                                 sizeof(param));

        switch (param.param) {
        case I915_PARAM_IRQ_ACTIVE:
                value = dev->irq ? 1 : 0;
                break;
        case I915_PARAM_ALLOW_BATCHBUFFER:
                value = dev_priv->allow_batchbuffer ? 1 : 0;
                break;
        case I915_PARAM_LAST_DISPATCH:
                value = READ_BREADCRUMB(dev_priv);
                break;
        default:
                DRM_ERROR("Unknown parameter %d\n", param.param);
                return DRM_ERR(EINVAL);
        }

        if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
                DRM_ERROR("DRM_COPY_TO_USER failed\n");
                return DRM_ERR(EFAULT);
        }

        return 0;
}

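/* DRM_I915_SETPARAM ioctl (master/root only per the ioctl table below):
 * select the batchbuffer dispatch method, the texture LRU granularity
 * and whether batchbuffers are allowed.
 */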
static int i915_setparam(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_setparam_t param;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_setparam_t __user *) data,
                                 sizeof(param));

        switch (param.param) {
        case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
                dev_priv->use_mi_batchbuffer_start = param.value;
                break;
        case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
                dev_priv->tex_lru_log_granularity = param.value;
                break;
        case I915_SETPARAM_ALLOW_BATCHBUFFER:
                dev_priv->allow_batchbuffer = param.value;
                break;
        default:
                DRM_ERROR("unknown parameter %d\n", param.param);
                return DRM_ERR(EINVAL);
        }

        return 0;
}

int i915_driver_load(drm_device_t *dev, unsigned long flags)
{
        /* i915 has 4 more counters */
        dev->counters += 4;
        dev->types[6] = _DRM_STAT_IRQ;
        dev->types[7] = _DRM_STAT_PRIMARY;
        dev->types[8] = _DRM_STAT_SECONDARY;
        dev->types[9] = _DRM_STAT_DMA;

        return 0;
}

void i915_driver_lastclose(drm_device_t * dev)
{
        if (dev->dev_private) {
                drm_i915_private_t *dev_priv = dev->dev_private;
                i915_mem_takedown(&(dev_priv->agp_heap));
        }
        i915_dma_cleanup(dev);
}

void i915_driver_preclose(drm_device_t * dev, DRMFILE filp)
{
        if (dev->dev_private) {
                drm_i915_private_t *dev_priv = dev->dev_private;
                i915_mem_release(dev, filp, dev_priv->agp_heap);
        }
}

drm_ioctl_desc_t i915_ioctls[] = {
        [DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = {i915_batchbuffer, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = {i915_irq_emit, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = {i915_irq_wait, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_I915_GETPARAM)] = {i915_getparam, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_I915_SETPARAM)] = {i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_I915_DESTROY_HEAP)] = { i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev   The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(drm_device_t * dev)
{
        return 1;
}