/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

/* Really want an OS-independent resettable timer.  Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	int i;

	for (i = 0; i < 10000; i++) {
		ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
		if (ring->space >= n)
			return 0;

		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

		if (ring->head != last_head)
			i = 0;

		last_head = ring->head;
	}

	return -EBUSY;
}

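/* Resync the driver's cached copy of the ring head, tail and free space
 * with the hardware ring registers, and flag an empty ring in the SAREA
 * perf boxes.
 */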
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

	ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;

	if (ring->head == ring->tail)
		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

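/* Tear down the DMA state set up by i915_initialize(): disable interrupts
 * if they are still installed, unmap the ring buffer, and release the
 * hardware status page (whether PCI- or GFX-memory backed).
 */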
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq)
		drm_irq_uninstall(dev);

	if (dev_priv->ring.virtual_start) {
		drm_core_ioremapfree(&dev_priv->ring.map, dev);
		dev_priv->ring.virtual_start = 0;
		dev_priv->ring.map.handle = 0;
		dev_priv->ring.map.size = 0;
	}

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
		/* Need to rewrite hardware status page */
		I915_WRITE(0x02080, 0x1ffff000);
	}

	if (dev_priv->status_gfx_addr) {
		dev_priv->status_gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->hws_map, dev);
		I915_WRITE(0x2080, 0x1ffff000);
	}

	return 0;
}

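/* Set up the shared ring buffer, SAREA pointers and hardware status page
 * from the parameters userspace passed through the I915_INIT_DMA ioctl.
 */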
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	dev_priv->sarea = drm_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		i915_dma_cleanup(dev);
		return -EINVAL;
	}

	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		i915_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return -EINVAL;
	}

	dev_priv->sarea_priv = (drm_i915_sarea_t *)
	    ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->ring.map.offset = init->ring_start;
	dev_priv->ring.map.size = init->ring_size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		i915_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;

	/* We are using separate values as placeholders for mechanisms for
	 * private backbuffer/depthbuffer usage.
	 */
	dev_priv->use_mi_batchbuffer_start = 0;
	if (IS_I965G(dev)) /* 965 doesn't support older method */
		dev_priv->use_mi_batchbuffer_start = 1;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	/* Program Hardware Status Page */
	if (!I915_NEED_GFX_HWS(dev)) {
		dev_priv->status_page_dmah =
		    drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);

		if (!dev_priv->status_page_dmah) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Can not allocate hardware status page\n");
			return -ENOMEM;
		}
		dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
		dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

		memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
		I915_WRITE(0x02080, dev_priv->dma_status_page);
	}
	DRM_DEBUG("Enabled hardware status page\n");
	return 0;
}

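/* Re-check the state set up by i915_initialize() and reprogram the
 * hardware status page address after a resume.
 */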
static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	DRM_DEBUG("%s\n", __func__);

	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		return -EINVAL;
	}

	if (!dev_priv->mmio_map) {
		DRM_ERROR("can not find mmio map!\n");
		return -EINVAL;
	}

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	if (dev_priv->status_gfx_addr != 0)
		I915_WRITE(0x02080, dev_priv->status_gfx_addr);
	else
		I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}

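/* DRM_I915_INIT ioctl: dispatch to initialize, cleanup or resume
 * according to init->func.
 */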
static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int do_validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

static int validate_cmd(int cmd)
{
	int ret = do_validate_cmd(cmd);

/*	printk("validate_cmd( %x ): %d\n", cmd, ret); */

	return ret;
}

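/* Copy a userspace command buffer into the low-priority ring, validating
 * each instruction with validate_cmd() along the way.
 */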
static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
		return -EINVAL;

	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return -EINVAL;

		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return -EINVAL;

		OUT_RING(cmd);

		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
							 sizeof(cmd))) {
				return -EINVAL;
			}
			OUT_RING(cmd);
		}
	}

	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}

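/* Emit the drawing rectangle for one userspace cliprect, after sanity
 * checking its coordinates.
 */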
static int i915_emit_box(struct drm_device * dev,
			 struct drm_clip_rect __user * boxes,
			 int i, int DR1, int DR4)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect box;
	RING_LOCALS;

	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
		return -EFAULT;
	}

	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box.x1, box.y1, box.x2, box.y2);
		return -EINVAL;
	}

	if (IS_I965G(dev)) {
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;

	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
}

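/* Emit the command buffer once per cliprect (or once if there are none),
 * preceding each pass with the matching drawing rectangle.
 */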
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t * cmd)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cmd->cliprects, i,
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

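/* Dispatch a userspace batchbuffer once per cliprect, either by chaining
 * to it with MI_BATCH_BUFFER_START or, on older parts, with an
 * MI_BATCH_BUFFER packet giving explicit start and end addresses.
 */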
static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect __user *boxes = batch->cliprects;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (dev_priv->use_mi_batchbuffer_start) {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);

	return 0;
}

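/* Queue an asynchronous flip between the front and back buffers and
 * record the new current page in the SAREA.
 */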
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __func__,
		  dev_priv->current_page,
		  dev_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	BEGIN_LP_RING(2);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(6);
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(2);
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();

	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}

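/* Wait for the ring to drain completely (all but the last 8 bytes free). */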
static int i915_quiescent(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_kernel_lost_context(dev);
	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return i915_quiescent(dev);
}

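/* DRM_I915_BATCHBUFFER ioctl: verify the cliprect array and dispatch the
 * batchbuffer supplied by userspace.
 */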
static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
		  batch->start, batch->used, batch->num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
							batch->num_cliprects *
							sizeof(struct drm_clip_rect)))
		return -EFAULT;

	ret = i915_dispatch_batchbuffer(dev, batch);

	sarea_priv->last_dispatch = (int)hw_status[5];
	return ret;
}

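/* DRM_I915_CMDBUFFER ioctl: verify the cliprect array and dispatch the
 * command buffer through the ring with per-instruction validation.
 */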
static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	int ret;

	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
		  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects &&
	    DRM_VERIFYAREA_READ(cmdbuf->cliprects,
				cmdbuf->num_cliprects *
				sizeof(struct drm_clip_rect))) {
		DRM_ERROR("Fault accessing cliprects\n");
		return -EFAULT;
	}

	ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		return ret;
	}

	sarea_priv->last_dispatch = (int)hw_status[5];
	return 0;
}

static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	DRM_DEBUG("%s\n", __func__);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return i915_dispatch_flip(dev);
}

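/* DRM_I915_GETPARAM ioctl: report driver state (IRQ active, batchbuffers
 * allowed, last dispatched breadcrumb) back to userspace.
 */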
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	default:
		DRM_ERROR("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}

static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		if (!IS_I965G(dev))
			dev_priv->use_mi_batchbuffer_start = param->value;
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param->value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param->value;
		break;
	default:
		DRM_ERROR("unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	return 0;
}

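/* DRM_I915_HWS_ADDR ioctl: on chipsets that need it (e.g. G33), place the
 * hardware status page in GFX memory at the address supplied by userspace.
 */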
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr);

	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		i915_dma_cleanup(dev);
		dev_priv->status_gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
			  " G33 hw status page\n");
		return -ENOMEM;
	}
	dev_priv->hw_status_page = dev_priv->hws_map.handle;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(0x02080, dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
		  dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);

	return 0;
}

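/* Driver load: register the extra i915 performance counters and map the
 * MMIO register BAR for suspend/resume.
 */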
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long base, size;
	int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
	if (dev_priv == NULL)
		return -ENOMEM;

	memset(dev_priv, 0, sizeof(drm_i915_private_t));

	dev->dev_private = (void *)dev_priv;

	/* Add register map (needed for suspend/resume) */
	base = drm_get_resource_start(dev, mmio_bar);
	size = drm_get_resource_len(dev, mmio_bar);

	ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
			 _DRM_KERNEL | _DRM_DRIVER,
			 &dev_priv->mmio_map);
	return ret;
}

int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->mmio_map)
		drm_rmmap(dev, dev_priv->mmio_map);

	drm_free(dev->dev_private, sizeof(drm_i915_private_t),
		 DRM_MEM_DRIVER);

	return 0;
}

void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	if (dev_priv->agp_heap)
		i915_mem_takedown(&(dev_priv->agp_heap));

	i915_dma_cleanup(dev);
}

void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}

struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev   The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}