/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

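/* Interrupt bits used by this driver in the hardware interrupt
 * identity/enable registers (I915REG_INT_IDENTITY_R / I915REG_INT_ENABLE_R).
 */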
#define USER_INT_FLAG (1<<1)
#define VSYNC_PIPEB_FLAG (1<<5)
#define VSYNC_PIPEA_FLAG (1<<7)

#define MAX_NOPID ((u32)~0)

/**
 * Emit blits for scheduled buffer swaps.
 *
 * This function will be called with the HW lock held.
 */
static void i915_vblank_tasklet(drm_device_t *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
        struct list_head *list, *tmp, hits, *hit;
        int nhits, nrects, slice[2], upper[2], lower[2], i;
        unsigned counter[2] = { atomic_read(&dev->vbl_received),
                                atomic_read(&dev->vbl_received2) };
        drm_drawable_info_t *drw;
        drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
        u32 cpp = dev_priv->cpp;
        u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
                                XY_SRC_COPY_BLT_WRITE_ALPHA |
                                XY_SRC_COPY_BLT_WRITE_RGB)
                             : XY_SRC_COPY_BLT_CMD;
        u32 pitchropcpp = (sarea_priv->pitch * cpp) | (0xcc << 16) |
                          (cpp << 23) | (1 << 24);
        RING_LOCALS;

        DRM_DEBUG("\n");

        INIT_LIST_HEAD(&hits);

        nhits = nrects = 0;

        spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

        /* Find buffer swaps scheduled for this vertical blank */
        list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
                drm_i915_vbl_swap_t *vbl_swap =
                        list_entry(list, drm_i915_vbl_swap_t, head);

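                /* Unsigned subtraction handles counter wraparound: skip
                 * swaps whose target sequence hasn't been reached yet.
                 */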
                if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23))
                        continue;

                list_del(list);
                dev_priv->swaps_pending--;

                spin_unlock(&dev_priv->swaps_lock);
                spin_lock(&dev->drw_lock);

                drw = drm_get_drawable_info(dev, vbl_swap->drw_id);

                if (!drw) {
                        spin_unlock(&dev->drw_lock);
                        drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
                        spin_lock(&dev_priv->swaps_lock);
                        continue;
                }

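                /* Insert into the local list of hits, keeping it sorted by
                 * the top (y1) coordinate of each drawable's first clip rect.
                 */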
                list_for_each(hit, &hits) {
                        drm_i915_vbl_swap_t *swap_cmp =
                                list_entry(hit, drm_i915_vbl_swap_t, head);
                        drm_drawable_info_t *drw_cmp =
                                drm_get_drawable_info(dev, swap_cmp->drw_id);

                        if (drw_cmp &&
                            drw_cmp->rects[0].y1 > drw->rects[0].y1) {
                                list_add_tail(list, hit);
                                break;
                        }
                }

                spin_unlock(&dev->drw_lock);

                /* List of hits was empty, or we reached the end of it */
                if (hit == &hits)
                        list_add_tail(list, hits.prev);

                nhits++;

                spin_lock(&dev_priv->swaps_lock);
        }

        if (nhits == 0) {
                spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
                return;
        }

        spin_unlock(&dev_priv->swaps_lock);

        i915_kernel_lost_context(dev);

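        /* Program the drawing rectangle to cover the whole framebuffer
         * before emitting the swap blits.
         */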
        BEGIN_LP_RING(6);

        OUT_RING(GFX_OP_DRAWRECT_INFO);
        OUT_RING(0);
        OUT_RING(0);
        OUT_RING(sarea_priv->width | sarea_priv->height << 16);
        OUT_RING(sarea_priv->width | sarea_priv->height << 16);
        OUT_RING(0);

        ADVANCE_LP_RING();

        sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;

        upper[0] = upper[1] = 0;
        slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
        slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
        lower[0] = sarea_priv->pipeA_y + slice[0];
        lower[1] = sarea_priv->pipeB_y + slice[1];

        spin_lock(&dev->drw_lock);

        /* Emit blits for buffer swaps, partitioning both outputs into as many
         * slices as there are buffer swaps scheduled in order to avoid tearing
         * (based on the assumption that a single buffer swap would always
         * complete before scanout starts).
         */
        for (i = 0; i++ < nhits;
             upper[0] = lower[0], lower[0] += slice[0],
             upper[1] = lower[1], lower[1] += slice[1]) {
                if (i == nhits)
                        lower[0] = lower[1] = sarea_priv->height;

                list_for_each(hit, &hits) {
                        drm_i915_vbl_swap_t *swap_hit =
                                list_entry(hit, drm_i915_vbl_swap_t, head);
                        drm_clip_rect_t *rect;
                        int num_rects, pipe;
                        unsigned short top, bottom;

                        drw = drm_get_drawable_info(dev, swap_hit->drw_id);

                        if (!drw)
                                continue;

                        rect = drw->rects;
                        pipe = swap_hit->pipe;
                        top = upper[pipe];
                        bottom = lower[pipe];

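                        /* Clip each of the drawable's rects to the current
                         * slice and emit one XY_SRC_COPY_BLT per intersection.
                         */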
                        for (num_rects = drw->num_rects; num_rects--; rect++) {
                                int y1 = max(rect->y1, top);
                                int y2 = min(rect->y2, bottom);

                                if (y1 >= y2)
                                        continue;

                                BEGIN_LP_RING(8);

                                OUT_RING(cmd);
                                OUT_RING(pitchropcpp);
                                OUT_RING((y1 << 16) | rect->x1);
                                OUT_RING((y2 << 16) | rect->x2);
                                OUT_RING(sarea_priv->front_offset);
                                OUT_RING((y1 << 16) | rect->x1);
                                OUT_RING(pitchropcpp & 0xffff);
                                OUT_RING(sarea_priv->back_offset);

                                ADVANCE_LP_RING();
                        }
                }
        }

        spin_unlock_irqrestore(&dev->drw_lock, irqflags);

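        /* All blits are queued; free the swap records. */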
        list_for_each_safe(hit, tmp, &hits) {
                drm_i915_vbl_swap_t *swap_hit =
                        list_entry(hit, drm_i915_vbl_swap_t, head);

                list_del(hit);

                drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
        }
}

irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
        drm_device_t *dev = (drm_device_t *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u16 temp;

        temp = I915_READ16(I915REG_INT_IDENTITY_R);

        temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG);

        DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);

        if (temp == 0)
                return IRQ_NONE;

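        /* Acknowledge the interrupt sources we are going to handle by
         * writing them back to the identity register.
         */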
        I915_WRITE16(I915REG_INT_IDENTITY_R, temp);

        dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

        if (temp & USER_INT_FLAG)
                DRM_WAKEUP(&dev_priv->irq_queue);

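        /* If both pipes generate vblank interrupts they get separate
         * counters; otherwise whichever enabled pipe fired is folded into
         * the primary counter.
         */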
        if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
                int vblank_pipe = dev_priv->vblank_pipe;

                if ((vblank_pipe &
                     (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
                    == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
                        if (temp & VSYNC_PIPEA_FLAG)
                                atomic_inc(&dev->vbl_received);
                        if (temp & VSYNC_PIPEB_FLAG)
                                atomic_inc(&dev->vbl_received2);
                } else if (((temp & VSYNC_PIPEA_FLAG) &&
                            (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
                           ((temp & VSYNC_PIPEB_FLAG) &&
                            (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
                        atomic_inc(&dev->vbl_received);

                DRM_WAKEUP(&dev->vbl_queue);
                drm_vbl_send_signals(dev);

                if (dev_priv->swaps_pending > 0)
                        drm_locked_tasklet(dev, i915_vblank_tasklet);
        }

        return IRQ_HANDLED;
}

static int i915_emit_irq(drm_device_t * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        RING_LOCALS;

        i915_kernel_lost_context(dev);

        DRM_DEBUG("%s\n", __FUNCTION__);

        dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;

        if (dev_priv->counter > 0x7FFFFFFFUL)
                dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;

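        /* Store the new counter value at byte offset 20 of the hardware
         * status page (the breadcrumb read back via READ_BREADCRUMB), then
         * request a user interrupt.
         */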
        BEGIN_LP_RING(6);
        OUT_RING(CMD_STORE_DWORD_IDX);
        OUT_RING(20);
        OUT_RING(dev_priv->counter);
        OUT_RING(0);
        OUT_RING(0);
        OUT_RING(GFX_OP_USER_INTERRUPT);
        ADVANCE_LP_RING();

        return dev_priv->counter;
}

static int i915_wait_irq(drm_device_t * dev, int irq_nr)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int ret = 0;

        DRM_DEBUG("%s irq_nr=%d breadcrumb=%d\n", __FUNCTION__, irq_nr,
                  READ_BREADCRUMB(dev_priv));

        if (READ_BREADCRUMB(dev_priv) >= irq_nr)
                return 0;

        dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

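        /* Sleep until the breadcrumb catches up with the requested sequence
         * number, or give up after 3 seconds.
         */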
        DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
                    READ_BREADCRUMB(dev_priv) >= irq_nr);

        if (ret == DRM_ERR(EBUSY)) {
                DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n",
                          __FUNCTION__,
                          READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
        }

        dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
        return ret;
}

static int i915_driver_vblank_do_wait(drm_device_t *dev, unsigned int *sequence,
                                      atomic_t *counter)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned int cur_vblank;
        int ret = 0;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

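        /* Wait (up to 3 seconds) until the vblank counter has passed the
         * requested sequence; the unsigned subtraction tolerates wraparound.
         */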
        DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
                    (((cur_vblank = atomic_read(counter))
                        - *sequence) <= (1<<23)));

        *sequence = cur_vblank;

        return ret;
}

int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
{
        return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
}

int i915_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence)
{
        return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
}

/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_irq_emit_t emit;
        int result;

        LOCK_TEST_WITH_RETURN(dev, filp);

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(emit, (drm_i915_irq_emit_t __user *) data,
                                 sizeof(emit));

        result = i915_emit_irq(dev);

        if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) {
                DRM_ERROR("copy_to_user\n");
                return DRM_ERR(EFAULT);
        }

        return 0;
}

/* Doesn't need the hardware lock.
 */
int i915_irq_wait(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_irq_wait_t irqwait;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_i915_irq_wait_t __user *) data,
                                 sizeof(irqwait));

        return i915_wait_irq(dev, irqwait.irq_seq);
}

static void i915_enable_interrupt(drm_device_t *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u16 flag;

        flag = 0;
        if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
                flag |= VSYNC_PIPEA_FLAG;
        if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
                flag |= VSYNC_PIPEB_FLAG;

        I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag);
}

/* Set the vblank monitor pipe
 */
int i915_vblank_pipe_set(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_vblank_pipe_t pipe;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(pipe, (drm_i915_vblank_pipe_t __user *) data,
                                 sizeof(pipe));

        if (pipe.pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
                DRM_ERROR("%s called with invalid pipe 0x%x\n",
                          __FUNCTION__, pipe.pipe);
                return DRM_ERR(EINVAL);
        }

        dev_priv->vblank_pipe = pipe.pipe;

        i915_enable_interrupt(dev);

        return 0;
}

int i915_vblank_pipe_get(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_vblank_pipe_t pipe;
        u16 flag;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        flag = I915_READ(I915REG_INT_ENABLE_R);
        pipe.pipe = 0;
        if (flag & VSYNC_PIPEA_FLAG)
                pipe.pipe |= DRM_I915_VBLANK_PIPE_A;
        if (flag & VSYNC_PIPEB_FLAG)
                pipe.pipe |= DRM_I915_VBLANK_PIPE_B;
        DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_pipe_t __user *) data, pipe,
                               sizeof(pipe));
        return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
int i915_vblank_swap(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_vblank_swap_t swap;
        drm_i915_vbl_swap_t *vbl_swap;
        unsigned int pipe, seqtype, curseq;
        unsigned long irqflags;
        struct list_head *list;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __func__);
                return DRM_ERR(EINVAL);
        }

        if (dev_priv->sarea_priv->rotation) {
                DRM_DEBUG("Rotation not supported\n");
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(swap, (drm_i915_vblank_swap_t __user *) data,
                                 sizeof(swap));

        if (swap.seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
                             _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
                DRM_ERROR("Invalid sequence type 0x%x\n", swap.seqtype);
                return DRM_ERR(EINVAL);
        }

        pipe = (swap.seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;

        seqtype = swap.seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);

        if (!(dev_priv->vblank_pipe & (1 << pipe))) {
                DRM_ERROR("Invalid pipe %d\n", pipe);
                return DRM_ERR(EINVAL);
        }

        spin_lock_irqsave(&dev->drw_lock, irqflags);

        if (!drm_get_drawable_info(dev, swap.drawable)) {
                spin_unlock_irqrestore(&dev->drw_lock, irqflags);
                DRM_DEBUG("Invalid drawable ID %d\n", swap.drawable);
                return DRM_ERR(EINVAL);
        }

        spin_unlock_irqrestore(&dev->drw_lock, irqflags);

        curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);

        if (seqtype == _DRM_VBLANK_RELATIVE)
                swap.sequence += curseq;

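        /* The requested vblank has already passed: either retarget the swap
         * to the next vblank or reject the request.
         */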
        if ((curseq - swap.sequence) <= (1<<23)) {
                if (swap.seqtype & _DRM_VBLANK_NEXTONMISS) {
                        swap.sequence = curseq + 1;
                } else {
                        DRM_DEBUG("Missed target sequence\n");
                        return DRM_ERR(EINVAL);
                }
        }

        spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

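        /* If an identical swap (same drawable, pipe and sequence) is already
         * queued, there is nothing more to do.
         */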
        list_for_each(list, &dev_priv->vbl_swaps.head) {
                vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);

                if (vbl_swap->drw_id == swap.drawable &&
                    vbl_swap->pipe == pipe &&
                    vbl_swap->sequence == swap.sequence) {
                        spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
                        DRM_DEBUG("Already scheduled\n");
                        return 0;
                }
        }

        spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);

        if (dev_priv->swaps_pending >= 100) {
                DRM_DEBUG("Too many swaps queued\n");
                return DRM_ERR(EBUSY);
        }

        vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);

        if (!vbl_swap) {
                DRM_ERROR("Failed to allocate memory to queue swap\n");
                return DRM_ERR(ENOMEM);
        }

        DRM_DEBUG("\n");

        vbl_swap->drw_id = swap.drawable;
        vbl_swap->pipe = pipe;
        vbl_swap->sequence = swap.sequence;

        spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);

        list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
        dev_priv->swaps_pending++;

        spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);

        DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_swap_t __user *) data, swap,
                               sizeof(swap));

        return 0;
}

/* drm_dma.h hooks
 */
void i915_driver_irq_preinstall(drm_device_t * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

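        /* Keep interrupt sources quiet until the postinstall hook enables
         * the ones we actually want.
         */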
        I915_WRITE16(I915REG_HWSTAM, 0xfffe);
        I915_WRITE16(I915REG_INT_MASK_R, 0x0);
        I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
}

void i915_driver_irq_postinstall(drm_device_t * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

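        /* Set up the vblank swap queue, default to pipe A if no pipe has
         * been selected yet, and enable the user and vblank interrupts.
         */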
        spin_lock_init(&dev_priv->swaps_lock);
        INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
        dev_priv->swaps_pending = 0;

        if (!dev_priv->vblank_pipe)
                dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
        i915_enable_interrupt(dev);
        DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
}

void i915_driver_irq_uninstall(drm_device_t * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u16 temp;

        if (!dev_priv)
                return;

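        /* Mask and disable all interrupt sources, then acknowledge anything
         * still pending in the identity register.
         */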
        I915_WRITE16(I915REG_HWSTAM, 0xffff);
        I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
        I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);

        temp = I915_READ16(I915REG_INT_IDENTITY_R);
        I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
}