5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
10 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
12 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
38 #include <linux/interrupt.h> /* For task queue support */
41 * Get interrupt from bus id.
43 * \param inode device inode.
44 * \param file_priv DRM file private.
46 * \param arg user argument, pointing to a drm_irq_busid structure.
47 * \return zero on success or a negative number on failure.
49 * Finds the PCI device with the specified bus id and gets its IRQ number.
50 * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
51 * to that of the device that this DRM instance attached to.
/* IOCTL handler: return the IRQ number for a PCI bus id.  Deprecated: the
 * only busid accepted is the domain:bus:slot.func of the device this DRM
 * instance is attached to; anything else fails (per the comment above).
 * NOTE(review): lines are elided in this chunk (original numbering jumps);
 * the error-return statements and closing braces are not visible here.
 */
53 int drm_irq_by_busid(struct drm_device *dev, void *data,
54 struct drm_file *file_priv)
56 	struct drm_irq_busid *p = data;
	/* Driver must advertise IRQ support at all. */
58 	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
	/* busnum packs domain in the high bits and bus number in the low byte;
	 * reject any request that does not match this exact PCI device. */
61 	if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
62 	(p->busnum & 0xff) != dev->pdev->bus->number ||
63 	p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
68 	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
/* Timer callback (armed by drm_vblank_put()): walk every CRTC and, under
 * vbl_lock, turn off the hardware vblank interrupt for any CRTC whose
 * refcount has dropped to zero while it is still enabled.
 * NOTE(review): the loop-variable declaration (int i) and some closing
 * braces are on lines elided from this chunk.
 */
74 static void vblank_disable_fn(unsigned long arg)
76 	struct drm_device *dev = (struct drm_device *)arg;
77 	unsigned long irqflags;
80 	for (i = 0; i < dev->num_crtcs; i++) {
	/* Lock is taken per-iteration so each CRTC's refcount/enabled pair is
	 * checked and cleared atomically with respect to drm_vblank_get(). */
81 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
82 	if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
83 	dev->vblank_enabled[i]) {
84 	dev->driver->disable_vblank(dev, i);
85 	dev->vblank_enabled[i] = 0;
87 	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
/* Tear down all per-CRTC vblank state: cancel the disable timer, force
 * vblank interrupts off synchronously, then free every array allocated by
 * drm_vblank_init().  Safe to call when init never ran (num_crtcs == 0).
 * NOTE(review): the early-return after the num_crtcs check and the trailing
 * DRM_MEM_DRIVER arguments of some drm_free() calls are on elided lines.
 */
91 static void drm_vblank_cleanup(struct drm_device *dev)
93 	/* Bail if the driver didn't call drm_vblank_init() */
94 	if (dev->num_crtcs == 0)
97 	del_timer(&dev->vblank_disable_timer);
	/* Run the disable path directly (not via the timer) so interrupts are
	 * guaranteed off before the state arrays below are freed. */
99 	vblank_disable_fn((unsigned long)dev);
101 	drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
103 	drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
105 	drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
106 	dev->num_crtcs, DRM_MEM_DRIVER);
107 	drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
108 	dev->num_crtcs, DRM_MEM_DRIVER);
109 	drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
110 	dev->num_crtcs, DRM_MEM_DRIVER);
111 	drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
113 	drm_free(dev->vblank_premodeset, sizeof(*dev->vblank_premodeset) *
114 	dev->num_crtcs, DRM_MEM_DRIVER);
115 	drm_free(dev->vblank_offset, sizeof(*dev->vblank_offset) * dev->num_crtcs,
/* Allocate and initialize per-CRTC vblank bookkeeping for @num_crtcs CRTCs:
 * wait queues, pending-signal lists, counters, refcounts, enable flags and
 * modeset-compensation values.  Also arms (but does not start) the disable
 * timer and the vbl_lock spinlock.  Returns 0 on success; on any allocation
 * failure falls through to drm_vblank_cleanup() and returns -ENOMEM.
 * NOTE(review): the goto-on-error statements, some DRM_MEM_DRIVER argument
 * lines, and the final return are on lines elided from this chunk.
 */
121 int drm_vblank_init(struct drm_device *dev, int num_crtcs)
123 	int i, ret = -ENOMEM;
125 	setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
127 	spin_lock_init(&dev->vbl_lock);
128 	atomic_set(&dev->vbl_signal_pending, 0);
129 	dev->num_crtcs = num_crtcs;
131 	dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
136 	dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs,
141 	dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
143 	if (!dev->_vblank_count)
146 	dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
148 	if (!dev->vblank_refcount)
	/* The remaining arrays use drm_calloc so they start zeroed. */
151 	dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
153 	if (!dev->vblank_enabled)
156 	dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
157 	if (!dev->last_vblank)
160 	dev->vblank_premodeset = drm_calloc(num_crtcs, sizeof(u32),
162 	if (!dev->vblank_premodeset)
165 	dev->vblank_offset = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
166 	if (!dev->vblank_offset)
169 	/* Zero per-crtc vblank stuff */
170 	for (i = 0; i < num_crtcs; i++) {
171 	init_waitqueue_head(&dev->vbl_queue[i]);
172 	INIT_LIST_HEAD(&dev->vbl_sigs[i]);
173 	atomic_set(&dev->_vblank_count[i], 0);
174 	atomic_set(&dev->vblank_refcount[i], 0);
	/* Error path: release whatever was allocated before the failure. */
180 	drm_vblank_cleanup(dev);
183 EXPORT_SYMBOL(drm_vblank_init);
186 * Install IRQ handler.
188 * \param dev DRM device.
189 * \param irq IRQ number.
191 * Initializes the IRQ related data, and setups drm_device::vbl_queue. Installs the handler, calling the driver
192 * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
193 * before and after the installation.
/* Install the device's IRQ handler.  Marks irq_enabled under struct_mutex,
 * calls the driver's irq_preinstall hook, requests the IRQ (shared if the
 * driver sets DRIVER_IRQ_SHARED), then runs irq_postinstall.  On failure
 * the irq_enabled flag is rolled back under the mutex.
 * NOTE(review): the return statements (-EINVAL/-EBUSY paths, the
 * request_irq error check, and the final return) are on elided lines.
 */
195 static int drm_irq_install(struct drm_device * dev)
198 	unsigned long sh_flags = 0;
200 	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
206 	mutex_lock(&dev->struct_mutex);
208 	/* Driver must have been initialized */
209 	if (!dev->dev_private) {
210 	mutex_unlock(&dev->struct_mutex);
	/* Refuse to double-install. */
214 	if (dev->irq_enabled) {
215 	mutex_unlock(&dev->struct_mutex);
	/* Claim the IRQ before dropping the mutex so a racing install sees it. */
218 	dev->irq_enabled = 1;
219 	mutex_unlock(&dev->struct_mutex);
221 	DRM_DEBUG("irq=%d\n", dev->irq);
223 	/* Before installing handler */
224 	dev->driver->irq_preinstall(dev);
226 	/* Install handler */
227 	if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
228 	sh_flags = IRQF_SHARED;
230 	ret = request_irq(dev->irq, dev->driver->irq_handler,
231 	sh_flags, dev->devname, dev);
	/* request_irq failed: undo the enabled flag (error check elided here). */
233 	mutex_lock(&dev->struct_mutex);
234 	dev->irq_enabled = 0;
235 	mutex_unlock(&dev->struct_mutex);
239 	/* After installing handler */
240 	ret = dev->driver->irq_postinstall(dev);
	/* postinstall failed: roll back as above (error check elided here). */
242 	mutex_lock(&dev->struct_mutex);
243 	dev->irq_enabled = 0;
244 	mutex_unlock(&dev->struct_mutex);
251 * Uninstall the IRQ handler.
253 * \param dev DRM device.
255 * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
/* Uninstall the IRQ handler: atomically clear irq_enabled under
 * struct_mutex, call the driver's irq_uninstall hook, free the IRQ, and
 * tear down vblank state.  Also clears any pending locked-tasklet callback.
 * NOTE(review): the declaration of irq_enabled, the early return when no
 * IRQ was installed, and the final return are on elided lines.
 */
257 int drm_irq_uninstall(struct drm_device * dev)
261 	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
264 	mutex_lock(&dev->struct_mutex);
	/* Snapshot-and-clear under the mutex so concurrent uninstalls race safely. */
265 	irq_enabled = dev->irq_enabled;
266 	dev->irq_enabled = 0;
267 	mutex_unlock(&dev->struct_mutex);
272 	DRM_DEBUG("irq=%d\n", dev->irq);
274 	dev->driver->irq_uninstall(dev);
276 	free_irq(dev->irq, dev);
278 	drm_vblank_cleanup(dev);
280 	dev->locked_tasklet_func = NULL;
285 EXPORT_SYMBOL(drm_irq_uninstall);
290 * \param inode device inode.
291 * \param file_priv DRM file private.
292 * \param cmd command.
293 * \param arg user argument, pointing to a drm_control structure.
294 * \return zero on success or a negative number on failure.
296 * Calls irq_install() or irq_uninstall() according to \p arg.
/* IOCTL dispatcher for DRM_IOCTL_CONTROL: installs or uninstalls the IRQ
 * handler depending on ctl->func.  For old interface versions the caller's
 * IRQ number must match the device's.
 * NOTE(review): the switch(ctl->func) line, the return 0 fallback paths,
 * and the default case are on lines elided from this chunk.
 */
298 int drm_control(struct drm_device *dev, void *data,
299 struct drm_file *file_priv)
301 	struct drm_control *ctl = data;
303 	/* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */
307 	case DRM_INST_HANDLER:
308 	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
	/* Pre-1.2 interface: userspace passed the IRQ explicitly; verify it. */
310 	if (dev->if_version < DRM_IF_VERSION(1, 2) &&
311 	ctl->irq != dev->irq)
313 	return drm_irq_install(dev);
314 	case DRM_UNINST_HANDLER:
315 	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
317 	return drm_irq_uninstall(dev);
324 * drm_vblank_count - retrieve "cooked" vblank counter value
326 * @crtc: which counter to retrieve
328 * Fetches the "cooked" vblank count value that represents the number of
329 * vblank events since the system was booted, including lost events due to
330 * modesetting activity.
332 u32 drm_vblank_count(struct drm_device *dev, int crtc)
334 return atomic_read(&dev->_vblank_count[crtc]) +
335 dev->vblank_offset[crtc];
337 EXPORT_SYMBOL(drm_vblank_count);
340 * drm_update_vblank_count - update the master vblank counter
342 * @crtc: counter to update
344 * Call back into the driver to update the appropriate vblank counter
345 * (specified by @crtc). Deal with wraparound, if it occurred, and
346 * update the last read value so we can deal with wraparound on the next
/* Read the hardware vblank counter for @crtc, compute how many events have
 * occurred since the last read (handling hardware-counter wraparound via
 * max_vblank_count), record the new value, and fold the delta into the
 * software _vblank_count.
 * NOTE(review): the wraparound branch is partially elided — the line adding
 * cur_vblank to diff and the else keyword are not visible in this chunk.
 */
349 void drm_update_vblank_count(struct drm_device *dev, int crtc)
351 	unsigned long irqflags;
352 	u32 cur_vblank, diff;
	/* Counter read is done before taking the lock; see NOTE about possibly
	 * losing a full wrap if interrupts were off too long (comment above). */
355 	 * Interrupts were disabled prior to this call, so deal with counter
357 	 * NOTE! It's possible we lost a full dev->max_vblank_count events
358 	 * here if the register is small or we had vblank interrupts off for
361 	cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
362 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
	/* Hardware counter wrapped: count up to the wrap point first. */
363 	if (cur_vblank < dev->last_vblank[crtc]) {
364 	diff = dev->max_vblank_count -
365 	dev->last_vblank[crtc];
368 	diff = cur_vblank - dev->last_vblank[crtc];
370 	dev->last_vblank[crtc] = cur_vblank;
371 	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
373 	atomic_add(diff, &dev->_vblank_count[crtc]);
375 EXPORT_SYMBOL(drm_update_vblank_count);
378 * drm_vblank_get - get a reference count on vblank events
380 * @crtc: which CRTC to own
382 * Acquire a reference count on vblank events to avoid having them disabled
383 * while in use. Note callers will probably want to update the master counter
384 * using drm_update_vblank_count() above before calling this routine so that
385 * wakeups occur on the right vblank event.
388 * Zero on success, nonzero on failure.
/* Take a reference on vblank events for @crtc.  On the 0->1 transition,
 * ask the driver to enable the vblank interrupt under vbl_lock; if that
 * fails, the reference is dropped again.
 * NOTE(review): the declaration/initialization of ret, the error check
 * around enable_vblank, and the final return are on elided lines.
 */
390 int drm_vblank_get(struct drm_device *dev, int crtc)
392 	unsigned long irqflags;
395 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
396 	/* Going from 0->1 means we have to enable interrupts again */
397 	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
398 	!dev->vblank_enabled[crtc]) {
399 	ret = dev->driver->enable_vblank(dev, crtc);
	/* enable_vblank failed: undo our reference (error branch elided here). */
401 	atomic_dec(&dev->vblank_refcount[crtc]);
403 	dev->vblank_enabled[crtc] = 1;
405 	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
409 EXPORT_SYMBOL(drm_vblank_get);
412 * drm_vblank_put - give up ownership of vblank events
414 * @crtc: which counter to give up
416 * Release ownership of a given vblank counter, turning off interrupts
419 void drm_vblank_put(struct drm_device *dev, int crtc)
421 /* Last user schedules interrupt disable */
422 if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
423 mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
425 EXPORT_SYMBOL(drm_vblank_put);
428 * drm_modeset_ctl - handle vblank event counter changes across mode switch
429 * @DRM_IOCTL_ARGS: standard ioctl arguments
431 * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
432 * ioctls around modesetting so that any lost vblank events are accounted for.
/* IOCTL handler bracketing a modeset: PRE saves the hardware counter for
 * the CRTC; POST reads it again and stores the difference in
 * vblank_offset[] so drm_vblank_count() stays monotonic across the reset.
 * NOTE(review): the crtc/new variable declarations, the crtc assignment
 * from modeset->crtc, the break statements, and the returns are on elided
 * lines in this chunk.
 */
434 int drm_modeset_ctl(struct drm_device *dev, void *data,
435 struct drm_file *file_priv)
437 	struct drm_modeset_ctl *modeset = data;
	/* Reject out-of-range CRTC indices from userspace. */
442 	if (crtc >= dev->num_crtcs) {
447 	switch (modeset->cmd) {
448 	case _DRM_PRE_MODESET:
449 	dev->vblank_premodeset[crtc] =
450 	dev->driver->get_vblank_counter(dev, crtc);
452 	case _DRM_POST_MODESET:
453 	new = dev->driver->get_vblank_counter(dev, crtc);
454 	dev->vblank_offset[crtc] = dev->vblank_premodeset[crtc] - new;
468 * \param inode device inode.
469 * \param file_priv DRM file private.
470 * \param cmd command.
471 * \param data user argument, pointing to a drm_wait_vblank structure.
472 * \return zero on success or a negative number on failure.
474 * Verifies the IRQ is installed.
476 * If a signal is requested checks if this task has already scheduled the same signal
477 * for the same vblank sequence number - nothing to be done in
478 * that case. If the number of tasks waiting for the interrupt exceeds 100 the
479 * function fails. Otherwise adds a new entry to drm_device::vbl_sigs for this
482 * If a signal is not requested, then calls vblank_wait().
/* IOCTL handler for vblank waits.  Validates the request type, resolves
 * RELATIVE sequences to ABSOLUTE, and then either (a) queues a signal to be
 * sent at the requested sequence (_DRM_VBLANK_SIGNAL, capped at 100 pending
 * signals, deduplicated per task/sequence/signo), or (b) blocks up to 3*HZ
 * on the CRTC's wait queue until the cooked counter passes the sequence.
 * The (1 << 23) comparisons treat the u32 sequence space as signed
 * distance, i.e. "already passed" vs "in the future".
 * NOTE(review): many lines are elided in this chunk — variable declarations
 * (ret, now), several returns (-EINVAL and signal-path exits), the
 * _DRM_VBLANK_ABSOLUTE break, the NEXTONMISS flag clear, the allocation
 * failure check after drm_calloc, and the closing else/braces.
 */
484 int drm_wait_vblank(struct drm_device *dev, void *data,
485 struct drm_file *file_priv)
487 	union drm_wait_vblank *vblwait = data;
490 	unsigned int flags, seq, crtc;
	/* An installed, enabled IRQ is a prerequisite for any vblank wait. */
492 	if ((!dev->irq) || (!dev->irq_enabled))
495 	if (vblwait->request.type &
496 	~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
497 	DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
498 	vblwait->request.type,
499 	(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
503 	flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
	/* Only two CRTCs are addressable here: primary (0) or SECONDARY (1). */
504 	crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
506 	if (crtc >= dev->num_crtcs)
	/* Bring the cooked counter up to date before interpreting the request. */
509 	drm_update_vblank_count(dev, crtc);
510 	seq = drm_vblank_count(dev, crtc);
512 	switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
513 	case _DRM_VBLANK_RELATIVE:
	/* Convert RELATIVE to ABSOLUTE and fall through. */
514 	vblwait->request.sequence += seq;
515 	vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
516 	case _DRM_VBLANK_ABSOLUTE:
	/* NEXTONMISS: if the target already passed, retarget to the next vblank. */
522 	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
523 	(seq - vblwait->request.sequence) <= (1<<23)) {
524 	vblwait->request.sequence = seq + 1;
527 	if (flags & _DRM_VBLANK_SIGNAL) {
528 	unsigned long irqflags;
529 	struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
530 	struct drm_vbl_sig *vbl_sig;
532 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
534 	/* Check if this task has already scheduled the same signal
535 	 * for the same vblank sequence number; nothing to be done in
538 	list_for_each_entry(vbl_sig, vbl_sigs, head) {
539 	if (vbl_sig->sequence == vblwait->request.sequence
540 	&& vbl_sig->info.si_signo ==
541 	vblwait->request.signal
542 	&& vbl_sig->task == current) {
543 	spin_unlock_irqrestore(&dev->vbl_lock,
545 	vblwait->reply.sequence = seq;
	/* Global cap on outstanding vblank signals across all clients. */
550 	if (atomic_read(&dev->vbl_signal_pending) >= 100) {
551 	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
555 	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
	/* Allocate outside the spinlock; zeroed so list/head state is clean. */
557 	vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
	/* Hold a vblank reference for as long as this signal is queued. */
562 	ret = drm_vblank_get(dev, crtc);
	/* drm_vblank_get failed: free the entry (error branch elided here). */
564 	drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
569 	atomic_inc(&dev->vbl_signal_pending);
571 	vbl_sig->sequence = vblwait->request.sequence;
572 	vbl_sig->info.si_signo = vblwait->request.signal;
573 	vbl_sig->task = current;
575 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
577 	list_add_tail(&vbl_sig->head, vbl_sigs);
579 	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
581 	vblwait->reply.sequence = seq;
	/* Blocking path: wait on the CRTC queue until the sequence is reached. */
583 	unsigned long cur_vblank;
585 	ret = drm_vblank_get(dev, crtc);
588 	DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
589 	(((cur_vblank = drm_vblank_count(dev, crtc))
590 	- vblwait->request.sequence) <= (1 << 23)));
591 	drm_vblank_put(dev, crtc);
592 	do_gettimeofday(&now);
594 	vblwait->reply.tval_sec = now.tv_sec;
595 	vblwait->reply.tval_usec = now.tv_usec;
596 	vblwait->reply.sequence = cur_vblank;
604 * Send the VBLANK signals.
606 * \param dev DRM device.
607 * \param crtc CRTC where the vblank event occurred
609 * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
611 * If a signal is not requested, then calls vblank_wait().
/* Deliver all due vblank signals for @crtc: under vbl_lock, walk the
 * pending-signal list and, for every entry whose sequence has been reached
 * (signed-distance test via (1 << 23)), send the signal, unlink and free
 * the entry, and drop its pending count and vblank reference.
 * NOTE(review): the declaration of flags and some closing braces are on
 * lines elided from this chunk.
 */
613 static void drm_vbl_send_signals(struct drm_device * dev, int crtc)
615 	struct drm_vbl_sig *vbl_sig, *tmp;
616 	struct list_head *vbl_sigs;
617 	unsigned int vbl_seq;
620 	spin_lock_irqsave(&dev->vbl_lock, flags);
622 	vbl_sigs = &dev->vbl_sigs[crtc];
623 	vbl_seq = drm_vblank_count(dev, crtc);
	/* _safe iteration: entries are deleted while walking the list. */
625 	list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
626 	if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
	/* Report the actual sequence the signal fired at via si_code. */
627 	vbl_sig->info.si_code = vbl_seq;
628 	send_sig_info(vbl_sig->info.si_signo,
629 	&vbl_sig->info, vbl_sig->task);
631 	list_del(&vbl_sig->head);
633 	drm_free(vbl_sig, sizeof(*vbl_sig),
635 	atomic_dec(&dev->vbl_signal_pending);
	/* Release the reference taken when the signal was queued. */
636 	drm_vblank_put(dev, crtc);
640 	spin_unlock_irqrestore(&dev->vbl_lock, flags);
644 * drm_handle_vblank - handle a vblank event
646 * @crtc: where this event occurred
648 * Drivers should call this routine in their vblank interrupt handlers to
649 * update the vblank counter and send any signals that may be pending.
/* Per-vblank-interrupt entry point for drivers: refresh the cooked counter,
 * wake blocked drm_wait_vblank() callers, then deliver any due signals.
 * The order matters: the counter must be current before waiters are woken
 * or signal sequences are compared.
 */
651 void drm_handle_vblank(struct drm_device *dev, int crtc)
653 	drm_update_vblank_count(dev, crtc);
654 	DRM_WAKEUP(&dev->vbl_queue[crtc]);
655 	drm_vbl_send_signals(dev, crtc);
657 EXPORT_SYMBOL(drm_handle_vblank);
660 * Tasklet wrapper function.
662 * \param data DRM device in disguise.
664 * Attempts to grab the HW lock and calls the driver callback on success. On
665 * failure, leave the lock marked as contended so the callback can be called
/* Tasklet body for drm_locked_tasklet(): under tasklet_lock, try to take
 * the HW lock with the kernel context; if there is no pending callback or
 * the lock is contended, bail (the lock stays marked contended so the
 * callback runs later — see the comment above).  Otherwise run the stored
 * callback, free the HW lock, and clear the pending pointer.
 * NOTE(review): the return after the early unlock and the argument line of
 * drm_lock_free are on lines elided from this chunk.
 */
668 static void drm_locked_tasklet_func(unsigned long data)
670 	struct drm_device *dev = (struct drm_device *)data;
671 	unsigned long irqflags;
673 	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
675 	if (!dev->locked_tasklet_func ||
676 	!drm_lock_take(&dev->lock,
677 	DRM_KERNEL_CONTEXT)) {
678 	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
	/* Lock acquired: account for it like any other lock grab. */
682 	dev->lock.lock_time = jiffies;
683 	atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
685 	dev->locked_tasklet_func(dev);
687 	drm_lock_free(&dev->lock,
690 	dev->locked_tasklet_func = NULL;
692 	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
696 * Schedule a tasklet to call back a driver hook with the HW lock held.
698 * \param dev DRM device.
699 * \param func Driver callback.
701 * This is intended for triggering actions that require the HW lock from an
702 * interrupt handler. The lock will be grabbed ASAP after the interrupt handler
703 * completes. Note that the callback may be called from interrupt or process
704 * context, it must not make any assumptions about this. Also, the HW lock will
705 * be held with the kernel context or any client context.
/* Schedule @func to run with the HW lock held, via a single static tasklet
 * shared across all devices.  Bails if the driver has no IRQ support, the
 * tasklet is already scheduled, or another callback is already pending for
 * this device (only one pending func per device).
 * NOTE(review): the return statements after the early-out checks are on
 * lines elided from this chunk.
 */
707 void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *))
709 	unsigned long irqflags;
	/* One tasklet object for the whole module; data is rebound per call. */
710 	static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);
712 	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) ||
713 	test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state))
716 	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
	/* A callback is already queued for this device; don't overwrite it. */
718 	if (dev->locked_tasklet_func) {
719 	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
723 	dev->locked_tasklet_func = func;
725 	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
727 	drm_tasklet.data = (unsigned long)dev;
729 	tasklet_hi_schedule(&drm_tasklet);
731 EXPORT_SYMBOL(drm_locked_tasklet);