/**
 * \file drm_lock.c
 * IOCTLs for locking
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"

static int drm_notifier(void *priv);

/**
 * Lock ioctl.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_lock structure.
 * \return zero on success or negative number on failure.
 *
 * Add the current task to the lock wait queue, and attempt to take the lock.
 */
int drm_lock(struct inode *inode, struct file *filp,
             unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        DECLARE_WAITQUEUE(entry, current);
        drm_lock_t lock;
        int ret = 0;

        ++priv->lock_count;

        if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock)))
                return -EFAULT;

        if (lock.context == DRM_KERNEL_CONTEXT) {
                DRM_ERROR("Process %d using kernel context %d\n",
                          current->pid, lock.context);
                return -EINVAL;
        }

        DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
                  lock.context, current->pid,
                  dev->lock.hw_lock->lock, lock.flags);

        if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
                if (lock.context < 0)
                        return -EINVAL;

        add_wait_queue(&dev->lock.lock_queue, &entry);
        spin_lock(&dev->lock.spinlock);
        dev->lock.user_waiters++;
        spin_unlock(&dev->lock.spinlock);
        for (;;) {
                __set_current_state(TASK_INTERRUPTIBLE);
                if (!dev->lock.hw_lock) {
                        /* Device has been unregistered */
                        ret = -EINTR;
                        break;
                }
                if (drm_lock_take(&dev->lock, lock.context)) {
                        dev->lock.filp = filp;
                        dev->lock.lock_time = jiffies;
                        atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
                        break;  /* Got lock */
                }

                /* Contention */
                schedule();
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
        }
        spin_lock(&dev->lock.spinlock);
        dev->lock.user_waiters--;
        spin_unlock(&dev->lock.spinlock);
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&dev->lock.lock_queue, &entry);

        DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
        if (ret)
                return ret;

        sigemptyset(&dev->sigmask);
        sigaddset(&dev->sigmask, SIGSTOP);
        sigaddset(&dev->sigmask, SIGTSTP);
        sigaddset(&dev->sigmask, SIGTTIN);
        sigaddset(&dev->sigmask, SIGTTOU);
        dev->sigdata.context = lock.context;
        dev->sigdata.lock = dev->lock.hw_lock;
        block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);

        if (dev->driver->dma_ready && (lock.flags & _DRM_LOCK_READY))
                dev->driver->dma_ready(dev);

        if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT)) {
                if (dev->driver->dma_quiescent(dev)) {
                        DRM_DEBUG("%d waiting for DMA quiescent\n", lock.context);
                        return DRM_ERR(EBUSY);
                }
        }

        if (dev->driver->kernel_context_switch &&
            dev->last_context != lock.context) {
                dev->driver->kernel_context_switch(dev, dev->last_context,
                                                   lock.context);
        }

        return 0;
}

/**
 * Unlock ioctl.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_lock structure.
 * \return zero on success or negative number on failure.
 *
 * Transfer and free the lock.
 */
int drm_unlock(struct inode *inode, struct file *filp,
               unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_lock_t lock;
        unsigned long irqflags;

        if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock)))
                return -EFAULT;

        if (lock.context == DRM_KERNEL_CONTEXT) {
                DRM_ERROR("Process %d using kernel context %d\n",
                          current->pid, lock.context);
                return -EINVAL;
        }

        spin_lock_irqsave(&dev->tasklet_lock, irqflags);

        if (dev->locked_tasklet_func) {
                dev->locked_tasklet_func(dev);

                dev->locked_tasklet_func = NULL;
        }

        spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);

        atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);

        /* kernel_context_switch isn't used by any of the x86 drm
         * modules but is required by the Sparc driver.
         */
        if (dev->driver->kernel_context_switch_unlock)
                dev->driver->kernel_context_switch_unlock(dev);
        else {
                if (drm_lock_free(&dev->lock, lock.context)) {
                        /* FIXME: Should really bail out here. */
                }
        }

        unblock_all_signals();
        return 0;
}
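
/*
 * Illustrative sketch, not part of this file: a DRI client normally brackets
 * hardware access with the two ioctls handled above, usually via libdrm.  The
 * field names follow the drm_lock_t layout used in this file; the exact
 * user-space wrapper shown here is an assumption.
 *
 *	drm_lock_t req = { .context = ctx, .flags = _DRM_LOCK_READY };
 *
 *	if (ioctl(fd, DRM_IOCTL_LOCK, &req) == 0) {
 *		... program the hardware ...
 *		req.flags = 0;
 *		ioctl(fd, DRM_IOCTL_UNLOCK, &req);
 *	}
 *
 * drm_lock() above returns -ERESTARTSYS if the wait is interrupted by a
 * signal, so callers must be prepared to retry.
 */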

/**
 * Take the heavyweight lock.
 *
 * \param lock_data lock data pointer.
 * \param context locking context.
 * \return one if the lock was obtained, or zero otherwise.
 *
 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
 */
int drm_lock_take(drm_lock_data_t *lock_data,
                  unsigned int context)
{
        unsigned int old, new, prev;
        volatile unsigned int *lock = &lock_data->hw_lock->lock;

        spin_lock(&lock_data->spinlock);
        do {
                old = *lock;
                if (old & _DRM_LOCK_HELD)
                        new = old | _DRM_LOCK_CONT;
                else {
                        new = context | _DRM_LOCK_HELD |
                                ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
                                 _DRM_LOCK_CONT : 0);
                }
                prev = cmpxchg(lock, old, new);
        } while (prev != old);
        spin_unlock(&lock_data->spinlock);

        if (_DRM_LOCKING_CONTEXT(old) == context) {
                if (old & _DRM_LOCK_HELD) {
                        if (context != DRM_KERNEL_CONTEXT) {
                                DRM_ERROR("%d holds heavyweight lock\n",
                                          context);
                        }
                        return 0;
                }
        }

        if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
                /* Have lock */
                return 1;
        }
        return 0;
}
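
/*
 * Worked example of the lock-word encoding manipulated above (flag macros
 * come from drm.h; shown here only for illustration):
 *
 *	hw_lock->lock = context | _DRM_LOCK_HELD;	  held, uncontended
 *	hw_lock->lock |= _DRM_LOCK_CONT;		  a waiter exists
 *	_DRM_LOCKING_CONTEXT(hw_lock->lock) == context;	  strips both flags
 *
 * The HELD and CONT flags occupy the top two bits of the word, so a context
 * handle must fit in the remaining bits.
 */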

/**
 * This takes a lock forcibly and hands it to context.  Should ONLY be used
 * inside *_unlock to give lock to kernel before calling *_dma_schedule.
 *
 * \param lock_data lock data pointer.
 * \param context locking context.
 * \return always one.
 *
 * Resets the lock file pointer.
 * Marks the lock as held by the given context, via the \p cmpxchg instruction.
 */
static int drm_lock_transfer(drm_lock_data_t *lock_data,
                             unsigned int context)
{
        unsigned int old, new, prev;
        volatile unsigned int *lock = &lock_data->hw_lock->lock;

        lock_data->filp = NULL;
        do {
                old = *lock;
                new = context | _DRM_LOCK_HELD;
                prev = cmpxchg(lock, old, new);
        } while (prev != old);
        return 1;
}

/**
 * Free lock.
 *
 * \param lock_data lock data pointer.
 * \param context context.
 * \return zero if the lock was freed, or one if it was transferred to a
 * kernel waiter or was held by a different context.
 *
 * Resets the lock file pointer.
 * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
 * waiting on the lock queue.
 */
int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context)
{
        unsigned int old, new, prev;
        volatile unsigned int *lock = &lock_data->hw_lock->lock;

        spin_lock(&lock_data->spinlock);
        if (lock_data->kernel_waiters != 0) {
                drm_lock_transfer(lock_data, 0);
                lock_data->idle_has_lock = 1;
                spin_unlock(&lock_data->spinlock);
                return 1;
        }
        spin_unlock(&lock_data->spinlock);

        do {
                old = *lock;
                new = _DRM_LOCKING_CONTEXT(old);
                prev = cmpxchg(lock, old, new);
        } while (prev != old);

        if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
                DRM_ERROR("%d freed heavyweight lock held by %d\n",
                          context, _DRM_LOCKING_CONTEXT(old));
                return 1;
        }
        wake_up_interruptible(&lock_data->lock_queue);
        return 0;
}

/**
 * If we get here, it means that the process has called DRM_IOCTL_LOCK
 * without calling DRM_IOCTL_UNLOCK.
 *
 * If the lock is not held, then let the signal proceed as usual.  If the lock
 * is held, then set the contended flag and keep the signal blocked.
 *
 * \param priv pointer to a drm_sigdata structure.
 * \return one if the signal should be delivered normally, or zero if the
 * signal should be blocked.
 */
static int drm_notifier(void *priv)
{
        drm_sigdata_t *s = (drm_sigdata_t *) priv;
        unsigned int old, new, prev;

        /* Allow signal delivery if lock isn't held */
        if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock)
            || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context)
                return 1;

        /* Otherwise, set flag to force call to drmUnlock */
        do {
                old = s->lock->lock;
                new = old | _DRM_LOCK_CONT;
                prev = cmpxchg(&s->lock->lock, old, new);
        } while (prev != old);
        return 0;
}

/**
 * This function returns immediately and takes the hw lock with the kernel
 * context if it is free; otherwise it is given the highest priority when and
 * if the lock is eventually released.
 *
 * This guarantees that the kernel will _eventually_ hold the lock _unless_
 * it is held by a blocked process.  (In the latter case an explicit wait for
 * the hardware lock would cause a deadlock, which is why the "idlelock" was
 * invented.)
 *
 * This should be sufficient to wait for GPU idle without having to worry
 * about starvation.
 */
void drm_idlelock_take(drm_lock_data_t *lock_data)
{
        int ret = 0;

        spin_lock(&lock_data->spinlock);
        lock_data->kernel_waiters++;
        if (!lock_data->idle_has_lock) {

                spin_unlock(&lock_data->spinlock);
                ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
                spin_lock(&lock_data->spinlock);

                if (ret == 1)
                        lock_data->idle_has_lock = 1;
        }
        spin_unlock(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_idlelock_take);

void drm_idlelock_release(drm_lock_data_t *lock_data)
{
        unsigned int old, prev;
        volatile unsigned int *lock = &lock_data->hw_lock->lock;

        spin_lock(&lock_data->spinlock);
        if (--lock_data->kernel_waiters == 0) {
                if (lock_data->idle_has_lock) {
                        do {
                                old = *lock;
                                prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
                        } while (prev != old);
                        wake_up_interruptible(&lock_data->lock_queue);
                        lock_data->idle_has_lock = 0;
                }
        }
        spin_unlock(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_idlelock_release);
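
/*
 * Illustrative sketch, an assumption rather than code from this file: a
 * driver that needs the engine idle (for instance before evicting buffers)
 * would pair the two exported calls above around its wait:
 *
 *	drm_idlelock_take(&dev->lock);
 *	... wait for the hardware to go idle ...
 *	drm_idlelock_release(&dev->lock);
 *
 * Unlike drm_lock(), this never blocks waiting on a client that holds the
 * heavyweight lock; the handoff to the kernel happens in drm_lock_free().
 */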


int drm_i_have_hw_lock(struct file *filp)
{
        DRM_DEVICE;

        return (priv->lock_count && dev->lock.hw_lock &&
                _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
                dev->lock.filp == filp);
}

EXPORT_SYMBOL(drm_i_have_hw_lock);