drm: add __GFP_COMP to the drm_alloc_pages
[linux-2.6] / drivers / char / drm / drm_fops.c
/**
 * \file drm_fops.c
 * File operations for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Daryll Strauss <daryll@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include <linux/poll.h>

static int drm_open_helper(struct inode *inode, struct file *filp,
                           drm_device_t * dev);

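/**
 * Perform first-open setup of a device.
 *
 * \param dev device.
 * \return zero on success or a negative number on failure.
 *
 * Calls the driver presetup hook, resets counters and DMA state, clears the
 * magic hash table, allocates the context list and reinitializes the lock,
 * queue and flag state, then calls the driver postsetup hook.
 */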
static int drm_setup(drm_device_t * dev)
{
        int i;
        int ret;

        if (dev->driver->presetup) {
                ret = dev->driver->presetup(dev);
                if (ret != 0)
                        return ret;
        }

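        /* Reset the per-device bookkeeping counters for a fresh first open. */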
        atomic_set(&dev->ioctl_count, 0);
        atomic_set(&dev->vma_count, 0);
        dev->buf_use = 0;
        atomic_set(&dev->buf_alloc, 0);

        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
                i = drm_dma_setup(dev);
                if (i < 0)
                        return i;
        }

        for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
                atomic_set(&dev->counts[i], 0);

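        /* Clear the hash table of authentication magic numbers. */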
        for (i = 0; i < DRM_HASH_SIZE; i++) {
                dev->magiclist[i].head = NULL;
                dev->magiclist[i].tail = NULL;
        }

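        /* Allocate an empty list head used to track contexts created by clients. */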
        dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist), DRM_MEM_CTXLIST);
        if (dev->ctxlist == NULL)
                return -ENOMEM;
        memset(dev->ctxlist, 0, sizeof(*dev->ctxlist));
        INIT_LIST_HEAD(&dev->ctxlist->head);

        dev->vmalist = NULL;
        dev->sigdata.lock = dev->lock.hw_lock = NULL;
        init_waitqueue_head(&dev->lock.lock_queue);
        dev->queue_count = 0;
        dev->queue_reserved = 0;
        dev->queue_slots = 0;
        dev->queuelist = NULL;
        dev->irq_enabled = 0;
        dev->context_flag = 0;
        dev->interrupt_flag = 0;
        dev->dma_flag = 0;
        dev->last_context = 0;
        dev->last_switch = 0;
        dev->last_checked = 0;
        init_waitqueue_head(&dev->context_wait);
        dev->if_version = 0;

        dev->ctx_start = 0;
        dev->lck_start = 0;

        dev->buf_async = NULL;
        init_waitqueue_head(&dev->buf_readers);
        init_waitqueue_head(&dev->buf_writers);

        DRM_DEBUG("\n");

        /*
         * The kernel's context could be created here, but is now created
         * in drm_dma_enqueue.  This is more resource-efficient for
         * hardware that does not do DMA, but may mean that
         * drm_select_queue fails between the time the interrupt is
         * initialized and the time the queues are initialized.
         */
        if (dev->driver->postsetup)
                dev->driver->postsetup(dev);

        return 0;
}

/**
 * Open file.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \return zero on success or a negative number on failure.
 *
 * Searches for the DRM device with the same minor number, calls
 * drm_open_helper(), and increments the device open count. If the open count
 * was previously zero, i.e., this is the first time the device is opened,
 * then calls drm_setup().
 */
int drm_open(struct inode *inode, struct file *filp)
{
        drm_device_t *dev = NULL;
        int minor = iminor(inode);
        int retcode = 0;

        if (!((minor >= 0) && (minor < drm_cards_limit)))
                return -ENODEV;

        if (!drm_heads[minor])
                return -ENODEV;

        if (!(dev = drm_heads[minor]->dev))
                return -ENODEV;

        retcode = drm_open_helper(inode, filp, dev);
        if (!retcode) {
                atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
                spin_lock(&dev->count_lock);
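                /* open_count was zero before this increment: first open of
                 * the device, so perform the one-time per-device setup.
                 */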
                if (!dev->open_count++) {
                        spin_unlock(&dev->count_lock);
                        return drm_setup(dev);
                }
                spin_unlock(&dev->count_lock);
        }

        return retcode;
}

EXPORT_SYMBOL(drm_open);

/**
 * Release file.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \return zero on success or a negative number on failure.
 *
 * If the hardware lock is held then free it, and take it again for the kernel
 * context since it is needed to reclaim buffers. Unlink the file private data
 * from its list and free it. Decrement the open count and, if it reaches zero,
 * call drm_takedown().
 */
int drm_release(struct inode *inode, struct file *filp)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev;
        int retcode = 0;

        lock_kernel();
        dev = priv->head->dev;

        DRM_DEBUG("open_count = %d\n", dev->open_count);

        if (dev->driver->prerelease)
                dev->driver->prerelease(dev, filp);

        /* ========================================================
         * Begin inline drm_release
         */

        DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
                  current->pid, (long)old_encode_dev(priv->head->device),
                  dev->open_count);

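        /* If this file currently holds the hardware lock, let the driver
         * clean up and then free the lock on the file's behalf.
         */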
        if (priv->lock_count && dev->lock.hw_lock &&
            _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
            dev->lock.filp == filp) {
                DRM_DEBUG("File %p released, freeing lock for context %d\n",
                          filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

                if (dev->driver->release)
                        dev->driver->release(dev, filp);

                drm_lock_free(dev, &dev->lock.hw_lock->lock,
                              _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

                /* FIXME: may require heavy-handed reset of
                   hardware at this point, possibly
                   processed via a callback to the X
                   server. */
        } else if (dev->driver->release && priv->lock_count
                   && dev->lock.hw_lock) {
                /* The lock is required to reclaim buffers */
                DECLARE_WAITQUEUE(entry, current);

                add_wait_queue(&dev->lock.lock_queue, &entry);
                for (;;) {
                        __set_current_state(TASK_INTERRUPTIBLE);
                        if (!dev->lock.hw_lock) {
                                /* Device has been unregistered */
                                retcode = -EINTR;
                                break;
                        }
                        if (drm_lock_take(&dev->lock.hw_lock->lock,
                                          DRM_KERNEL_CONTEXT)) {
                                dev->lock.filp = filp;
                                dev->lock.lock_time = jiffies;
                                atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
                                break;  /* Got lock */
                        }
                        /* Contention */
                        schedule();
                        if (signal_pending(current)) {
                                retcode = -ERESTARTSYS;
                                break;
                        }
                }
                __set_current_state(TASK_RUNNING);
                remove_wait_queue(&dev->lock.lock_queue, &entry);
                if (!retcode) {
                        if (dev->driver->release)
                                dev->driver->release(dev, filp);
                        drm_lock_free(dev, &dev->lock.hw_lock->lock,
                                      DRM_KERNEL_CONTEXT);
                }
        }

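        /* Drivers without a release callback still need their DMA buffers
         * reclaimed when the file is closed.
         */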
        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)
            && !dev->driver->release) {
                dev->driver->reclaim_buffers(dev, filp);
        }

        drm_fasync(-1, filp, 0);

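        /* Destroy any contexts still owned by this file, except the kernel
         * context.
         */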
        down(&dev->ctxlist_sem);
        if (dev->ctxlist && (!list_empty(&dev->ctxlist->head))) {
                drm_ctx_list_t *pos, *n;

                list_for_each_entry_safe(pos, n, &dev->ctxlist->head, head) {
                        if (pos->tag == priv &&
                            pos->handle != DRM_KERNEL_CONTEXT) {
                                if (dev->driver->context_dtor)
                                        dev->driver->context_dtor(dev,
                                                                  pos->handle);

                                drm_ctxbitmap_free(dev, pos->handle);

                                list_del(&pos->head);
                                drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
                                --dev->ctx_count;
                        }
                }
        }
        up(&dev->ctxlist_sem);

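        /* Unlink this file from the device's open-file list; if requested,
         * drop authentication from all remaining clients first.
         */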
        down(&dev->struct_sem);
        if (priv->remove_auth_on_close == 1) {
                drm_file_t *temp = dev->file_first;
                while (temp) {
                        temp->authenticated = 0;
                        temp = temp->next;
                }
        }
        if (priv->prev) {
                priv->prev->next = priv->next;
        } else {
                dev->file_first = priv->next;
        }
        if (priv->next) {
                priv->next->prev = priv->prev;
        } else {
                dev->file_last = priv->prev;
        }
        up(&dev->struct_sem);

        if (dev->driver->free_filp_priv)
                dev->driver->free_filp_priv(dev, priv);

        drm_free(priv, sizeof(*priv), DRM_MEM_FILES);

        /* ========================================================
         * End inline drm_release
         */

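        /* Account for the close; on the last close, tear the device down
         * unless it is still busy (pending ioctls or blocked).
         */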
        atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
        spin_lock(&dev->count_lock);
        if (!--dev->open_count) {
                if (atomic_read(&dev->ioctl_count) || dev->blocked) {
                        DRM_ERROR("Device busy: %d %d\n",
                                  atomic_read(&dev->ioctl_count), dev->blocked);
                        spin_unlock(&dev->count_lock);
                        unlock_kernel();
                        return -EBUSY;
                }
                spin_unlock(&dev->count_lock);
                unlock_kernel();
                return drm_takedown(dev);
        }
        spin_unlock(&dev->count_lock);

        unlock_kernel();

        return retcode;
}

EXPORT_SYMBOL(drm_release);

/**
 * Called whenever a process opens /dev/drm.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param dev device.
 * \return zero on success or a negative number on failure.
 *
 * Creates and initializes a drm_file structure for the file private data in
 * \p filp and adds it to the doubly-linked list in \p dev.
 */
static int drm_open_helper(struct inode *inode, struct file *filp,
                           drm_device_t * dev)
{
        int minor = iminor(inode);
        drm_file_t *priv;
        int ret;

        if (filp->f_flags & O_EXCL)
                return -EBUSY;  /* No exclusive opens */
        if (!drm_cpu_valid())
                return -EINVAL;

        DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);

        priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
        if (!priv)
                return -ENOMEM;

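        /* Fill in the per-file private data; openers with CAP_SYS_ADMIN start
         * out authenticated.
         */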
        memset(priv, 0, sizeof(*priv));
        filp->private_data = priv;
        priv->uid = current->euid;
        priv->pid = current->pid;
        priv->minor = minor;
        priv->head = drm_heads[minor];
        priv->ioctl_count = 0;
        priv->authenticated = capable(CAP_SYS_ADMIN);
        priv->lock_count = 0;

        if (dev->driver->open_helper) {
                ret = dev->driver->open_helper(dev, priv);
                if (ret < 0)
                        goto out_free;
        }

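        /* Append the new file to the tail of the device's open-file list. */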
        down(&dev->struct_sem);
        if (!dev->file_last) {
                priv->next = NULL;
                priv->prev = NULL;
                dev->file_first = priv;
                dev->file_last = priv;
        } else {
                priv->next = NULL;
                priv->prev = dev->file_last;
                dev->file_last->next = priv;
                dev->file_last = priv;
        }
        up(&dev->struct_sem);

#ifdef __alpha__
        /*
         * Default the hose
         */
        if (!dev->hose) {
                struct pci_dev *pci_dev;
                pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
                if (pci_dev) {
                        dev->hose = pci_dev->sysdata;
                        pci_dev_put(pci_dev);
                }
                if (!dev->hose) {
                        struct pci_bus *b = pci_bus_b(pci_root_buses.next);
                        if (b)
                                dev->hose = b->sysdata;
                }
        }
#endif

        return 0;
      out_free:
        drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
        filp->private_data = NULL;
        return ret;
}

/** No-op. */
int drm_flush(struct file *filp)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;

        DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
                  current->pid, (long)old_encode_dev(priv->head->device),
                  dev->open_count);
        return 0;
}

EXPORT_SYMBOL(drm_flush);

/** Install or remove asynchronous notification for this file. */
int drm_fasync(int fd, struct file *filp, int on)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        int retcode;

        DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
                  (long)old_encode_dev(priv->head->device));
        retcode = fasync_helper(fd, filp, on, &dev->buf_async);
        if (retcode < 0)
                return retcode;
        return 0;
}

EXPORT_SYMBOL(drm_fasync);

/** No-op. */
unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
{
        return 0;
}

EXPORT_SYMBOL(drm_poll);