/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"

/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order( unsigned long size )
{
        int order;
        unsigned long tmp;

        for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
                ;

        if (size & (size - 1))
                ++order;

        return order;
}
EXPORT_SYMBOL(drm_order);
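
/*
 * A possible faster variant, addressing the \todo above (a sketch only,
 * not wired into this file): fls() from <linux/bitops.h> returns the
 * 1-based index of the most significant set bit, so fls(size - 1) is
 * exactly the order for any size > 1.  Assumes fls() is in scope here
 * and that size fits in the integer range fls() accepts.
 */
static inline int drm_order_fls(unsigned long size)
{
        if (size <= 1)
                return 0;
        return fls(size - 1);
}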

#ifdef CONFIG_COMPAT
/*
 * Used to allocate 32-bit handles for _DRM_SHM regions
 * The 0x10000000 value is chosen to be out of the way of
 * FB/register and GART physical addresses.
 */
static unsigned int map32_handle = 0x10000000;
#endif

/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRRs where
 * applicable and if supported by the kernel.
 */
int drm_addmap( struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t *map;
        drm_map_t __user *argp = (void __user *)arg;
        drm_map_list_t *list;

        if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */

        map = drm_alloc( sizeof(*map), DRM_MEM_MAPS );
        if ( !map )
                return -ENOMEM;

        if ( copy_from_user( map, argp, sizeof(*map) ) ) {
                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                return -EFAULT;
        }

        /* Only allow shared memory to be removable, since we only keep
         * enough bookkeeping information about shared memory to allow for
         * removal when processes fork.
         */
        if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }
        DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
                   map->offset, map->size, map->type );
        if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }
        map->mtrr   = -1;
        map->handle = NULL;

        switch ( map->type ) {
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
                if ( map->offset + map->size < map->offset ||
                     map->offset < virt_to_phys(high_memory) ) {
                        drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                        return -EINVAL;
                }
#endif
#ifdef __alpha__
                map->offset += dev->hose->mem_space->start;
#endif
                if (drm_core_has_MTRR(dev)) {
                        if ( map->type == _DRM_FRAME_BUFFER ||
                             (map->flags & _DRM_WRITE_COMBINING) ) {
                                map->mtrr = mtrr_add( map->offset, map->size,
                                                      MTRR_TYPE_WRCOMB, 1 );
                        }
                }
                if (map->type == _DRM_REGISTERS)
                        map->handle = drm_ioremap( map->offset, map->size,
                                                   dev );
                break;

        case _DRM_SHM:
                map->handle = vmalloc_32(map->size);
                DRM_DEBUG( "%lu %d %p\n",
                           map->size, drm_order( map->size ), map->handle );
                if ( !map->handle ) {
                        drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                        return -ENOMEM;
                }
                map->offset = (unsigned long)map->handle;
                if ( map->flags & _DRM_CONTAINS_LOCK ) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
                        if (dev->lock.hw_lock != NULL) {
                                vfree( map->handle );
                                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                                return -EBUSY;
                        }
                        dev->sigdata.lock =
                        dev->lock.hw_lock = map->handle; /* Pointer to lock */
                }
                break;
        case _DRM_AGP:
                if (drm_core_has_AGP(dev)) {
#ifdef __alpha__
                        map->offset += dev->hose->mem_space->start;
#endif
                        map->offset += dev->agp->base;
                        map->mtrr   = dev->agp->agp_mtrr; /* for getmap */
                }
                break;
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
                }
                map->offset += dev->sg->handle;
                break;

        default:
                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }

        list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
        if(!list) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -ENOMEM;
        }
        memset(list, 0, sizeof(*list));
        list->map = map;

        down(&dev->struct_sem);
        list_add(&list->head, &dev->maplist->head);
#ifdef CONFIG_COMPAT
        /* Assign a 32-bit handle for _DRM_SHM mappings */
        /* We do it here so that dev->struct_sem protects the increment */
        if (map->type == _DRM_SHM)
                map->offset = map32_handle += PAGE_SIZE;
#endif
        up(&dev->struct_sem);

        if ( copy_to_user( argp, map, sizeof(*map) ) )
                return -EFAULT;
        if (copy_to_user(&argp->handle, &map->offset, sizeof(map->offset)))
                return -EFAULT;
        return 0;
}
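
/*
 * Hedged userspace sketch of driving this ioctl (illustration only, not
 * part of the kernel build).  Assumes the drm_map_t type and the
 * DRM_IOCTL_ADD_MAP request exported by this tree's drm.h; the device
 * path and the 4 KiB size are placeholders.
 *
 *      drm_map_t map = { .offset = 0, .size = 0x1000,
 *                        .type = _DRM_SHM, .flags = _DRM_CONTAINS_LOCK };
 *      int fd = open("/dev/dri/card0", O_RDWR);
 *      if (fd >= 0 && ioctl(fd, DRM_IOCTL_ADD_MAP, &map) == 0)
 *              printf("handle = %p\n", map.handle);
 */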


/**
 * Remove a map from the list and deallocate its resources if the mapping
 * isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is still being used, and frees any associated resources
 * (such as MTRRs) if it is not.
 *
 * \sa drm_addmap().
 */
int drm_rmmap(struct inode *inode, struct file *filp,
              unsigned int cmd, unsigned long arg)
{
        drm_file_t      *priv   = filp->private_data;
        drm_device_t    *dev    = priv->head->dev;
        struct list_head *list;
        drm_map_list_t *r_list = NULL;
        drm_vma_entry_t *pt, *prev;
        drm_map_t *map;
        drm_map_t request;
        int found_maps = 0;

        if (copy_from_user(&request, (drm_map_t __user *)arg,
                           sizeof(request))) {
                return -EFAULT;
        }

        down(&dev->struct_sem);
        list = &dev->maplist->head;
        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);

                if(r_list->map &&
                   r_list->map->offset == (unsigned long) request.handle &&
                   r_list->map->flags & _DRM_REMOVABLE) break;
        }

        /* List has wrapped around to the head pointer, or it's empty and we
         * didn't find anything.
         */
        if(list == (&dev->maplist->head)) {
                up(&dev->struct_sem);
                return -EINVAL;
        }
        map = r_list->map;
        list_del(list);
        drm_free(list, sizeof(*list), DRM_MEM_MAPS);

        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma->vm_private_data == map) found_maps++;
        }

        if(!found_maps) {
                switch (map->type) {
                case _DRM_REGISTERS:
                case _DRM_FRAME_BUFFER:
                        if (drm_core_has_MTRR(dev)) {
                                if (map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
                        }
                        drm_ioremapfree(map->handle, map->size, dev);
                        break;
                case _DRM_SHM:
                        vfree(map->handle);
                        break;
                case _DRM_AGP:
                case _DRM_SCATTER_GATHER:
                        break;
                }
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
        }
        up(&dev->struct_sem);
        return 0;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        if (entry->seglist[i]) {
                                drm_free_pages(entry->seglist[i],
                                               entry->page_order,
                                               DRM_MEM_DMA);
                        }
                }
                drm_free(entry->seglist,
                         entry->seg_count *
                         sizeof(*entry->seglist),
                         DRM_MEM_SEGS);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        if (entry->buflist[i].dev_private) {
                                drm_free(entry->buflist[i].dev_private,
                                         entry->buflist[i].dev_priv_size,
                                         DRM_MEM_BUFS);
                        }
                }
                drm_free(entry->buflist,
                         entry->buf_count *
                         sizeof(*entry->buflist),
                         DRM_MEM_BUFS);

                entry->buf_count = 0;
        }
}

#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
static int drm_addbufs_agp( struct inode *inode, struct file *filp,
                            unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;
        drm_buf_desc_t __user *argp = (void __user *)arg;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request, argp,
                             sizeof(request) ) )
                return -EFAULT;

        count = request.count;
        order = drm_order( request.size );
        size = 1 << order;

        alignment  = (request.flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;
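        /* Worked example (assuming 4 KiB pages, PAGE_SHIFT == 12): a
         * request.size of 12288 gives order = 14, since 1 << 14 = 16384
         * is the smallest power of two >= 12288.  Then size = 16384,
         * page_order = 14 - 12 = 2, and total = 4096 << 2 = 16384.
         */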

        byte_count = 0;
        agp_offset = dev->agp->base + request.agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next    = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head( &buf->dma_wait );
                buf->filp    = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc( buf->dev_priv_size,
                                              DRM_MEM_BUFS );
                if(!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev,entry);
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }
                memset( buf->dev_private, 0, buf->dev_priv_size );

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = drm_realloc( dma->buflist,
                                    dma->buf_count * sizeof(*dma->buflist),
                                    (dma->buf_count + entry->buf_count)
                                    * sizeof(*dma->buflist),
                                    DRM_MEM_BUFS );
        if(!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev,entry);
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        up( &dev->struct_sem );

        request.count = entry->buf_count;
        request.size = size;

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        dma->flags = _DRM_DMA_USE_AGP;

        atomic_dec( &dev->buf_alloc );
        return 0;
}
#endif /* __OS_HAS_AGP */

static int drm_addbufs_pci( struct inode *inode, struct file *filp,
                            unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        drm_buf_entry_t *entry;
        unsigned long page;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;
        drm_buf_desc_t __user *argp = (void __user *)arg;

        if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL;
        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        count = request.count;
        order = drm_order( request.size );
        size = 1 << order;

        DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
                   request.count, request.size, size,
                   order, dev->queue_count );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        alignment = (request.flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->seglist = drm_alloc( count * sizeof(*entry->seglist),
                                    DRM_MEM_SEGS );
        if ( !entry->seglist ) {
                drm_free( entry->buflist,
                          count * sizeof(*entry->buflist),
                          DRM_MEM_BUFS );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->seglist, 0, count * sizeof(*entry->seglist) );

        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
        temp_pagelist = drm_alloc( (dma->page_count + (count << page_order))
                                   * sizeof(*dma->pagelist),
                                   DRM_MEM_PAGES );
        if (!temp_pagelist) {
                drm_free( entry->buflist,
                          count * sizeof(*entry->buflist),
                          DRM_MEM_BUFS );
                drm_free( entry->seglist,
                          count * sizeof(*entry->seglist),
                          DRM_MEM_SEGS );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memcpy(temp_pagelist,
               dma->pagelist,
               dma->page_count * sizeof(*dma->pagelist));
        DRM_DEBUG( "pagelist: %d entries\n",
                   dma->page_count + (count << page_order) );

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;

        while ( entry->buf_count < count ) {
                page = drm_alloc_pages( page_order, DRM_MEM_DMA );
                if ( !page ) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        drm_free( temp_pagelist,
                                  (dma->page_count + (count << page_order))
                                  * sizeof(*dma->pagelist),
                                  DRM_MEM_PAGES );
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }
                entry->seglist[entry->seg_count++] = page;
                for ( i = 0 ; i < (1 << page_order) ; i++ ) {
                        DRM_DEBUG( "page %d @ 0x%08lx\n",
                                   dma->page_count + page_count,
                                   page + PAGE_SIZE * i );
                        temp_pagelist[dma->page_count + page_count++]
                                = page + PAGE_SIZE * i;
                }
                for ( offset = 0 ;
                      offset + size <= total && entry->buf_count < count ;
                      offset += alignment, ++entry->buf_count ) {
                        buf          = &entry->buflist[entry->buf_count];
                        buf->idx     = dma->buf_count + entry->buf_count;
                        buf->total   = alignment;
                        buf->order   = order;
                        buf->used    = 0;
                        buf->offset  = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(page + offset);
                        buf->next    = NULL;
                        buf->waiting = 0;
                        buf->pending = 0;
                        init_waitqueue_head( &buf->dma_wait );
                        buf->filp    = NULL;

                        buf->dev_priv_size = dev->driver->dev_priv_size;
                        buf->dev_private = drm_alloc( buf->dev_priv_size,
                                                      DRM_MEM_BUFS );
                        if(!buf->dev_private) {
                                /* Set count correctly so we free the proper amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                drm_cleanup_buf_error(dev,entry);
                                drm_free( temp_pagelist,
                                          (dma->page_count + (count << page_order))
                                          * sizeof(*dma->pagelist),
                                          DRM_MEM_PAGES );
                                up( &dev->struct_sem );
                                atomic_dec( &dev->buf_alloc );
                                return -ENOMEM;
                        }
                        memset( buf->dev_private, 0, buf->dev_priv_size );

                        DRM_DEBUG( "buffer %d @ %p\n",
                                   entry->buf_count, buf->address );
                }
                byte_count += PAGE_SIZE << page_order;
        }

        temp_buflist = drm_realloc( dma->buflist,
                                    dma->buf_count * sizeof(*dma->buflist),
                                    (dma->buf_count + entry->buf_count)
                                    * sizeof(*dma->buflist),
                                    DRM_MEM_BUFS );
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev,entry);
                drm_free( temp_pagelist,
                          (dma->page_count + (count << page_order))
                          * sizeof(*dma->pagelist),
                          DRM_MEM_PAGES );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        /* No allocations failed, so now we can replace the original pagelist
         * with the new one.
         */
        if (dma->page_count) {
                drm_free(dma->pagelist,
                         dma->page_count * sizeof(*dma->pagelist),
                         DRM_MEM_PAGES);
        }
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        up( &dev->struct_sem );

        request.count = entry->buf_count;
        request.size = size;

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        atomic_dec( &dev->buf_alloc );
        return 0;
}

static int drm_addbufs_sg( struct inode *inode, struct file *filp,
                           unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t __user *argp = (void __user *)arg;
        drm_buf_desc_t request;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        count = request.count;
        order = drm_order( request.size );
        size = 1 << order;

        alignment  = (request.flags & _DRM_PAGE_ALIGN)
                        ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request.agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset + dev->sg->handle);
                buf->next    = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head( &buf->dma_wait );
                buf->filp    = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc( buf->dev_priv_size,
                                              DRM_MEM_BUFS );
                if(!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev,entry);
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }

                memset( buf->dev_private, 0, buf->dev_priv_size );

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = drm_realloc( dma->buflist,
                                    dma->buf_count * sizeof(*dma->buflist),
                                    (dma->buf_count + entry->buf_count)
                                    * sizeof(*dma->buflist),
                                    DRM_MEM_BUFS );
        if(!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev,entry);
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        up( &dev->struct_sem );

        request.count = entry->buf_count;
        request.size = size;

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        dma->flags = _DRM_DMA_USE_SG;

        atomic_dec( &dev->buf_alloc );
        return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs( struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg )
{
        drm_buf_desc_t request;
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( copy_from_user( &request, (drm_buf_desc_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

#if __OS_HAS_AGP
        if ( request.flags & _DRM_AGP_BUFFER )
                return drm_addbufs_agp( inode, filp, cmd, arg );
        else
#endif
        if ( request.flags & _DRM_SG_BUFFER )
                return drm_addbufs_sg( inode, filp, cmd, arg );
        else
                return drm_addbufs_pci( inode, filp, cmd, arg );
}
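
/*
 * Hedged userspace sketch (illustration only, not part of the kernel
 * build): requesting 32 page-aligned AGP buffers of 64 KiB each through
 * this dispatcher.  Assumes DRM_IOCTL_ADD_BUFS and drm_buf_desc_t from
 * this tree's drm.h; fd is an open DRM file descriptor and the agp_start
 * value is a placeholder.
 *
 *      drm_buf_desc_t desc = {
 *              .count     = 32,
 *              .size      = 65536,
 *              .flags     = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN,
 *              .agp_start = 0,
 *      };
 *      if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc) == 0)
 *              printf("got %d buffers of %d bytes\n", desc.count, desc.size);
 */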


/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_info_t request;
        drm_buf_info_t __user *argp = (void __user *)arg;
        int i;
        int count;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        spin_lock( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock( &dev->count_lock );

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                if ( dma->bufs[i].buf_count ) ++count;
        }

        DRM_DEBUG( "count = %d\n", count );

        if ( request.count >= count ) {
                for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                        if ( dma->bufs[i].buf_count ) {
                                drm_buf_desc_t __user *to = &request.list[count];
                                drm_buf_entry_t *from = &dma->bufs[i];
                                drm_freelist_t *list = &dma->bufs[i].freelist;
                                if ( copy_to_user( &to->count,
                                                   &from->buf_count,
                                                   sizeof(from->buf_count) ) ||
                                     copy_to_user( &to->size,
                                                   &from->buf_size,
                                                   sizeof(from->buf_size) ) ||
                                     copy_to_user( &to->low_mark,
                                                   &list->low_mark,
                                                   sizeof(list->low_mark) ) ||
                                     copy_to_user( &to->high_mark,
                                                   &list->high_mark,
                                                   sizeof(list->high_mark) ) )
                                        return -EFAULT;

                                DRM_DEBUG( "%d %d %d %d %d\n",
                                           i,
                                           dma->bufs[i].buf_count,
                                           dma->bufs[i].buf_size,
                                           dma->bufs[i].freelist.low_mark,
                                           dma->bufs[i].freelist.high_mark );
                                ++count;
                        }
                }
        }
        request.count = count;

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        return 0;
}
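
/*
 * Hedged userspace sketch (illustration only) of the two-call pattern a
 * client library could use with this ioctl: the first call with count = 0
 * reports how many buffer sizes exist, the second call with a large enough
 * list fills in the per-size details.  Assumes DRM_IOCTL_INFO_BUFS and
 * drm_buf_info_t from this tree's drm.h.
 *
 *      drm_buf_info_t info = { .count = 0, .list = NULL };
 *      ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);    (sets info.count)
 *      info.list = calloc(info.count, sizeof(*info.list));
 *      ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);    (fills info.list[])
 */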

/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the low and high water marks of the respective drm_device_dma::bufs
 * entry.
 *
 * \note This ioctl is deprecated and rarely used.
 */
int drm_markbufs( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int order;
        drm_buf_entry_t *entry;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request,
                             (drm_buf_desc_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        DRM_DEBUG( "%d, %d, %d\n",
                   request.size, request.low_mark, request.high_mark );
        order = drm_order( request.size );
        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        entry = &dma->bufs[order];

        if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
                return -EINVAL;
        if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
                return -EINVAL;

        entry->freelist.low_mark  = request.low_mark;
        entry->freelist.high_mark = request.high_mark;

        return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls drm_free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_free_t request;
        int i;
        int idx;
        drm_buf_t *buf;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request,
                             (drm_buf_free_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        DRM_DEBUG( "%d\n", request.count );
        for ( i = 0 ; i < request.count ; i++ ) {
                if ( copy_from_user( &idx,
                                     &request.list[i],
                                     sizeof(idx) ) )
                        return -EFAULT;
                if ( idx < 0 || idx >= dma->buf_count ) {
                        DRM_ERROR( "Index %d (of %d max)\n",
                                   idx, dma->buf_count - 1 );
                        return -EINVAL;
                }
                buf = dma->buflist[idx];
                if ( buf->filp != filp ) {
                        DRM_ERROR( "Process %d freeing buffer not owned\n",
                                   current->pid );
                        return -EINVAL;
                }
                drm_free_buffer( dev, buf );
        }

        return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP or SG buffer region with do_mmap(), and copies information
 * about each buffer into user space.  The PCI buffers are already mapped on
 * the addbufs_pci() call.
 */
int drm_mapbufs( struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_map_t __user *argp = (void __user *)arg;
        int retcode = 0;
        const int zero = 0;
        unsigned long virtual;
        unsigned long address;
        drm_buf_map_t request;
        int i;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        spin_lock( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        dev->buf_use++;         /* Can't allocate more after this call */
        spin_unlock( &dev->count_lock );

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        if ( request.count >= dma->buf_count ) {
                if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
                    (drm_core_check_feature(dev, DRIVER_SG) && (dma->flags & _DRM_DMA_USE_SG)) ) {
                        drm_map_t *map = dev->agp_buffer_map;

                        if ( !map ) {
                                retcode = -EINVAL;
                                goto done;
                        }

#if LINUX_VERSION_CODE <= 0x020402
                        down( &current->mm->mmap_sem );
#else
                        down_write( &current->mm->mmap_sem );
#endif
                        virtual = do_mmap( filp, 0, map->size,
                                           PROT_READ | PROT_WRITE,
                                           MAP_SHARED,
                                           (unsigned long)map->offset );
#if LINUX_VERSION_CODE <= 0x020402
                        up( &current->mm->mmap_sem );
#else
                        up_write( &current->mm->mmap_sem );
#endif
                } else {
#if LINUX_VERSION_CODE <= 0x020402
                        down( &current->mm->mmap_sem );
#else
                        down_write( &current->mm->mmap_sem );
#endif
                        virtual = do_mmap( filp, 0, dma->byte_count,
                                           PROT_READ | PROT_WRITE,
                                           MAP_SHARED, 0 );
#if LINUX_VERSION_CODE <= 0x020402
                        up( &current->mm->mmap_sem );
#else
                        up_write( &current->mm->mmap_sem );
#endif
                }
                if ( virtual > -1024UL ) {
                        /* Real error */
                        retcode = (signed long)virtual;
                        goto done;
                }
                request.virtual = (void __user *)virtual;

                for ( i = 0 ; i < dma->buf_count ; i++ ) {
                        if ( copy_to_user( &request.list[i].idx,
                                           &dma->buflist[i]->idx,
                                           sizeof(request.list[0].idx) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if ( copy_to_user( &request.list[i].total,
                                           &dma->buflist[i]->total,
                                           sizeof(request.list[0].total) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if ( copy_to_user( &request.list[i].used,
                                           &zero,
                                           sizeof(zero) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        address = virtual + dma->buflist[i]->offset; /* *** */
                        if ( copy_to_user( &request.list[i].address,
                                           &address,
                                           sizeof(address) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                }
        }
 done:
        request.count = dma->buf_count;
        DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        return retcode;
}
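
/*
 * Hedged userspace sketch (illustration only): mapping every DMA buffer
 * into this client, sized to match the 32 buffers requested in the
 * addbufs sketch above.  Assumes DRM_IOCTL_MAP_BUFS, drm_buf_map_t and
 * drm_buf_pub_t from this tree's drm.h.
 *
 *      drm_buf_pub_t list[32];
 *      drm_buf_map_t bufmap = { .count = 32, .list = list };
 *      if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufmap) == 0)
 *              printf("buffer 0 mapped at %p\n", list[0].address);
 */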