drm: destatic exported function.
/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"

/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order( unsigned long size )
{
        int order;
        unsigned long tmp;

        for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
                ;

        if (size & (size - 1))
                ++order;

        return order;
}
EXPORT_SYMBOL(drm_order);
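
/*
 * Example (illustrative only, not part of the original file): drm_order()
 * rounds a byte count up to a power-of-two exponent, so
 *
 *      drm_order(4096) == 12   since 4096 == 1 << 12 exactly,
 *      drm_order(4097) == 13   rounded up to the next power of two (8192),
 *      drm_order(1)    == 0    since 1 << 0 == 1.
 */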

#ifdef CONFIG_COMPAT
/*
 * Used to allocate 32-bit handles for _DRM_SHM regions
 * The 0x10000000 value is chosen to be out of the way of
 * FB/register and GART physical addresses.
 */
static unsigned int map32_handle = 0x10000000;
#endif

/**
 * Adds a range of memory that is available for mapping by a non-root process.
 *
 * \param dev DRM device.
 * \param offset memory offset.
 * \param size size of the mapping.
 * \param type mapping type.
 * \param flags mapping flags.
 * \param map_ptr location where the pointer to the new map is returned.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRRs where
 * applicable and if supported by the kernel.
 */
int drm_addmap(drm_device_t * dev, unsigned int offset,
               unsigned int size, drm_map_type_t type,
               drm_map_flags_t flags, drm_local_map_t ** map_ptr)
{
        drm_map_t *map;
        drm_map_list_t *list;
        drm_dma_handle_t *dmah;

        map = drm_alloc( sizeof(*map), DRM_MEM_MAPS );
        if ( !map )
                return -ENOMEM;

        map->offset = offset;
        map->size = size;
        map->flags = flags;
        map->type = type;

        /* Only allow shared memory to be removable since we only keep enough
         * bookkeeping information about shared memory to allow for removal
         * when processes fork.
         */
        if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }
        DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
                   map->offset, map->size, map->type );
        if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }
        map->mtrr   = -1;
        map->handle = NULL;

        switch ( map->type ) {
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
                if ( map->offset + map->size < map->offset ||
                     map->offset < virt_to_phys(high_memory) ) {
                        drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                        return -EINVAL;
                }
#endif
#ifdef __alpha__
                map->offset += dev->hose->mem_space->start;
#endif
                if (drm_core_has_MTRR(dev)) {
                        if ( map->type == _DRM_FRAME_BUFFER ||
                             (map->flags & _DRM_WRITE_COMBINING) ) {
                                map->mtrr = mtrr_add( map->offset, map->size,
                                                      MTRR_TYPE_WRCOMB, 1 );
                        }
                }
                if (map->type == _DRM_REGISTERS)
                        map->handle = drm_ioremap( map->offset, map->size,
                                                    dev );
                break;

        case _DRM_SHM:
                map->handle = vmalloc_32(map->size);
                DRM_DEBUG( "%lu %d %p\n",
                           map->size, drm_order( map->size ), map->handle );
                if ( !map->handle ) {
                        drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                        return -ENOMEM;
                }
                map->offset = (unsigned long)map->handle;
                if ( map->flags & _DRM_CONTAINS_LOCK ) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
                        if (dev->lock.hw_lock != NULL) {
                                vfree( map->handle );
                                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                                return -EBUSY;
                        }
                        dev->sigdata.lock =
                        dev->lock.hw_lock = map->handle; /* Pointer to lock */
                }
                break;
        case _DRM_AGP:
                if (drm_core_has_AGP(dev)) {
#ifdef __alpha__
                        map->offset += dev->hose->mem_space->start;
#endif
                        map->offset += dev->agp->base;
                        map->mtrr   = dev->agp->agp_mtrr; /* for getmap */
                }
                break;
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
                }
                map->offset += dev->sg->handle;
                break;
        case _DRM_CONSISTENT:
                /* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
                 * As we're limiting the address to 2^32-1 (or less),
                 * casting it down to 32 bits is no problem, but we
                 * need to point to a 64-bit variable first. */
                dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
                if (!dmah) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -ENOMEM;
                }
                map->handle = dmah->vaddr;
                map->offset = (unsigned long)dmah->busaddr;
                kfree(dmah);
                break;
        default:
                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }

        list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
        if(!list) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }
        memset(list, 0, sizeof(*list));
        list->map = map;

        down(&dev->struct_sem);
        list_add(&list->head, &dev->maplist->head);
#ifdef CONFIG_COMPAT
        /* Assign a 32-bit handle for _DRM_SHM mappings */
        /* We do it here so that dev->struct_sem protects the increment */
        if (map->type == _DRM_SHM)
                map->offset = map32_handle += PAGE_SIZE;
#endif
        up(&dev->struct_sem);

        *map_ptr = map;
        return 0;
}
EXPORT_SYMBOL(drm_addmap);
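
/*
 * Example (hypothetical driver code, not part of this file; assumes a PCI
 * device "pdev" whose BAR 0 holds the register aperture): a driver would
 * typically publish its registers with something like
 *
 *      drm_local_map_t *map;
 *      int ret = drm_addmap(dev, pci_resource_start(pdev, 0),
 *                           pci_resource_len(pdev, 0),
 *                           _DRM_REGISTERS, _DRM_READ_ONLY, &map);
 *      if (ret)
 *              return ret;
 */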

int drm_addmap_ioctl(struct inode *inode, struct file *filp,
                     unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t map;
        drm_map_t *map_ptr;
        drm_map_t __user *argp = (void __user *)arg;
        int err;

        if (!(filp->f_mode & 3))
                return -EACCES; /* Require read/write */

        if (copy_from_user(&map, argp, sizeof(map))) {
                return -EFAULT;
        }

        err = drm_addmap( dev, map.offset, map.size, map.type, map.flags,
                          &map_ptr );

        if (err) {
                return err;
        }

        if (copy_to_user(argp, map_ptr, sizeof(*map_ptr)))
                return -EFAULT;
        if (map_ptr->type != _DRM_SHM) {
                if (copy_to_user(&argp->handle, &map_ptr->offset,
                                 sizeof(map_ptr->offset)))
                        return -EFAULT;
        }
        return 0;
}

/**
 * Removes a map from the list and deallocates its resources if the mapping
 * isn't in use.
 *
 * \param dev DRM device.
 * \param handle map handle, as stored in drm_map::handle.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is still in use, and frees any associated resources
 * (such as MTRRs) if it is not.
 *
 * \sa drm_addmap
 */
int drm_rmmap(drm_device_t *dev, void *handle)
{
        struct list_head *list;
        drm_map_list_t *r_list = NULL;
        drm_vma_entry_t *pt, *prev;
        drm_map_t *map;
        int found_maps = 0;

        down(&dev->struct_sem);
        list = &dev->maplist->head;
        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);

                if(r_list->map &&
                   r_list->map->handle == handle &&
                   r_list->map->flags & _DRM_REMOVABLE) break;
        }

        /* The list has wrapped around to the head pointer, or it's empty
         * and we didn't find anything.
         */
        if(list == (&dev->maplist->head)) {
                up(&dev->struct_sem);
                return -EINVAL;
        }
        map = r_list->map;
        list_del(list);
        drm_free(list, sizeof(*list), DRM_MEM_MAPS);

        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma->vm_private_data == map) found_maps++;
        }

        if(!found_maps) {
                drm_dma_handle_t dmah;

                switch (map->type) {
                case _DRM_REGISTERS:
                case _DRM_FRAME_BUFFER:
                        if (drm_core_has_MTRR(dev)) {
                                if (map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
                        }
                        drm_ioremapfree(map->handle, map->size, dev);
                        break;
                case _DRM_SHM:
                        vfree(map->handle);
                        break;
                case _DRM_AGP:
                case _DRM_SCATTER_GATHER:
                        break;
                case _DRM_CONSISTENT:
                        dmah.vaddr = map->handle;
                        dmah.busaddr = map->offset;
                        dmah.size = map->size;
                        __drm_pci_free(dev, &dmah);
                        break;
                }
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
        }
        up(&dev->struct_sem);
        return 0;
}
EXPORT_SYMBOL(drm_rmmap);
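
/*
 * Example (hypothetical, pairing with the drm_addmap() sketch above; not
 * part of this file): a driver tearing the mapping down again would pass
 * back the kernel-virtual handle that drm_addmap() filled in:
 *
 *      drm_rmmap(dev, map->handle);
 */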

int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
                    unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t request;

        if (copy_from_user(&request, (drm_map_t __user *)arg, sizeof(request))) {
                return -EFAULT;
        }

        return drm_rmmap(dev, request.handle);
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        if (entry->seglist[i]) {
                                drm_free_pages(entry->seglist[i],
                                                entry->page_order,
                                                DRM_MEM_DMA);
                        }
                }
                drm_free(entry->seglist,
                          entry->seg_count *
                          sizeof(*entry->seglist),
                          DRM_MEM_SEGS);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        if (entry->buflist[i].dev_private) {
                                drm_free(entry->buflist[i].dev_private,
                                          entry->buflist[i].dev_priv_size,
                                          DRM_MEM_BUFS);
                        }
                }
                drm_free(entry->buflist,
                          entry->buf_count *
                          sizeof(*entry->buflist),
                          DRM_MEM_BUFS);

                entry->buf_count = 0;
        }
}

#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if ( !dma ) return -EINVAL;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment  = (request->flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = dev->agp->base + request->agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next    = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head( &buf->dma_wait );
                buf->filp    = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc( buf->dev_priv_size,
                                               DRM_MEM_BUFS );
                if(!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev,entry);
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }
                memset( buf->dev_private, 0, buf->dev_priv_size );

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = drm_realloc( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if(!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev,entry);
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        up( &dev->struct_sem );

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_AGP;

        atomic_dec( &dev->buf_alloc );
        return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif /* __OS_HAS_AGP */
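
/*
 * Example (illustrative, not part of this file; assumes the AGP aperture
 * has already been acquired and enabled): a driver asking for 32
 * page-aligned 64 KB buffers at the start of the aperture would fill the
 * request like so:
 *
 *      drm_buf_desc_t req;
 *      int ret;
 *
 *      memset(&req, 0, sizeof(req));
 *      req.count = 32;
 *      req.size = 65536;
 *      req.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
 *      req.agp_start = 0;
 *      ret = drm_addbufs_agp(dev, &req);
 *
 * On success, req.count and req.size are written back with the number of
 * buffers actually allocated and their rounded size.
 */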

int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        drm_buf_entry_t *entry;
        unsigned long page;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL;
        if ( !dma ) return -EINVAL;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
                   request->count, request->size, size,
                   order, dev->queue_count );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        alignment = (request->flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->seglist = drm_alloc( count * sizeof(*entry->seglist),
                                    DRM_MEM_SEGS );
        if ( !entry->seglist ) {
                drm_free( entry->buflist,
                          count * sizeof(*entry->buflist),
                          DRM_MEM_BUFS );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->seglist, 0, count * sizeof(*entry->seglist) );

        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
        temp_pagelist = drm_alloc( (dma->page_count + (count << page_order))
                                    * sizeof(*dma->pagelist),
                                    DRM_MEM_PAGES );
        if (!temp_pagelist) {
                drm_free( entry->buflist,
                           count * sizeof(*entry->buflist),
                           DRM_MEM_BUFS );
                drm_free( entry->seglist,
                           count * sizeof(*entry->seglist),
                           DRM_MEM_SEGS );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memcpy(temp_pagelist,
               dma->pagelist,
               dma->page_count * sizeof(*dma->pagelist));
        DRM_DEBUG( "pagelist: %d entries\n",
                   dma->page_count + (count << page_order) );

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;

        while ( entry->buf_count < count ) {
                page = drm_alloc_pages( page_order, DRM_MEM_DMA );
                if ( !page ) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        drm_free( temp_pagelist,
                                   (dma->page_count + (count << page_order))
                                   * sizeof(*dma->pagelist),
                                   DRM_MEM_PAGES );
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }
                entry->seglist[entry->seg_count++] = page;
                for ( i = 0 ; i < (1 << page_order) ; i++ ) {
                        DRM_DEBUG( "page %d @ 0x%08lx\n",
                                   dma->page_count + page_count,
                                   page + PAGE_SIZE * i );
                        temp_pagelist[dma->page_count + page_count++]
                                = page + PAGE_SIZE * i;
                }
                for ( offset = 0 ;
                      offset + size <= total && entry->buf_count < count ;
                      offset += alignment, ++entry->buf_count ) {
                        buf          = &entry->buflist[entry->buf_count];
                        buf->idx     = dma->buf_count + entry->buf_count;
                        buf->total   = alignment;
                        buf->order   = order;
                        buf->used    = 0;
                        buf->offset  = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(page + offset);
                        buf->next    = NULL;
                        buf->waiting = 0;
                        buf->pending = 0;
                        init_waitqueue_head( &buf->dma_wait );
                        buf->filp    = NULL;

                        buf->dev_priv_size = dev->driver->dev_priv_size;
                        buf->dev_private = drm_alloc( buf->dev_priv_size,
                                                       DRM_MEM_BUFS );
                        if(!buf->dev_private) {
                                /* Set count correctly so we free the proper amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                drm_cleanup_buf_error(dev,entry);
                                drm_free( temp_pagelist,
                                           (dma->page_count + (count << page_order))
                                           * sizeof(*dma->pagelist),
                                           DRM_MEM_PAGES );
                                up( &dev->struct_sem );
                                atomic_dec( &dev->buf_alloc );
                                return -ENOMEM;
                        }
                        memset( buf->dev_private, 0, buf->dev_priv_size );

                        DRM_DEBUG( "buffer %d @ %p\n",
                                   entry->buf_count, buf->address );
                }
                byte_count += PAGE_SIZE << page_order;
        }

        temp_buflist = drm_realloc( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev,entry);
                drm_free( temp_pagelist,
                           (dma->page_count + (count << page_order))
                           * sizeof(*dma->pagelist),
                           DRM_MEM_PAGES );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        /* No allocations failed, so now we can replace the original pagelist
         * with the new one.
         */
        if (dma->page_count) {
                drm_free(dma->pagelist,
                          dma->page_count * sizeof(*dma->pagelist),
                          DRM_MEM_PAGES);
        }
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        up( &dev->struct_sem );

        request->count = entry->buf_count;
        request->size = size;

        atomic_dec( &dev->buf_alloc );
        return 0;

}
EXPORT_SYMBOL(drm_addbufs_pci);
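
/*
 * Worked example of the sizing math above (illustrative, assuming a 4 KB
 * PAGE_SIZE, i.e. PAGE_SHIFT == 12): for request->size == 16384,
 * drm_order() returns 14, so size = 1 << 14 = 16384 bytes,
 * page_order = 14 - 12 = 2, and each drm_alloc_pages() call returns a
 * total = PAGE_SIZE << 2 = 16384 byte segment; with _DRM_PAGE_ALIGN set,
 * alignment == size, so exactly one buffer is carved out of each segment.
 */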

static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL;

        if ( !dma ) return -EINVAL;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment  = (request->flags & _DRM_PAGE_ALIGN)
                        ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
                                     DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset + dev->sg->handle);
                buf->next    = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head( &buf->dma_wait );
                buf->filp    = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc( buf->dev_priv_size,
                                               DRM_MEM_BUFS );
                if(!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev,entry);
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }

                memset( buf->dev_private, 0, buf->dev_priv_size );

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = drm_realloc( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if(!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev,entry);
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        up( &dev->struct_sem );

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_SG;

        atomic_dec( &dev->buf_alloc );
        return 0;
}

int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG("count:      %d\n", count);
        DRM_DEBUG("order:      %d\n", order);
        DRM_DEBUG("size:       %d\n", size);
        DRM_DEBUG("agp_offset: %lu\n", agp_offset);
        DRM_DEBUG("alignment:  %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total:      %d\n", total);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        if (dev->queue_count)
                return -EBUSY;  /* Not while in use */

        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        down(&dev->struct_sem);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head(&buf->dma_wait);
                buf->filp = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        up(&dev->struct_sem);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
                memset(buf->dev_private, 0, buf->dev_priv_size);

                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        up(&dev->struct_sem);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_FB;

        atomic_dec(&dev->buf_alloc);
        return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * Depending on the memory type specified in drm_buf_desc::flags and the
 * build options, dispatches the call to drm_addbufs_agp(), drm_addbufs_sg(),
 * drm_addbufs_fb() or drm_addbufs_pci() for AGP, scatter-gather, framebuffer
 * or consistent PCI memory respectively.
 */
int drm_addbufs( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_buf_desc_t request;
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( copy_from_user( &request, (drm_buf_desc_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

#if __OS_HAS_AGP
        if ( request.flags & _DRM_AGP_BUFFER )
                ret = drm_addbufs_agp(dev, &request);
        else
#endif
        if ( request.flags & _DRM_SG_BUFFER )
                ret = drm_addbufs_sg(dev, &request);
        else if ( request.flags & _DRM_FB_BUFFER )
                ret = drm_addbufs_fb(dev, &request);
        else
                ret = drm_addbufs_pci(dev, &request);

        if (ret == 0) {
                if (copy_to_user((void __user *)arg, &request,
                                 sizeof(request))) {
                        ret = -EFAULT;
                }
        }
        return ret;
}
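
/*
 * Example (hypothetical user-space caller, not part of this file): the
 * dispatch above is driven entirely by the flags word, so a client asking
 * for plain PCI buffers simply leaves the AGP/SG/FB bits clear:
 *
 *      drm_buf_desc_t req;
 *
 *      memset(&req, 0, sizeof(req));
 *      req.count = 16;
 *      req.size = 4096;
 *      req.flags = _DRM_PAGE_ALIGN;
 *      if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &req) == 0)
 *              printf("got %d buffers of %d bytes\n", req.count, req.size);
 */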

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs( struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_info_t request;
        drm_buf_info_t __user *argp = (void __user *)arg;
        int i;
        int count;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        spin_lock( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock( &dev->count_lock );

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                if ( dma->bufs[i].buf_count ) ++count;
        }

        DRM_DEBUG( "count = %d\n", count );

        if ( request.count >= count ) {
                for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                        if ( dma->bufs[i].buf_count ) {
                                drm_buf_desc_t __user *to = &request.list[count];
                                drm_buf_entry_t *from = &dma->bufs[i];
                                drm_freelist_t *list = &dma->bufs[i].freelist;
                                if ( copy_to_user( &to->count,
                                                   &from->buf_count,
                                                   sizeof(from->buf_count) ) ||
                                     copy_to_user( &to->size,
                                                   &from->buf_size,
                                                   sizeof(from->buf_size) ) ||
                                     copy_to_user( &to->low_mark,
                                                   &list->low_mark,
                                                   sizeof(list->low_mark) ) ||
                                     copy_to_user( &to->high_mark,
                                                   &list->high_mark,
                                                   sizeof(list->high_mark) ) )
                                        return -EFAULT;

                                DRM_DEBUG( "%d %d %d %d %d\n",
                                           i,
                                           dma->bufs[i].buf_count,
                                           dma->bufs[i].buf_size,
                                           dma->bufs[i].freelist.low_mark,
                                           dma->bufs[i].freelist.high_mark );
                                ++count;
                        }
                }
        }
        request.count = count;

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        return 0;
}
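
/*
 * Example (hypothetical user-space caller, not part of this file): the
 * request.count >= count check above makes this a two-pass protocol; ask
 * with count = 0 to learn how many entries exist, then ask again with a
 * list big enough to hold them:
 *
 *      drm_buf_info_t info = { 0, NULL };
 *
 *      ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
 *      info.list = calloc(info.count, sizeof(drm_buf_desc_t));
 *      ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
 */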

/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water marks.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs( struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int order;
        drm_buf_entry_t *entry;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request,
                             (drm_buf_desc_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        DRM_DEBUG( "%d, %d, %d\n",
                   request.size, request.low_mark, request.high_mark );
        order = drm_order( request.size );
        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        entry = &dma->bufs[order];

        if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
                return -EINVAL;
        if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
                return -EINVAL;

        entry->freelist.low_mark  = request.low_mark;
        entry->freelist.high_mark = request.high_mark;

        return 0;
}

/**
 * Unreserves the buffers in the list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs( struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_free_t request;
        int i;
        int idx;
        drm_buf_t *buf;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request,
                             (drm_buf_free_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        DRM_DEBUG( "%d\n", request.count );
        for ( i = 0 ; i < request.count ; i++ ) {
                if ( copy_from_user( &idx,
                                     &request.list[i],
                                     sizeof(idx) ) )
                        return -EFAULT;
                if ( idx < 0 || idx >= dma->buf_count ) {
                        DRM_ERROR( "Index %d (of %d max)\n",
                                   idx, dma->buf_count - 1 );
                        return -EINVAL;
                }
                buf = dma->buflist[idx];
                if ( buf->filp != filp ) {
                        DRM_ERROR( "Process %d freeing buffer not owned\n",
                                   current->pid );
                        return -EINVAL;
                }
                drm_free_buffer( dev, buf );
        }

        return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or FB buffer region with do_mmap(), and copies information
 * about each buffer into user space.  The PCI buffers are already mapped by
 * the addbufs_pci() call.
 */
int drm_mapbufs( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_map_t __user *argp = (void __user *)arg;
        int retcode = 0;
        const int zero = 0;
        unsigned long virtual;
        unsigned long address;
        drm_buf_map_t request;
        int i;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        spin_lock( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        dev->buf_use++;         /* Can't allocate more after this call */
        spin_unlock( &dev->count_lock );

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        if ( request.count >= dma->buf_count ) {
                if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
                    || (drm_core_check_feature(dev, DRIVER_SG)
                        && (dma->flags & _DRM_DMA_USE_SG))
                    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
                        && (dma->flags & _DRM_DMA_USE_FB))) {
                        drm_map_t *map = dev->agp_buffer_map;

                        if ( !map ) {
                                retcode = -EINVAL;
                                goto done;
                        }

#if LINUX_VERSION_CODE <= 0x020402
                        down( &current->mm->mmap_sem );
#else
                        down_write( &current->mm->mmap_sem );
#endif
                        virtual = do_mmap( filp, 0, map->size,
                                           PROT_READ | PROT_WRITE,
                                           MAP_SHARED,
                                           (unsigned long)map->offset );
#if LINUX_VERSION_CODE <= 0x020402
                        up( &current->mm->mmap_sem );
#else
                        up_write( &current->mm->mmap_sem );
#endif
                } else {
#if LINUX_VERSION_CODE <= 0x020402
                        down( &current->mm->mmap_sem );
#else
                        down_write( &current->mm->mmap_sem );
#endif
                        virtual = do_mmap( filp, 0, dma->byte_count,
                                           PROT_READ | PROT_WRITE,
                                           MAP_SHARED, 0 );
#if LINUX_VERSION_CODE <= 0x020402
                        up( &current->mm->mmap_sem );
#else
                        up_write( &current->mm->mmap_sem );
#endif
                }
                if ( virtual > -1024UL ) {
                        /* Real error */
                        retcode = (signed long)virtual;
                        goto done;
                }
                request.virtual = (void __user *)virtual;

                for ( i = 0 ; i < dma->buf_count ; i++ ) {
                        if ( copy_to_user( &request.list[i].idx,
                                           &dma->buflist[i]->idx,
                                           sizeof(request.list[0].idx) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if ( copy_to_user( &request.list[i].total,
                                           &dma->buflist[i]->total,
                                           sizeof(request.list[0].total) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if ( copy_to_user( &request.list[i].used,
                                           &zero,
                                           sizeof(zero) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        address = virtual + dma->buflist[i]->offset; /* *** */
                        if ( copy_to_user( &request.list[i].address,
                                           &address,
                                           sizeof(address) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                }
        }
 done:
        request.count = dma->buf_count;
        DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        return retcode;
}
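
/*
 * Example (hypothetical user-space caller, not part of this file): a client
 * maps every buffer in one call by passing a list at least dma->buf_count
 * entries long; each returned entry carries the buffer's index and its
 * address inside the new mapping ("expected_count" is assumed to come from
 * an earlier DRM_IOCTL_INFO_BUFS query):
 *
 *      drm_buf_map_t map;
 *
 *      memset(&map, 0, sizeof(map));
 *      map.count = expected_count;
 *      map.list = calloc(expected_count, sizeof(drm_buf_pub_t));
 *      if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &map) == 0)
 *              printf("buffer 0 mapped at %p\n", map.list[0].address);
 */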