/*
 * AGPGART driver.
 * Copyright (C) 2004 Silicon Graphics, Inc.
 * Copyright (C) 2002-2005 Dave Jones.
 * Copyright (C) 1999 Jeff Hartmann.
 * Copyright (C) 1999 Precision Insight, Inc.
 * Copyright (C) 1999 Xi Graphics, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * TODO:
 * - Allocate more than order 0 pages to avoid too much linear map splitting.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/miscdevice.h>
#include <linux/pm.h>
#include <linux/agp_backend.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include "agp.h"

__u32 *agp_gatt_table;
int agp_memory_reserved;

/*
 * Needed by the Nforce GART driver for the time being. Would be
 * nice to do this some other way instead of needing this export.
 */
EXPORT_SYMBOL_GPL(agp_memory_reserved);

/*
 * Generic routines for handling agp_memory structures -
 * They use the basic page allocation routines to do the brunt of the work.
 */

void agp_free_key(int key)
{
        if (key < 0)
                return;

        if (key < MAXKEY)
                clear_bit(key, agp_bridge->key_list);
}
EXPORT_SYMBOL(agp_free_key);


/* Claim the lowest free key in the bridge's key bitmap; returns -1 if none are left. */
static int agp_get_key(void)
{
        int bit;

        bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
        if (bit < MAXKEY) {
                set_bit(bit, agp_bridge->key_list);
                return bit;
        }
        return -1;
}

void agp_flush_chipset(struct agp_bridge_data *bridge)
{
        if (bridge->driver->chipset_flush)
                bridge->driver->chipset_flush(bridge);
}
EXPORT_SYMBOL(agp_flush_chipset);

/*
 * Use kmalloc if possible for the page list. Otherwise fall back to
 * vmalloc. This speeds things up and also saves memory for small AGP
 * regions.
 */

void agp_alloc_page_array(size_t size, struct agp_memory *mem)
{
        mem->memory = NULL;
        mem->vmalloc_flag = false;

        if (size <= 2*PAGE_SIZE)
                mem->memory = kmalloc(size, GFP_KERNEL | __GFP_NORETRY);
        if (mem->memory == NULL) {
                mem->memory = vmalloc(size);
                mem->vmalloc_flag = true;
        }
}
EXPORT_SYMBOL(agp_alloc_page_array);

void agp_free_page_array(struct agp_memory *mem)
{
        if (mem->vmalloc_flag) {
                vfree(mem->memory);
        } else {
                kfree(mem->memory);
        }
}
EXPORT_SYMBOL(agp_free_page_array);


static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
{
        struct agp_memory *new;
        unsigned long alloc_size = num_agp_pages*sizeof(struct page *);

        new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
        if (new == NULL)
                return NULL;

        new->key = agp_get_key();

        if (new->key < 0) {
                kfree(new);
                return NULL;
        }

        agp_alloc_page_array(alloc_size, new);

        if (new->memory == NULL) {
                agp_free_key(new->key);
                kfree(new);
                return NULL;
        }
        new->num_scratch_pages = 0;
        return new;
}

struct agp_memory *agp_create_memory(int scratch_pages)
{
        struct agp_memory *new;

        new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
        if (new == NULL)
                return NULL;

        new->key = agp_get_key();

        if (new->key < 0) {
                kfree(new);
                return NULL;
        }

        agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);

        if (new->memory == NULL) {
                agp_free_key(new->key);
                kfree(new);
                return NULL;
        }
        new->num_scratch_pages = scratch_pages;
        new->type = AGP_NORMAL_MEMORY;
        return new;
}
EXPORT_SYMBOL(agp_create_memory);

/**
 *      agp_free_memory - free memory associated with an agp_memory pointer.
 *
 *      @curr:          agp_memory pointer to be freed.
 *
 *      It is the only function that can be called when the backend is not owned
 *      by the caller.  (So it can free memory on client death.)
 */
void agp_free_memory(struct agp_memory *curr)
{
        size_t i;

        if (curr == NULL)
                return;

        if (curr->is_bound)
                agp_unbind_memory(curr);

        if (curr->type >= AGP_USER_TYPES) {
                agp_generic_free_by_type(curr);
                return;
        }

        if (curr->type != 0) {
                curr->bridge->driver->free_by_type(curr);
                return;
        }
        if (curr->page_count != 0) {
                for (i = 0; i < curr->page_count; i++) {
                        curr->memory[i] = (unsigned long)gart_to_virt(curr->memory[i]);
                        curr->bridge->driver->agp_destroy_page((void *)curr->memory[i],
                                                               AGP_PAGE_DESTROY_UNMAP);
                }
                for (i = 0; i < curr->page_count; i++) {
                        curr->bridge->driver->agp_destroy_page((void *)curr->memory[i],
                                                               AGP_PAGE_DESTROY_FREE);
                }
        }
        agp_free_key(curr->key);
        agp_free_page_array(curr);
        kfree(curr);
}
EXPORT_SYMBOL(agp_free_memory);

#define ENTRIES_PER_PAGE                (PAGE_SIZE / sizeof(unsigned long))

/**
 *      agp_allocate_memory  -  allocate a group of pages of a certain type.
 *
 *      @bridge:        the AGP bridge to allocate from.
 *      @page_count:    size_t argument of the number of pages
 *      @type:  u32 argument of the type of memory to be allocated.
 *
 *      Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which
 *      maps to physical RAM.  Any other type is device dependent.
 *
 *      It returns NULL whenever memory is unavailable.
 */
struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
                                        size_t page_count, u32 type)
{
        int scratch_pages;
        struct agp_memory *new;
        size_t i;

        if (!bridge)
                return NULL;

        if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
                return NULL;

        if (type >= AGP_USER_TYPES) {
                new = agp_generic_alloc_user(page_count, type);
                if (new)
                        new->bridge = bridge;
                return new;
        }

        if (type != 0) {
                new = bridge->driver->alloc_by_type(page_count, type);
                if (new)
                        new->bridge = bridge;
                return new;
        }

        scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;

        new = agp_create_memory(scratch_pages);

        if (new == NULL)
                return NULL;

        for (i = 0; i < page_count; i++) {
                void *addr = bridge->driver->agp_alloc_page(bridge);

                if (addr == NULL) {
                        agp_free_memory(new);
                        return NULL;
                }
                new->memory[i] = virt_to_gart(addr);
                new->page_count++;
        }
        new->bridge = bridge;

        return new;
}
EXPORT_SYMBOL(agp_allocate_memory);

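/*
 * Illustrative sketch only (not used by this file): a backend owner would
 * typically allocate and later release AGP memory roughly like below.
 * "bridge" is assumed to come from agp_backend_acquire() and "nr_pages" is
 * whatever the caller needs; binding into the GATT is a separate step
 * (see agp_bind_memory() further down).
 *
 *      struct agp_memory *mem;
 *
 *      mem = agp_allocate_memory(bridge, nr_pages, AGP_NORMAL_MEMORY);
 *      if (mem == NULL)
 *              return -ENOMEM;
 *      ...
 *      agp_free_memory(mem);
 */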

/* End - Generic routines for handling agp_memory structures */


static int agp_return_size(void)
{
        int current_size;
        void *temp;

        temp = agp_bridge->current_size;

        switch (agp_bridge->driver->size_type) {
        case U8_APER_SIZE:
                current_size = A_SIZE_8(temp)->size;
                break;
        case U16_APER_SIZE:
                current_size = A_SIZE_16(temp)->size;
                break;
        case U32_APER_SIZE:
                current_size = A_SIZE_32(temp)->size;
                break;
        case LVL2_APER_SIZE:
                current_size = A_SIZE_LVL2(temp)->size;
                break;
        case FIXED_APER_SIZE:
                current_size = A_SIZE_FIX(temp)->size;
                break;
        default:
                current_size = 0;
                break;
        }

        current_size -= (agp_memory_reserved / (1024*1024));
        if (current_size < 0)
                current_size = 0;
        return current_size;
}


int agp_num_entries(void)
{
        int num_entries;
        void *temp;

        temp = agp_bridge->current_size;

        switch (agp_bridge->driver->size_type) {
        case U8_APER_SIZE:
                num_entries = A_SIZE_8(temp)->num_entries;
                break;
        case U16_APER_SIZE:
                num_entries = A_SIZE_16(temp)->num_entries;
                break;
        case U32_APER_SIZE:
                num_entries = A_SIZE_32(temp)->num_entries;
                break;
        case LVL2_APER_SIZE:
                num_entries = A_SIZE_LVL2(temp)->num_entries;
                break;
        case FIXED_APER_SIZE:
                num_entries = A_SIZE_FIX(temp)->num_entries;
                break;
        default:
                num_entries = 0;
                break;
        }

        num_entries -= agp_memory_reserved >> PAGE_SHIFT;
        if (num_entries < 0)
                num_entries = 0;
        return num_entries;
}
EXPORT_SYMBOL_GPL(agp_num_entries);


/**
 *      agp_copy_info  -  copy bridge state information
 *
 *      @bridge:        the AGP bridge to query.
 *      @info:          agp_kern_info pointer.  The caller should ensure that this pointer is valid.
 *
 *      This function copies information about the agp bridge device and the state of
 *      the agp backend into an agp_kern_info pointer.
 */
int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
{
        memset(info, 0, sizeof(struct agp_kern_info));
        if (!bridge) {
                info->chipset = NOT_SUPPORTED;
                return -EIO;
        }

        info->version.major = bridge->version->major;
        info->version.minor = bridge->version->minor;
        info->chipset = SUPPORTED;
        info->device = bridge->dev;
        if (bridge->mode & AGPSTAT_MODE_3_0)
                info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
        else
                info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
        info->aper_base = bridge->gart_bus_addr;
        info->aper_size = agp_return_size();
        info->max_memory = bridge->max_memory_agp;
        info->current_memory = atomic_read(&bridge->current_memory_agp);
        info->cant_use_aperture = bridge->driver->cant_use_aperture;
        info->vm_ops = bridge->vm_ops;
        info->page_mask = ~0UL;
        return 0;
}
EXPORT_SYMBOL(agp_copy_info);

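/*
 * Sketch of typical use (not part of this driver): a client such as a DRM
 * driver can query the bridge state like this, where "bridge" is assumed
 * to have been obtained via agp_backend_acquire():
 *
 *      struct agp_kern_info info;
 *
 *      if (agp_copy_info(bridge, &info) == 0)
 *              printk(KERN_DEBUG "AGP aperture at 0x%lx, %dMB\n",
 *                     info.aper_base, (int)info.aper_size);
 */
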
/* End - Routine to copy over information structure */

/*
 * Routines for handling swapping of agp_memory into the GATT -
 * These routines take agp_memory and insert them into the GATT.
 * They call device specific routines to actually write to the GATT.
 */

/**
 *      agp_bind_memory  -  Bind an agp_memory structure into the GATT.
 *
 *      @curr:          agp_memory pointer
 *      @pg_start:      an offset into the graphics aperture translation table
 *
 *      It returns -EINVAL if the pointer == NULL.
 *      It returns -EBUSY if the area of the table requested is already in use.
 */
int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
{
        int ret_val;

        if (curr == NULL)
                return -EINVAL;

        if (curr->is_bound) {
                printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
                return -EINVAL;
        }
        if (!curr->is_flushed) {
                curr->bridge->driver->cache_flush();
                curr->is_flushed = true;
        }
        ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);

        if (ret_val != 0)
                return ret_val;

        curr->is_bound = true;
        curr->pg_start = pg_start;
        spin_lock(&agp_bridge->mapped_lock);
        list_add(&curr->mapped_list, &agp_bridge->mapped_list);
        spin_unlock(&agp_bridge->mapped_lock);

        return 0;
}
EXPORT_SYMBOL(agp_bind_memory);


/**
 *      agp_unbind_memory  -  Removes an agp_memory structure from the GATT
 *
 * @curr:       agp_memory pointer to be removed from the GATT.
 *
 * It returns -EINVAL if this piece of agp_memory is not currently bound to
 * the graphics aperture translation table or if the agp_memory pointer == NULL.
 */
int agp_unbind_memory(struct agp_memory *curr)
{
        int ret_val;

        if (curr == NULL)
                return -EINVAL;

        if (!curr->is_bound) {
                printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
                return -EINVAL;
        }

        ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);

        if (ret_val != 0)
                return ret_val;

        curr->is_bound = false;
        curr->pg_start = 0;
        spin_lock(&curr->bridge->mapped_lock);
        list_del(&curr->mapped_list);
        spin_unlock(&curr->bridge->mapped_lock);
        return 0;
}
EXPORT_SYMBOL(agp_unbind_memory);

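/*
 * Sketch (not used here): with "mem" from agp_allocate_memory() and
 * "pg_start" a free offset in the aperture, a caller that owns the
 * backend binds and unbinds like this:
 *
 *      int err = agp_bind_memory(mem, pg_start);
 *      if (err)
 *              return err;
 *      ...
 *      err = agp_unbind_memory(mem);
 */
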
/**
 *      agp_rebind_memory  -  Rewrite the entire GATT, useful on resume
 */
int agp_rebind_memory(void)
{
        struct agp_memory *curr;
        int ret_val = 0;
        spin_lock(&agp_bridge->mapped_lock);
        list_for_each_entry(curr, &agp_bridge->mapped_list, mapped_list) {
                ret_val = curr->bridge->driver->insert_memory(curr,
                                                              curr->pg_start,
                                                              curr->type);
                if (ret_val != 0)
                        break;
        }
        spin_unlock(&agp_bridge->mapped_lock);
        return ret_val;
}
EXPORT_SYMBOL(agp_rebind_memory);

/* End - Routines for handling swapping of agp_memory into the GATT */


/* Generic Agp routines - Start */
static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
        u32 tmp;

        if (*requested_mode & AGP2_RESERVED_MASK) {
                printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
                        *requested_mode & AGP2_RESERVED_MASK, *requested_mode);
                *requested_mode &= ~AGP2_RESERVED_MASK;
        }

        /*
         * Some dumb bridges are programmed to disobey the AGP2 spec.
         * This is likely a BIOS misprogramming rather than poweron default, or
         * it would be a lot more common.
         * https://bugs.freedesktop.org/show_bug.cgi?id=8816
         * AGPv2 spec 6.1.9 states:
         *   The RATE field indicates the data transfer rates supported by this
         *   device. A.G.P. devices must report all that apply.
         * Fix them up as best we can.
         */
        switch (*bridge_agpstat & 7) {
        case 4:
                *bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
                printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
                        "Fixing up support for x2 & x1\n");
                break;
        case 2:
                *bridge_agpstat |= AGPSTAT2_1X;
                printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
                        "Fixing up support for x1\n");
                break;
        default:
                break;
        }

        /* Check the speed bits make sense. Only one should be set. */
        tmp = *requested_mode & 7;
        switch (tmp) {
                case 0:
                        printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
                        *requested_mode |= AGPSTAT2_1X;
                        break;
                case 1:
                case 2:
                        break;
                case 3:
                        *requested_mode &= ~(AGPSTAT2_1X);      /* rate=2 */
                        break;
                case 4:
                        break;
                case 5:
                case 6:
                case 7:
                        *requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4 */
                        break;
        }

        /* disable SBA if it's not supported */
        if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
                *bridge_agpstat &= ~AGPSTAT_SBA;

        /* Set rate */
        if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
                *bridge_agpstat &= ~AGPSTAT2_4X;

        if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
                *bridge_agpstat &= ~AGPSTAT2_2X;

        if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
                *bridge_agpstat &= ~AGPSTAT2_1X;

        /* Now we know what mode it should be, clear out the unwanted bits. */
        if (*bridge_agpstat & AGPSTAT2_4X)
                *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X);        /* 4X */

        if (*bridge_agpstat & AGPSTAT2_2X)
                *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X);        /* 2X */

        if (*bridge_agpstat & AGPSTAT2_1X)
                *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);        /* 1X */

        /* Apply any errata. */
        if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
                *bridge_agpstat &= ~AGPSTAT_FW;

        if (agp_bridge->flags & AGP_ERRATA_SBA)
                *bridge_agpstat &= ~AGPSTAT_SBA;

        if (agp_bridge->flags & AGP_ERRATA_1X) {
                *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
                *bridge_agpstat |= AGPSTAT2_1X;
        }

        /* If we've dropped down to 1X, disable fast writes. */
        if (*bridge_agpstat & AGPSTAT2_1X)
                *bridge_agpstat &= ~AGPSTAT_FW;
}

/*
 * requested_mode = Mode requested by (typically) X.
 * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
 * vga_agpstat = PCI_AGP_STATUS from graphic card.
 */
static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
        u32 origbridge = *bridge_agpstat, origvga = *vga_agpstat;
        u32 tmp;

        if (*requested_mode & AGP3_RESERVED_MASK) {
                printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
                        *requested_mode & AGP3_RESERVED_MASK, *requested_mode);
                *requested_mode &= ~AGP3_RESERVED_MASK;
        }

        /* Check the speed bits make sense. */
        tmp = *requested_mode & 7;
        if (tmp == 0) {
                printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
                *requested_mode |= AGPSTAT3_4X;
        }
        if (tmp >= 3) {
                printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
                *requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
        }

        /* ARQSZ - Set the value to the maximum one.
         * Don't allow the mode register to override values. */
        *bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
                max_t(u32, (*bridge_agpstat & AGPSTAT_ARQSZ), (*vga_agpstat & AGPSTAT_ARQSZ)));

        /* Calibration cycle.
         * Don't allow the mode register to override values. */
        *bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
                min_t(u32, (*bridge_agpstat & AGPSTAT_CAL_MASK), (*vga_agpstat & AGPSTAT_CAL_MASK)));

        /* SBA *must* be supported for AGP v3 */
        *bridge_agpstat |= AGPSTAT_SBA;

        /*
         * Set speed.
         * Check for invalid speeds. This can happen when applications
         * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware.
         */
        if (*requested_mode & AGPSTAT_MODE_3_0) {
                /*
                 * The caller hasn't a clue what it is doing. The bridge is in
                 * 3.0 mode and we have been passed a 3.0 mode, but with 2.x
                 * speed bits set. Convert AGP2.x 4x -> AGP3.0 4x.
                 */
                if (*requested_mode & AGPSTAT2_4X) {
                        printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
                                                current->comm, *requested_mode);
                        *requested_mode &= ~AGPSTAT2_4X;
                        *requested_mode |= AGPSTAT3_4X;
                }
        } else {
                /*
                 * The caller doesn't know what they are doing. We are in 3.0 mode,
                 * but have been passed an AGP 2.x mode.
                 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
                 */
                printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
                                        current->comm, *requested_mode);
                *requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
                *requested_mode |= AGPSTAT3_4X;
        }

        if (*requested_mode & AGPSTAT3_8X) {
                if (!(*bridge_agpstat & AGPSTAT3_8X)) {
                        *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
                        *bridge_agpstat |= AGPSTAT3_4X;
                        printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
                        return;
                }
                if (!(*vga_agpstat & AGPSTAT3_8X)) {
                        *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
                        *bridge_agpstat |= AGPSTAT3_4X;
                        printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm);
                        return;
                }
                /* All set, bridge & device can do AGP x8 */
                *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
                goto done;

        } else if (*requested_mode & AGPSTAT3_4X) {
                *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
                *bridge_agpstat |= AGPSTAT3_4X;
                goto done;

        } else {

                /*
                 * If we didn't specify an AGP mode, we see if both
                 * the graphics card and the bridge can do x8, and use it if so.
                 * If not, we fall back to x4 mode.
                 */
                if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
                        printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
                                "supported by bridge & card (x8).\n");
                        *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
                        *vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
                } else {
                        printk(KERN_INFO PFX "Fell back to AGPx4 mode because:\n");
                        if (!(*bridge_agpstat & AGPSTAT3_8X)) {
                                printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
                                        *bridge_agpstat, origbridge);
                                *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
                                *bridge_agpstat |= AGPSTAT3_4X;
                        }
                        if (!(*vga_agpstat & AGPSTAT3_8X)) {
                                printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
                                        *vga_agpstat, origvga);
                                *vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
                                *vga_agpstat |= AGPSTAT3_4X;
                        }
                }
        }

done:
        /* Apply any errata. */
        if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
                *bridge_agpstat &= ~AGPSTAT_FW;

        if (agp_bridge->flags & AGP_ERRATA_SBA)
                *bridge_agpstat &= ~AGPSTAT_SBA;

        if (agp_bridge->flags & AGP_ERRATA_1X) {
                *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
                *bridge_agpstat |= AGPSTAT2_1X;
        }
}


/**
 * agp_collect_device_status - determine correct agp_cmd from various agp_stat's
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @requested_mode: requested agp_stat from userspace (Typically from X)
 * @bridge_agpstat: current agp_stat from AGP bridge.
 *
 * This function will hunt for an AGP graphics card, and try to match
 * the requested mode to the capabilities of both the bridge and the card.
 */
u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
{
        struct pci_dev *device = NULL;
        u32 vga_agpstat;
        u8 cap_ptr;

        for (;;) {
                device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
                if (!device) {
                        printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
                        return 0;
                }
                cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
                if (cap_ptr)
                        break;
        }

        /*
         * Ok, here we have an AGP device. Disable impossible
         * settings, and adjust the read queue to the minimum.
         */
        pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);

        /* adjust RQ depth */
        bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
             min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
                 min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));

        /* disable FW if it's not supported */
        if (!((bridge_agpstat & AGPSTAT_FW) &&
                 (vga_agpstat & AGPSTAT_FW) &&
                 (requested_mode & AGPSTAT_FW)))
                bridge_agpstat &= ~AGPSTAT_FW;

        /* Check to see if we are operating in 3.0 mode */
        if (agp_bridge->mode & AGPSTAT_MODE_3_0)
                agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
        else
                agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);

        pci_dev_put(device);
        return bridge_agpstat;
}
EXPORT_SYMBOL(agp_collect_device_status);


void agp_device_command(u32 bridge_agpstat, bool agp_v3)
{
        struct pci_dev *device = NULL;
        int mode;

        mode = bridge_agpstat & 0x7;
        if (agp_v3)
                mode *= 4;

        for_each_pci_dev(device) {
                u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
                if (!agp)
                        continue;

                dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
                         agp_v3 ? 3 : 2, mode);
                pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
        }
}
EXPORT_SYMBOL(agp_device_command);


void get_agp_version(struct agp_bridge_data *bridge)
{
        u32 ncapid;

        /* Exit early if already set by errata workarounds. */
        if (bridge->major_version != 0)
                return;

        pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
        bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
        bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
}
EXPORT_SYMBOL(get_agp_version);


void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
{
        u32 bridge_agpstat, temp;

        get_agp_version(agp_bridge);

        dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
                 agp_bridge->major_version, agp_bridge->minor_version);

        pci_read_config_dword(agp_bridge->dev,
                      agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);

        bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
        if (bridge_agpstat == 0)
                /* Something bad happened. FIXME: Return error code? */
                return;

        bridge_agpstat |= AGPSTAT_AGP_ENABLE;

        /* Do AGP version specific frobbing. */
        if (bridge->major_version >= 3) {
                if (bridge->mode & AGPSTAT_MODE_3_0) {
                        /* If we have 3.5, we can do the isoch stuff. */
                        if (bridge->minor_version >= 5)
                                agp_3_5_enable(bridge);
                        agp_device_command(bridge_agpstat, true);
                        return;
                } else {
                        /* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation. */
                        bridge_agpstat &= ~(7<<10);
                        pci_read_config_dword(bridge->dev,
                                        bridge->capndx+AGPCTRL, &temp);
                        temp |= (1<<9);
                        pci_write_config_dword(bridge->dev,
                                        bridge->capndx+AGPCTRL, temp);

                        dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
                }
        }

        /* AGP v<3 */
        agp_device_command(bridge_agpstat, false);
}
EXPORT_SYMBOL(agp_generic_enable);


int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
{
        char *table;
        char *table_end;
        int size;
        int page_order;
        int num_entries;
        int i;
        void *temp;
        struct page *page;

        /* The generic routines can't handle 2 level gatt's */
        if (bridge->driver->size_type == LVL2_APER_SIZE)
                return -EINVAL;

        table = NULL;
        i = bridge->aperture_size_idx;
        temp = bridge->current_size;
        size = page_order = num_entries = 0;

        if (bridge->driver->size_type != FIXED_APER_SIZE) {
                do {
                        switch (bridge->driver->size_type) {
                        case U8_APER_SIZE:
                                size = A_SIZE_8(temp)->size;
                                page_order =
                                    A_SIZE_8(temp)->page_order;
                                num_entries =
                                    A_SIZE_8(temp)->num_entries;
                                break;
                        case U16_APER_SIZE:
                                size = A_SIZE_16(temp)->size;
                                page_order = A_SIZE_16(temp)->page_order;
                                num_entries = A_SIZE_16(temp)->num_entries;
                                break;
                        case U32_APER_SIZE:
                                size = A_SIZE_32(temp)->size;
                                page_order = A_SIZE_32(temp)->page_order;
                                num_entries = A_SIZE_32(temp)->num_entries;
                                break;
                                /* This case will never really happen. */
                        case FIXED_APER_SIZE:
                        case LVL2_APER_SIZE:
                        default:
                                size = page_order = num_entries = 0;
                                break;
                        }

                        table = alloc_gatt_pages(page_order);

                        if (table == NULL) {
                                i++;
                                switch (bridge->driver->size_type) {
                                case U8_APER_SIZE:
                                        bridge->current_size = A_IDX8(bridge);
                                        break;
                                case U16_APER_SIZE:
                                        bridge->current_size = A_IDX16(bridge);
                                        break;
                                case U32_APER_SIZE:
                                        bridge->current_size = A_IDX32(bridge);
                                        break;
                                /* These cases will never really happen. */
                                case FIXED_APER_SIZE:
                                case LVL2_APER_SIZE:
                                default:
                                        break;
                                }
                                temp = bridge->current_size;
                        } else {
                                bridge->aperture_size_idx = i;
                        }
                } while (!table && (i < bridge->driver->num_aperture_sizes));
        } else {
                size = ((struct aper_size_info_fixed *) temp)->size;
                page_order = ((struct aper_size_info_fixed *) temp)->page_order;
                num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
                table = alloc_gatt_pages(page_order);
        }

        if (table == NULL)
                return -ENOMEM;

        table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

        for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
                SetPageReserved(page);

        bridge->gatt_table_real = (u32 *) table;
        agp_gatt_table = (void *)table;

        bridge->driver->cache_flush();
#ifdef CONFIG_X86
        set_memory_uc((unsigned long)table, 1 << page_order);
        bridge->gatt_table = (void *)table;
#else
        bridge->gatt_table = ioremap_nocache(virt_to_gart(table),
                                        (PAGE_SIZE * (1 << page_order)));
        bridge->driver->cache_flush();
#endif

        if (bridge->gatt_table == NULL) {
                for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
                        ClearPageReserved(page);

                free_gatt_pages(table, page_order);

                return -ENOMEM;
        }
        bridge->gatt_bus_addr = virt_to_gart(bridge->gatt_table_real);

        /* AK: bogus, should encode addresses > 4GB */
        for (i = 0; i < num_entries; i++) {
                writel(bridge->scratch_page, bridge->gatt_table+i);
                readl(bridge->gatt_table+i);    /* PCI Posting. */
        }

        return 0;
}
EXPORT_SYMBOL(agp_generic_create_gatt_table);

int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
{
        int page_order;
        char *table, *table_end;
        void *temp;
        struct page *page;

        temp = bridge->current_size;

        switch (bridge->driver->size_type) {
        case U8_APER_SIZE:
                page_order = A_SIZE_8(temp)->page_order;
                break;
        case U16_APER_SIZE:
                page_order = A_SIZE_16(temp)->page_order;
                break;
        case U32_APER_SIZE:
                page_order = A_SIZE_32(temp)->page_order;
                break;
        case FIXED_APER_SIZE:
                page_order = A_SIZE_FIX(temp)->page_order;
                break;
        case LVL2_APER_SIZE:
                /* The generic routines can't deal with 2 level gatt's */
                return -EINVAL;
                break;
        default:
                page_order = 0;
                break;
        }

        /* Do not worry about freeing memory, because if this is
         * called, then all agp memory is deallocated and removed
         * from the table. */

#ifdef CONFIG_X86
        set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
#else
        iounmap(bridge->gatt_table);
#endif
        table = (char *) bridge->gatt_table_real;
        table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

        for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
                ClearPageReserved(page);

        free_gatt_pages(bridge->gatt_table_real, page_order);

        agp_gatt_table = NULL;
        bridge->gatt_table = NULL;
        bridge->gatt_table_real = NULL;
        bridge->gatt_bus_addr = 0;

        return 0;
}
EXPORT_SYMBOL(agp_generic_free_gatt_table);


int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
{
        int num_entries;
        size_t i;
        off_t j;
        void *temp;
        struct agp_bridge_data *bridge;
        int mask_type;

        bridge = mem->bridge;
        if (!bridge)
                return -EINVAL;

        if (mem->page_count == 0)
                return 0;

        temp = bridge->current_size;

        switch (bridge->driver->size_type) {
        case U8_APER_SIZE:
                num_entries = A_SIZE_8(temp)->num_entries;
                break;
        case U16_APER_SIZE:
                num_entries = A_SIZE_16(temp)->num_entries;
                break;
        case U32_APER_SIZE:
                num_entries = A_SIZE_32(temp)->num_entries;
                break;
        case FIXED_APER_SIZE:
                num_entries = A_SIZE_FIX(temp)->num_entries;
                break;
        case LVL2_APER_SIZE:
                /* The generic routines can't deal with 2 level gatt's */
                return -EINVAL;
                break;
        default:
                num_entries = 0;
                break;
        }

        num_entries -= agp_memory_reserved/PAGE_SIZE;
        if (num_entries < 0)
                num_entries = 0;

        if (type != mem->type)
                return -EINVAL;

        mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
        if (mask_type != 0) {
                /* The generic routines know nothing of memory types */
                return -EINVAL;
        }

        /* AK: could wrap */
        if ((pg_start + mem->page_count) > num_entries)
                return -EINVAL;

        j = pg_start;

        while (j < (pg_start + mem->page_count)) {
                if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
                        return -EBUSY;
                j++;
        }

        if (!mem->is_flushed) {
                bridge->driver->cache_flush();
                mem->is_flushed = true;
        }

        for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
                writel(bridge->driver->mask_memory(bridge, mem->memory[i], mask_type),
                       bridge->gatt_table+j);
        }
        readl(bridge->gatt_table+j-1);  /* PCI Posting. */

        bridge->driver->tlb_flush(mem);
        return 0;
}
EXPORT_SYMBOL(agp_generic_insert_memory);


int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
        size_t i;
        struct agp_bridge_data *bridge;
        int mask_type;

        bridge = mem->bridge;
        if (!bridge)
                return -EINVAL;

        if (mem->page_count == 0)
                return 0;

        if (type != mem->type)
                return -EINVAL;

        mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
        if (mask_type != 0) {
                /* The generic routines know nothing of memory types */
                return -EINVAL;
        }

        /* AK: bogus, should encode addresses > 4GB */
        for (i = pg_start; i < (mem->page_count + pg_start); i++) {
                writel(bridge->scratch_page, bridge->gatt_table+i);
        }
        readl(bridge->gatt_table+i-1);  /* PCI Posting. */

        bridge->driver->tlb_flush(mem);
        return 0;
}
EXPORT_SYMBOL(agp_generic_remove_memory);

/* The generic code has no device-dependent memory types; drivers that do
 * support them override this hook. */
struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
{
        return NULL;
}
EXPORT_SYMBOL(agp_generic_alloc_by_type);

void agp_generic_free_by_type(struct agp_memory *curr)
{
        agp_free_page_array(curr);
        agp_free_key(curr->key);
        kfree(curr);
}
EXPORT_SYMBOL(agp_generic_free_by_type);

struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
{
        struct agp_memory *new;
        int i;
        int pages;

        pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
        new = agp_create_user_memory(page_count);
        if (new == NULL)
                return NULL;

        for (i = 0; i < page_count; i++)
                new->memory[i] = 0;
        new->page_count = 0;
        new->type = type;
        new->num_scratch_pages = pages;

        return new;
}
EXPORT_SYMBOL(agp_generic_alloc_user);

/*
 * Basic Page Allocation Routines -
 * These routines handle page allocation and by default they reserve the allocated
 * memory.  They also handle incrementing the current_memory_agp value, which is checked
 * against a maximum value.
 */

void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
{
        struct page * page;

        page = alloc_page(GFP_KERNEL | GFP_DMA32);
        if (page == NULL)
                return NULL;

        map_page_into_agp(page);

        get_page(page);
        atomic_inc(&agp_bridge->current_memory_agp);
        return page_address(page);
}
EXPORT_SYMBOL(agp_generic_alloc_page);


void agp_generic_destroy_page(void *addr, int flags)
{
        struct page *page;

        if (addr == NULL)
                return;

        page = virt_to_page(addr);
        if (flags & AGP_PAGE_DESTROY_UNMAP)
                unmap_page_from_agp(page);

        if (flags & AGP_PAGE_DESTROY_FREE) {
                put_page(page);
                free_page((unsigned long)addr);
                atomic_dec(&agp_bridge->current_memory_agp);
        }
}
EXPORT_SYMBOL(agp_generic_destroy_page);

/* End Basic Page Allocation Routines */


/**
 * agp_enable  -  initialise the agp point-to-point connection.
 *
 * @bridge:     the AGP bridge to enable.
 * @mode:       agp mode register value to configure with.
 */
void agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
        if (!bridge)
                return;
        bridge->driver->agp_enable(bridge, mode);
}
EXPORT_SYMBOL(agp_enable);

/* When we remove the global variable agp_bridge from all drivers
 * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
 */

struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
{
        if (list_empty(&agp_bridges))
                return NULL;

        return agp_bridge;
}

static void ipi_handler(void *null)
{
        flush_agp_cache();
}

void global_cache_flush(void)
{
        if (on_each_cpu(ipi_handler, NULL, 1) != 0)
                panic(PFX "timed out waiting for the other CPUs!\n");
}
EXPORT_SYMBOL(global_cache_flush);

unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
        unsigned long addr, int type)
{
        /* memory type is ignored in the generic routine */
        if (bridge->driver->masks)
                return addr | bridge->driver->masks[0].mask;
        else
                return addr;
}
EXPORT_SYMBOL(agp_generic_mask_memory);

int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
                                  int type)
{
        if (type >= AGP_USER_TYPES)
                return 0;
        return type;
}
EXPORT_SYMBOL(agp_generic_type_to_mask_type);

/*
 * These functions are implemented according to the AGPv3 spec,
 * which covers implementation details that had previously been
 * left open.
 */

int agp3_generic_fetch_size(void)
{
        u16 temp_size;
        int i;
        struct aper_size_info_16 *values;

        pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
        values = A_SIZE_16(agp_bridge->driver->aperture_sizes);

        for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
                if (temp_size == values[i].size_value) {
                        agp_bridge->previous_size =
                                agp_bridge->current_size = (void *) (values + i);

                        agp_bridge->aperture_size_idx = i;
                        return values[i].size;
                }
        }
        return 0;
}
EXPORT_SYMBOL(agp3_generic_fetch_size);

void agp3_generic_tlbflush(struct agp_memory *mem)
{
        u32 ctrl;
        pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
        pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
        pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
}
EXPORT_SYMBOL(agp3_generic_tlbflush);

int agp3_generic_configure(void)
{
        u32 temp;
        struct aper_size_info_16 *current_size;

        current_size = A_SIZE_16(agp_bridge->current_size);

        pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
        agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

        /* set aperture size */
        pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
        /* set gart pointer */
        pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
        /* enable aperture and GTLB */
        pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
        pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
        return 0;
}
EXPORT_SYMBOL(agp3_generic_configure);

void agp3_generic_cleanup(void)
{
        u32 ctrl;
        pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
        pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
}
EXPORT_SYMBOL(agp3_generic_cleanup);

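/*
 * Note: per struct aper_size_info_16, each entry below is
 * { aperture size in MB, number of GATT entries, GATT page order,
 *   value programmed into the AGPAPSIZE register }.
 */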
const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
{
        {4096, 1048576, 10, 0x000},
        {2048,  524288, 9, 0x800},
        {1024,  262144, 8, 0xc00},
        { 512,  131072, 7, 0xe00},
        { 256,   65536, 6, 0xf00},
        { 128,   32768, 5, 0xf20},
        {  64,   16384, 4, 0xf30},
        {  32,    8192, 3, 0xf38},
        {  16,    4096, 2, 0xf3c},
        {   8,    2048, 1, 0xf3e},
        {   4,    1024, 0, 0xf3f}
};
EXPORT_SYMBOL(agp3_generic_sizes);
