/*
 * Serverworks AGPGART routines.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/agp_backend.h>
#include "agp.h"

#define SVWRKS_COMMAND		0x04
#define SVWRKS_APSIZE		0x10
#define SVWRKS_MMBASE		0x14
#define SVWRKS_CACHING		0x4b
#define SVWRKS_AGP_ENABLE	0x60
#define SVWRKS_FEATURE		0x68

#define SVWRKS_SIZE_MASK	0xfe000000

/* Memory mapped registers */
#define SVWRKS_GART_CACHE	0x02
#define SVWRKS_GATTBASE		0x04
#define SVWRKS_TLBFLUSH		0x10
#define SVWRKS_POSTFLUSH	0x14
#define SVWRKS_DIRFLUSH		0x0c

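/*
 * The Serverworks GART is a two-level structure: a single page directory
 * whose entries point at individual GATT pages, each mapping 1024 4K pages
 * of the aperture.  A serverworks_page_map tracks one such page in both its
 * kernel-virtual ("real") and uncached ioremapped ("remapped") forms.
 */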
struct serverworks_page_map {
	unsigned long *real;
	unsigned long __iomem *remapped;
};

static struct _serverworks_private {
	struct pci_dev *svrwrks_dev;	/* device one */
	volatile u8 __iomem *registers;
	struct serverworks_page_map **gatt_pages;
	int num_tables;
	struct serverworks_page_map scratch_dir;

	int gart_addr_ofs;
	int mm_addr_ofs;
} serverworks_private;

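/*
 * Allocate a single page for use as a GATT page or page directory: grab a
 * free kernel page, mark it reserved, remap it uncached so the chipset sees
 * table updates immediately, and point every entry at the scratch page.
 */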
static int serverworks_create_page_map(struct serverworks_page_map *page_map)
{
	int i;

	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
	if (page_map->real == NULL) {
		return -ENOMEM;
	}
	SetPageReserved(virt_to_page(page_map->real));
	global_cache_flush();
	page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
					     PAGE_SIZE);
	if (page_map->remapped == NULL) {
		ClearPageReserved(virt_to_page(page_map->real));
		free_page((unsigned long) page_map->real);
		page_map->real = NULL;
		return -ENOMEM;
	}
	global_cache_flush();

	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
		writel(agp_bridge->scratch_page, page_map->remapped+i);

	return 0;
}

static void serverworks_free_page_map(struct serverworks_page_map *page_map)
{
	iounmap(page_map->remapped);
	ClearPageReserved(virt_to_page(page_map->real));
	free_page((unsigned long) page_map->real);
}

static void serverworks_free_gatt_pages(void)
{
	int i;
	struct serverworks_page_map **tables;
	struct serverworks_page_map *entry;

	tables = serverworks_private.gatt_pages;
	for (i = 0; i < serverworks_private.num_tables; i++) {
		entry = tables[i];
		if (entry != NULL) {
			if (entry->real != NULL) {
				serverworks_free_page_map(entry);
			}
			kfree(entry);
		}
	}
	kfree(tables);
}

static int serverworks_create_gatt_pages(int nr_tables)
{
	struct serverworks_page_map **tables;
	struct serverworks_page_map *entry;
	int retval = 0;
	int i;

	tables = kzalloc((nr_tables + 1) * sizeof(struct serverworks_page_map *),
			 GFP_KERNEL);
	if (tables == NULL)
		return -ENOMEM;

	for (i = 0; i < nr_tables; i++) {
		entry = kzalloc(sizeof(struct serverworks_page_map), GFP_KERNEL);
		if (entry == NULL) {
			retval = -ENOMEM;
			break;
		}
		tables[i] = entry;
		retval = serverworks_create_page_map(entry);
		if (retval != 0) break;
	}
	serverworks_private.num_tables = nr_tables;
	serverworks_private.gatt_pages = tables;

	if (retval != 0) serverworks_free_gatt_pages();

	return retval;
}

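/*
 * Address decode helpers: bits 22 and up of an aperture bus address select
 * the page-directory entry (relative to the aperture base), while bits
 * 12-21 select the entry within that GATT page.
 */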
#define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\
	GET_PAGE_DIR_IDX(addr)]->remapped)

#ifndef GET_PAGE_DIR_OFF
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#endif

#ifndef GET_PAGE_DIR_IDX
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
	GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#endif

#ifndef GET_GATT_OFF
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#endif

static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
{
	struct aper_size_info_lvl2 *value;
	struct serverworks_page_map page_dir;
	int retval;
	u32 temp;
	int i;

	value = A_SIZE_LVL2(agp_bridge->current_size);
	retval = serverworks_create_page_map(&page_dir);
	if (retval != 0) {
		return retval;
	}
	retval = serverworks_create_page_map(&serverworks_private.scratch_dir);
	if (retval != 0) {
		serverworks_free_page_map(&page_dir);
		return retval;
	}
	/* Create a fake scratch directory */
	for (i = 0; i < 1024; i++) {
		writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
		writel(virt_to_gart(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
	}

	retval = serverworks_create_gatt_pages(value->num_entries / 1024);
	if (retval != 0) {
		serverworks_free_page_map(&page_dir);
		serverworks_free_page_map(&serverworks_private.scratch_dir);
		return retval;
	}

	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
	agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
	agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);

	/* Get the address of the GART region.
	 * This is a bus address even on the alpha, because it is
	 * used to program the AGP master, not the CPU.
	 */
	pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* Point each page-directory entry at its GATT page */
	for (i = 0; i < value->num_entries / 1024; i++)
		writel(virt_to_gart(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);

	return 0;
}

static int serverworks_free_gatt_table(struct agp_bridge_data *bridge)
{
	struct serverworks_page_map page_dir;

	page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
	page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;

	serverworks_free_gatt_pages();
	serverworks_free_page_map(&page_dir);
	serverworks_free_page_map(&serverworks_private.scratch_dir);
	return 0;
}

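/*
 * Probe the aperture size the same way BAR sizing works: save the current
 * value of the aperture base register, write all ones (SVWRKS_SIZE_MASK),
 * read back which bits stuck, restore the register, and match the result
 * against the aperture size table.
 */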
static int serverworks_fetch_size(void)
{
	int i;
	u32 temp;
	u32 temp2;
	struct aper_size_info_lvl2 *values;

	values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
	pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp);
	pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,
			       SVWRKS_SIZE_MASK);
	pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp2);
	pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,temp);
	temp2 &= SVWRKS_SIZE_MASK;

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp2 == values[i].size_value) {
			agp_bridge->previous_size =
			    agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}

/*
 * This routine could be implemented by taking the addresses
 * written to the GATT, and flushing them individually.  However,
 * currently it just flushes the whole table, which is probably
 * more efficient, since agp_memory blocks can be a large number of
 * entries.
 */
static void serverworks_tlbflush(struct agp_memory *temp)
{
	unsigned long timeout;

	writeb(1, serverworks_private.registers+SVWRKS_POSTFLUSH);
	timeout = jiffies + 3*HZ;
	while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1) {
		cpu_relax();
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR PFX "TLB post flush took more than 3 seconds\n");
			break;
		}
	}

	writel(1, serverworks_private.registers+SVWRKS_DIRFLUSH);
	timeout = jiffies + 3*HZ;
	while (readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1) {
		cpu_relax();
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR PFX "TLB Dir flush took more than 3 seconds\n");
			break;
		}
	}
}

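/*
 * Bring the bridge up: map the chipset's MMIO register window, program the
 * GATT base, enable AGP on the secondary (function 1) device, flush the
 * TLB, and cache the AGP capability offset and mode register.
 */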
static int serverworks_configure(void)
{
	struct aper_size_info_lvl2 *current_size;
	u32 temp;
	u8 enable_reg;
	u16 cap_reg;

	current_size = A_SIZE_LVL2(agp_bridge->current_size);

	/* Get the memory mapped registers */
	pci_read_config_dword(agp_bridge->dev, serverworks_private.mm_addr_ofs, &temp);
	temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
	if (!serverworks_private.registers) {
		printk(KERN_ERR PFX "Unable to ioremap() memory.\n");
		return -ENOMEM;
	}

	writeb(0xA, serverworks_private.registers+SVWRKS_GART_CACHE);
	readb(serverworks_private.registers+SVWRKS_GART_CACHE);	/* PCI Posting. */

	writel(agp_bridge->gatt_bus_addr, serverworks_private.registers+SVWRKS_GATTBASE);
	readl(serverworks_private.registers+SVWRKS_GATTBASE);	/* PCI Posting. */

	cap_reg = readw(serverworks_private.registers+SVWRKS_COMMAND);
	cap_reg &= ~0x0007;
	cap_reg |= 0x4;
	writew(cap_reg, serverworks_private.registers+SVWRKS_COMMAND);
	readw(serverworks_private.registers+SVWRKS_COMMAND);	/* PCI Posting. */

	pci_read_config_byte(serverworks_private.svrwrks_dev, SVWRKS_AGP_ENABLE, &enable_reg);
	enable_reg |= 0x1; /* AGP Enable bit */
	pci_write_config_byte(serverworks_private.svrwrks_dev, SVWRKS_AGP_ENABLE, enable_reg);
	serverworks_tlbflush(NULL);

	agp_bridge->capndx = pci_find_capability(serverworks_private.svrwrks_dev, PCI_CAP_ID_AGP);

	/* Fill in the mode register */
	pci_read_config_dword(serverworks_private.svrwrks_dev,
			      agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode);

	pci_read_config_byte(agp_bridge->dev, SVWRKS_CACHING, &enable_reg);
	enable_reg &= ~0x3;
	pci_write_config_byte(agp_bridge->dev, SVWRKS_CACHING, enable_reg);

	pci_read_config_byte(agp_bridge->dev, SVWRKS_FEATURE, &enable_reg);
	enable_reg |= (1<<6);
	pci_write_config_byte(agp_bridge->dev, SVWRKS_FEATURE, enable_reg);

	return 0;
}

static void serverworks_cleanup(void)
{
	iounmap((void __iomem *) serverworks_private.registers);
}

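/*
 * Map a block of AGP memory into the aperture: reject unsupported types and
 * out-of-range requests, make sure the target GATT entries are unused,
 * flush CPU caches if needed, then write the masked physical address of
 * each page and flush the chipset TLB.
 */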
static int serverworks_insert_memory(struct agp_memory *mem,
				     off_t pg_start, int type)
{
	int i, j, num_entries;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;

	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}
	if ((pg_start + mem->page_count) > num_entries) {
		return -EINVAL;
	}

	j = pg_start;
	while (j < (pg_start + mem->page_count)) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
			return -EBUSY;
		j++;
	}

	if (mem->is_flushed == FALSE) {
		global_cache_flush();
		mem->is_flushed = TRUE;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		writel(agp_bridge->driver->mask_memory(agp_bridge, mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
	}
	serverworks_tlbflush(mem);
	return 0;
}

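/*
 * Unmap a block: point the affected GATT entries back at the scratch page
 * and flush the chipset TLB on either side of the update.
 */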
static int serverworks_remove_memory(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}

	global_cache_flush();
	serverworks_tlbflush(mem);

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
	}

	serverworks_tlbflush(mem);
	return 0;
}

static const struct gatt_mask serverworks_masks[] =
{
	{.mask = 1, .type = 0}
};

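/*
 * Supported aperture sizes.  Each entry is {size in MB, number of 4K GATT
 * entries, value read back from the aperture size register}; see
 * serverworks_fetch_size(), which matches the probed register value
 * against size_value.
 */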
static const struct aper_size_info_lvl2 serverworks_sizes[7] =
{
	{2048, 524288, 0x80000000},
	{1024, 262144, 0xc0000000},
	{512, 131072, 0xe0000000},
	{256, 65536, 0xf0000000},
	{128, 32768, 0xf8000000},
	{64, 16384, 0xfc000000},
	{32, 8192, 0xfe000000}
};

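/*
 * AGP enable hook: gather the negotiated device status as usual, but always
 * clear the FastWrite bit before programming the command register.
 */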
static void serverworks_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	u32 command;

	pci_read_config_dword(serverworks_private.svrwrks_dev,
			      bridge->capndx + PCI_AGP_STATUS,
			      &command);

	command = agp_collect_device_status(bridge, mode, command);

	command &= ~0x10;	/* disable FW */
	command &= ~0x08;

	command |= 0x100;

	pci_write_config_dword(serverworks_private.svrwrks_dev,
			       bridge->capndx + PCI_AGP_COMMAND,
			       command);

	agp_device_command(command, 0);
}

static const struct agp_bridge_driver sworks_driver = {
	.owner = THIS_MODULE,
	.aperture_sizes = serverworks_sizes,
	.size_type = LVL2_APER_SIZE,
	.num_aperture_sizes = 7,
	.configure = serverworks_configure,
	.fetch_size = serverworks_fetch_size,
	.cleanup = serverworks_cleanup,
	.tlb_flush = serverworks_tlbflush,
	.mask_memory = agp_generic_mask_memory,
	.masks = serverworks_masks,
	.agp_enable = serverworks_agp_enable,
	.cache_flush = global_cache_flush,
	.create_gatt_table = serverworks_create_gatt_table,
	.free_gatt_table = serverworks_free_gatt_table,
	.insert_memory = serverworks_insert_memory,
	.remove_memory = serverworks_remove_memory,
	.alloc_by_type = agp_generic_alloc_by_type,
	.free_by_type = agp_generic_free_by_type,
	.agp_alloc_page = agp_generic_alloc_page,
	.agp_destroy_page = agp_generic_destroy_page,
	.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};

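/*
 * Probe: the AGP registers live on function 1 of the host bridge, so look
 * that device up first, filter out chipsets this driver cannot handle,
 * work out whether the aperture and MMIO BARs are 32- or 64-bit to pick
 * the right config-space offsets, then allocate and register the bridge.
 */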
static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
					   const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	struct pci_dev *bridge_dev;
	u32 temp, temp2;
	u8 cap_ptr = 0;

	/* Everything is on func 1 here so we are hardcoding function one */
	bridge_dev = pci_find_slot((unsigned int)pdev->bus->number,
				   PCI_DEVFN(0, 1));
	if (!bridge_dev) {
		printk(KERN_INFO PFX "Detected a Serverworks chipset "
		       "but could not find the secondary device.\n");
		return -ENODEV;
	}

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);

	switch (pdev->device) {
	case 0x0006:
		printk(KERN_ERR PFX "ServerWorks CNB20HE is unsupported due to lack of documentation.\n");
		return -ENODEV;

	case PCI_DEVICE_ID_SERVERWORKS_HE:
	case PCI_DEVICE_ID_SERVERWORKS_LE:
	case 0x0007:
		break;

	default:
		if (cap_ptr)
			printk(KERN_ERR PFX "Unsupported Serverworks chipset "
			       "(device id: %04x)\n", pdev->device);
		return -ENODEV;
	}

	serverworks_private.svrwrks_dev = bridge_dev;
	serverworks_private.gart_addr_ofs = 0x10;

	pci_read_config_dword(pdev, SVWRKS_APSIZE, &temp);
	if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2);
		if (temp2 != 0) {
			printk(KERN_INFO PFX "Detected 64 bit aperture address, "
			       "but top bits are not zero.  Disabling AGP.\n");
			return -ENODEV;
		}
		serverworks_private.mm_addr_ofs = 0x18;
	} else
		serverworks_private.mm_addr_ofs = 0x14;

	pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs, &temp);
	if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		pci_read_config_dword(pdev,
				      serverworks_private.mm_addr_ofs + 4, &temp2);
		if (temp2 != 0) {
			printk(KERN_INFO PFX "Detected 64 bit MMIO address, "
			       "but top bits are not zero.  Disabling AGP.\n");
			return -ENODEV;
		}
	}

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->driver = &sworks_driver;
	bridge->dev_private_data = &serverworks_private;
	bridge->dev = pdev;

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}

static void __devexit agp_serverworks_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);
}

static struct pci_device_id agp_serverworks_pci_table[] = {
	{
	.class = (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask = ~0,
	.vendor = PCI_VENDOR_ID_SERVERWORKS,
	.device = PCI_ANY_ID,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	},
	{ }
};

MODULE_DEVICE_TABLE(pci, agp_serverworks_pci_table);

static struct pci_driver agp_serverworks_pci_driver = {
	.name = "agpgart-serverworks",
	.id_table = agp_serverworks_pci_table,
	.probe = agp_serverworks_probe,
	.remove = agp_serverworks_remove,
};

static int __init agp_serverworks_init(void)
{
	if (agp_off)
		return -EINVAL;
	return pci_register_driver(&agp_serverworks_pci_driver);
}

static void __exit agp_serverworks_cleanup(void)
{
	pci_unregister_driver(&agp_serverworks_pci_driver);
}

module_init(agp_serverworks_init);
module_exit(agp_serverworks_cleanup);

MODULE_LICENSE("GPL and additional rights");