/*
 * iommu.c: IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/scatterlist.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbus.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>
/*
 * This can be sized dynamically, but we will do this
 * only when we have guidance about actual I/O pressures.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1 << 6) */
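/*
 * Editorial sanity check on the geometry above, assuming the usual
 * sparc32 4 KB PAGE_SIZE: 256 MB / 4 KB = 64K IOPTEs; at 4 bytes each
 * that is a 256 KB table, i.e. 64 pages, hence the order-6 (1 << 6
 * pages) allocation used in iommu_init() below.
 */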
/* srmmu.c */
extern int viking_mxcc_present;
BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
extern int flush_page_for_dma_global;
static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */

#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
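/*
 * Illustrative example, not part of the driver: the IOPTE page field
 * holds paddr >> 4, and with 4 KB pages pfn << 8 == (pfn << 12) >> 4,
 * which is why MKIOPTE shifts the pfn left by 8.  A typical call:
 *
 *	iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
 *
 * The IOPTE_WAZ ("write as zero") bits are masked off last so that
 * reserved bits never reach the hardware.
 */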
void __init iommu_init(struct device_node *parent, struct sbus_bus *sbus)
{
	struct of_device *parent_op, *op;
	struct iommu_struct *iommu;
	unsigned int impl, vers;
	unsigned long *bitmap;
	unsigned long tmp;

	parent_op = of_find_device_by_node(parent);
	if (!parent_op) {
		prom_printf("Unable to find IOMMU of_device\n");
		prom_halt();
	}
	op = of_find_device_by_node(sbus->ofdev.node);
	if (!op) {
		prom_printf("Unable to find SBUS of_device\n");
		prom_halt();
	}

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}

	iommu->regs = of_ioremap(&parent_op->resource[0], 0, PAGE_SIZE * 3,
				 "iommu_regs");
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}
	impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
	vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
	tmp = iommu->regs->control;
	tmp &= ~(IOMMU_CTRL_RNGE);
	tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	iommu->regs->control = tmp;
	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/* Allocate IOMMU page table */
	/* Stupid alignment constraints give me a headache.
	   We need 256K or 512K or 1M or 2M area aligned to
	   its size and current gfp will fortunately give
	   it to us. */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%08x]\n",
			    IOMMU_NPTES*sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();
	iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4;
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES>>3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;

	printk("IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

	sbus->ofdev.dev.archdata.iommu = iommu;
	op->dev.archdata.iommu = iommu;
}
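/*
 * Editorial note: the base register holds the table's physical address
 * shifted right by 4, the same encoding as the IOPTE page field.  For
 * illustration, a table at (hypothetical) physical 0x00fc0000 would be
 * programmed as iommu->regs->base = 0x000fc000.
 */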
/* This begs to be btfixup-ed by srmmu. */
/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte;
	end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
	start &= PAGE_MASK;
	if (viking_mxcc_present) {
		while (start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while (start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while (start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}
static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	iopte_t *iopte, *iopte0;
	unsigned int busa, busa0;
	int i;

	/* page color = pfn of page */
	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		page++;
	}

	iommu_flush_iotlb(iopte0, npages);

	return busa0;
}
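/*
 * Usage sketch (illustrative only): mapping two contiguous struct pages
 * yields a bus address inside the 256 MB DVMA window:
 *
 *	u32 busa = iommu_get_one(dev, page, 2);
 *	// busa == iommu->start + (ioptex << PAGE_SHIFT); covers 2 pages
 */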
static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
{
	unsigned long off;
	int npages;
	struct page *page;
	u32 busa;

	off = (unsigned long)vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
	busa = iommu_get_one(dev, page, npages);
	return busa + off;
}
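/*
 * Worked example (editorial): for vaddr ending in 0x123 and len 0x2000,
 * off = 0x123 and npages = (0x123 + 0x2000 + 0xfff) >> 12 = 3, i.e. a
 * two-page transfer that straddles three pages once the starting
 * offset is taken into account.
 */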
static __u32 iommu_get_scsi_one_noflush(struct device *dev, char *vaddr, unsigned long len)
{
	return iommu_get_scsi_one(dev, vaddr, len);
}
static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
{
	flush_page_for_dma(0);
	return iommu_get_scsi_one(dev, vaddr, len);
}
static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned long len)
{
	unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;

	while (page < ((unsigned long)(vaddr + len))) {
		flush_page_for_dma(page);
		page += PAGE_SIZE;
	}
	return iommu_get_scsi_one(dev, vaddr, len);
}
static void iommu_get_scsi_sgl_noflush(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	while (sz != 0) {
		--sz;
		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		sg->dvma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dvma_length = (__u32) sg->length;
		sg = sg_next(sg);
	}
}
static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	flush_page_for_dma(0);
	while (sz != 0) {
		--sz;
		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		sg->dvma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dvma_length = (__u32) sg->length;
		sg = sg_next(sg);
	}
}
static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz)
{
	unsigned long page, oldpage = 0;
	int n, i;

	while (sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;

		/*
		 * We expect unmapped highmem pages to be not in the cache.
		 * XXX Is this a good assumption?
		 * XXX What if someone else unmaps it here and races us?
		 */
		if ((page = (unsigned long) page_address(sg_page(sg))) != 0) {
			for (i = 0; i < n; i++) {
				if (page != oldpage) {	/* Already flushed? */
					flush_page_for_dma(page);
					oldpage = page;
				}
				page += PAGE_SIZE;
			}
		}

		sg->dvma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dvma_length = (__u32) sg->length;
		sg = sg_next(sg);
	}
}
static void iommu_release_one(struct device *dev, u32 busa, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	int i;

	BUG_ON(busa < iommu->start);
	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}
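/*
 * Editorial note: this is the exact inverse of iommu_get_one() -- the
 * IOPTE index is recovered from the bus address, each entry is cleared
 * and its IOTLB entry invalidated, and the range is handed back to the
 * usemap allocator, e.g.
 *
 *	busa = iommu_get_one(dev, page, n);
 *	... DMA ...
 *	iommu_release_one(dev, busa, n);
 */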
static void iommu_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
{
	unsigned long off;
	int npages;

	off = vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	iommu_release_one(dev, vaddr & PAGE_MASK, npages);
}
static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	while (sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		iommu_release_one(dev, sg->dvma_address & PAGE_MASK, n);
		sg->dvma_address = 0x21212121;
		sg = sg_next(sg);
	}
}
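/*
 * Editorial note: 0x21212121 is "!!!!" in ASCII -- a poison value, not
 * a valid DVMA address, so a driver that touches an sg entry after
 * release fails recognizably instead of silently reusing a stale
 * mapping.
 */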
static int iommu_map_dma_area(dma_addr_t *pba, unsigned long va,
    unsigned long addr, int len)
{
	unsigned long page, end;
	struct iommu_struct *iommu = sbus_root->ofdev.dev.archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
		addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while (addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface. Some cpus are
	 *        completely not I/O DMA coherent, and some have
	 *        virtually indexed caches. The driver DMA flushing
	 *        methods handle the former case, but here during
	 *        IOMMU page table modifications, and usage of non-cacheable
	 *        cpu mappings of pages potentially in the cpu caches, we have
	 *        to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*pba = iommu->start + (ioptex << PAGE_SHIFT);
	return 0;
}
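/*
 * Usage sketch (illustrative, values hypothetical): a caller with
 * page-aligned arguments obtains a consistent DVMA mapping like
 *
 *	dma_addr_t dvma;
 *	iommu_map_dma_area(&dvma, va, addr, PAGE_SIZE * 4);
 *
 * where va is the cacheable kernel address of the backing pages and
 * addr a separate virtual range that receives the dvma_prot alias
 * mapping; afterwards dvma holds the bus address to hand to the device
 * (the function currently always returns 0).
 */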
static void iommu_unmap_dma_area(unsigned long busa, int len)
{
	struct iommu_struct *iommu = sbus_root->ofdev.dev.archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	unsigned long end;
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
}
static struct page *iommu_translate_dvma(unsigned long busa)
{
	struct iommu_struct *iommu = sbus_root->ofdev.dev.archdata.iommu;
	iopte_t *iopte = iommu->page_table;

	iopte += ((busa - iommu->start) >> PAGE_SHIFT);
	return pfn_to_page((iopte_val(*iopte) & IOPTE_PAGE) >> (PAGE_SHIFT-4));
}
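/*
 * Editorial note on the shift: MKIOPTE stored pfn << 8, i.e. paddr >> 4,
 * so with the usual PAGE_SHIFT of 12 the inverse is
 * >> (PAGE_SHIFT - 4) == >> 8, recovering the pfn for pfn_to_page().
 */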
static char *iommu_lockarea(char *vaddr, unsigned long len)
{
	return vaddr;
}

static void iommu_unlockarea(char *vaddr, unsigned long len)
{
}
void __init ld_mmu_iommu(void)
{
	viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
	BTFIXUPSET_CALL(mmu_lockarea, iommu_lockarea, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_unlockarea, iommu_unlockarea, BTFIXUPCALL_NOP);

	if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
		/* IO coherent chip */
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_noflush, BTFIXUPCALL_RETO0);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_noflush, BTFIXUPCALL_NORM);
	} else if (flush_page_for_dma_global) {
		/* flush_page_for_dma flushes everything, no matter which page it is */
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_gflush, BTFIXUPCALL_NORM);
	} else {
		/* flush_page_for_dma flushes one page at a time */
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_pflush, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_pflush, BTFIXUPCALL_NORM);
	}
	BTFIXUPSET_CALL(mmu_release_scsi_one, iommu_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iommu_release_scsi_sgl, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, iommu_unmap_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_translate_dvma, iommu_translate_dvma, BTFIXUPCALL_NORM);

	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}
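/*
 * Editorial summary: the mmu_get_scsi_* hooks above are chosen by cache
 * behavior -- noflush for I/O-coherent CPUs, gflush where a single call
 * flushes the whole cache, pflush where every page must be flushed
 * individually; HyperSparc additionally constrains DVMA page color via
 * usemap.num_colors in iommu_init().
 */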