/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"
#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)
#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})

#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))
/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu *iommu)
{
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}
#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}
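/* Illustrative note (not part of the original file): a stale entry keeps
 * IOPTE_VALID set but points at the dummy page, so the unmap paths below
 * scrub a range by rewriting PTEs rather than by clearing valid bits, e.g.:
 *
 *	for (i = 0; i < npages; i++)
 *		iopte_make_dummy(iommu, base + i);
 *
 * and IOPTE_IS_DUMMY(iommu, iopte) tells a live mapping from a scrubbed one.
 */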
/* Based almost entirely upon the ppc64 iommu allocator.  If you use the 'handle'
 * facility it must all be done in one pass while under the iommu lock.
 *
 * On sun4u platforms, we only flush the IOMMU once every time we've passed
 * over the entire page table doing allocations.  Therefore we only ever advance
 * the hint and cannot backtrack it.
 */
unsigned long iommu_range_alloc(struct device *dev,
				struct iommu *iommu,
				unsigned long npages,
				unsigned long *handle)
{
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_arena *arena = &iommu->arena;
	int pass = 0;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = arena->hint;

	limit = arena->limit;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the beginning and flush.
	 */
	if (start >= limit) {
		start = 0;
		if (iommu->flush_all)
			iommu->flush_all(iommu);
	}

 again:
	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IO_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);

	n = iommu_area_alloc(arena->map, limit, start, npages, 0,
			     boundary_size >> IO_PAGE_SHIFT, 0);
	if (n == -1) {
		if (likely(pass < 1)) {
			/* First failure, rescan from the beginning. */
			start = 0;
			if (iommu->flush_all)
				iommu->flush_all(iommu);
			pass++;
			goto again;
		} else {
			/* Second failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	arena->hint = end;

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}
void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long entry;

	entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;

	iommu_area_free(arena->map, entry, npages);
}
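/* Illustrative sketch (not part of the original file): a multi-segment
 * caller such as dma_4u_map_sg() below makes all of its allocations in one
 * pass under iommu->lock, threading the 'handle' hint through each call so
 * consecutive segments pack together; 'map_failed' is a hypothetical label:
 *
 *	unsigned long handle = 0, entry;
 *
 *	spin_lock_irqsave(&iommu->lock, flags);
 *	entry = iommu_range_alloc(dev, iommu, npages, &handle);
 *	if (entry == DMA_ERROR_CODE)
 *		goto map_failed;
 *	...
 *	spin_unlock_irqrestore(&iommu->lock, flags);
 *
 * A partial mapping is undone with iommu_range_free(iommu, dma_addr, npages).
 */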
int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask)
{
	unsigned long i, tsbbase, order, sz, num_tsb_entries;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu->arena.limit = num_tsb_entries;

	if (tlb_type != hypervisor)
		iommu->flush_all = iommu_flushall;

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	iommu->dummy_page = get_zeroed_page(GFP_KERNEL);
	if (!iommu->dummy_page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself. */
	order = get_order(tsbsize);
	tsbbase = __get_free_pages(GFP_KERNEL, order);
	if (!tsbbase) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)tsbbase;

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->arena.map);
	iommu->arena.map = NULL;

	return -ENOMEM;
}
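/* Illustrative sketch (not part of the original file): a bus controller
 * driver brings up its IOMMU roughly as below; the TSB size, DVMA base and
 * mask are hypothetical example values, not taken from any real controller:
 *
 *	err = iommu_table_init(iommu, 128 * 1024,	// 128K TSB, 16K entries
 *			       0xc0000000,		// DVMA base offset
 *			       0xffffffff);		// 32-bit DMA address mask
 *	if (err)
 *		return err;
 */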
static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
				    unsigned long npages)
{
	unsigned long entry;

	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	if (unlikely(entry == DMA_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}
static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int sz = IOMMU_NUM_CTXS - lowest;
	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);

	if (unlikely(n == sz)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}
static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, first_page;
	void *ret;
	int npages;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	first_page = __get_free_pages(gfp, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}
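/* Illustrative sketch (not part of the original file): drivers reach the
 * routine above through the generic DMA API; 'mydev' and the size are
 * hypothetical:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(&mydev->dev, 8192, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&mydev->dev, 8192, ring, ring_dma);
 */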
static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	iopte = iommu->page_table +
		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
				    enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(dev, iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;
}
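/* Illustrative sketch (not part of the original file): the streaming
 * mapping as seen from a driver; 'mydev', 'buf' and 'len' are hypothetical,
 * and the error check follows the DMA API of this kernel era:
 *
 *	dma_addr_t handle = dma_map_single(&mydev->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -ENOMEM;
 *	... hand 'handle' to the device, wait for completion ...
 *	dma_unmap_single(&mydev->dev, handle, len, DMA_TO_DEVICE);
 */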
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;
		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val))
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%lx] ctx[%lx]\n",
			       val, ctx);
	} else {
		unsigned long i;

		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}
static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
				size_t sz, enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_range_free(iommu, bus_addr, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot, ctx;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	int outcount, incount, i;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;
	if (nelems == 0 || !iommu)
		return 0;

	spin_lock_irqsave(&iommu->lock, flags);

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	if (strbuf->strbuf_enabled)
		prot = IOPTE_STREAMING(ctx);
	else
		prot = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		prot |= IOPTE_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, slen;
		iopte_t *base;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		base = iommu->page_table + entry;

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			iopte_val(*base) = prot | paddr;
			base++;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages, entry, j;
			iopte_t *base;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length);
			iommu_range_free(iommu, vaddr, npages);

			entry = (vaddr - iommu->page_table_map_base)
				>> IO_PAGE_SHIFT;
			base = iommu->page_table + entry;

			for (j = 0; j < npages; j++)
				iopte_make_dummy(iommu, base + j);

			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
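/* Illustrative sketch (not part of the original file): callers walk only
 * the (possibly merged) segments returned by dma_map_sg(); 'mydev', 'sgl',
 * 'nents' and program_hw_entry() are hypothetical:
 *
 *	int count = dma_map_sg(&mydev->dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, count, i)
 *		program_hw_entry(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(&mydev->dev, sgl, nents, DMA_FROM_DEVICE);
 *
 * Note the device is programmed with 'count' segments while the unmap is
 * made with the original 'nents'.
 */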
/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
	unsigned long ctx = 0;

	if (iommu->iommu_ctxflush) {
		iopte_t *base;
		u32 bus_addr;

		bus_addr = sg->dma_address & IO_PAGE_MASK;
		base = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
	}
	return ctx;
}
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	unsigned long flags, ctx;
	struct scatterlist *sg;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	ctx = fetch_sg_ctx(iommu, sglist);

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;
		iopte_t *base;
		int i;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base)
			 >> IO_PAGE_SHIFT);
		base = iommu->page_table + entry;

		dma_handle &= IO_PAGE_MASK;
		if (strbuf->strbuf_enabled)
			strbuf_flush(strbuf, iommu, dma_handle, ctx,
				     npages, direction);

		for (i = 0; i < npages; i++)
			iopte_make_dummy(iommu, base + i);

		sg = sg_next(sg);
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
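/* Illustrative sketch (not part of the original file): a driver that wants
 * to inspect device-written data before unmapping syncs it back first;
 * 'mydev', 'handle' and 'len' are hypothetical:
 *
 *	dma_sync_single_for_cpu(&mydev->dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the buffer ...
 */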
static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
const struct dma_ops sun4u_dma_ops = {
	.alloc_coherent		= dma_4u_alloc_coherent,
	.free_coherent		= dma_4u_free_coherent,
	.map_single		= dma_4u_map_single,
	.unmap_single		= dma_4u_unmap_single,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
};

const struct dma_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);
int dma_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	u64 dma_addr_mask = iommu->dma_addr_mask;

	if (device_mask >= (1UL << 32UL))
		return 0;

	if ((device_mask & dma_addr_mask) == dma_addr_mask)
		return 1;

#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_dma_supported(to_pci_dev(dev), device_mask);
#endif

	return 0;
}
EXPORT_SYMBOL(dma_supported);
int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
#endif

	return -EINVAL;
}
EXPORT_SYMBOL(dma_set_mask);
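/* Illustrative sketch (not part of the original file): a PCI driver's probe
 * routine negotiates its DMA mask through the hooks above; 'pdev' is
 * hypothetical:
 *
 *	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
 *		return -ENODEV;
 */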