/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"

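/* Streaming cache (STC) helpers: compute the per-context tag match
 * register address, and initialize/test the flush-flag word that the
 * hardware updates when a streaming buffer flush has completed.
 */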
#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)

#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})

#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))

/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu *iommu)
{
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag = iommu->iommu_tags;
		int entry;

		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}

#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

/* Based almost entirely upon the ppc64 iommu allocator.  If you use the
 * 'handle' facility it must all be done in one pass while under the
 * iommu lock.
 *
 * On sun4u platforms, we only flush the IOMMU once every time we've passed
 * over the entire page table doing allocations.  Therefore we only ever
 * advance the hint and cannot backtrack it.
 */
unsigned long iommu_range_alloc(struct device *dev,
				struct iommu *iommu,
				unsigned long npages,
				unsigned long *handle)
{
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_arena *arena = &iommu->arena;
	int pass = 0;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = arena->hint;

	limit = arena->limit;

	/* The case below can happen if we have a small segment appended
	 * to a large one, or when the previous alloc was at the very end of
	 * the available space.  If so, go back to the beginning and flush.
	 */
	if (start >= limit) {
		start = 0;
		if (iommu->flush_all)
			iommu->flush_all(iommu);
	}

 again:
	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IO_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);

	n = iommu_area_alloc(arena->map, limit, start, npages,
			     iommu->page_table_map_base >> IO_PAGE_SHIFT,
			     boundary_size >> IO_PAGE_SHIFT, 0);
	if (n == -1) {
		if (likely(pass < 1)) {
			/* First failure, rescan from the beginning. */
			start = 0;
			if (iommu->flush_all)
				iommu->flush_all(iommu);
			pass++;
			goto again;
		} else {
			/* Second failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;
	arena->hint = end;

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long entry;

	entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
	iommu_area_free(arena->map, entry, npages);
}

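/* One-time setup of an IOMMU instance: initialize the software state,
 * allocate the arena bitmap and the dummy page, then allocate the IOMMU
 * page table and point every entry at the dummy page.
 */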
int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask)
{
	unsigned long i, tsbbase, order, sz, num_tsb_entries;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		printk(KERN_ERR "IOMMU: Error, kzalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu->arena.limit = num_tsb_entries;

	if (tlb_type != hypervisor)
		iommu->flush_all = iommu_flushall;

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	iommu->dummy_page = get_zeroed_page(GFP_KERNEL);
	if (!iommu->dummy_page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself. */
	order = get_order(tsbsize);
	tsbbase = __get_free_pages(GFP_KERNEL, order);
	if (!tsbbase) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)tsbbase;

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->arena.map);
	iommu->arena.map = NULL;

	return -ENOMEM;
}

static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
				    unsigned long npages)
{
	unsigned long entry;

	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	if (unlikely(entry == DMA_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}

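/* Streaming buffer context allocation.  Context number zero means "no
 * context"; the search starts at the lowest context freed so far and
 * wraps around once before giving up.
 */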
static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int sz = IOMMU_NUM_CTXS - lowest;
	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);

	if (unlikely(n == sz)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}

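/* Consistent (coherent) DMA allocation: grab zeroed pages and map them
 * with cacheable, non-streaming IOPTEs so no explicit syncs are needed.
 */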
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, first_page;
	void *ret;
	int npages;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	first_page = __get_free_pages(gfp, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}

static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	iopte = iommu->page_table +
		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

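/* Map a single physically contiguous buffer for streaming DMA.  The
 * returned bus address preserves the byte offset within the first page.
 */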
static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
				    enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(dev, iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;
}

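/* Flush the streaming cache for a range of IO addresses.  Use a
 * context-based flush when both the streaming buffer and the IOMMU
 * support it, otherwise flush page by page; then wait on the flush-flag
 * word unless the mapping was DMA_TO_DEVICE only.
 */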
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (unlikely(val & 0x1))
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%lx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}

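/* Tear down a single streaming mapping: flush the streaming cache if
 * enabled, point the IOPTEs back at the dummy page, and release the
 * IOMMU entries and the context.
 */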
static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
				size_t sz, enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_range_free(iommu, bus_addr, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

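/* Map a scatterlist.  The 'handle' facility lets the whole list be
 * allocated in one pass under the IOMMU lock, and adjacent entries are
 * merged into one segment when their DMA addresses are contiguous and
 * neither the device's max segment size nor its segment boundary would
 * be violated.
 */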
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot, ctx;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct strbuf *strbuf;
	struct iommu *iommu;
	unsigned long base_shift;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;
	if (nelems == 0 || !iommu)
		return 0;

	spin_lock_irqsave(&iommu->lock, flags);

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	if (strbuf->strbuf_enabled)
		prot = IOPTE_STREAMING(ctx);
	else
		prot = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		prot |= IOPTE_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;
		iopte_t *base;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		base = iommu->page_table + entry;

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			iopte_val(*base) = prot | paddr;
			base++;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages, entry, j;
			iopte_t *base;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length);
			iommu_range_free(iommu, vaddr, npages);

			entry = (vaddr - iommu->page_table_map_base)
				>> IO_PAGE_SHIFT;
			base = iommu->page_table + entry;

			for (j = 0; j < npages; j++)
				iopte_make_dummy(iommu, base + j);

			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return DMA_ERROR_CODE;
}

/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
	unsigned long ctx = 0;

	if (iommu->iommu_ctxflush) {
		iopte_t *base;
		u32 bus_addr;

		bus_addr = sg->dma_address & IO_PAGE_MASK;
		base = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
	}
	return ctx;
}

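/* Unmap a scatterlist: for each mapped segment, flush the streaming
 * cache if enabled, restore the dummy IOPTEs and free the IOMMU entries,
 * then release the context recorded at map time.
 */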
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	unsigned long flags, ctx;
	struct scatterlist *sg;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	ctx = fetch_sg_ctx(iommu, sglist);

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;
		iopte_t *base;
		int i;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base)
			 >> IO_PAGE_SHIFT);
		base = iommu->page_table + entry;

		dma_handle &= IO_PAGE_MASK;
		if (strbuf->strbuf_enabled)
			strbuf_flush(strbuf, iommu, dma_handle, ctx,
				     npages, direction);

		for (i = 0; i < npages; i++)
			iopte_make_dummy(iommu, base + i);

		sg = sg_next(sg);
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

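/* Make a single streaming mapping consistent for the CPU by kicking any
 * data for it out of the streaming cache.
 */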
static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

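/* The sun4u DMA operation vector.  The exported dma_ops pointer defaults
 * to it and may be repointed by platforms with a different IOMMU.
 */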
static const struct dma_ops sun4u_dma_ops = {
	.alloc_coherent = dma_4u_alloc_coherent,
	.free_coherent = dma_4u_free_coherent,
	.map_single = dma_4u_map_single,
	.unmap_single = dma_4u_unmap_single,
	.map_sg = dma_4u_map_sg,
	.unmap_sg = dma_4u_unmap_sg,
	.sync_single_for_cpu = dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
};

const struct dma_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);

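/* Report whether a device's DMA addressing mask can be supported: masks
 * of 32 bits or wider are rejected outright, otherwise the mask must
 * cover the IOMMU's dma_addr_mask, with PCI devices deferring to the
 * PCI layer.
 */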
int dma_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	u64 dma_addr_mask = iommu->dma_addr_mask;

	if (device_mask >= (1UL << 32UL))
		return 0;

	if ((device_mask & dma_addr_mask) == dma_addr_mask)
		return 1;

#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_dma_supported(to_pci_dev(dev), device_mask);
#endif

	return 0;
}
EXPORT_SYMBOL(dma_supported);

int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
#endif
	return -EINVAL;
}
EXPORT_SYMBOL(dma_set_mask);