/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/iommu-helper.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>

#define DBG(...)

#ifdef CONFIG_IOMMU_VMERGE
static int novmerge = 0;
#else
static int novmerge = 1;
#endif

static int protect4gb = 1;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
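
/* Handle the "protect4gb=on/off" kernel command line option. */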
static int __init setup_protect4gb(char *str)
{
	if (strcmp(str, "on") == 0)
		protect4gb = 1;
	else if (strcmp(str, "off") == 0)
		protect4gb = 0;

	return 1;
}
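
/* Handle the "iommu=novmerge" / "iommu=vmerge" kernel command line options. */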
static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("protect4gb=", setup_protect4gb);
__setup("iommu=", setup_iommu);
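
/*
 * Allocate a contiguous run of IOMMU pages from the table's allocation
 * bitmap. Returns the starting entry number, or DMA_ERROR_CODE if no
 * suitable range could be found.
 */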
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;

	align_mask = 0xffffffffffffffffl >> (64 - align_order);

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

	/* Use only half of the table for small allocs (15 pages or less) */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0.
		 */
		if ((start & mask) >= limit || pass > 0)
			start = 0;
		else
			start &= mask;
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IOMMU_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

	n = iommu_area_alloc(tbl->it_map, limit, start, npages,
			     tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
			     align_mask);
	if (n == -1) {
		if (likely(pass < 2)) {
			/* First failure, just rescan the half of the table.
			 * Second failure, rescan the other half of the table.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
		                ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}
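
/*
 * Allocate IOMMU entries for the buffer at @page and program them into
 * the hardware TCE table. Returns the DMA address of the new mapping,
 * or DMA_ERROR_CODE on failure.
 */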
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      struct dma_attrs *attrs)
{
	unsigned long entry, flags;
	dma_addr_t ret = DMA_ERROR_CODE;
	int build_fail;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE)) {
		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = ppc_md.tce_build(tbl, entry, npages,
	                              (unsigned long)page & IOMMU_PAGE_MASK,
	                              direction, attrs);

	/* ppc_md.tce_build() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_ERROR_CODE. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);

		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}
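
/*
 * Clear the TCEs and release the allocation bitmap bits for a mapping.
 * The caller must already hold tbl->it_lock.
 */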
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr = 0x%lx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable = 0x%lx\n", (u64)tbl);
			printk(KERN_INFO "\tbus# = 0x%lx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize = 0x%lx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff = 0x%lx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex = 0x%lx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}
		return;
	}

	ppc_md.tce_free(tbl, entry, npages);
	iommu_area_free(tbl->it_map, free_entry, npages);
}
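
/*
 * Locked wrapper around __iommu_free() that also flushes the TCE cache
 * on platforms that need it.
 */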
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		unsigned int npages)
{
	unsigned long flags;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}
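
/*
 * Map a scatterlist for DMA. Adjacent entries whose DMA addresses turn
 * out to be contiguous may be merged into a single DMA segment (unless
 * virtual merging is disabled). Returns the number of DMA segments
 * produced, or 0 on failure.
 */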
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		 struct scatterlist *sglist, int nelems,
		 unsigned long mask, enum dma_data_direction direction,
		 struct dma_attrs *attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> IOMMU_PAGE_SHIFT, align);

		DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
				       " npages %lx\n", tbl, vaddr, npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = ppc_md.tce_build(tbl, entry, npages,
		                              vaddr & IOMMU_PAGE_MASK,
		                              direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG(" - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("   can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("   merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG(" - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG(" - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
	return 0;
}
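
/*
 * Undo a mapping created by iommu_map_sg(), freeing the IOMMU entries
 * for each DMA segment in the list.
 */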
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE);
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

/*
 * Build an iommu_table structure. This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;

	/* Set aside 1/4 of the table for large allocations. */
	tbl->it_halfpoint = tbl->it_size * 3 / 4;

	/* number of bytes needed for the bitmap */
	sz = (tbl->it_size + 7) >> 3;

	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	tbl->it_hint = 0;
	tbl->it_largehint = tbl->it_halfpoint;
	spin_lock_init(&tbl->it_lock);

#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
		unsigned long index;
		unsigned long tceval;
		unsigned long tcecount = 0;

		/*
		 * Reserve the existing mappings left by the first kernel.
		 */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}
		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; ");
			printk(KERN_WARNING "freeing %d entries for the kdump boot\n",
				KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
				index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#else
	/* Clear the hardware table in case firmware left allocations in it */
	ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
#endif

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}
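
/*
 * Release an iommu_table: warn if the bitmap still shows live entries,
 * then free the allocation bitmap and the table structure itself.
 */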
void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
	unsigned long bitmap_sz, i;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __func__,
				node_name);
		return;
	}

	/* verify that table contains no entries */
	/* it_size is in entries, and we're examining 64 at a time */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
				__func__, node_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address of the buffer
 * passed here is the kernel (virtual) address of the buffer.  The buffer
 * need not be page aligned, the dma_addr_t returned will point to the same
 * byte within the page as vaddr.
 */
dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
			    void *vaddr, size_t size, unsigned long mask,
			    enum dma_data_direction direction, struct dma_attrs *attrs)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);

	if (tbl) {
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, align,
					 attrs);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit()) {
				printk(KERN_INFO "iommu_alloc failed, "
						"tbl %p vaddr %p npages %d\n",
						tbl, vaddr, npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	}

	return dma_handle;
}
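
/*
 * Tear down a mapping created by iommu_map_single().
 */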
void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size, dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		printk("iommu_alloc_consistent size too large: 0x%lx\n", size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> IOMMU_PAGE_SHIFT;
	io_order = get_iommu_order(size);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}
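
/*
 * Free a buffer obtained from iommu_alloc_coherent(): unmap the TCEs and
 * release the underlying pages.
 */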
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> IOMMU_PAGE_SHIFT;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}