/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>

/* Debug output is compiled out by default */
#define DBG(...)
#ifdef CONFIG_IOMMU_VMERGE
static int novmerge = 0;
#else
static int novmerge = 1;
#endif
static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);
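/*
 * Usage note (added, not from the original source): the handler above
 * means virtual merging can be toggled at boot time regardless of the
 * CONFIG_IOMMU_VMERGE default, by passing "iommu=novmerge" or
 * "iommu=vmerge" on the kernel command line.
 */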
static unsigned long iommu_range_alloc(struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, i, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;

	align_mask = 0xffffffffffffffffl >> (64 - align_order);
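	/*
	 * Illustrative note (added): align_order = 2 gives align_mask = 0x3,
	 * so the "(n + align_mask) & ~align_mask" step below rounds the
	 * candidate entry up to a multiple of 4 TCE pages.
	 */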
	/* This allocator was derived from x86_64's bit string search */

	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;
	/* Use only the part of the table below it_halfpoint for small
	 * allocs (15 pages or less); larger allocations start above it.
	 */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;
	/* The case below can happen if we have a small segment appended
	 * to a large one, or when the previous alloc was at the very end
	 * of the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0.
		 */
		if ((start & mask) >= limit || pass > 0)
			start = 0;
		else
			start &= mask;
	}

	n = find_next_zero_bit(tbl->it_map, limit, start);
	/* Align allocation */
	n = (n + align_mask) & ~align_mask;

	end = n + npages;

	if (unlikely(end >= limit)) {
		if (likely(pass < 2)) {
			/* First failure, just rescan the half of the table
			 * we were using; second failure, rescan the other
			 * half.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			return DMA_ERROR_CODE;
		}
	}
	for (i = n; i < end; i++)
		if (test_bit(i, tbl->it_map)) {
			start = i + 1;
			goto again;
		}

	for (i = n; i < end; i++)
		__set_bit(i, tbl->it_map);
	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
		               ~(tbl->it_blocksize - 1);
	}
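	/*
	 * Illustrative note (added): with it_blocksize = 16 and end = 37,
	 * the rounding above sets it_hint to 48, so the next small alloc
	 * starts on a fresh 16-entry block.
	 */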
	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}
static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
		unsigned int npages, enum dma_data_direction direction,
		unsigned long mask, unsigned int align_order)
{
	unsigned long entry, flags;
	dma_addr_t ret = DMA_ERROR_CODE;
	spin_lock_irqsave(&(tbl->it_lock), flags);

	entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE)) {
		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << PAGE_SHIFT;	/* Set the return dma address */
	/* Put the TCEs in the HW table */
	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & PAGE_MASK,
			 direction);

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long i;

	entry = dma_addr >> PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr = 0x%lx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable = 0x%lx\n", (u64)tbl);
			printk(KERN_INFO "\tbus# = 0x%lx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize = 0x%lx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff = 0x%lx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex = 0x%lx\n", (u64)tbl->it_index);
		}
		/* Don't touch the bitmap for an entry we don't own */
		return;
	}

	ppc_md.tce_free(tbl, entry, npages);

	for (i = 0; i < npages; i++)
		__clear_bit(free_entry+i, tbl->it_map);
}
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		unsigned int npages)
{
	unsigned long flags;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		struct scatterlist *sglist, int nelems,
		unsigned long mask, enum dma_data_direction direction)
{
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount;
	unsigned long handle;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);
	for (s = outs; nelems; nelems--, s++) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;

		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long)page_address(s->page) + s->offset;
		npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK);
		npages >>= PAGE_SHIFT;
		entry = iommu_range_alloc(tbl, npages, &handle, mask >> PAGE_SHIFT, 0);

		DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
				       " npages %lx\n", tbl, vaddr, npages);
			goto failure;
		}
		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << PAGE_SHIFT;
		dma_addr |= s->offset;

		DBG(" - %lx pages, entry: %lx, dma_addr: %lx\n",
		    npages, entry, dma_addr);

		/* Insert into HW table */
		ppc_md.tce_build(tbl, entry, npages, vaddr & PAGE_MASK, direction);
		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG(" - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++; outs++;
				DBG(" can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG(" merged, new len: %lx\n", outs->dma_length);
			}
		}
		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG(" - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG(" - dma next is: %lx\n", dma_next);
	}
	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs++;
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for (s = &sglist[0]; s <= outs; s++) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & PAGE_MASK;
			npages = (PAGE_ALIGN(s->dma_address + s->dma_length) - vaddr)
				>> PAGE_SHIFT;
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
	}
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
	return 0;
}
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction)
{
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sglist->dma_address;

		if (sglist->dma_length == 0)
			break;
		npages = (PAGE_ALIGN(dma_handle + sglist->dma_length)
			  - (dma_handle & PAGE_MASK)) >> PAGE_SHIFT;
		__iommu_free(tbl, dma_handle, npages);
		sglist++;
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}
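/*
 * Usage sketch (illustrative, not part of the original file): a
 * bus-specific dma_map_sg()/dma_unmap_sg() pair would typically wrap
 * the two routines above, with "tbl" and "dma_mask" coming from the
 * per-bus setup code:
 *
 *	int n = iommu_map_sg(dev, tbl, sglist, nelems, dma_mask, direction);
 *	if (n == 0)
 *		return 0;
 *	...
 *	iommu_unmap_sg(tbl, sglist, nelems, direction);
 */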
/*
 * Build an iommu_table structure.  This contains a bitmap which
 * is used to manage allocation of the TCE space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl)
{
	unsigned long sz;
	static int welcomed = 0;

	/* Set aside 1/4 of the table for large allocations. */
	tbl->it_halfpoint = tbl->it_size * 3 / 4;

	/* number of bytes needed for the bitmap */
	sz = (tbl->it_size + 7) >> 3;
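	/*
	 * Illustrative note (added): a 256 MB DMA window of 4 KB TCE pages
	 * gives it_size = 65536 entries, so the bitmap needs 8192 bytes and
	 * it_halfpoint = 49152, i.e. the top quarter of the window is
	 * reserved for allocations larger than 15 pages.
	 */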
	tbl->it_map = (unsigned long *)__get_free_pages(GFP_ATOMIC, get_order(sz));
	if (!tbl->it_map)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	memset(tbl->it_map, 0, sz);

	tbl->it_hint = 0;
	tbl->it_largehint = tbl->it_halfpoint;
	spin_lock_init(&tbl->it_lock);

	/* Clear the hardware table in case firmware left allocations in it */
	ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}
	return tbl;
}
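/*
 * Usage sketch (illustrative, not part of the original file): platform
 * code is expected to fill in the geometry of the TCE table before
 * handing it to iommu_init_table(), roughly:
 *
 *	tbl->it_busno     = bus_number;
 *	tbl->it_offset    = window_start >> PAGE_SHIFT;
 *	tbl->it_size      = window_size >> PAGE_SHIFT;
 *	tbl->it_index     = table_index;
 *	tbl->it_blocksize = 16;
 *	iommu_init_table(tbl);
 *
 * The right-hand side names are hypothetical; the real values come from
 * the firmware description of the DMA window.
 */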
void iommu_free_table(struct device_node *dn)
{
	struct pci_dn *pdn = dn->data;
	struct iommu_table *tbl = pdn->iommu_table;
	unsigned long bitmap_sz, i;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __FUNCTION__,
		       dn->full_name);
		return;
	}

	/* verify that table contains no entries */
	/* it_size is in entries, and we're examining 64 at a time */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
			       __FUNCTION__, dn->full_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}
/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address of the buffer
 * passed here is the kernel (virtual) address of the buffer.  The buffer
 * need not be page aligned, the dma_addr_t returned will point to the same
 * byte within the page as vaddr.
 */
dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
		size_t size, unsigned long mask,
		enum dma_data_direction direction)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	unsigned long uaddr;
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	uaddr = (unsigned long)vaddr;
	npages = PAGE_ALIGN(uaddr + size) - (uaddr & PAGE_MASK);
	npages >>= PAGE_SHIFT;
	dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
				 mask >> PAGE_SHIFT, 0);
	if (dma_handle == DMA_ERROR_CODE) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_alloc failed, "
			       "tbl %p vaddr %p npages %d\n",
			       tbl, vaddr, npages);
		}
	} else
		dma_handle |= (uaddr & ~PAGE_MASK);

	return dma_handle;
}
void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	iommu_free(tbl, dma_handle, (PAGE_ALIGN(dma_handle + size) -
			(dma_handle & PAGE_MASK)) >> PAGE_SHIFT);
}
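/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * streaming a kernel buffer to a device through a bus-specific
 * dma_map_single() wrapper ends up doing roughly:
 *
 *	dma = iommu_map_single(tbl, buf, len, dma_mask, DMA_TO_DEVICE);
 *	if (dma == DMA_ERROR_CODE)
 *		goto out_err;
 *	... program the device with "dma", wait for the transfer ...
 *	iommu_unmap_single(tbl, dma, len, DMA_TO_DEVICE);
 *
 * "tbl" and "dma_mask" are assumed to come from the per-bus setup code.
 */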
/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
		dma_addr_t *dma_handle, unsigned long mask, gfp_t flag)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int npages, order;

	size = PAGE_ALIGN(size);
	npages = size >> PAGE_SHIFT;
	order = get_order(size);

	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the TCE tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		printk("iommu_alloc_coherent size too large: 0x%lx\n", size);
		return NULL;
	}
	/* Alloc enough pages (and possibly more) */
	ret = (void *)__get_free_pages(flag, order);
	if (!ret)
		return NULL;
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL,
			      mask >> PAGE_SHIFT, order);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		ret = NULL;
	} else
		*dma_handle = mapping;

	return ret;
}
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
	unsigned int npages;

	size = PAGE_ALIGN(size);
	npages = size >> PAGE_SHIFT;
	iommu_free(tbl, dma_handle, npages);
	free_pages((unsigned long)vaddr, get_order(size));
}
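/*
 * Usage sketch (illustrative, not part of the original file): a
 * bus-specific dma_alloc_coherent() backend would pair the two
 * routines above roughly as follows, with "tbl" and "dma_mask"
 * supplied by the bus setup code:
 *
 *	void *cpu_addr = iommu_alloc_coherent(tbl, size, &dma, dma_mask,
 *					       GFP_ATOMIC);
 *	if (!cpu_addr)
 *		return NULL;
 *	... hand (cpu_addr, dma) to the device driver ...
 *	iommu_free_coherent(tbl, size, cpu_addr, dma);
 */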