/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"
#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)		printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)		printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0

#define ISA_DMA_MASK		0x00ffffff
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}
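/* A note on the pte encoding above: (paddr >> (PAGE_SHIFT-1)) | 1 is the
   same value as ((paddr >> PAGE_SHIFT) << 1) | 1, i.e. the page frame
   number packed one bit up with bit 0 used as the valid bit.  */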
static inline long
calc_npages(long bytes)
{
	return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}
struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;

#ifdef CONFIG_DISCONTIGMEM

	if (!NODE_DATA(nid) ||
	    (NULL == (arena = alloc_bootmem_node(NODE_DATA(nid),
						 sizeof(*arena))))) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena = alloc_bootmem(sizeof(*arena));
	}

	if (!NODE_DATA(nid) ||
	    (NULL == (arena->ptes = __alloc_bootmem_node(NODE_DATA(nid),
							 mem_size,
							 align,
							 0)))) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena->ptes = __alloc_bootmem(mem_size, align, 0);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = alloc_bootmem(sizeof(*arena));
	arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}
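/* Sizing note for iommu_arena_new_node() (illustrative arithmetic):
   each arena pte is an unsigned long (8 bytes on Alpha), so with 8 KB
   pages a 1 GB window needs (1 << 30) / (8192 / 8) = 1 MB of pte
   storage -- the mem_size computed above, which also becomes the
   minimum arena alignment.  */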
struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}
/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
		       long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;
	int pass = 0;
	unsigned long base;
	unsigned long boundary_size;

	base = arena->dma_base >> PAGE_SHIFT;
	if (dev) {
		boundary_size = dma_get_seg_boundary(dev) + 1;
		boundary_size >>= PAGE_SHIFT;
	} else {
		boundary_size = 1UL << (32 - PAGE_SHIFT);
	}

	/* Search forward for the first mask-aligned sequence of N free ptes */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = ALIGN(arena->next_entry, mask + 1);
	i = 0;

again:
	while (i < n && p+i < nent) {
		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
			p = ALIGN(p + 1, mask + 1);
			goto again;
		}

		if (ptes[p+i])
			p = ALIGN(p + i + 1, mask + 1), i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		if (pass < 1) {
			/*
			 * Reached the end.  Flush the TLB and restart
			 * the search from the beginning.
			 */
			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
			pass++;
			p = 0;
			i = 0;
			goto again;
		} else
			return -1;
	}

	/* Success. It's the responsibility of the caller to mark them
	   in use before releasing the lock */
	return p;
}
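/* Note on the search above: when no device is supplied the boundary
   defaults to 1UL << (32 - PAGE_SHIFT) pages, i.e. the whole 32-bit
   DMA space, so only callers that pass a real device get a tighter
   per-device limit from dma_get_seg_boundary().  */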
static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
		  unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(dev, arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, ie not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}
static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}

/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */
static int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask);
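/* "DAC" here refers to PCI dual-address-cycle (64-bit) addressing:
   when alpha_mv.pci_dac_offset is nonzero, a bus address above 4 GB
   can be formed as physical address + pci_dac_offset and handed to a
   64-bit capable device, bypassing both the direct-map window and the
   scatter-gather arena.  */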
/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */

static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;
	struct device *dev = pdev ? &pdev->dev : NULL;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window.  */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error. */
	if (! alpha_mv.mv_pci_tbi) {
		static int been_here = 0; /* Only print the message once. */
		if (!been_here) {
			printk(KERN_WARNING "pci_map_single: no HW sg\n");
			been_here = 1;
		}
		return 0;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = calc_npages((paddr & ~PAGE_MASK) + size);

	/* Force allocation to 64KB boundary for ISA bridges. */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return 0;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}
dma_addr_t
pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_single);
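/* Typical driver usage of the streaming API above (a hedged sketch,
   not code from this file; "dev", "buf" and "len" are illustrative):

	dma_addr_t bus = pci_map_single(dev, buf, len, PCI_DMA_TODEVICE);
	if (!bus)
		return -ENOMEM;
	... hand "bus" to the device, wait for it to finish ...
	pci_unmap_single(dev, bus, len, PCI_DMA_TODEVICE);

   Failure is signalled by a zero dma_addr_t, matching the "return 0"
   paths in pci_map_single_1().  */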
dma_addr_t
pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
	     size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_page);
/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

void
pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
		 int direction)
{
	unsigned long flags;
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx "
		       " base %lx size %x\n", dma_addr, arena->dma_base,
		       arena->size);
		return;
		BUG();
	}

	npages = calc_npages((dma_addr & ~PAGE_MASK) + size);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_unmap_single);
void
pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
	       size_t size, int direction)
{
	pci_unmap_single(pdev, dma_addr, size, direction);
}
EXPORT_SYMBOL(pci_unmap_page);
/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

void *
__pci_alloc_consistent(struct pci_dev *pdev, size_t size,
		       dma_addr_t *dma_addrp, gfp_t gfp)
{
	void *cpu_addr;
	long order = get_order(size);

	gfp &= ~GFP_DMA;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %p\n",
			__builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == 0) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit required mask and we
		   do not have iommu. Try again with GFP_DMA. */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}
EXPORT_SYMBOL(__pci_alloc_consistent);
/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

void
pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
		    dma_addr_t dma_addr)
{
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%x,%lx] from %p\n",
	      dma_addr, size, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_free_consistent);
/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergeable followers.  */

#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
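/* Worked example (illustrative, not from this file): take three entries
   A, B, C where B starts at the physical address immediately after A,
   and C is not physically adjacent but both the end of B and the start
   of C are page aligned.  Assuming the lengths stay under the device's
   max segment size, sg_classify() with virt_ok=1 leaves A as the leader
   with dma_address = 1 (a virtually mergeable run), B gets -1, C gets
   -2, and A->dma_length becomes length(A)+length(B)+length(C).  */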
static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
	    int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;
	unsigned int max_seg_size;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	/* we will not merge sg without device. */
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (leader_length + len > max_seg_size)
			goto new_segment;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
new_segment:
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}
/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */

	paddr &= ~PAGE_MASK;
	npages = calc_npages(paddr + size);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %lx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg++;
		}

		npages = calc_npages((paddr & ~PAGE_MASK) + size);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}
int
pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	   int direction)
{
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;
	struct device *dev;

	if (direction == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	dev = pdev ? &pdev->dev : NULL;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != 0;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, direction);
	return 0;
}
EXPORT_SYMBOL(pci_map_sg);
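/* Hedged usage sketch for the scatter-gather API above ("dev", "sgl",
   "n" and program_device() are illustrative, not from this file):

	int i, mapped = pci_map_sg(dev, sgl, n, PCI_DMA_FROMDEVICE);
	if (!mapped)
		return -ENOMEM;
	for (i = 0; i < mapped; i++)
		program_device(sg_dma_address(&sgl[i]), sg_dma_len(&sgl[i]));
	...
	pci_unmap_sg(dev, sgl, n, PCI_DMA_FROMDEVICE);

   pci_map_sg() may return fewer entries than it was handed (merged
   segments); pci_unmap_sg() is called with the original nents, not the
   returned count.  */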
/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

void
pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	     int direction)
{
	unsigned long flags;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma64_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%lx,%lx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%lx,%lx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%lx,%lx]\n",
		     sg - end + nents, addr, size);

		npages = calc_npages((addr & ~PAGE_MASK) + size);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}
EXPORT_SYMBOL(pci_unmap_sg);
/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

int
pci_dma_supported(struct pci_dev *pdev, u64 mask)
{
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}
EXPORT_SYMBOL(pci_dma_supported);
/*
 * AGP GART extensions to the IOMMU
 */
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind() */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}
int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first... */
	for(i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}
int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   unsigned long *physaddrs)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	for(j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for(i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(physaddrs[i]);

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}
int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for(i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}
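/* Expected call sequence for the GART helpers above (a summary of the
   code, not an additional API contract): iommu_reserve() stakes out a
   run of arena ptes and marks them IOMMU_RESERVED_PTE, iommu_bind()
   points reserved ptes at real pages, iommu_unbind() returns them to
   the reserved state, and iommu_release() verifies they are still
   reserved before handing them back via iommu_arena_free().  */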
/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */

static int
pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine. */
	DBGA("pci_dac_dma_supported %s from %p\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}
/* Helper for generic DMA-mapping functions. */

struct pci_dev *
alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev->bus == &pci_bus_type)
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise. */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then). */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations). */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff. */
	return NULL;
}
EXPORT_SYMBOL(alpha_gendev_to_pci);
int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask ||
	    !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);