/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"

#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)		printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)		printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif
#define DEBUG_NODIRECT 0

#define ISA_DMA_MASK		0x00ffffff

static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}
static inline long
calc_npages(long bytes)
{
	return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
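
/* Worked example (editor's illustration, not in the original): with
   Alpha's 8 KB pages (PAGE_SHIFT == 13), mk_iommu_pte(0x4000) is
   (0x4000 >> 12) | 1 == 0x5 -- the page frame number lands in bits 1
   and up, leaving bit 0 as the valid bit in the window pte.
   Likewise calc_npages(0x2001) rounds one-byte-past-a-page up to 2
   pages.  */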

/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}

struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;

#ifdef CONFIG_DISCONTIGMEM

	if (!NODE_DATA(nid) ||
	    (NULL == (arena = alloc_bootmem_node(NODE_DATA(nid),
						 sizeof(*arena))))) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __FUNCTION__, nid);
		arena = alloc_bootmem(sizeof(*arena));
	}

	if (!NODE_DATA(nid) ||
	    (NULL == (arena->ptes = __alloc_bootmem_node(NODE_DATA(nid),
							 mem_size,
							 align,
							 0)))) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __FUNCTION__, nid);
		arena->ptes = __alloc_bootmem(mem_size, align, 0);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = alloc_bootmem(sizeof(*arena));
	arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}
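
/* Sketch (editor's illustration, building on the concatenation note
   above; the pte_addr formula is an assumption, not from this file):
   because the window TLB ORs the table index into the arena base
   rather than adding it, the pte table must be naturally aligned.
   A 1 GB window with 8 KB pages needs (1 << 30) / (1 << 13) ptes of
   8 bytes each == 1 MB, so the arena must sit on a 1 MB boundary for

	pte_addr = arena_base | (io_pfn << 3);

   to address the intended entry.  */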

struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}

/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
		       long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;
	int pass = 0;
	unsigned long base;
	unsigned long boundary_size;

	base = arena->dma_base >> PAGE_SHIFT;
	if (dev) {
		boundary_size = dma_get_seg_boundary(dev) + 1;
		boundary_size >>= PAGE_SHIFT;
	} else {
		boundary_size = 1UL << (32 - PAGE_SHIFT);
	}

	/* Search forward for the first mask-aligned sequence of N free ptes */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = ALIGN(arena->next_entry, mask + 1);
	i = 0;

again:
	while (i < n && p+i < nent) {
		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
			p = ALIGN(p + 1, mask + 1);
			goto again;
		}

		if (ptes[p+i])
			p = ALIGN(p + i + 1, mask + 1), i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		if (pass < 1) {
			/*
			 * Reached the end.  Flush the TLB and restart
			 * the search from the beginning.
			 */
			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

			pass++;
			p = 0;
			i = 0;
			goto again;
		} else
			return -1;
	}

	/* Success. It's the responsibility of the caller to mark them
	   in use before releasing the lock */
	return p;
}

static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
		  unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(dev, arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, ie not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}
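
/* Worked example (editor's illustration, not in the original): with
   mask == 7 (eight-entry, i.e. 64 KB, alignment) a scan whose
   candidate run starts at entry 8 and hits a busy pte at entry 13
   restarts at the next aligned slot, ALIGN(13 + 1, 8) == 16;
   iommu_is_span_boundary() additionally rejects any run of n entries
   that would straddle the device's DMA segment boundary.  */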

/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */
static int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask);

/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */
static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;
	struct device *dev = pdev ? &pdev->dev : NULL;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window. */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error. */
	if (! alpha_mv.mv_pci_tbi) {
		static int been_here = 0; /* Only print the message once. */
		if (!been_here) {
			printk(KERN_WARNING "pci_map_single: no HW sg\n");
			been_here = 1;
		}
		return 0;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = calc_npages((paddr & ~PAGE_MASK) + size);

	/* Force allocation to 64KB boundary for ISA bridges. */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return 0;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}

dma_addr_t
pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_single);
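
/* Usage sketch (editor's illustration, not part of this file;
   "mydev", "buf" and "len" are hypothetical).  A driver maps a buffer
   for a device write, then unmaps it once the DMA has completed.  */
#if 0
	dma_addr_t bus;

	bus = pci_map_single(mydev, buf, len, PCI_DMA_FROMDEVICE);
	if (!bus)
		return -ENOMEM;	/* no direct window, DAC, or sg ptes fit */
	/* ... point the device at `bus', wait for completion ... */
	pci_unmap_single(mydev, bus, len, PCI_DMA_FROMDEVICE);
#endif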

dma_addr_t
pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
	     size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_page);

/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

void
pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
		 int direction)
{
	unsigned long flags;
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx "
		       " base %lx size %x\n", dma_addr, arena->dma_base,
		       arena->size);
		return;
	}

	npages = calc_npages((dma_addr & ~PAGE_MASK) + size);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_unmap_single);

void
pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
	       size_t size, int direction)
{
	pci_unmap_single(pdev, dma_addr, size, direction);
}
EXPORT_SYMBOL(pci_unmap_page);

/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

void *
pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	void *cpu_addr;
	long order = get_order(size);
	gfp_t gfp = GFP_ATOMIC;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %p\n",
			__builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == 0) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit required mask and we
		   do not have iommu. Try again with GFP_DMA. */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}
EXPORT_SYMBOL(pci_alloc_consistent);
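
/* Usage sketch (editor's illustration, not part of this file;
   "mydev" and RING_BYTES are hypothetical).  Consistent memory is
   typically used for long-lived, device-visible structures such as
   descriptor rings.  */
#if 0
	dma_addr_t ring_dma;
	void *ring = pci_alloc_consistent(mydev, RING_BYTES, &ring_dma);

	if (!ring)
		return -ENOMEM;
	/* ... cpu writes `ring'; the device reads it at `ring_dma' ... */
	pci_free_consistent(mydev, RING_BYTES, ring, ring_dma);
#endif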

/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

void
pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
		    dma_addr_t dma_addr)
{
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%x,%lx] from %p\n",
	      dma_addr, size, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_free_consistent);

/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergeable followers.  */

#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))

static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
	    int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;
	unsigned int max_seg_size;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	/* we will not merge sg without device. */
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (leader_length + len > max_seg_size)
			goto new_segment;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
new_segment:
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}
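
/* Worked example (editor's illustration, not in the original): for
   three 8 KB entries where the second is physically adjacent to the
   first and the third is page-aligned but elsewhere, the pass above
   leaves

	sg[0].dma_address = 1;	(leader; gained a virtual merge)
	sg[1].dma_address = -1;	(physically adjacent follower)
	sg[2].dma_address = -2;	(virtually mergeable via the iommu)

   and sg[0].dma_length == 24 KB, the combined span that sg_fill()
   must cover.  */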

/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */

	paddr &= ~PAGE_MASK;
	npages = calc_npages(paddr + size);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma,
			       dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %lx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg++;
		}

		npages = calc_npages((paddr & ~PAGE_MASK) + size);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}

int
pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	   int direction)
{
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;
	struct device *dev;

	if (direction == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	dev = pdev ? &pdev->dev : NULL;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != 0;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, direction);
	return 0;
}
EXPORT_SYMBOL(pci_map_sg);
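
/* Usage sketch (editor's illustration, not part of this file;
   "mydev", "sglist" and "nents" are hypothetical).  Note that the
   unmap takes the original nents, not the merged count returned by
   pci_map_sg().  */
#if 0
	int i, count = pci_map_sg(mydev, sglist, nents, PCI_DMA_TODEVICE);

	for (i = 0; i < count; i++) {
		dma_addr_t bus = sg_dma_address(&sglist[i]);
		unsigned int len = sg_dma_len(&sglist[i]);
		/* ... one device descriptor per merged entry ... */
	}
	pci_unmap_sg(mydev, sglist, nents, PCI_DMA_TODEVICE);
#endif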

/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

void
pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	     int direction)
{
	unsigned long flags;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma64_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%lx,%lx]\n",
			      sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%lx,%lx]\n",
			      sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%lx,%lx]\n",
		     sg - end + nents, addr, size);

		npages = calc_npages((addr & ~PAGE_MASK) + size);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}
EXPORT_SYMBOL(pci_unmap_sg);

/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

int
pci_dma_supported(struct pci_dev *pdev, u64 mask)
{
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base.  */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}
EXPORT_SYMBOL(pci_dma_supported);
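
/* Usage sketch (editor's illustration, not part of this file;
   "mydev" is hypothetical).  The generic pci_set_dma_mask() consults
   this function, so a driver typically probes from the widest mask
   down.  */
#if 0
	if (pci_set_dma_mask(mydev, DMA_32BIT_MASK) &&
	    pci_set_dma_mask(mydev, ISA_DMA_MASK))
		return -ENODEV;		/* no supported mask */
#endif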

/*
 * AGP GART extensions to the IOMMU
 */
long
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind() */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

long
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first... */
	for(i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}

long
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   unsigned long *physaddrs)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	for(j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for(i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(physaddrs[i]);

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}

long
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for(i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}
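
/* Usage sketch (editor's illustration, not part of this file;
   "agp_arena", "npages" and "pages" are hypothetical).  The GART path
   reserves a window once, then binds and unbinds physical pages under
   it; unbind returns the ptes to RESERVED so release can free them.  */
#if 0
	long ofs = iommu_reserve(agp_arena, npages, 0);

	if (ofs < 0)
		return -ENOMEM;
	if (iommu_bind(agp_arena, ofs, npages, pages) < 0) {
		iommu_release(agp_arena, ofs, npages);
		return -EBUSY;
	}
	/* ... translations live; the device uses the arena window ... */
	iommu_unbind(agp_arena, ofs, npages);
	iommu_release(agp_arena, ofs, npages);
#endif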

/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */

static int
pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine. */
	DBGA("pci_dac_dma_supported %s from %p\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}

/* Helper for generic DMA-mapping functions. */
struct pci_dev *
alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev->bus == &pci_bus_type)
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise. */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then). */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations). */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff. */
	return isa_bridge;
}
EXPORT_SYMBOL(alpha_gendev_to_pci);

int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask ||
	    !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);