/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"

#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)		printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)		printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0

#define ISA_DMA_MASK		0x00ffffff

static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}

static inline long
calc_npages(long bytes)
{
	return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}

struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;

#ifdef CONFIG_DISCONTIGMEM

	arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena));
	if (!NODE_DATA(nid) || !arena) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena = alloc_bootmem(sizeof(*arena));
	}

	arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);
	if (!NODE_DATA(nid) || !arena->ptes) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena->ptes = __alloc_bootmem(mem_size, align, 0);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = alloc_bootmem(sizeof(*arena));
	arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}

struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}

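/*
 * Illustrative sketch (not part of the original file): how a chipset's
 * __init code might create the two scatter-gather arenas.  The window
 * bases and sizes below are hypothetical; the real values live in the
 * individual core_*.c files for each hose.  Compiled out.
 */
#if 0
static void __init example_init_arenas(struct pci_controller *hose)
{
	/* A small ISA-reachable window entirely below 16MB ... */
	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);

	/* ... and a larger window for 32-bit PCI masters, sized by
	   size_for_memory() to cover main memory, capped at 1GB. */
	hose->sg_pci = iommu_arena_new(hose, 0x40000000,
				       size_for_memory(0x40000000), 0);
}
#endif
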
/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
		       long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;
	int pass = 0;
	unsigned long base;
	unsigned long boundary_size;

	base = arena->dma_base >> PAGE_SHIFT;
	if (dev) {
		boundary_size = dma_get_seg_boundary(dev) + 1;
		boundary_size >>= PAGE_SHIFT;
	} else {
		boundary_size = 1UL << (32 - PAGE_SHIFT);
	}

	/* Search forward for the first mask-aligned sequence of N free ptes */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = ALIGN(arena->next_entry, mask + 1);
	i = 0;

again:
	while (i < n && p+i < nent) {
		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
			p = ALIGN(p + 1, mask + 1);
			goto again;
		}

		if (ptes[p+i])
			p = ALIGN(p + i + 1, mask + 1), i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		if (pass < 1) {
			/*
			 * Reached the end.  Flush the TLB and restart
			 * the search from the beginning.
			 */
			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

			pass++;
			p = 0;
			i = 0;
			goto again;
		} else
			return -1;
	}

	/* Success. It's the responsibility of the caller to mark them
	   in use before releasing the lock */
	return p;
}

static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
		  unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(dev, arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, ie not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}

/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */
static int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask);

/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */

static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;
	struct device *dev = pdev ? &pdev->dev : NULL;

	paddr = __pa(cpu_addr);

	/* First check to see if we can use the direct map window.  */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error. */
	if (! alpha_mv.mv_pci_tbi) {
		static int been_here = 0; /* Only print the message once. */
		if (!been_here) {
			printk(KERN_WARNING "pci_map_single: no HW sg\n");
			been_here = 1;
		}
		return 0;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = calc_npages((paddr & ~PAGE_MASK) + size);

	/* Force allocation to 64KB boundary for ISA bridges. */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return 0;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}

dma_addr_t
pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_single);

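/*
 * Illustrative sketch (not part of the original file): typical driver
 * use of the streaming API above.  "mydev_send" and its arguments are
 * hypothetical; the block is compiled out.
 */
#if 0
static int mydev_send(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t dma;

	dma = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	if (!dma)		/* 0 means the mapping failed */
		return -ENOMEM;

	/* ... hand `dma' to the device and wait for it to finish ... */

	pci_unmap_single(pdev, dma, len, PCI_DMA_TODEVICE);
	return 0;
}
#endif
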
dma_addr_t
pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
	     size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_page);

/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

void
pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
		 int direction)
{
	unsigned long flags;
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx "
		       " base %lx size %x\n", dma_addr, arena->dma_base,
		       arena->size);
		return;
	}

	npages = calc_npages((dma_addr & ~PAGE_MASK) + size);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_unmap_single);

void
pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
	       size_t size, int direction)
{
	pci_unmap_single(pdev, dma_addr, size, direction);
}
EXPORT_SYMBOL(pci_unmap_page);

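/*
 * Illustrative sketch (not part of the original file): mapping part of
 * a page with pci_map_page()/pci_unmap_page().  All names are
 * hypothetical; the block is compiled out.
 */
#if 0
static dma_addr_t mydev_map_rx(struct pci_dev *pdev, struct page *page,
			       unsigned long offset, size_t len)
{
	/* Map [offset, offset+len) of PAGE for device-to-memory DMA. */
	return pci_map_page(pdev, page, offset, len, PCI_DMA_FROMDEVICE);
}

static void mydev_unmap_rx(struct pci_dev *pdev, dma_addr_t dma, size_t len)
{
	pci_unmap_page(pdev, dma, len, PCI_DMA_FROMDEVICE);
}
#endif
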
/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

void *
__pci_alloc_consistent(struct pci_dev *pdev, size_t size,
		       dma_addr_t *dma_addrp, gfp_t gfp)
{
	void *cpu_addr;
	long order = get_order(size);

	gfp &= ~GFP_DMA;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %p\n",
		       __builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == 0) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit required mask and we
		   do not have iommu. Try again with GFP_DMA. */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}
EXPORT_SYMBOL(__pci_alloc_consistent);

/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

void
pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
		    dma_addr_t dma_addr)
{
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%x,%lx] from %p\n",
	      dma_addr, size, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_free_consistent);

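/*
 * Illustrative sketch (not part of the original file): allocating a
 * small descriptor ring with the consistent-memory calls above.
 * Drivers normally reach __pci_alloc_consistent() through the arch's
 * pci_alloc_consistent() wrapper, which supplies the gfp argument.
 * The structure and helpers are hypothetical; the block is compiled out.
 */
#if 0
struct mydev_ring {
	void *desc;		/* CPU view of the descriptors */
	dma_addr_t desc_dma;	/* bus address programmed into the chip */
	size_t size;
};

static int mydev_alloc_ring(struct pci_dev *pdev, struct mydev_ring *ring)
{
	ring->size = PAGE_SIZE;
	ring->desc = __pci_alloc_consistent(pdev, ring->size,
					    &ring->desc_dma, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;
	return 0;
}

static void mydev_free_ring(struct pci_dev *pdev, struct mydev_ring *ring)
{
	pci_free_consistent(pdev, ring->size, ring->desc, ring->desc_dma);
}
#endif
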
/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergeable followers.  */

#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))

static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
	    int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;
	unsigned int max_seg_size;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	/* we will not merge sgs without a device. */
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (leader_length + len > max_seg_size)
			goto new_segment;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
new_segment:
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}

/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */

	paddr &= ~PAGE_MASK;
	npages = calc_npages(paddr + size);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %lx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg++;
		}

		npages = calc_npages((paddr & ~PAGE_MASK) + size);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}

int
pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	   int direction)
{
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;
	struct device *dev;

	if (direction == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	dev = pdev ? &pdev->dev : NULL;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != 0;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, direction);
	return 0;
}
EXPORT_SYMBOL(pci_map_sg);

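/*
 * Illustrative sketch (not part of the original file): mapping a flat
 * scatterlist and walking the (possibly merged) result.  Names are
 * hypothetical; the block is compiled out.  Note that pci_map_sg() may
 * return fewer entries than it was given because adjacent entries can
 * be coalesced, while pci_unmap_sg() still takes the original nents.
 */
#if 0
static int mydev_map_request(struct pci_dev *pdev, struct scatterlist *sgl,
			     int nents)
{
	int i, count;

	count = pci_map_sg(pdev, sgl, nents, PCI_DMA_TODEVICE);
	if (count == 0)
		return -ENOMEM;

	/* Only the first COUNT entries are valid for the device. */
	for (i = 0; i < count; i++) {
		dma_addr_t addr = sg_dma_address(&sgl[i]);
		unsigned int len = sg_dma_len(&sgl[i]);

		/* ... program one hardware descriptor with addr/len ... */
		(void)addr;
		(void)len;
	}

	/* ... later, once the device has finished with the buffers: */
	pci_unmap_sg(pdev, sgl, nents, PCI_DMA_TODEVICE);
	return 0;
}
#endif
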
/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

void
pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	     int direction)
{
	unsigned long flags;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma64_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%lx,%lx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%lx,%lx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%lx,%lx]\n",
		     sg - end + nents, addr, size);

		npages = calc_npages((addr & ~PAGE_MASK) + size);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}
EXPORT_SYMBOL(pci_unmap_sg);

/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

int
pci_dma_supported(struct pci_dev *pdev, u64 mask)
{
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As a last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}
EXPORT_SYMBOL(pci_dma_supported);

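/*
 * Illustrative sketch (not part of the original file): how a driver's
 * probe routine might negotiate a DMA mask.  pci_set_dma_mask()
 * ultimately consults pci_dma_supported() above; the helper name is
 * hypothetical and the block is compiled out.
 */
#if 0
static int mydev_setup_dma(struct pci_dev *pdev)
{
	/* Prefer a full 64-bit (DAC) mask, then fall back to 32 bits. */
	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
		return 0;
	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) == 0)
		return 0;
	return -EIO;
}
#endif
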
/*
 * AGP GART extensions to the IOMMU
 */
long
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind() */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

long
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first... */
	for(i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}

long
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   unsigned long *physaddrs)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	for(j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for(i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(physaddrs[i]);

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}

long
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for(i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}

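/*
 * Illustrative sketch (not part of the original file): the order in
 * which an AGP GART driver is expected to use the four helpers above.
 * The names "agp_arena", "pages" and "npages" are hypothetical; the
 * block is compiled out.
 */
#if 0
static long example_gart_map(struct pci_iommu_arena *agp_arena,
			     unsigned long *pages, long npages)
{
	long ofs, err;

	/* Reserve a run of ptes in the arena ... */
	ofs = iommu_reserve(agp_arena, npages, 0);
	if (ofs < 0)
		return ofs;

	/* ... then point the reserved ptes at the physical pages. */
	err = iommu_bind(agp_arena, ofs, npages, pages);
	if (err < 0) {
		iommu_release(agp_arena, ofs, npages);
		return err;
	}
	return ofs;
}

static void example_gart_unmap(struct pci_iommu_arena *agp_arena,
			       long ofs, long npages)
{
	iommu_unbind(agp_arena, ofs, npages);
	iommu_release(agp_arena, ofs, npages);
}
#endif
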
/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */

static int
pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine. */
	DBGA("pci_dac_dma_supported %s from %p\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}

/* Helper for generic DMA-mapping functions. */
struct pci_dev *
alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev->bus == &pci_bus_type)
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise. */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then). */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations). */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff. */
	return NULL;
}
EXPORT_SYMBOL(alpha_gendev_to_pci);

int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask ||
	    !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);