/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"
#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)		printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)		printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0
#define DEBUG_FORCEDAC 0

#define ISA_DMA_MASK		0x00ffffff
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}
static inline long
calc_npages(long bytes)
{
	return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
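/*
 * Editorial note, not in the original source: a worked example of the two
 * helpers above, assuming the usual Alpha 8 KB pages (PAGE_SHIFT == 13).
 * calc_npages(0x2100) rounds up to (0x2100 + 0x1fff) >> 13 == 2 pages.
 * mk_iommu_pte() appears to store the page frame number shifted left by
 * one with bit 0 set as the "valid" bit, so a page at physical 0x302000
 * (PFN 0x181) becomes the scatter-gather PTE (0x181 << 1) | 1 == 0x303.
 */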
/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}
struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;

#ifdef CONFIG_DISCONTIGMEM

	if (!NODE_DATA(nid) ||
	    (NULL == (arena = alloc_bootmem_node(NODE_DATA(nid),
						 sizeof(*arena))))) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena = alloc_bootmem(sizeof(*arena));
	}

	if (!NODE_DATA(nid) ||
	    (NULL == (arena->ptes = __alloc_bootmem_node(NODE_DATA(nid),
							 mem_size,
							 align,
							 0)))) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __func__, nid);
		arena->ptes = __alloc_bootmem(mem_size, align, 0);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = alloc_bootmem(sizeof(*arena));
	arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}
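/*
 * Editorial note, not in the original source: a rough sizing example for
 * the arithmetic above, assuming 8 KB pages and 8-byte PTEs.  A 1 GB
 * scatter-gather window covers 1 GB / 8 KB = 128K pages, so mem_size is
 * window_size / (PAGE_SIZE / sizeof(unsigned long)) = 1 GB / 1024 = 1 MB
 * of PTE storage, and because the hardware concatenates rather than adds
 * the table base and the offset, that 1 MB table must itself be aligned
 * to at least 1 MB.
 */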
struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}
/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;

	/* Search forward for the first mask-aligned sequence of N free ptes */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = (arena->next_entry + mask) & ~mask;
	i = 0;
	while (i < n && p+i < nent) {
		if (ptes[p+i])
			p = (p + i + 1 + mask) & ~mask, i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		/* Reached the end.  Flush the TLB and restart the
		   search from the beginning.  */
		alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

		p = 0, i = 0;
		while (i < n && p+i < nent) {
			if (ptes[p+i])
				p = (p + i + 1 + mask) & ~mask, i = 0;
			else
				i = i + 1;
		}

		if (i < n)
			return -1;
	}

	/* Success.  It's the responsibility of the caller to mark them
	   in use before releasing the lock */
	return p;
}
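/*
 * Editorial note, not in the original source: the mask arithmetic above
 * rounds a candidate slot up to the requested alignment.  With an
 * alignment of 8 entries (mask == 7) and next_entry == 21, the search
 * starts at p = (21 + 7) & ~7 == 24, and every restart after a collision
 * re-rounds the same way, so the final run of N free PTEs always begins
 * on an 8-entry boundary.
 */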
static long
iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, ie not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}
static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}
/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */
static int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask);

/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */
static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window.  */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error.  */
	if (! alpha_mv.mv_pci_tbi) {
		static int been_here = 0; /* Only print the message once.  */
		if (!been_here) {
			printk(KERN_WARNING "pci_map_single: no HW sg\n");
			been_here = 1;
		}
		return 0;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = calc_npages((paddr & ~PAGE_MASK) + size);

	/* Force allocation to 64KB boundary for ISA bridges.  */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return 0;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}
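/*
 * Editorial note, not in the original source, with purely illustrative
 * numbers: suppose the chipset exposes a direct map at __direct_map_base
 * 0x40000000 covering __direct_map_size 0x40000000 (1 GB) and the device
 * mask is 0xffffffff.  A buffer at physical 0x00102000 of 0x3000 bytes
 * satisfies both tests in pci_map_single_1, so it is handed straight back
 * as bus address 0x40102000 with no IOMMU PTEs consumed; only buffers
 * outside the window (or devices with narrower masks) take the
 * scatter-gather path.
 */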
dma_addr_t
pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_single);
dma_addr_t
pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
	     size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_page);
/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

void
pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
		 int direction)
{
	unsigned long flags;
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx "
		       " base %lx size %x\n", dma_addr, arena->dma_base,
		       arena->size);
		return;
	}

	npages = calc_npages((dma_addr & ~PAGE_MASK) + size);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_unmap_single);
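/*
 * Editorial note, not in the original source: the "freeing ptes above
 * next_entry" test is about stale IOMMU TLB contents.  Entries below
 * next_entry will not be reused until the allocator wraps (which already
 * flushes), but an entry at or above next_entry could be handed out again
 * on the very next allocation, so it must be flushed out of the TLB here
 * before that can happen.  For example, with next_entry == 100, freeing
 * PTEs 120..123 triggers the flush, while freeing PTEs 40..43 does not.
 */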
void
pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
	       size_t size, int direction)
{
	pci_unmap_single(pdev, dma_addr, size, direction);
}
EXPORT_SYMBOL(pci_unmap_page);
/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

void *
pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	void *cpu_addr;
	long order = get_order(size);
	gfp_t gfp = GFP_ATOMIC;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %p\n",
		       __builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == 0) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit required mask and we
		   do not have iommu.  Try again with GFP_DMA.  */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}
EXPORT_SYMBOL(pci_alloc_consistent);
/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

void
pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
		    dma_addr_t dma_addr)
{
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%x,%lx] from %p\n",
	      dma_addr, size, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_free_consistent);
/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergable followers.  */

#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))

static void
sg_classify(struct scatterlist *sg, struct scatterlist *end, int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}
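/*
 * Editorial note, not in the original source: a worked example of the
 * classification, assuming 8 KB pages.  Consider four entries whose
 * physical addresses/lengths are A=[0x10000,0x2000], B=[0x12000,0x2000],
 * C=[0x20000,0x2000], D=[0x30800,0x1000].  B starts exactly where A ends,
 * so B gets dma_address -1 and folds into A's leader length (0x4000).
 * C is not adjacent, but both the break point (0x14000) and C are page
 * aligned, so with virt_ok C gets -2 and promotes A to a "virtually
 * adjacent" leader (flag 1, length 0x6000).  D starts mid-page, so A is
 * closed out and D becomes a new leader of its own.
 */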
/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */

	paddr &= ~PAGE_MASK;
	npages = calc_npages(paddr + size);
	dma_ofs = iommu_arena_alloc(arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(leader, end, 0);
		return sg_fill(leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %lx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg++;
		}

		npages = calc_npages((paddr & ~PAGE_MASK) + size);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}
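/*
 * Editorial note, not in the original source: when a run is only
 * virtually contiguous, the do/while loop above walks each physically
 * contiguous sub-run separately.  Two 8 KB buffers that are virtually
 * adjacent but live in unrelated page frames therefore get two PTEs
 * pointing at different physical pages, while the bus addresses they
 * present to the device remain back to back inside the arena window.
 */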
int
pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	   int direction)
{
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;

	if (direction == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != 0;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, direction);
	return 0;
}
EXPORT_SYMBOL(pci_map_sg);
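/*
 * Editorial note, not in the original source: a minimal driver-side
 * sketch of the intended calling convention, using the generic
 * scatterlist helpers of the same kernel era.
 *
 *	struct scatterlist sg[2];
 *	int n;
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], buf0, len0);
 *	sg_set_buf(&sg[1], buf1, len1);
 *	n = pci_map_sg(pdev, sg, 2, PCI_DMA_FROMDEVICE);
 *	if (n == 0)
 *		return -ENOMEM;
 *	...program the device with sg_dma_address()/sg_dma_len() of the
 *	   first n entries, wait for completion, then...
 *	pci_unmap_sg(pdev, sg, 2, PCI_DMA_FROMDEVICE);
 *
 * Note that the count passed to pci_unmap_sg is the original nents,
 * not the possibly smaller merged count returned by pci_map_sg.
 */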
/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

void
pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	     int direction)
{
	unsigned long flags;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma64_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%lx,%lx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%lx,%lx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%lx,%lx]\n",
		     sg - end + nents, addr, size);

		npages = calc_npages((addr & ~PAGE_MASK) + size);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}
EXPORT_SYMBOL(pci_unmap_sg);
/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

int
pci_dma_supported(struct pci_dev *pdev, u64 mask)
{
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base.  */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}
EXPORT_SYMBOL(pci_dma_supported);
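/*
 * Editorial note, not in the original source, with illustrative numbers:
 * for a hose whose direct map is 1 GB at bus address 0x40000000, a fully
 * 32-bit capable device (mask 0xffffffff) passes the first test because
 * 0x40000000 + 0x40000000 - 1 == 0x7fffffff <= 0xffffffff.  A 24-bit
 * ISA-style mask of 0x00ffffff fails the direct-map tests and is only
 * accepted if one of the scatter-gather arenas ends below 16 MB.
 */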
/*
 * AGP GART extensions to the IOMMU
 */
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind() */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first... */
	for (i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}

int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   unsigned long *physaddrs)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	for (j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for (i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(physaddrs[i]);

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}

int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for (i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}
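/*
 * Editorial note, not in the original source: the expected calling order
 * for the GART hooks above appears to be iommu_reserve() to stake out a
 * block of arena entries (left as IOMMU_RESERVED_PTE), iommu_bind() to
 * point those entries at real pages, iommu_unbind() to return them to the
 * reserved state, and finally iommu_release() to hand the entries back to
 * the arena allocator.
 */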
/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */

static int
pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine.  */
	DBGA("pci_dac_dma_supported %s from %p\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}
/* Helper for generic DMA-mapping functions.  */
struct pci_dev *
alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev->bus == &pci_bus_type)
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise.  */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then).  */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations).  */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff.  */
	return NULL;
}
EXPORT_SYMBOL(alpha_gendev_to_pci);
int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask ||
	    !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);