/*
 *      linux/arch/alpha/kernel/pci_iommu.c
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/log2.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"
#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)          printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)         printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif
#define DEBUG_NODIRECT 0
#define DEBUG_FORCEDAC 0

#define ISA_DMA_MASK            0x00ffffff
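/* Build an IOMMU pte: the page frame number sits above bit 0, which is
   the valid bit.  A worked example, assuming Alpha's 8KB pages
   (PAGE_SHIFT == 13): for paddr 0x2000, (0x2000 >> 12) | 1 == 0x3,
   i.e. PFN 1 with the valid bit set.  */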
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
        return (paddr >> (PAGE_SHIFT-1)) | 1;
}

static inline long
calc_npages(long bytes)
{
        return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
        unsigned long mem = max_low_pfn << PAGE_SHIFT;
        if (mem < max)
                max = roundup_pow_of_two(mem);
        return max;
}
struct pci_iommu_arena *
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
                     unsigned long window_size, unsigned long align)
{
        unsigned long mem_size;
        struct pci_iommu_arena *arena;

        mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

        /* Note that the TLB lookup logic uses bitwise concatenation,
           not addition, so the required arena alignment is based on
           the size of the window.  Retain the align parameter so that
           particular systems can over-align the arena.  */
        if (align < mem_size)
                align = mem_size;

#ifdef CONFIG_DISCONTIGMEM

        if (!NODE_DATA(nid) ||
            (NULL == (arena = alloc_bootmem_node(NODE_DATA(nid),
                                                 sizeof(*arena))))) {
                printk("%s: couldn't allocate arena from node %d\n"
                       "    falling back to system-wide allocation\n",
                       __FUNCTION__, nid);
                arena = alloc_bootmem(sizeof(*arena));
        }

        if (!NODE_DATA(nid) ||
            (NULL == (arena->ptes = __alloc_bootmem_node(NODE_DATA(nid),
                                                         mem_size,
                                                         align,
                                                         0)))) {
                printk("%s: couldn't allocate arena ptes from node %d\n"
                       "    falling back to system-wide allocation\n",
                       __FUNCTION__, nid);
                arena->ptes = __alloc_bootmem(mem_size, align, 0);
        }

#else /* CONFIG_DISCONTIGMEM */

        arena = alloc_bootmem(sizeof(*arena));
        arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */

        spin_lock_init(&arena->lock);
        arena->hose = hose;
        arena->dma_base = base;
        arena->size = window_size;
        arena->next_entry = 0;

        /* Align allocations to a multiple of a page size.  Not needed
           unless there are chip bugs.  */
        arena->align_entry = 1;

        return arena;
}
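/* Sizing note for iommu_arena_new_node() above: every window page needs
   one pte of sizeof(unsigned long) bytes, so the pte table is
   window_size / (PAGE_SIZE / sizeof(unsigned long)) bytes.  For example,
   with 8KB pages a 1GB window needs 1GB / 1024 = 1MB of ptes.  */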
struct pci_iommu_arena *
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
                unsigned long window_size, unsigned long align)
{
        return iommu_arena_new_node(0, hose, base, window_size, align);
}
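/* The per-chipset core_*.c files create the arenas at boot.  An
   illustrative sketch only (the real window addresses, sizes and
   alignments vary by platform):

        hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
        hose->sg_pci = iommu_arena_new(hose, 0x40000000,
                                       size_for_memory(0x40000000), 0);
 */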
/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
{
        unsigned long *ptes;
        long i, p, nent;

        /* Search forward for the first mask-aligned sequence of N free ptes */
        ptes = arena->ptes;
        nent = arena->size >> PAGE_SHIFT;
        p = (arena->next_entry + mask) & ~mask;
        i = 0;
        while (i < n && p+i < nent) {
                if (ptes[p+i])
                        p = (p + i + 1 + mask) & ~mask, i = 0;
                else
                        i = i + 1;
        }

        if (i < n) {
                /* Reached the end.  Flush the TLB and restart the
                   search from the beginning.  */
                alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

                p = 0, i = 0;
                while (i < n && p+i < nent) {
                        if (ptes[p+i])
                                p = (p + i + 1 + mask) & ~mask, i = 0;
                        else
                                i = i + 1;
                }

                if (i < n)
                        return -1;
        }

        /* Success.  It's the responsibility of the caller to mark them
           in use before releasing the lock */
        return p;
}
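/* Note on the alignment arithmetic: iommu_arena_alloc() below turns an
   alignment of 2^k entries into mask = align - 1, and
   iommu_arena_find_pages() above rounds each candidate start up with
   (p + mask) & ~mask.  For example, align == 8 (a 64KB boundary with
   8KB pages) gives mask 7, so starts land on multiples of 8 entries.  */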
static long
iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align)
{
        unsigned long flags;
        unsigned long *ptes;
        long i, p, mask;

        spin_lock_irqsave(&arena->lock, flags);

        /* Search for N empty ptes */
        ptes = arena->ptes;
        mask = max(align, arena->align_entry) - 1;
        p = iommu_arena_find_pages(arena, n, mask);
        if (p < 0) {
                spin_unlock_irqrestore(&arena->lock, flags);
                return -1;
        }

        /* Success.  Mark them all in use, ie not zero and invalid
           for the iommu tlb that could load them from under us.
           The chip specific bits will fill this in with something
           kosher when we return.  */
        for (i = 0; i < n; ++i)
                ptes[p+i] = IOMMU_INVALID_PTE;

        arena->next_entry = p + n;
        spin_unlock_irqrestore(&arena->lock, flags);

        return p;
}
static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
        unsigned long *p;
        long i;

        p = arena->ptes + ofs;
        for (i = 0; i < n; ++i)
                p[i] = 0;
}

/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */
static int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask);
/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */
static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
                 int dac_allowed)
{
        struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
        dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
        struct pci_iommu_arena *arena;
        long npages, dma_ofs, i;
        unsigned long paddr;
        dma_addr_t ret;
        unsigned int align = 0;

        paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
        /* First check to see if we can use the direct map window.  */
        if (paddr + size + __direct_map_base - 1 <= max_dma
            && paddr + size <= __direct_map_size) {
                ret = paddr + __direct_map_base;

                DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n",
                      cpu_addr, size, ret, __builtin_return_address(0));

                return ret;
        }
#endif

        /* Next, use DAC if selected earlier.  */
        if (dac_allowed) {
                ret = paddr + alpha_mv.pci_dac_offset;

                DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n",
                      cpu_addr, size, ret, __builtin_return_address(0));

                return ret;
        }

        /* If the machine doesn't define a pci_tbi routine, we have to
           assume it doesn't support sg mapping, and, since we tried to
           use direct_map above, it now must be considered an error. */
        if (! alpha_mv.mv_pci_tbi) {
                static int been_here = 0; /* Only print the message once. */
                if (!been_here) {
                        printk(KERN_WARNING "pci_map_single: no HW sg\n");
                        been_here = 1;
                }
                return 0;
        }

        arena = hose->sg_pci;
        if (!arena || arena->dma_base + arena->size - 1 > max_dma)
                arena = hose->sg_isa;

        npages = calc_npages((paddr & ~PAGE_MASK) + size);

        /* Force allocation to 64KB boundary for ISA bridges. */
        if (pdev && pdev == isa_bridge)
                align = 8;
        dma_ofs = iommu_arena_alloc(arena, npages, align);
        if (dma_ofs < 0) {
                printk(KERN_WARNING "pci_map_single failed: "
                       "could not allocate dma page tables\n");
                return 0;
        }

        paddr &= PAGE_MASK;
        for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
                arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

        ret = arena->dma_base + dma_ofs * PAGE_SIZE;
        ret += (unsigned long)cpu_addr & ~PAGE_MASK;

        DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n",
              cpu_addr, size, npages, ret, __builtin_return_address(0));

        return ret;
}
dma_addr_t
pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
{
        int dac_allowed;

        if (dir == PCI_DMA_NONE)
                BUG();

        dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
        return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_single);
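/*
 * Typical streaming-DMA use from a driver (a sketch; "dev", "buf" and
 * "len" are illustrative names, not part of this file):
 *
 *      dma_addr_t bus = pci_map_single(dev, buf, len, PCI_DMA_TODEVICE);
 *      if (!bus)
 *              goto map_failed;
 *      ... hand "bus" to the device and run the transfer ...
 *      pci_unmap_single(dev, bus, len, PCI_DMA_TODEVICE);
 */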
dma_addr_t
pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
             size_t size, int dir)
{
        int dac_allowed;

        if (dir == PCI_DMA_NONE)
                BUG();

        dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
        return pci_map_single_1(pdev, (char *)page_address(page) + offset,
                                size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_page);
/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */
void
pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
                 int direction)
{
        unsigned long flags;
        struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
        struct pci_iommu_arena *arena;
        long dma_ofs, npages;

        if (direction == PCI_DMA_NONE)
                BUG();

        if (dma_addr >= __direct_map_base
            && dma_addr < __direct_map_base + __direct_map_size) {
                /* Nothing to do.  */

                DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n",
                      dma_addr, size, __builtin_return_address(0));

                return;
        }

        if (dma_addr > 0xffffffff) {
                DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n",
                      dma_addr, size, __builtin_return_address(0));
                return;
        }

        arena = hose->sg_pci;
        if (!arena || dma_addr < arena->dma_base)
                arena = hose->sg_isa;

        dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
        if (dma_ofs * PAGE_SIZE >= arena->size) {
                printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx "
                       " base %lx size %x\n", dma_addr, arena->dma_base,
                       arena->size);
                return;
        }

        npages = calc_npages((dma_addr & ~PAGE_MASK) + size);

        spin_lock_irqsave(&arena->lock, flags);

        iommu_arena_free(arena, dma_ofs, npages);

        /* If we're freeing ptes above the `next_entry' pointer (they
           may have snuck back into the TLB since the last wrap flush),
           we need to flush the TLB before reallocating the latter.  */
        if (dma_ofs >= arena->next_entry)
                alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

        spin_unlock_irqrestore(&arena->lock, flags);

        DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n",
              dma_addr, size, npages, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_unmap_single);
void
pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
               size_t size, int direction)
{
        pci_unmap_single(pdev, dma_addr, size, direction);
}
EXPORT_SYMBOL(pci_unmap_page);
/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */
void *
pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
        void *cpu_addr;
        long order = get_order(size);
        gfp_t gfp = GFP_ATOMIC;

try_again:
        cpu_addr = (void *)__get_free_pages(gfp, order);
        if (! cpu_addr) {
                printk(KERN_INFO "pci_alloc_consistent: "
                       "get_free_pages failed from %p\n",
                        __builtin_return_address(0));
                /* ??? Really atomic allocation?  Otherwise we could play
                   with vmalloc and sg if we can't find contiguous memory.  */
                return NULL;
        }
        memset(cpu_addr, 0, size);

        *dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
        if (*dma_addrp == 0) {
                free_pages((unsigned long)cpu_addr, order);
                if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
                        return NULL;
                /* The address doesn't fit required mask and we
                   do not have iommu. Try again with GFP_DMA. */
                gfp |= GFP_DMA;
                goto try_again;
        }

        DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n",
              size, cpu_addr, *dma_addrp, __builtin_return_address(0));

        return cpu_addr;
}
EXPORT_SYMBOL(pci_alloc_consistent);
/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */
void
pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
                    dma_addr_t dma_addr)
{
        pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
        free_pages((unsigned long)cpu_addr, get_order(size));

        DBGA2("pci_free_consistent: [%x,%lx] from %p\n",
              dma_addr, size, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_free_consistent);
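/*
 * Coherent-DMA use from a driver (a sketch; "dev", "ring" and RING_BYTES
 * are illustrative names):
 *
 *      dma_addr_t ring_dma;
 *      void *ring = pci_alloc_consistent(dev, RING_BYTES, &ring_dma);
 *      if (!ring)
 *              goto no_mem;
 *      ... give ring_dma to the device, access "ring" from the CPU ...
 *      pci_free_consistent(dev, RING_BYTES, ring, ring_dma);
 */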
/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
        0   : Followers all physically adjacent.
        1   : Followers all virtually adjacent.
        -1  : Not leader, physically adjacent to previous.
        -2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergeable followers.  */

#define SG_ENT_VIRT_ADDRESS(SG) (page_address((SG)->page) + (SG)->offset)
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
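/* A short classification example (assuming 8KB pages): entries at
   physical 0x10000..0x11fff and 0x12000..0x12fff are physically
   adjacent, so the second gets dma_address -1 and the leader's
   dma_length grows to 0x3000.  If the second entry instead started at
   0x20000 (both boundaries page aligned) and virt_ok were set, it would
   get -2 and the leader would be flagged 1 (virtually mergeable).  */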
static void
sg_classify(struct scatterlist *sg, struct scatterlist *end, int virt_ok)
{
        unsigned long next_paddr;
        struct scatterlist *leader;
        long leader_flag, leader_length;

        leader = sg;
        leader_flag = 0;
        leader_length = leader->length;
        next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

        for (++sg; sg < end; ++sg) {
                unsigned long addr, len;
                addr = SG_ENT_PHYS_ADDRESS(sg);
                len = sg->length;

                if (next_paddr == addr) {
                        sg->dma_address = -1;
                        leader_length += len;
                } else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
                        sg->dma_address = -2;
                        leader_flag = 1;
                        leader_length += len;
                } else {
                        leader->dma_address = leader_flag;
                        leader->dma_length = leader_length;
                        leader = sg;
                        leader_flag = 0;
                        leader_length = len;
                }

                next_paddr = addr + len;
        }

        leader->dma_address = leader_flag;
        leader->dma_length = leader_length;
}
/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct scatterlist *leader, struct scatterlist *end,
        struct scatterlist *out, struct pci_iommu_arena *arena,
        dma_addr_t max_dma, int dac_allowed)
{
        unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
        long size = leader->dma_length;
        struct scatterlist *sg;
        unsigned long *ptes;
        long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
        /* If everything is physically contiguous, and the addresses
           fall into the direct-map window, use it.  */
        if (leader->dma_address == 0
            && paddr + size + __direct_map_base - 1 <= max_dma
            && paddr + size <= __direct_map_size) {
                out->dma_address = paddr + __direct_map_base;
                out->dma_length = size;

                DBGA("    sg_fill: [%p,%lx] -> direct %lx\n",
                     __va(paddr), size, out->dma_address);

                return 0;
        }
#endif

        /* If physically contiguous and DAC is available, use it.  */
        if (leader->dma_address == 0 && dac_allowed) {
                out->dma_address = paddr + alpha_mv.pci_dac_offset;
                out->dma_length = size;

                DBGA("    sg_fill: [%p,%lx] -> DAC %lx\n",
                     __va(paddr), size, out->dma_address);

                return 0;
        }

        /* Otherwise, we'll use the iommu to make the pages virtually
           contiguous.  */

        paddr &= ~PAGE_MASK;
        npages = calc_npages(paddr + size);
        dma_ofs = iommu_arena_alloc(arena, npages, 0);
        if (dma_ofs < 0) {
                /* If we attempted a direct map above but failed, die.  */
                if (leader->dma_address == 0)
                        return -1;

                /* Otherwise, break up the remaining virtually contiguous
                   hunks into individual direct maps and retry.  */
                sg_classify(leader, end, 0);
                return sg_fill(leader, end, out, arena, max_dma, dac_allowed);
        }

        out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
        out->dma_length = size;

        DBGA("    sg_fill: [%p,%lx] -> sg %lx np %ld\n",
             __va(paddr), size, out->dma_address, npages);

        /* All virtually contiguous.  We need to find the length of each
           physically contiguous subsegment to fill in the ptes.  */
        ptes = &arena->ptes[dma_ofs];
        sg = leader;
        do {
#if DEBUG_ALLOC > 0
                struct scatterlist *last_sg = sg;
#endif

                size = sg->length;
                paddr = SG_ENT_PHYS_ADDRESS(sg);

                while (sg+1 < end && (int) sg[1].dma_address == -1) {
                        size += sg[1].length;
                        sg++;
                }

                npages = calc_npages((paddr & ~PAGE_MASK) + size);

                paddr &= PAGE_MASK;
                for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
                        *ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
                DBGA("    (%ld) [%p,%x] np %ld\n",
                     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
                     last_sg->length, npages);
                while (++last_sg <= sg) {
                        DBGA("        (%ld) [%p,%x] cont\n",
                             last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
                             last_sg->length);
                }
#endif
        } while (++sg < end && (int) sg->dma_address < 0);

        return 1;
}
int
pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
           int direction)
{
        struct scatterlist *start, *end, *out;
        struct pci_controller *hose;
        struct pci_iommu_arena *arena;
        dma_addr_t max_dma;
        int dac_allowed;

        if (direction == PCI_DMA_NONE)
                BUG();

        dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

        /* Fast path single entry scatterlists.  */
        if (nents == 1) {
                sg->dma_length = sg->length;
                sg->dma_address
                  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
                                     sg->length, dac_allowed);
                return sg->dma_address != 0;
        }

        start = sg;
        end = sg + nents;

        /* First, prepare information about the entries.  */
        sg_classify(sg, end, alpha_mv.mv_pci_tbi != 0);

        /* Second, figure out where we're going to map things.  */
        if (alpha_mv.mv_pci_tbi) {
                hose = pdev ? pdev->sysdata : pci_isa_hose;
                max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
                arena = hose->sg_pci;
                if (!arena || arena->dma_base + arena->size - 1 > max_dma)
                        arena = hose->sg_isa;
        } else {
                max_dma = -1;
                arena = NULL;
                hose = NULL;
        }

        /* Third, iterate over the scatterlist leaders and allocate
           dma space as needed.  */
        for (out = sg; sg < end; ++sg) {
                if ((int) sg->dma_address < 0)
                        continue;
                if (sg_fill(sg, end, out, arena, max_dma, dac_allowed) < 0)
                        goto error;
                out++;
        }

        /* Mark the end of the list for pci_unmap_sg.  */
        if (out < end)
                out->dma_length = 0;

        if (out - start == 0)
                printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
        DBGA("pci_map_sg: %ld entries\n", out - start);

        return out - start;

 error:
        printk(KERN_WARNING "pci_map_sg failed: "
               "could not allocate dma page tables\n");

        /* Some allocation failed while mapping the scatterlist
           entries.  Unmap them now.  */
        if (out > start)
                pci_unmap_sg(pdev, start, out - start, direction);
        return 0;
}
EXPORT_SYMBOL(pci_map_sg);
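/*
 * Mapping a scatterlist from a driver (a sketch; "dev", "sglist" and
 * "nents" are illustrative names):
 *
 *      int i, count = pci_map_sg(dev, sglist, nents, PCI_DMA_FROMDEVICE);
 *      for (i = 0; i < count; i++)
 *              ... program sg_dma_address(sglist + i) and
 *                  sg_dma_len(sglist + i) into the device ...
 *      pci_unmap_sg(dev, sglist, nents, PCI_DMA_FROMDEVICE);
 *
 * The returned count may be smaller than nents because adjacent entries
 * can be merged; pci_unmap_sg still takes the original nents.
 */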
/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */
void
pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
             int direction)
{
        unsigned long flags;
        struct pci_controller *hose;
        struct pci_iommu_arena *arena;
        struct scatterlist *end;
        dma_addr_t max_dma;
        dma_addr_t fbeg, fend;

        if (direction == PCI_DMA_NONE)
                BUG();

        if (! alpha_mv.mv_pci_tbi)
                return;

        hose = pdev ? pdev->sysdata : pci_isa_hose;
        max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
        arena = hose->sg_pci;
        if (!arena || arena->dma_base + arena->size - 1 > max_dma)
                arena = hose->sg_isa;

        fbeg = -1, fend = 0;

        spin_lock_irqsave(&arena->lock, flags);

        for (end = sg + nents; sg < end; ++sg) {
                dma64_addr_t addr;
                size_t size;
                long npages, ofs;
                dma_addr_t tend;

                addr = sg->dma_address;
                size = sg->dma_length;
                if (!size)
                        break;

                if (addr > 0xffffffff) {
                        /* It's a DAC address -- nothing to do.  */
                        DBGA("    (%ld) DAC [%lx,%lx]\n",
                             sg - end + nents, addr, size);
                        continue;
                }

                if (addr >= __direct_map_base
                    && addr < __direct_map_base + __direct_map_size) {
                        /* Nothing to do.  */
                        DBGA("    (%ld) direct [%lx,%lx]\n",
                             sg - end + nents, addr, size);
                        continue;
                }

                DBGA("    (%ld) sg [%lx,%lx]\n",
                     sg - end + nents, addr, size);

                npages = calc_npages((addr & ~PAGE_MASK) + size);
                ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
                iommu_arena_free(arena, ofs, npages);

                tend = addr + size - 1;
                if (fbeg > addr) fbeg = addr;
                if (fend < tend) fend = tend;
        }

        /* If we're freeing ptes above the `next_entry' pointer (they
           may have snuck back into the TLB since the last wrap flush),
           we need to flush the TLB before reallocating the latter.  */
        if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
                alpha_mv.mv_pci_tbi(hose, fbeg, fend);

        spin_unlock_irqrestore(&arena->lock, flags);

        DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}
EXPORT_SYMBOL(pci_unmap_sg);
/* Return whether the given PCI device DMA address mask can be
   supported properly.  */
int
pci_dma_supported(struct pci_dev *pdev, u64 mask)
{
        struct pci_controller *hose;
        struct pci_iommu_arena *arena;

        /* If there exists a direct map, and the mask fits either
           the entire direct mapped space or the total system memory as
           shifted by the map base */
        if (__direct_map_size != 0
            && (__direct_map_base + __direct_map_size - 1 <= mask ||
                __direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
                return 1;

        /* Check that we have a scatter-gather arena that fits.  */
        hose = pdev ? pdev->sysdata : pci_isa_hose;
        arena = hose->sg_isa;
        if (arena && arena->dma_base + arena->size - 1 <= mask)
                return 1;
        arena = hose->sg_pci;
        if (arena && arena->dma_base + arena->size - 1 <= mask)
                return 1;

        /* As last resort try ZONE_DMA.  */
        if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
                return 1;

        return 0;
}
EXPORT_SYMBOL(pci_dma_supported);
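/* The generic pci_set_dma_mask() path consults pci_dma_supported(), so a
   driver probing with, say, pci_set_dma_mask(pdev, DMA_32BIT_MASK) ends
   up being validated against the direct-map window or an sg arena above
   (a sketch of the caller side, not something defined in this file).  */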
/*
 * AGP GART extensions to the IOMMU
 */
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
        unsigned long flags;
        unsigned long *ptes;
        long i, p;

        if (!arena) return -EINVAL;

        spin_lock_irqsave(&arena->lock, flags);

        /* Search for N empty ptes.  */
        ptes = arena->ptes;
        p = iommu_arena_find_pages(arena, pg_count, align_mask);
        if (p < 0) {
                spin_unlock_irqrestore(&arena->lock, flags);
                return -1;
        }

        /* Success.  Mark them all reserved (ie not zero and invalid)
           for the iommu tlb that could load them from under us.
           They will be filled in with valid bits by _bind() */
        for (i = 0; i < pg_count; ++i)
                ptes[p+i] = IOMMU_RESERVED_PTE;

        arena->next_entry = p + pg_count;
        spin_unlock_irqrestore(&arena->lock, flags);

        return p;
}
int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
        unsigned long *ptes;
        long i;

        if (!arena) return -EINVAL;

        ptes = arena->ptes;

        /* Make sure they're all reserved first... */
        for(i = pg_start; i < pg_start + pg_count; i++)
                if (ptes[i] != IOMMU_RESERVED_PTE)
                        return -EBUSY;

        iommu_arena_free(arena, pg_start, pg_count);
        return 0;
}
int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
           unsigned long *physaddrs)
{
        unsigned long flags;
        unsigned long *ptes;
        long i, j;

        if (!arena) return -EINVAL;

        spin_lock_irqsave(&arena->lock, flags);

        ptes = arena->ptes;

        for(j = pg_start; j < pg_start + pg_count; j++) {
                if (ptes[j] != IOMMU_RESERVED_PTE) {
                        spin_unlock_irqrestore(&arena->lock, flags);
                        return -EBUSY;
                }
        }

        for(i = 0, j = pg_start; i < pg_count; i++, j++)
                ptes[j] = mk_iommu_pte(physaddrs[i]);

        spin_unlock_irqrestore(&arena->lock, flags);

        return 0;
}
int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
        unsigned long *p;
        long i;

        if (!arena) return -EINVAL;

        p = arena->ptes + pg_start;
        for(i = 0; i < pg_count; i++)
                p[i] = IOMMU_RESERVED_PTE;

        return 0;
}
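/* Intended call order for the GART code: iommu_reserve() claims a block
   of arena entries, iommu_bind() points the reserved ptes at real pages,
   iommu_unbind() returns them to the reserved-but-unmapped state, and
   iommu_release() finally frees the block.  */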
/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */

static int
pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
        dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
        int ok = 1;

        /* If this is not set, the machine doesn't support DAC at all.  */
        if (dac_offset == 0)
                ok = 0;

        /* The device has to be able to address our DAC bit.  */
        if ((dac_offset & dev->dma_mask) != dac_offset)
                ok = 0;

        /* If both conditions above are met, we are fine. */
        DBGA("pci_dac_dma_supported %s from %p\n",
             ok ? "yes" : "no", __builtin_return_address(0));

        return ok;
}
/* Helper for generic DMA-mapping functions. */

struct pci_dev *
alpha_gendev_to_pci(struct device *dev)
{
        if (dev && dev->bus == &pci_bus_type)
                return to_pci_dev(dev);

        /* Assume that non-PCI devices asking for DMA are either ISA or EISA,
           BUG() otherwise. */
        BUG_ON(!isa_bridge);

        /* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
           bridge is bus master then). */
        if (!dev || !dev->dma_mask || !*dev->dma_mask)
                return isa_bridge;

        /* For EISA bus masters, return isa_bridge (it might have smaller
           dma_mask due to wiring limitations). */
        if (*dev->dma_mask >= isa_bridge->dma_mask)
                return isa_bridge;

        /* This assumes ISA bus master with dma_mask 0xffffff. */
        return NULL;
}
EXPORT_SYMBOL(alpha_gendev_to_pci);
int
dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask ||
            !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
EXPORT_SYMBOL(dma_set_mask);