/* pci_iommu.c: UltraSparc PCI controller IOMMU/STC support.
 *
 * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/delay.h>

#include "iommu_common.h"

#define PCI_STC_CTXMATCH_ADDR(STC, CTX) \
        ((STC)->strbuf_ctxmatch_base + ((CTX) << 3))

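/* Each context's streaming-buffer match register sits at an 8-byte stride
 * from strbuf_ctxmatch_base, which is what the (CTX) << 3 scaling above
 * encodes.
 */
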
/* Accessing IOMMU and Streaming Buffer registers.
 * REG parameter is a physical address.  All registers
 * are 64-bits in size.
 */
#define pci_iommu_read(__reg) \
({      u64 __ret; \
        __asm__ __volatile__("ldxa [%1] %2, %0" \
                             : "=r" (__ret) \
                             : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
                             : "memory"); \
        __ret; \
})
#define pci_iommu_write(__reg, __val) \
        __asm__ __volatile__("stxa %0, [%1] %2" \
                             : /* no outputs */ \
                             : "r" (__val), "r" (__reg), \
                               "i" (ASI_PHYS_BYPASS_EC_E))

/* Must be invoked under the IOMMU lock. */
static void __iommu_flushall(struct iommu *iommu)
{
        if (iommu->iommu_flushinv) {
                pci_iommu_write(iommu->iommu_flushinv, ~(u64)0);
        } else {
                unsigned long tag;
                int entry;

                tag = iommu->iommu_flush + (0xa580UL - 0x0210UL);
                for (entry = 0; entry < 16; entry++) {
                        pci_iommu_write(tag, 0);
                        tag += 8UL;
                }

                /* Ensure completion of previous PIO writes. */
                (void) pci_iommu_read(iommu->write_complete_reg);
        }
}

#define IOPTE_CONSISTENT(CTX) \
        (IOPTE_VALID | IOPTE_CACHE | \
         (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
        (IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

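/* From the two macros above: a consistent-mode IOPTE is a valid, cacheable
 * entry carrying the DMA context number in its upper bits (shifted to bit 47),
 * while a streaming-mode IOPTE additionally sets IOPTE_STBUF so that the
 * transfer is staged through the streaming buffer.
 */
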
/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte) \
        ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

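/* Keeping unused entries valid but pointed at a private dummy page (rather
 * than clearing IOPTE_VALID) presumably means a device that keeps DMAing
 * through a stale mapping scribbles on one harmless page instead of raising
 * translation errors; IOPTE_IS_DUMMY() is how the DEBUG_PCI_IOMMU checks
 * below tell free entries from live ones.
 */
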
static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
        unsigned long val = iopte_val(*iopte);

        val &= ~IOPTE_PAGE;
        val |= iommu->dummy_page_pa;

        iopte_val(*iopte) = val;
}

/* Based largely upon the ppc64 iommu allocator.  */
static long pci_arena_alloc(struct iommu *iommu, unsigned long npages)
{
        struct iommu_arena *arena = &iommu->arena;
        unsigned long n, i, start, end, limit;
        int pass;

        limit = arena->limit;
        start = arena->hint;
        pass = 0;

again:
        n = find_next_zero_bit(arena->map, limit, start);
        end = n + npages;
        if (unlikely(end >= limit)) {
                if (likely(pass < 1)) {
                        limit = start;
                        start = 0;
                        __iommu_flushall(iommu);
                        pass++;
                        goto again;
                } else {
                        /* Scanned the whole thing, give up. */
                        return -1;
                }
        }

        for (i = n; i < end; i++) {
                if (test_bit(i, arena->map)) {
                        start = i + 1;
                        goto again;
                }
        }

        for (i = n; i < end; i++)
                __set_bit(i, arena->map);

        arena->hint = end;

        return n;
}

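/* The arena allocator above is a simple first-fit bitmap scan: it searches
 * from the end of the previous allocation (arena->hint), and if it runs off
 * the end of the map it flushes the IOMMU TLB once and rescans from the
 * start before giving up.  One bit in arena->map corresponds to one IO page
 * table entry.
 */
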
static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
{
        unsigned long i;

        for (i = base; i < (base + npages); i++)
                __clear_bit(i, arena->map);
}

void pci_iommu_table_init(struct iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask)
{
        unsigned long i, tsbbase, order, sz, num_tsb_entries;

        num_tsb_entries = tsbsize / sizeof(iopte_t);

        /* Setup initial software IOMMU state. */
        spin_lock_init(&iommu->lock);
        iommu->ctx_lowest_free = 1;
        iommu->page_table_map_base = dma_offset;
        iommu->dma_addr_mask = dma_addr_mask;

        /* Allocate and initialize the free area map. */
        sz = num_tsb_entries / 8;
        sz = (sz + 7UL) & ~7UL;
        iommu->arena.map = kzalloc(sz, GFP_KERNEL);
        if (!iommu->arena.map) {
                prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
                prom_halt();
        }
        iommu->arena.limit = num_tsb_entries;

        /* Allocate and initialize the dummy page which we
         * set inactive IO PTEs to point to.
         */
        iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
        if (!iommu->dummy_page) {
                prom_printf("PCI_IOMMU: Error, gfp(dummy_page) failed.\n");
                prom_halt();
        }
        memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
        iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

        /* Now allocate and setup the IOMMU page table itself. */
        order = get_order(tsbsize);
        tsbbase = __get_free_pages(GFP_KERNEL, order);
        if (!tsbbase) {
                prom_printf("PCI_IOMMU: Error, gfp(tsb) failed.\n");
                prom_halt();
        }
        iommu->page_table = (iopte_t *)tsbbase;

        for (i = 0; i < num_tsb_entries; i++)
                iopte_make_dummy(iommu, &iommu->page_table[i]);
}

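/* Example (hypothetical numbers): a controller driver with a 64KB TSB and a
 * DVMA window based at 0xc0000000 might initialize its IOMMU with
 *
 *      pci_iommu_table_init(iommu, 64 * 1024, 0xc0000000, 0xffffffff);
 *
 * giving 64K / 8 bytes per IOPTE = 8192 page table entries and a 1KB arena
 * bitmap (8192 bits).
 */
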
static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
{
        long entry;

        entry = pci_arena_alloc(iommu, npages);
        if (unlikely(entry < 0))
                return NULL;

        return iommu->page_table + entry;
}

static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
{
        pci_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
}

static int iommu_alloc_ctx(struct iommu *iommu)
{
        int lowest = iommu->ctx_lowest_free;
        int sz = IOMMU_NUM_CTXS - lowest;
        int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);

        if (unlikely(n == sz)) {
                n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
                if (unlikely(n == lowest)) {
                        printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
                        n = 0;
                }
        }
        if (n)
                __set_bit(n, iommu->ctx_bitmap);

        return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
        if (likely(ctx)) {
                __clear_bit(ctx, iommu->ctx_bitmap);
                if (ctx < iommu->ctx_lowest_free)
                        iommu->ctx_lowest_free = ctx;
        }
}

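/* DMA contexts come from a small bitmap; context 0 is effectively reserved
 * as "no context" (ctx_lowest_free starts at 1 and iommu_free_ctx() ignores
 * zero), and it is also what iommu_alloc_ctx() falls back to when every
 * context is in use.
 */
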
/* Allocate and map kernel buffer of size SIZE using consistent mode
 * DMA for PCI device PDEV.  Return non-NULL cpu-side address if
 * successful and set *DMA_ADDRP to the PCI side dma address.
 */
static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
{
        struct iommu *iommu;
        iopte_t *iopte;
        unsigned long flags, order, first_page;
        void *ret;
        int npages;

        size = IO_PAGE_ALIGN(size);
        order = get_order(size);
        if (unlikely(order >= 10))
                return NULL;

        first_page = __get_free_pages(gfp, order);
        if (first_page == 0UL)
                return NULL;
        memset((char *)first_page, 0, PAGE_SIZE << order);

        iommu = pdev->dev.archdata.iommu;

        spin_lock_irqsave(&iommu->lock, flags);
        iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(iopte == NULL)) {
                free_pages(first_page, order);
                return NULL;
        }

        *dma_addrp = (iommu->page_table_map_base +
                      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
        ret = (void *) first_page;
        npages = size >> IO_PAGE_SHIFT;
        first_page = __pa(first_page);
        while (npages--) {
                iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
                                     IOPTE_WRITE |
                                     (first_page & IOPTE_PAGE));
                iopte++;
                first_page += IO_PAGE_SIZE;
        }

        return ret;
}

/* Free and unmap a consistent DMA translation. */
static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
        struct iommu *iommu;
        iopte_t *iopte;
        unsigned long flags, order, npages;

        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        iommu = pdev->dev.archdata.iommu;
        iopte = iommu->page_table +
                ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        free_npages(iommu, dvma - iommu->page_table_map_base, npages);

        spin_unlock_irqrestore(&iommu->lock, flags);

        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
}

/* Map a single buffer at PTR of SZ bytes for PCI DMA
 * in streaming mode.
 */
static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr, ctx;
        u32 bus_addr, ret;
        unsigned long iopte_protection;

        iommu = pdev->dev.archdata.iommu;
        strbuf = pdev->dev.archdata.stc;

        if (unlikely(direction == PCI_DMA_NONE))
                goto bad_no_ctx;

        oaddr = (unsigned long)ptr;
        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;

        spin_lock_irqsave(&iommu->lock, flags);
        base = alloc_npages(iommu, npages);
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = iommu_alloc_ctx(iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(!base))
                goto bad;

        bus_addr = (iommu->page_table_map_base +
                    ((base - iommu->page_table) << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);
        if (strbuf->strbuf_enabled)
                iopte_protection = IOPTE_STREAMING(ctx);
        else
                iopte_protection = IOPTE_CONSISTENT(ctx);
        if (direction != PCI_DMA_TODEVICE)
                iopte_protection |= IOPTE_WRITE;

        for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
                iopte_val(*base) = iopte_protection | base_paddr;

        return ret;

bad:
        iommu_free_ctx(iommu, ctx);
bad_no_ctx:
        if (printk_ratelimit())
                WARN_ON(1);
        return PCI_DMA_ERROR_CODE;
}

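/* Streaming mappings made above keep the sub-page offset in the returned bus
 * address, are write-enabled unless the transfer is PCI_DMA_TODEVICE, and
 * are tagged with a DMA context when the IOMMU supports context flushing so
 * that the later unmap/sync can flush the streaming buffer by context rather
 * than page by page.  Drivers normally reach this through the generic
 * pci_map_single() wrapper.
 */
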
static void pci_strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
{
        int limit;

        if (strbuf->strbuf_ctxflush &&
            iommu->iommu_ctxflush) {
                unsigned long matchreg, flushreg;
                u64 val;

                flushreg = strbuf->strbuf_ctxflush;
                matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);

                pci_iommu_write(flushreg, ctx);
                val = pci_iommu_read(matchreg);
                val &= 0xffff;
                if (!val)
                        goto do_flush_sync;

                while (val) {
                        if (val & 0x1)
                                pci_iommu_write(flushreg, ctx);
                        val >>= 1;
                }
                val = pci_iommu_read(matchreg);
                if (unlikely(val)) {
                        printk(KERN_WARNING "pci_strbuf_flush: ctx flush "
                               "timeout matchreg[%lx] ctx[%lx]\n",
                               val, ctx);
                        goto do_page_flush;
                }
        } else {
                unsigned long i;

        do_page_flush:
                for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
                        pci_iommu_write(strbuf->strbuf_pflush, vaddr);
        }

do_flush_sync:
        /* If the device could not possibly have put dirty data into
         * the streaming cache, no flush-flag synchronization needs
         * to be performed.
         */
        if (direction == PCI_DMA_TODEVICE)
                return;

        PCI_STC_FLUSHFLAG_INIT(strbuf);
        pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
        (void) pci_iommu_read(iommu->write_complete_reg);

        limit = 100000;
        while (!PCI_STC_FLUSHFLAG_SET(strbuf)) {
                limit--;
                if (!limit)
                        break;
                udelay(1);
        }
        if (!limit)
                printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout "
                       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
                       vaddr, ctx, npages);
}

/* Unmap a single streaming mode DMA translation. */
static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, npages, ctx, i;

        if (unlikely(direction == PCI_DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return;
        }

        iommu = pdev->dev.archdata.iommu;
        strbuf = pdev->dev.archdata.stc;

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        base = iommu->page_table +
                ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
#ifdef DEBUG_PCI_IOMMU
        if (IOPTE_IS_DUMMY(iommu, base))
                printk("pci_unmap_single called on non-mapped region %08x,%08x from %016lx\n",
                       bus_addr, sz, __builtin_return_address(0));
#endif
        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);

        /* Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

        /* Step 1: Kick data out of streaming buffers if necessary. */
        if (strbuf->strbuf_enabled)
                pci_strbuf_flush(strbuf, iommu, bus_addr, ctx,
                                 npages, direction);

        /* Step 2: Clear out TSB entries. */
        for (i = 0; i < npages; i++)
                iopte_make_dummy(iommu, base + i);

        free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);

        iommu_free_ctx(iommu, ctx);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG) \
        (__pa(page_address((SG)->page)) + (SG)->offset)

static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
                           int nused, int nelems, unsigned long iopte_protection)
{
        struct scatterlist *dma_sg = sg;
        struct scatterlist *sg_end = sg + nelems;
        int i;

        for (i = 0; i < nused; i++) {
                unsigned long pteval = ~0UL;
                u32 dma_npages;

                dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
                              dma_sg->dma_length +
                              ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
                do {
                        unsigned long offset;
                        signed int len;

                        /* If we are here, we know we have at least one
                         * more page to map.  So walk forward until we
                         * hit a page crossing, and begin creating new
                         * mappings from that spot.
                         */
                        for (;;) {
                                unsigned long tmp;

                                tmp = SG_ENT_PHYS_ADDRESS(sg);
                                len = sg->length;
                                if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = tmp & IO_PAGE_MASK;
                                        offset = tmp & (IO_PAGE_SIZE - 1UL);
                                        break;
                                }
                                if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
                                        pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
                                        offset = 0UL;
                                        len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
                                        break;
                                }
                                sg++;
                        }

                        pteval = iopte_protection | (pteval & IOPTE_PAGE);
                        while (len > 0) {
                                *iopte++ = __iopte(pteval);
                                pteval += IO_PAGE_SIZE;
                                len -= (IO_PAGE_SIZE - offset);
                                offset = 0UL;
                                dma_npages--;
                        }

                        pteval = (pteval & IOPTE_PAGE) + len;
                        sg++;

                        /* Skip over any tail mappings we've fully mapped,
                         * adjusting pteval along the way.  Stop when we
                         * detect a page crossing event.
                         */
                        while (sg < sg_end &&
                               (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
                               (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
                               ((pteval ^
                                 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
                                pteval += sg->length;
                                sg++;
                        }
                        if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
                                pteval = ~0UL;
                } while (dma_npages != 0);
                dma_sg++;
        }
}

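/* fill_sg() walks the scatterlist alongside the dma_address/dma_length
 * values set up by prepare_sg() and emits one IOPTE per IO page, merging
 * physically contiguous scatterlist entries that fall into the same DMA
 * segment so that one mapped segment can cover several source buffers.
 */
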
/* Map a set of buffers described by SGLIST with NELEMS array
 * elements in streaming mode for PCI DMA.
 * When making changes here, inspect the assembly output.  I had a hard
 * time keeping this routine from using stack slots to hold variables.
 */
static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        unsigned long flags, ctx, npages, iopte_protection;
        iopte_t *base;
        u32 dma_base;
        struct scatterlist *sgtmp;
        int used;

        /* Fast path single entry scatterlists. */
        if (nelems == 1) {
                sglist->dma_address =
                        pci_4u_map_single(pdev,
                                          (page_address(sglist->page) + sglist->offset),
                                          sglist->length, direction);
                if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
                        return 0;
                sglist->dma_length = sglist->length;
                return 1;
        }

        iommu = pdev->dev.archdata.iommu;
        strbuf = pdev->dev.archdata.stc;

        if (unlikely(direction == PCI_DMA_NONE))
                goto bad_no_ctx;

        /* Step 1: Prepare scatter list. */

        npages = prepare_sg(sglist, nelems);

        /* Step 2: Allocate a cluster and context, if necessary. */

        spin_lock_irqsave(&iommu->lock, flags);

        base = alloc_npages(iommu, npages);
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = iommu_alloc_ctx(iommu);

        spin_unlock_irqrestore(&iommu->lock, flags);

        if (base == NULL)
                goto bad;

        dma_base = iommu->page_table_map_base +
                ((base - iommu->page_table) << IO_PAGE_SHIFT);

        /* Step 3: Normalize DMA addresses. */
        used = nelems;

        sgtmp = sglist;
        while (used && sgtmp->dma_length) {
                sgtmp->dma_address += dma_base;
                sgtmp++;
                used--;
        }
        used = nelems - used;

        /* Step 4: Create the mappings. */
        if (strbuf->strbuf_enabled)
                iopte_protection = IOPTE_STREAMING(ctx);
        else
                iopte_protection = IOPTE_CONSISTENT(ctx);
        if (direction != PCI_DMA_TODEVICE)
                iopte_protection |= IOPTE_WRITE;

        fill_sg(base, sglist, used, nelems, iopte_protection);

#ifdef VERIFY_SG
        verify_sglist(sglist, nelems, base, npages);
#endif

        return used;

bad:
        iommu_free_ctx(iommu, ctx);
bad_no_ctx:
        if (printk_ratelimit())
                WARN_ON(1);
        return 0;
}

/* Unmap a set of streaming mode DMA translations. */
static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, ctx, i, npages;
        u32 bus_addr;

        if (unlikely(direction == PCI_DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
        }

        iommu = pdev->dev.archdata.iommu;
        strbuf = pdev->dev.archdata.stc;

        bus_addr = sglist->dma_address & IO_PAGE_MASK;

        for (i = 1; i < nelems; i++)
                if (sglist[i].dma_length == 0)
                        break;
        i--;
        npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
                  bus_addr) >> IO_PAGE_SHIFT;

        base = iommu->page_table +
                ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

#ifdef DEBUG_PCI_IOMMU
        if (IOPTE_IS_DUMMY(iommu, base))
                printk("pci_unmap_sg called on non-mapped region %016lx,%d from %016lx\n", sglist->dma_address, nelems, __builtin_return_address(0));
#endif

        spin_lock_irqsave(&iommu->lock, flags);

        /* Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

        /* Step 1: Kick data out of streaming buffers if necessary. */
        if (strbuf->strbuf_enabled)
                pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

        /* Step 2: Clear out the TSB entries. */
        for (i = 0; i < npages; i++)
                iopte_make_dummy(iommu, base + i);

        free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);

        iommu_free_ctx(iommu, ctx);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

/* Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 */
static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        unsigned long flags, ctx, npages;

        iommu = pdev->dev.archdata.iommu;
        strbuf = pdev->dev.archdata.stc;

        if (!strbuf->strbuf_enabled)
                return;

        spin_lock_irqsave(&iommu->lock, flags);

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;

        /* Step 1: Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush &&
            strbuf->strbuf_ctxflush) {
                iopte_t *iopte;

                iopte = iommu->page_table +
                        ((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
                ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
        }

        /* Step 2: Kick data out of streaming buffers. */
        pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 */
static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        unsigned long flags, ctx, npages, i;
        u32 bus_addr;

        iommu = pdev->dev.archdata.iommu;
        strbuf = pdev->dev.archdata.stc;

        if (!strbuf->strbuf_enabled)
                return;

        spin_lock_irqsave(&iommu->lock, flags);

        /* Step 1: Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush &&
            strbuf->strbuf_ctxflush) {
                iopte_t *iopte;

                iopte = iommu->page_table +
                        ((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
                ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
        }

        /* Step 2: Kick data out of streaming buffers. */
        bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
        for (i = 1; i < nelems; i++)
                if (!sglist[i].dma_length)
                        break;
        i--;
        npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
                  - bus_addr) >> IO_PAGE_SHIFT;
        pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

const struct pci_iommu_ops pci_sun4u_iommu_ops = {
        .alloc_consistent               = pci_4u_alloc_consistent,
        .free_consistent                = pci_4u_free_consistent,
        .map_single                     = pci_4u_map_single,
        .unmap_single                   = pci_4u_unmap_single,
        .map_sg                         = pci_4u_map_sg,
        .unmap_sg                       = pci_4u_unmap_sg,
        .dma_sync_single_for_cpu        = pci_4u_dma_sync_single_for_cpu,
        .dma_sync_sg_for_cpu            = pci_4u_dma_sync_sg_for_cpu,
};

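/* This ops table is how the rest of the sparc64 PCI code reaches the sun4u
 * routines above: the generic pci_map_*()/pci_alloc_consistent() wrappers
 * dispatch through a pci_iommu_ops pointer, so an alternative implementation
 * (e.g. for sun4v hypervisor machines) can be slotted in the same way.
 */
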
static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
{
        struct pci_dev *ali_isa_bridge;
        u8 val;

        /* ALI sound chips generate 31 bits of DMA; a special register
         * determines what bit 31 is emitted as.
         */
        ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
                                        PCI_DEVICE_ID_AL_M1533,
                                        NULL);

        pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
        if (set_bit)
                val |= 0x01;
        else
                val &= ~0x01;
        pci_write_config_byte(ali_isa_bridge, 0x7e, val);
        pci_dev_put(ali_isa_bridge);
}

int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
{
        u64 dma_addr_mask;

        if (pdev == NULL) {
                dma_addr_mask = 0xffffffff;
        } else {
                struct iommu *iommu = pdev->dev.archdata.iommu;

                dma_addr_mask = iommu->dma_addr_mask;
        }

        if (pdev->vendor == PCI_VENDOR_ID_AL &&
            pdev->device == PCI_DEVICE_ID_AL_M5451 &&
            device_mask == 0x7fffffff) {
                ali_sound_dma_hack(pdev,
                                   (dma_addr_mask & 0x80000000) != 0);
                return 1;
        }

        if (device_mask >= (1UL << 32UL))
                return 0;

        return (device_mask & dma_addr_mask) == dma_addr_mask;
}