** IA64 System Bus Adapter (SBA) I/O MMU manager
** (c) Copyright 2002-2005 Alex Williamson
** (c) Copyright 2002-2003 Grant Grundler
** (c) Copyright 2002-2005 Hewlett-Packard Company
** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
** This module initializes the IOC (I/O Controller) found on HP
** McKinley machines and their successors.
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <linux/bitops.h> /* hweight64() */
#include <asm/delay.h> /* ia64_get_itc() */
#include <asm/page.h> /* PAGE_OFFSET */
#include <asm/system.h> /* wmb() */
#include <asm/acpi-ext.h>
** Enabling timing search of the pdir resource map. Output in /proc.
** Disabled by default to optimize performance.
#undef PDIR_SEARCH_TIMING
** This option allows cards capable of 64bit DMA to bypass the IOMMU. If
** not defined, all DMA will be 32bit and go through the TLB.
** There's potentially a conflict in the bio merge code with us
** advertising an iommu, but then bypassing it. Since I/O MMU bypassing
** appears to give more performance than bio-level virtual merging, we'll
** do the former for now. NOTE: BYPASS_SG also needs to be undef'd to
** completely restrict DMA to the IOMMU.
#define ALLOW_IOV_BYPASS
** This option specifically allows/disallows bypassing scatterlists with
** multiple entries. Coalescing these entries can allow better DMA streaming
** and in some cases shows better performance than entirely bypassing the
** IOMMU. Performance increase on the order of 1-2% sequential output/input
** using bonnie++ on a RAID0 MD device (sym2 & mpt).
#undef ALLOW_IOV_BYPASS_SG
** If a device prefetches beyond the end of a valid pdir entry, it will cause
** a hard failure, ie. MCA. Version 3.0 and later of the zx1 LBA should
** disconnect on 4k boundaries and prevent such issues. If the device is
** particularly aggressive, this option will keep the entire pdir valid such
** that prefetching will hit a valid address. This could severely impact
** error containment, and is therefore off by default. The page that is
** used for spill-over is poisoned, so that should help debugging somewhat.
#undef FULL_VALID_PDIR
#define ENABLE_MARK_CLEAN
** The number of debug flags is a clue - this code is fragile. NOTE: since
** tightening the use of res_lock the resource bitmap and actual pdir are no
** longer guaranteed to stay in sync. The sanity checking code isn't going to
** catch every such inconsistency.
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
#error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
#define SBA_INLINE __inline__
/* #define SBA_INLINE */
#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...) printk(x)
#define DBG_INIT(x...)
#define DBG_RUN(x...) printk(x)
#define DBG_RUN(x...)
#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...) printk(x)
#define DBG_RUN_SG(x...)
#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...) printk(x)
#define DBG_RES(x...)
#define DBG_BYPASS(x...) printk(x)
#define DBG_BYPASS(x...)
#ifdef ASSERT_PDIR_SANITY
#define ASSERT(expr) \
printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \
** The number of pdir entries to "free" before issuing
** a read to PCOM register to flush out PCOM writes.
** Interacts with allocation granularity (ie 4 or 8 entries
** allocated and free'd/purged at a time might make this
** less interesting).
#define DELAYED_RESOURCE_CNT 64
#define PCI_DEVICE_ID_HP_SX2000_IOC 0x12ec
#define ZX1_IOC_ID ((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
#define ZX2_IOC_ID ((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP)
#define REO_IOC_ID ((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX1000_IOC_ID ((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX2000_IOC_ID ((PCI_DEVICE_ID_HP_SX2000_IOC << 16) | PCI_VENDOR_ID_HP)
#define ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */
#define IOC_FUNC_ID 0x000
#define IOC_FCLASS 0x008 /* function class, bist, header, rev... */
#define IOC_IBASE 0x300 /* IO TLB */
#define IOC_IMASK 0x308
#define IOC_PCOM 0x310
#define IOC_TCNFG 0x318
#define IOC_PDIR_BASE 0x320
#define IOC_ROPE0_CFG 0x500
#define IOC_ROPE_AO 0x10 /* Allow "Relaxed Ordering" */
/* AGP GART driver looks for this */
#define ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register)
** Some IOCs (sx1000) can run at the above page sizes, but are
** really only supported using the IOC at a 4k page size.
** iovp_size could only be greater than PAGE_SIZE if we are
** confident the drivers really only touch the next physical
** page iff that driver instance owns it.
static unsigned long iovp_size;
static unsigned long iovp_shift;
static unsigned long iovp_mask;
void __iomem *ioc_hpa; /* I/O MMU base address */
char *res_map; /* resource map, bit == pdir entry */
u64 *pdir_base; /* physical base address */
unsigned long ibase; /* pdir IOV Space base */
unsigned long imask; /* pdir IOV Space mask */
unsigned long *res_hint; /* next avail IOVP - circular search */
unsigned long dma_mask;
spinlock_t res_lock; /* protects the resource bitmap, but must be held when */
/* clearing pdir to prevent races with allocations. */
unsigned int res_bitshift; /* from the RIGHT! */
unsigned int res_size; /* size of resource map in bytes */
unsigned int node; /* node where this IOC lives */
#if DELAYED_RESOURCE_CNT > 0
spinlock_t saved_lock; /* may want to try to get this on a separate cacheline */
/* than res_lock for bigger systems. */
struct sba_dma_pair {
} saved[DELAYED_RESOURCE_CNT];
#ifdef PDIR_SEARCH_TIMING
#define SBA_SEARCH_SAMPLE 0x100
unsigned long avg_search[SBA_SEARCH_SAMPLE];
unsigned long avg_idx; /* current index into avg_search */
/* Stuff we don't need in performance path */
struct ioc *next; /* list of IOC's in system */
acpi_handle handle; /* for multiple IOC's */
unsigned int func_id;
unsigned int rev; /* HW revision of chip */
unsigned int pdir_size; /* in bytes, determined by IOV Space size */
struct pci_dev *sac_only_dev;
static struct ioc *ioc_list;
static int reserve_sba_gart = 1;
static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);
#define sba_sg_address(sg) (page_address((sg)->page) + (sg)->offset)
#ifdef FULL_VALID_PDIR
static u64 prefetch_spill_page;
# define GET_IOC(dev) (((dev)->bus == &pci_bus_type) \
? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
# define GET_IOC(dev) NULL
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
** (or rather not merge) DMA's into manageable chunks.
** On parisc, this is more of the software/tuning constraint
** rather than the HW. I/O MMU allocation algorithms can be
** faster with smaller sizes (to some degree).
#define DMA_CHUNK_SIZE (BITS_PER_LONG*iovp_size)
#define ROUNDUP(x,y) (((x) + ((y)-1)) & ~((y)-1))
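/*
** Worked examples (illustrative only; they assume iovp_size == 4k and
** BITS_PER_LONG == 64, neither of which is guaranteed on every config):
**   DMA_CHUNK_SIZE          == 64 * 4096 == 256KB per DMA chunk
**   ROUNDUP(0x1234, 0x1000) == 0x2000   (round up to iovp_size multiple)
**   ROUNDUP(0x1000, 0x1000) == 0x1000   (already aligned: unchanged)
*/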
/************************************
** SBA register read and write support
** BE WARNED: register writes are posted.
** (ie follow writes which must reach HW with a read)
#define READ_REG(addr) __raw_readq(addr)
#define WRITE_REG(val, addr) __raw_writeq(val, addr)
#ifdef DEBUG_SBA_INIT
* sba_dump_tlb - debugging only - print IOMMU operating parameters
* @hpa: base address of the IOMMU
* Print the size/location of the IO MMU PDIR.
sba_dump_tlb(char *hpa)
DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);
DBG_INIT("IOC_IBASE : %016lx\n", READ_REG(hpa+IOC_IBASE));
DBG_INIT("IOC_IMASK : %016lx\n", READ_REG(hpa+IOC_IMASK));
DBG_INIT("IOC_TCNFG : %016lx\n", READ_REG(hpa+IOC_TCNFG));
DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE));
#ifdef ASSERT_PDIR_SANITY
* sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @msg: text to print on the output line.
* Print one entry of the IO MMU PDIR in human readable form.
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
/* start printing from lowest pde in rval */
u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)];
unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >> 3) & -sizeof(unsigned long)];
printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);
while (rcnt < BITS_PER_LONG) {
printk(KERN_DEBUG "%s %2d %p %016Lx\n",
(rcnt == (pide & (BITS_PER_LONG - 1)))
rcnt, ptr, (unsigned long long) *ptr );
printk(KERN_DEBUG "%s", msg);
* sba_check_pdir - debugging only - consistency checker
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @msg: text to print on the output line.
* Verify the resource map and pdir state are consistent
sba_check_pdir(struct ioc *ioc, char *msg)
u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]);
u64 *rptr = (u64 *) ioc->res_map; /* resource map ptr */
u64 *pptr = ioc->pdir_base; /* pdir ptr */
while (rptr < rptr_end) {
int rcnt; /* number of bits we might check */
/* Get last byte and highest bit from that */
u32 pde = ((u32)((*pptr >> (63)) & 0x1));
if ((rval & 0x1) ^ pde)
** BUMMER! -- res_map != pdir --
** Dump rval and matching pdir entries
sba_dump_pdir_entry(ioc, msg, pide);
rval >>= 1; /* try the next bit */
rptr++; /* look at next word of res_map */
/* It'd be nice if we always got here :^) */
* sba_dump_sg - debugging only - print Scatter-Gather list
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @startsg: head of the SG list
* @nents: number of entries in SG list
* print the SG list so we can verify it's correct by hand.
sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
while (nents-- > 0) {
printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
startsg->dma_address, startsg->dma_length,
sba_sg_address(startsg));
sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
struct scatterlist *the_sg = startsg;
int the_nents = nents;
while (the_nents-- > 0) {
if (sba_sg_address(the_sg) == 0x0UL)
sba_dump_sg(NULL, startsg, nents);
#endif /* ASSERT_PDIR_SANITY */
/**************************************************************
* I/O Pdir Resource Management
* Bits set in the resource map are in use.
* Each bit can represent a number of pages.
* LSbs represent lower addresses (IOVA's).
***************************************************************/
#define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */
/* Convert from IOVP to IOVA and vice versa. */
#define SBA_IOVA(ioc,iovp,offset) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase))
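/*
** Illustrative example (hypothetical values, not from any real box):
** with ioc->ibase == 0x40000000, iovp == 0x3000 and offset == 0x234,
**   SBA_IOVA(ioc, 0x3000, 0x234) == 0x40003234
**   SBA_IOVP(ioc, 0x40003234)    == 0x3234  (ibase bits stripped)
** Note SBA_IOVP() keeps the page offset; callers mask with iovp_mask
** when they need the bare page address.
*/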
#define PDIR_ENTRY_SIZE sizeof(u64)
#define PDIR_INDEX(iovp) ((iovp)>>iovp_shift)
#define RESMAP_MASK(n) ~(~0UL << (n))
#define RESMAP_IDX_MASK (sizeof(unsigned long) - 1)
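/*
** Quick sanity examples (assuming 64bit longs and iovp_shift == 12):
**   PDIR_INDEX(0x5000) == 5      (IOVP page 5)
**   RESMAP_MASK(1)     == 0x1UL
**   RESMAP_MASK(4)     == 0xfUL  (n consecutive LSBs set)
*/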
* For most cases the normal get_order is sufficient, however it limits us
* to PAGE_SIZE being the minimum mapping alignment and TC flush granularity.
* It only incurs about 1 clock cycle to use this one with the static variable
* and makes the code more intuitive.
static SBA_INLINE int
get_iovp_order (unsigned long size)
long double d = size - 1;
order = ia64_getf_exp(d);
order = order - iovp_shift - 0xffff + 1;
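/*
** Worked example (assuming iovp_shift == 12, i.e. 4k IOVA pages):
** ia64_getf_exp() returns the biased (0xffff) binary exponent of
** size-1, so the math reduces to floor(log2(size-1)) - iovp_shift + 1:
**   get_iovp_order(4096) == 0   (one 4k page)
**   get_iovp_order(4097) == 1   (rounds up to a two-page order)
**   get_iovp_order(8192) == 1
**   get_iovp_order(8193) == 2
*/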
* sba_search_bitmap - find free space in IO PDIR resource bitmap
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @bits_wanted: number of entries we need.
* @use_hint: use res_hint to indicate where to start looking
* Find consecutive free bits in resource bitmap.
* Each bit represents one entry in the IO Pdir.
* Cool perf optimization: search for log2(size) bits at a time.
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
unsigned long *res_ptr;
unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
unsigned long flags, pide = ~0UL;
ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
ASSERT(res_ptr < res_end);
spin_lock_irqsave(&ioc->res_lock, flags);
/* Allow caller to force a search through the entire resource space */
if (likely(use_hint)) {
res_ptr = ioc->res_hint;
res_ptr = (ulong *)ioc->res_map;
ioc->res_bitshift = 0;
* N.B. REO/Grande defect AR2305 can cause TLB fetch timeouts
* if a TLB entry is purged while in use. sba_mark_invalid()
* purges IOTLB entries in power-of-two sizes, so we also
* allocate IOVA space in power-of-two sizes.
bits_wanted = 1UL << get_iovp_order(bits_wanted << iovp_shift);
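/*
** Example of the power-of-two round up (assuming iovp_shift == 12):
** a request for 3 pages becomes 1UL << get_iovp_order(3 << 12), i.e.
** 1UL << 2 == 4 pages, so the later PCOM purge of the same range can
** be expressed as a single power-of-two sized, size-aligned purge.
*/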
if (likely(bits_wanted == 1)) {
unsigned int bitshiftcnt;
for(; res_ptr < res_end ; res_ptr++) {
if (likely(*res_ptr != ~0UL)) {
bitshiftcnt = ffz(*res_ptr);
*res_ptr |= (1UL << bitshiftcnt);
pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
pide <<= 3; /* convert to bit address */
ioc->res_bitshift = bitshiftcnt + bits_wanted;
if (likely(bits_wanted <= BITS_PER_LONG/2)) {
** Search the resource bit map on well-aligned values.
** "o" is the alignment.
** We need the alignment to invalidate I/O TLB using
** SBA HW features in the unmap path.
unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift);
uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
unsigned long mask, base_mask;
base_mask = RESMAP_MASK(bits_wanted);
mask = base_mask << bitshiftcnt;
DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr);
for(; res_ptr < res_end ; res_ptr++)
DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
for (; mask ; mask <<= o, bitshiftcnt += o) {
if(0 == ((*res_ptr) & mask)) {
*res_ptr |= mask; /* mark resources busy! */
pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
pide <<= 3; /* convert to bit address */
ioc->res_bitshift = bitshiftcnt + bits_wanted;
qwords = bits_wanted >> 6; /* /64 */
bits = bits_wanted - (qwords * BITS_PER_LONG);
end = res_end - qwords;
for (; res_ptr < end; res_ptr++) {
for (i = 0 ; i < qwords ; i++) {
if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits))
/* Found it, mark it */
for (i = 0 ; i < qwords ; i++)
res_ptr[i] |= RESMAP_MASK(bits);
pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
pide <<= 3; /* convert to bit address */
ioc->res_bitshift = bits;
prefetch(ioc->res_map);
ioc->res_hint = (unsigned long *) ioc->res_map;
ioc->res_bitshift = 0;
spin_unlock_irqrestore(&ioc->res_lock, flags);
ioc->res_hint = res_ptr;
spin_unlock_irqrestore(&ioc->res_lock, flags);
* sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @size: number of bytes to create a mapping for
* Given a size, find consecutive unmarked and then mark those bits in the
sba_alloc_range(struct ioc *ioc, size_t size)
unsigned int pages_needed = size >> iovp_shift;
#ifdef PDIR_SEARCH_TIMING
unsigned long itc_start;
ASSERT(pages_needed);
ASSERT(0 == (size & ~iovp_mask));
#ifdef PDIR_SEARCH_TIMING
itc_start = ia64_get_itc();
** "seek and ye shall find"...praying never hurts either...
pide = sba_search_bitmap(ioc, pages_needed, 1);
if (unlikely(pide >= (ioc->res_size << 3))) {
pide = sba_search_bitmap(ioc, pages_needed, 0);
if (unlikely(pide >= (ioc->res_size << 3))) {
#if DELAYED_RESOURCE_CNT > 0
** With delayed resource freeing, we can give this one more shot. We're
** getting close to being in trouble here, so do what we can to make this
spin_lock_irqsave(&ioc->saved_lock, flags);
if (ioc->saved_cnt > 0) {
struct sba_dma_pair *d;
int cnt = ioc->saved_cnt;
d = &(ioc->saved[ioc->saved_cnt - 1]);
spin_lock(&ioc->res_lock);
sba_mark_invalid(ioc, d->iova, d->size);
sba_free_range(ioc, d->iova, d->size);
READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
spin_unlock(&ioc->res_lock);
spin_unlock_irqrestore(&ioc->saved_lock, flags);
pide = sba_search_bitmap(ioc, pages_needed, 0);
if (unlikely(pide >= (ioc->res_size << 3)))
panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
#ifdef PDIR_SEARCH_TIMING
ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed;
ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
prefetchw(&(ioc->pdir_base[pide]));
#ifdef ASSERT_PDIR_SANITY
/* verify the first enable bit is clear */
if(0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) {
sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
__FUNCTION__, size, pages_needed, pide,
(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
* sba_free_range - unmark bits in IO PDIR resource bitmap
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @iova: IO virtual address which was previously allocated.
* @size: number of bytes that were mapped
* clear bits in the ioc's resource map
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
unsigned long iovp = SBA_IOVP(ioc, iova);
unsigned int pide = PDIR_INDEX(iovp);
unsigned int ridx = pide >> 3; /* convert bit to byte address */
unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
int bits_not_wanted = size >> iovp_shift;
/* Round up to power-of-two size: see AR2305 note above */
bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << iovp_shift);
for (; bits_not_wanted > 0 ; res_ptr++) {
if (unlikely(bits_not_wanted > BITS_PER_LONG)) {
/* these mappings start 64bit aligned */
bits_not_wanted -= BITS_PER_LONG;
pide += BITS_PER_LONG;
/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));
DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __FUNCTION__, (uint) iova, size,
bits_not_wanted, m, pide, res_ptr, *res_ptr);
ASSERT(bits_not_wanted);
ASSERT((*res_ptr & m) == m); /* verify same bits are set */
/**************************************************************
* "Dynamic DMA Mapping" support (aka "Coherent I/O")
***************************************************************/
* sba_io_pdir_entry - fill in one IO PDIR entry
* @pdir_ptr: pointer to IO PDIR entry
* @vba: Virtual CPU address of buffer to map
* SBA Mapping Routine
* Given a virtual address (vba, arg1) sba_io_pdir_entry()
* loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
* Each IO Pdir entry consists of 8 bytes as shown below
* +-+---------------------+----------------------------------+----+--------+
* |V|          U          |            PPN[39:12]            | U  |   FF   |
* +-+---------------------+----------------------------------+----+--------+
* PPN == Physical Page Number
* The physical address fields are filled with the results of virt_to_phys()
#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL) \
| 0x8000000000000000ULL)
sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
*pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
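/*
** Illustrative encoding example (region-7 identity-mapped kernel
** address; the exact value is made up):
**   vba   = 0xE000000100254000   (virt_to_phys() == 0x100254000)
**   entry = (vba & ~0xE000000000000FFF) | 0x80000000000000FF
**         = 0x80000001002540FF
** Bit 63 is the Valid bit, bits 39:12 carry the PPN, and the low FF
** byte is the flag field written by this routine.
*/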
#ifdef ENABLE_MARK_CLEAN
* Since DMA is i-cache coherent, any (complete) pages that were written via
* DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
* flush them when they get mapped into an executable vm-area.
mark_clean (void *addr, size_t size)
unsigned long pg_addr, end;
pg_addr = PAGE_ALIGN((unsigned long) addr);
end = (unsigned long) addr + size;
while (pg_addr + PAGE_SIZE <= end) {
struct page *page = virt_to_page((void *)pg_addr);
set_bit(PG_arch_1, &page->flags);
pg_addr += PAGE_SIZE;
* sba_mark_invalid - invalidate one or more IO PDIR entries
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @iova: IO Virtual Address mapped earlier
* @byte_cnt: number of bytes this mapping covers.
* Mark the IO PDIR entry(ies) as Invalid and invalidate the
* corresponding IO TLB entry. The PCOM (Purge Command Register)
* is used to purge stale entries in the IO TLB when unmapping.
* The PCOM register supports purging of multiple pages, with a minimum
* of 1 page and a maximum of 2GB. Hardware requires the address be
* aligned to the size of the range being purged. The size of the range
* must be a power of 2. The "Cool perf optimization" in the
* allocation routine helps keep that true.
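/*
** PCOM encoding example (illustrative; assumes iovp_shift == 12):
** purging one 4k page at IOVP 0x10000 writes 0x10000 | 12 == 0x1000C
** (plus ioc->ibase), while a 16k (order 2) purge writes
** 0x10000 | (2 + 12) == 0x1000E. The low bits encode log2(bytes
** purged), which is why the range must be size-aligned.
*/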
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
u32 iovp = (u32) SBA_IOVP(ioc,iova);
int off = PDIR_INDEX(iovp);
/* Must be non-zero and rounded up */
ASSERT(byte_cnt > 0);
ASSERT(0 == (byte_cnt & ~iovp_mask));
#ifdef ASSERT_PDIR_SANITY
/* Assert first pdir entry is set */
if (!(ioc->pdir_base[off] >> 60)) {
sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
if (byte_cnt <= iovp_size)
ASSERT(off < ioc->pdir_size);
iovp |= iovp_shift; /* set "size" field for PCOM */
#ifndef FULL_VALID_PDIR
** clear I/O PDIR entry "valid" bit
** Do NOT clear the rest - save it for debugging.
** We should only clear bits that have previously
ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
** If we want to maintain the PDIR as valid, put in
** the spill page so devices prefetching won't
** cause a hard fail.
ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
u32 t = get_iovp_order(byte_cnt) + iovp_shift;
ASSERT(t <= 31); /* 2GB! Max value of "size" field */
/* verify this pdir entry is enabled */
ASSERT(ioc->pdir_base[off] >> 63);
#ifndef FULL_VALID_PDIR
/* clear I/O Pdir entry "valid" bit first */
ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
byte_cnt -= iovp_size;
} while (byte_cnt > 0);
WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM);
* sba_map_single - map one buffer and return IOVA for DMA
* @dev: instance of PCI owned by the driver that's asking.
* @addr: driver buffer to map.
* @size: number of bytes to map in driver buffer.
* See Documentation/DMA-mapping.txt
sba_map_single(struct device *dev, void *addr, size_t size, int dir)
#ifdef ASSERT_PDIR_SANITY
#ifdef ALLOW_IOV_BYPASS
unsigned long pci_addr = virt_to_phys(addr);
#ifdef ALLOW_IOV_BYPASS
ASSERT(to_pci_dev(dev)->dma_mask);
** Check if the PCI device can DMA to ptr... if so, just return ptr
if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) {
** Device is capable of DMA'ing to the buffer...
** just return the PCI address of ptr
DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n",
to_pci_dev(dev)->dma_mask, pci_addr);
prefetch(ioc->res_hint);
ASSERT(size <= DMA_CHUNK_SIZE);
/* save offset bits */
offset = ((dma_addr_t) (long) addr) & ~iovp_mask;
/* round up to nearest iovp_size */
size = (size + offset + ~iovp_mask) & iovp_mask;
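/*
** Rounding example (assuming 4k IOVA pages, so iovp_mask == ~0xFFF):
**   addr == 0x...1234, size == 0x100
**   offset == 0x234
**   size   == (0x100 + 0x234 + 0xFFF) & ~0xFFF == 0x1000 (one page)
** The offset is re-applied to the IOVA by SBA_IOVA() at the end.
*/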
#ifdef ASSERT_PDIR_SANITY
spin_lock_irqsave(&ioc->res_lock, flags);
if (sba_check_pdir(ioc,"Check before sba_map_single()"))
panic("Sanity check failed");
spin_unlock_irqrestore(&ioc->res_lock, flags);
pide = sba_alloc_range(ioc, size);
iovp = (dma_addr_t) pide << iovp_shift;
DBG_RUN("%s() 0x%p -> 0x%lx\n",
__FUNCTION__, addr, (long) iovp | offset);
pdir_start = &(ioc->pdir_base[pide]);
ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
sba_io_pdir_entry(pdir_start, (unsigned long) addr);
DBG_RUN(" pdir 0x%p %lx\n", pdir_start, *pdir_start);
/* force pdir update */
/* form complete address */
#ifdef ASSERT_PDIR_SANITY
spin_lock_irqsave(&ioc->res_lock, flags);
sba_check_pdir(ioc,"Check after sba_map_single()");
spin_unlock_irqrestore(&ioc->res_lock, flags);
return SBA_IOVA(ioc, iovp, offset);
#ifdef ENABLE_MARK_CLEAN
static SBA_INLINE void
sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
u32 iovp = (u32) SBA_IOVP(ioc,iova);
int off = PDIR_INDEX(iovp);
if (size <= iovp_size) {
addr = phys_to_virt(ioc->pdir_base[off] &
~0xE000000000000FFFULL);
mark_clean(addr, size);
addr = phys_to_virt(ioc->pdir_base[off] &
~0xE000000000000FFFULL);
mark_clean(addr, min(size, iovp_size));
* sba_unmap_single - unmap one IOVA and free resources
* @dev: instance of PCI owned by the driver that's asking.
* @iova: IOVA of driver buffer previously mapped.
* @size: number of bytes mapped in driver buffer.
* See Documentation/DMA-mapping.txt
void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
#if DELAYED_RESOURCE_CNT > 0
struct sba_dma_pair *d;
unsigned long flags;
#ifdef ALLOW_IOV_BYPASS
if (likely((iova & ioc->imask) != ioc->ibase)) {
** Address does not fall w/in IOVA, must be bypassing
DBG_BYPASS("sba_unmap_single() bypass addr: 0x%lx\n", iova);
#ifdef ENABLE_MARK_CLEAN
if (dir == DMA_FROM_DEVICE) {
mark_clean(phys_to_virt(iova), size);
offset = iova & ~iovp_mask;
DBG_RUN("%s() iovp 0x%lx/%x\n",
__FUNCTION__, (long) iova, size);
iova ^= offset; /* clear offset bits */
size = ROUNDUP(size, iovp_size);
#ifdef ENABLE_MARK_CLEAN
if (dir == DMA_FROM_DEVICE)
sba_mark_clean(ioc, iova, size);
#if DELAYED_RESOURCE_CNT > 0
spin_lock_irqsave(&ioc->saved_lock, flags);
d = &(ioc->saved[ioc->saved_cnt]);
if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) {
int cnt = ioc->saved_cnt;
spin_lock(&ioc->res_lock);
sba_mark_invalid(ioc, d->iova, d->size);
sba_free_range(ioc, d->iova, d->size);
READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
spin_unlock(&ioc->res_lock);
spin_unlock_irqrestore(&ioc->saved_lock, flags);
#else /* DELAYED_RESOURCE_CNT == 0 */
spin_lock_irqsave(&ioc->res_lock, flags);
sba_mark_invalid(ioc, iova, size);
sba_free_range(ioc, iova, size);
READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif /* DELAYED_RESOURCE_CNT == 0 */
* sba_alloc_coherent - allocate/map shared mem for DMA
* @dev: instance of PCI owned by the driver that's asking.
* @size: number of bytes mapped in driver buffer.
* @dma_handle: IOVA of new buffer.
* See Documentation/DMA-mapping.txt
sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, int flags)
page = alloc_pages_node(ioc->node == MAX_NUMNODES ?
numa_node_id() : ioc->node, flags,
if (unlikely(!page))
addr = page_address(page);
addr = (void *) __get_free_pages(flags, get_order(size));
if (unlikely(!addr))
memset(addr, 0, size);
*dma_handle = virt_to_phys(addr);
#ifdef ALLOW_IOV_BYPASS
ASSERT(dev->coherent_dma_mask);
** Check if the PCI device can DMA to ptr... if so, just return ptr
if (likely((*dma_handle & ~dev->coherent_dma_mask) == 0)) {
DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n",
dev->coherent_dma_mask, *dma_handle);
* If device can't bypass or bypass is disabled, pass the 32bit fake
* device to map single to get an iova mapping.
*dma_handle = sba_map_single(&ioc->sac_only_dev->dev, addr, size, 0);
* sba_free_coherent - free/unmap shared mem for DMA
* @dev: instance of PCI owned by the driver that's asking.
* @size: number of bytes mapped in driver buffer.
* @vaddr: virtual address IOVA of "consistent" buffer.
* @dma_handle: IO virtual address of "consistent" buffer.
* See Documentation/DMA-mapping.txt
void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
sba_unmap_single(dev, dma_handle, size, 0);
free_pages((unsigned long) vaddr, get_order(size));
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
#define PIDE_FLAG 0x1UL
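/*
** Encoding sketch (assuming iovp_shift == 12): sba_coalesce_chunks()
** temporarily stores, in sg->dma_address,
**   PIDE_FLAG | (pide << iovp_shift) | dma_offset
** e.g. pide 5 with a 0x234 byte offset is stashed as 0x5235
** (0x5000 | 0x234 | 0x1). sba_fill_pdir() strips PIDE_FLAG and
** rebuilds the real IOVA by OR'ing in ioc->ibase.
*/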
#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
* sba_fill_pdir - write allocated SG entries into IO PDIR
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @startsg: list of IOVA/size pairs
* @nents: number of entries in startsg list
* Take preprocessed SG list and write corresponding entries
static SBA_INLINE int
struct scatterlist *startsg,
struct scatterlist *dma_sg = startsg; /* pointer to current DMA */
unsigned long dma_offset = 0;
while (nents-- > 0) {
int cnt = startsg->dma_length;
startsg->dma_length = 0;
#ifdef DEBUG_LARGE_SG_ENTRIES
printk(" %2d : %08lx/%05x %p\n",
nents, startsg->dma_address, cnt,
sba_sg_address(startsg));
DBG_RUN_SG(" %d : %08lx/%05x %p\n",
nents, startsg->dma_address, cnt,
sba_sg_address(startsg));
** Look for the start of a new DMA stream
if (startsg->dma_address & PIDE_FLAG) {
u32 pide = startsg->dma_address & ~PIDE_FLAG;
dma_offset = (unsigned long) pide & ~iovp_mask;
startsg->dma_address = 0;
dma_sg->dma_address = pide | ioc->ibase;
pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
** Look for a VCONTIG chunk
unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
/* Since multiple Vcontig blocks could make up
** one DMA stream, *add* cnt to dma_len.
dma_sg->dma_length += cnt;
dma_offset=0; /* only want offset on first chunk */
cnt = ROUNDUP(cnt, iovp_size);
sba_io_pdir_entry(pdirp, vaddr);
/* force pdir update */
#ifdef DEBUG_LARGE_SG_ENTRIES
** Two address ranges are DMA contiguous *iff* "end of prev" and
** "start of next" are both on an IOV page boundary.
** (shift left is a quick trick to mask off upper bits)
#define DMA_CONTIG(__X, __Y) \
(((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL)
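/*
** DMA_CONTIG example (assuming BITS_PER_LONG == 64, iovp_shift == 12):
** shifting left by 52 keeps only the low 12 bits of (__X | __Y), so the
** test passes iff both addresses are 4k aligned:
**   DMA_CONTIG(0x2000, 0x3000) -> true  (both page aligned)
**   DMA_CONTIG(0x2000, 0x3004) -> false (0x004 survives the shift)
*/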
* sba_coalesce_chunks - preprocess the SG list
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @startsg: list of IOVA/size pairs
* @nents: number of entries in startsg list
* First pass is to walk the SG list and determine where the breaks are
* in the DMA stream. Allocates PDIR entries but does not fill them.
* Returns the number of DMA chunks.
* Doing the fill separate from the coalescing/allocation keeps the
* code simpler. Future enhancement could make one pass through
* the sglist do both.
static SBA_INLINE int
sba_coalesce_chunks( struct ioc *ioc,
struct scatterlist *startsg,
struct scatterlist *vcontig_sg; /* VCONTIG chunk head */
unsigned long vcontig_len; /* len of VCONTIG chunk */
unsigned long vcontig_end;
struct scatterlist *dma_sg; /* next DMA stream head */
unsigned long dma_offset, dma_len; /* start/len of DMA stream */
unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
** Prepare for first/next DMA stream
dma_sg = vcontig_sg = startsg;
dma_len = vcontig_len = vcontig_end = startsg->length;
vcontig_end += vaddr;
dma_offset = vaddr & ~iovp_mask;
/* PARANOID: clear entries */
startsg->dma_address = startsg->dma_length = 0;
** This loop terminates one iteration "early" since
** it's always looking one "ahead".
while (--nents > 0) {
unsigned long vaddr; /* tmp */
startsg->dma_address = startsg->dma_length = 0;
/* catch brokenness in SCSI layer */
ASSERT(startsg->length <= DMA_CHUNK_SIZE);
** First make sure current dma stream won't
** exceed DMA_CHUNK_SIZE if we coalesce the
** next entry.
if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask)
** Then look for virtually contiguous blocks.
** append the next transaction?
vaddr = (unsigned long) sba_sg_address(startsg);
if (vcontig_end == vaddr)
vcontig_len += startsg->length;
vcontig_end += startsg->length;
dma_len += startsg->length;
#ifdef DEBUG_LARGE_SG_ENTRIES
dump_run_sg = (vcontig_len > iovp_size);
** Not virtually contiguous.
** Terminate prev chunk.
** Start a new chunk.
** Once we start a new VCONTIG chunk, dma_offset
** can't change. And we need the offset from the first
** chunk - not the last one. Ergo, successive chunks
** must start on page boundaries and dovetail
** with their predecessor.
vcontig_sg->dma_length = vcontig_len;
vcontig_sg = startsg;
vcontig_len = startsg->length;
** 3) do the entries end/start on page boundaries?
** Don't update vcontig_end until we've checked.
if (DMA_CONTIG(vcontig_end, vaddr))
vcontig_end = vcontig_len + vaddr;
dma_len += vcontig_len;
** End of DMA Stream
** Terminate last VCONTIG block.
** Allocate space for DMA stream.
vcontig_sg->dma_length = vcontig_len;
dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
ASSERT(dma_len <= DMA_CHUNK_SIZE);
dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
| (sba_alloc_range(ioc, dma_len) << iovp_shift)
* sba_map_sg - map Scatter/Gather list
* @dev: instance of PCI owned by the driver that's asking.
* @sglist: array of buffer/length pairs
* @nents: number of entries in list
* @dir: R/W or both.
* See Documentation/DMA-mapping.txt
int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int dir)
int coalesced, filled = 0;
#ifdef ASSERT_PDIR_SANITY
unsigned long flags;
#ifdef ALLOW_IOV_BYPASS_SG
struct scatterlist *sg;
DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);
#ifdef ALLOW_IOV_BYPASS_SG
ASSERT(to_pci_dev(dev)->dma_mask);
if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
for (sg = sglist ; filled < nents ; filled++, sg++){
sg->dma_length = sg->length;
sg->dma_address = virt_to_phys(sba_sg_address(sg));
/* Fast path single entry scatterlists. */
sglist->dma_length = sglist->length;
sglist->dma_address = sba_map_single(dev, sba_sg_address(sglist), sglist->length, dir);
#ifdef ASSERT_PDIR_SANITY
spin_lock_irqsave(&ioc->res_lock, flags);
if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
sba_dump_sg(ioc, sglist, nents);
panic("Check before sba_map_sg()");
spin_unlock_irqrestore(&ioc->res_lock, flags);
prefetch(ioc->res_hint);
** First coalesce the chunks and allocate I/O pdir space
** If this is one DMA stream, we can properly map using the
** correct virtual address associated with each DMA page.
** w/o this association, we wouldn't have coherent DMA!
** Access to the virtual address is what forces a two pass algorithm.
coalesced = sba_coalesce_chunks(ioc, sglist, nents);
** Program the I/O Pdir
** map the virtual addresses to the I/O Pdir
** o dma_address will contain the pdir index
** o dma_len will contain the number of bytes to map
** o address contains the virtual address.
filled = sba_fill_pdir(ioc, sglist, nents);
#ifdef ASSERT_PDIR_SANITY
spin_lock_irqsave(&ioc->res_lock, flags);
if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
sba_dump_sg(ioc, sglist, nents);
panic("Check after sba_map_sg()\n");
spin_unlock_irqrestore(&ioc->res_lock, flags);
ASSERT(coalesced == filled);
DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);
* sba_unmap_sg - unmap Scatter/Gather list
* @dev: instance of PCI owned by the driver that's asking.
* @sglist: array of buffer/length pairs
* @nents: number of entries in list
* @dir: R/W or both.
* See Documentation/DMA-mapping.txt
void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
#ifdef ASSERT_PDIR_SANITY
unsigned long flags;
DBG_RUN_SG("%s() START %d entries, %p,%x\n",
__FUNCTION__, nents, sba_sg_address(sglist), sglist->length);
#ifdef ASSERT_PDIR_SANITY
spin_lock_irqsave(&ioc->res_lock, flags);
sba_check_pdir(ioc,"Check before sba_unmap_sg()");
spin_unlock_irqrestore(&ioc->res_lock, flags);
while (nents && sglist->dma_length) {
sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);
#ifdef ASSERT_PDIR_SANITY
spin_lock_irqsave(&ioc->res_lock, flags);
sba_check_pdir(ioc,"Check after sba_unmap_sg()");
spin_unlock_irqrestore(&ioc->res_lock, flags);
/**************************************************************
* Initialization and claim
***************************************************************/
ioc_iova_init(struct ioc *ioc)
struct pci_dev *device = NULL;
#ifdef FULL_VALID_PDIR
unsigned long index;
** Firmware programs the base and size of a "safe IOVA space"
** (one that doesn't overlap memory or LMMIO space) in the
** IBASE and IMASK registers.
ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL;
ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL;
ioc->iov_size = ~ioc->imask + 1;
DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
__FUNCTION__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
ioc->iov_size >> 20);
switch (iovp_size) {
case 4*1024: tcnfg = 0; break;
case 8*1024: tcnfg = 1; break;
case 16*1024: tcnfg = 2; break;
case 64*1024: tcnfg = 3; break;
panic(PFX "Unsupported IOTLB page size %ldK",
WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE;
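/*
** Sizing example (illustrative, not a required configuration): a 1GB
** IOV space with 4k IOVA pages needs 1GB/4k == 256k pdir entries, and
** at PDIR_ENTRY_SIZE == 8 bytes each that is a 2MB page table.
*/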
ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
get_order(ioc->pdir_size));
if (!ioc->pdir_base)
panic(PFX "Couldn't allocate I/O Page Table\n");
memset(ioc->pdir_base, 0, ioc->pdir_size);
DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __FUNCTION__,
iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);
ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
** If an AGP device is present, only use half of the IOV space
** for PCI DMA. Unfortunately we can't know ahead of time
** whether GART support will actually be used, for now we
** can just key on an AGP device found in the system.
** We program the next pdir index after we stop w/ a key for
** the GART code to handshake on.
for_each_pci_dev(device)
agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);
if (agp_found && reserve_sba_gart) {
printk(KERN_INFO PFX "reserving %dMb of IOVA space at 0x%lx for agpgart\n",
ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2);
ioc->pdir_size /= 2;
((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
#ifdef FULL_VALID_PDIR
** Check to see if the spill page has already been allocated; we don't
** need more than one across multiple SBAs.
if (!prefetch_spill_page) {
char *spill_poison = "SBAIOMMU POISON";
int poison_size = 16;
void *poison_addr, *addr;
addr = (void *)__get_free_pages(GFP_KERNEL, get_order(iovp_size));
panic(PFX "Couldn't allocate PDIR spill page\n");
for ( ; (u64) poison_addr < addr + iovp_size; poison_addr += poison_size)
memcpy(poison_addr, spill_poison, poison_size);
prefetch_spill_page = virt_to_phys(addr);
DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __FUNCTION__, prefetch_spill_page);
** Set all the PDIR entries valid w/ the spill page as the target
for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++)
((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);
/* Clear I/O TLB of any possible entries */
WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM);
READ_REG(ioc->ioc_hpa + IOC_PCOM);
/* Enable IOVA translation */
WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
READ_REG(ioc->ioc_hpa + IOC_IBASE);
ioc_resource_init(struct ioc *ioc)
spin_lock_init(&ioc->res_lock);
#if DELAYED_RESOURCE_CNT > 0
spin_lock_init(&ioc->saved_lock);
/* resource map size dictated by pdir_size */
ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
ioc->res_size >>= 3; /* convert bit count to byte count */
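/*
** Continuing the sizing example above: a 2MB pdir has 256k entries;
** one bitmap bit per entry gives 256k/8 == 32KB for the resource map.
*/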
DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size);
ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
get_order(ioc->res_size));
panic(PFX "Couldn't allocate resource map\n");
memset(ioc->res_map, 0, ioc->res_size);
/* next available IOVP - circular search */
ioc->res_hint = (unsigned long *) ioc->res_map;
#ifdef ASSERT_PDIR_SANITY
/* Mark first bit busy - ie no IOVA 0 */
ioc->res_map[0] = 0x1;
ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
#ifdef FULL_VALID_PDIR
/* Mark the last resource used so we don't prefetch beyond IOVA space */
ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */
ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF
| prefetch_spill_page);
DBG_INIT("%s() res_map %x %p\n", __FUNCTION__,
ioc->res_size, (void *) ioc->res_map);
ioc_sac_init(struct ioc *ioc)
struct pci_dev *sac = NULL;
struct pci_controller *controller = NULL;
* pci_alloc_coherent() must return a DMA address which is
* SAC (single address cycle) addressable, so allocate a
* pseudo-device to enforce that.
sac = kmalloc(sizeof(*sac), GFP_KERNEL);
panic(PFX "Couldn't allocate struct pci_dev");
memset(sac, 0, sizeof(*sac));
controller = kmalloc(sizeof(*controller), GFP_KERNEL);
panic(PFX "Couldn't allocate struct pci_controller");
memset(controller, 0, sizeof(*controller));
controller->iommu = ioc;
sac->sysdata = controller;
sac->dma_mask = 0xFFFFFFFFUL;
sac->dev.bus = &pci_bus_type;
ioc->sac_only_dev = sac;
ioc_zx1_init(struct ioc *ioc)
unsigned long rope_config;
if (ioc->rev < 0x20)
panic(PFX "IOC 2.0 or later required for IOMMU support\n");
/* 38 bit memory controller + extra bit for range displaced by MMIO */
ioc->dma_mask = (0x1UL << 39) - 1;
** Clear ROPE(N)_CONFIG AO bit.
** Disables "NT Ordering" (~= !"Relaxed Ordering")
** Overrides bit 1 in DMA Hint Sets.
** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701.
for (i = 0; i < (8*8); i += 8) {
rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + i);
rope_config &= ~IOC_ROPE_AO;
WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + i);
typedef void (initfunc)(struct ioc *);
static struct ioc_iommu ioc_iommu_info[] __initdata = {
{ ZX1_IOC_ID, "zx1", ioc_zx1_init },
{ ZX2_IOC_ID, "zx2", NULL },
{ SX1000_IOC_ID, "sx1000", NULL },
{ SX2000_IOC_ID, "sx2000", NULL },
static struct ioc * __init
ioc_init(u64 hpa, void *handle)
struct ioc_iommu *info;
ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
memset(ioc, 0, sizeof(*ioc));
ioc->next = ioc_list;
ioc->handle = handle;
ioc->ioc_hpa = ioremap(hpa, 0x1000);
ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL; /* conservative */
for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {
if (ioc->func_id == info->func_id) {
ioc->name = info->name;
iovp_size = (1 << iovp_shift);
iovp_mask = ~(iovp_size - 1);
DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __FUNCTION__,
PAGE_SIZE >> 10, iovp_size >> 10);
ioc->name = kmalloc(24, GFP_KERNEL);
sprintf((char *) ioc->name, "Unknown (%04x:%04x)",
ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
ioc->name = "Unknown";
ioc_resource_init(ioc);
if ((long) ~iovp_mask > (long) ia64_max_iommu_merge_mask)
ia64_max_iommu_merge_mask = ~iovp_mask;
printk(KERN_INFO PFX
"%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
hpa, ioc->iov_size >> 20, ioc->ibase);
#ifdef CONFIG_PROC_FS
ioc_start(struct seq_file *s, loff_t *pos)
for (ioc = ioc_list; ioc; ioc = ioc->next)
ioc_next(struct seq_file *s, void *v, loff_t *pos)
struct ioc *ioc = v;
ioc_stop(struct seq_file *s, void *v)
ioc_show(struct seq_file *s, void *v)
struct ioc *ioc = v;
unsigned long *res_ptr = (unsigned long *)ioc->res_map;
seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n",
ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
if (ioc->node != MAX_NUMNODES)
seq_printf(s, "NUMA node : %d\n", ioc->node);
seq_printf(s, "IOVA size : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024));
seq_printf(s, "IOVA page size : %ld kb\n", iovp_size/1024);
for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr)
used += hweight64(*res_ptr);
seq_printf(s, "PDIR size : %d entries\n", ioc->pdir_size >> 3);
seq_printf(s, "PDIR used : %d entries\n", used);
#ifdef PDIR_SEARCH_TIMING
unsigned long i = 0, avg = 0, min, max;
min = max = ioc->avg_search[0];
for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
avg += ioc->avg_search[i];
if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
avg /= SBA_SEARCH_SAMPLE;
seq_printf(s, "Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n",
#ifndef ALLOW_IOV_BYPASS
seq_printf(s, "IOVA bypass disabled\n");
static struct seq_operations ioc_seq_ops = {
ioc_open(struct inode *inode, struct file *file)
return seq_open(file, &ioc_seq_ops);
static struct file_operations ioc_fops = {
.llseek = seq_lseek,
.release = seq_release
struct proc_dir_entry *dir, *entry;
dir = proc_mkdir("bus/mckinley", NULL);
entry = create_proc_entry(ioc_list->name, 0, dir);
entry->proc_fops = &ioc_fops;
sba_connect_bus(struct pci_bus *bus)
acpi_handle handle, parent;
if (!PCI_CONTROLLER(bus))
panic(PFX "no sysdata on bus %d!\n", bus->number);
if (PCI_CONTROLLER(bus)->iommu)
handle = PCI_CONTROLLER(bus)->acpi_handle;
* The IOC scope encloses PCI root bridges in the ACPI
* namespace, so work our way out until we find an IOC we
* claimed previously.
for (ioc = ioc_list; ioc; ioc = ioc->next)
if (ioc->handle == handle) {
PCI_CONTROLLER(bus)->iommu = ioc;
status = acpi_get_parent(handle, &parent);
} while (ACPI_SUCCESS(status));
printk(KERN_WARNING "No IOC for PCI Bus %04x:%02x in ACPI\n", pci_domain_nr(bus), bus->number);
sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
ioc->node = MAX_NUMNODES;
pxm = acpi_get_pxm(handle);
node = pxm_to_nid_map[pxm];
if (node >= MAX_NUMNODES || !node_online(node))
#define sba_map_ioc_to_node(ioc, handle)
acpi_sba_ioc_add(struct acpi_device *device)
struct acpi_buffer buffer;
struct acpi_device_info *dev_info;
status = hp_acpi_csr_space(device->handle, &hpa, &length);
if (ACPI_FAILURE(status))
buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
status = acpi_get_object_info(device->handle, &buffer);
if (ACPI_FAILURE(status))
dev_info = buffer.pointer;
* For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI
* root bridges, and its CSR space includes the IOC function.
if (strncmp("HWP0001", dev_info->hardware_id.value, 7) == 0) {
hpa += ZX1_IOC_OFFSET;
/* zx1 based systems default to kernel page size iommu pages */
iovp_shift = min(PAGE_SHIFT, 16);
ACPI_MEM_FREE(dev_info);
* default anything not caught above or specified on cmdline to 4k
ioc = ioc_init(hpa, device->handle);
/* setup NUMA node association */
sba_map_ioc_to_node(ioc, device->handle);
static struct acpi_driver acpi_sba_ioc_driver = {
.name = "IOC IOMMU Driver",
.ids = "HWP0001,HWP0004",
.add = acpi_sba_ioc_add,
acpi_bus_register_driver(&acpi_sba_ioc_driver);
struct pci_bus *b = NULL;
while ((b = pci_find_next_bus(b)) != NULL)
#ifdef CONFIG_PROC_FS
subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */
extern void dig_setup(char**);
* MAX_DMA_ADDRESS needs to be set up prior to paging_init to do any good,
* so we use the platform_setup hook to fix it up.
sba_setup(char **cmdline_p)
MAX_DMA_ADDRESS = ~0UL;
dig_setup(cmdline_p);
nosbagart(char *str)
reserve_sba_gart = 0;
sba_dma_supported (struct device *dev, u64 mask)
/* make sure it's at least 32bit capable */
return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
sba_dma_mapping_error (dma_addr_t dma_addr)
__setup("nosbagart", nosbagart);
sba_page_override(char *str)
unsigned long page_size;
page_size = memparse(str, &str);
switch (page_size) {
iovp_shift = ffs(page_size) - 1;
printk("%s: unknown/unsupported iommu page size %ld\n",
__FUNCTION__, page_size);
__setup("sbapagesize=",sba_page_override);
EXPORT_SYMBOL(sba_dma_mapping_error);
EXPORT_SYMBOL(sba_map_single);
EXPORT_SYMBOL(sba_unmap_single);
EXPORT_SYMBOL(sba_map_sg);
EXPORT_SYMBOL(sba_unmap_sg);
EXPORT_SYMBOL(sba_dma_supported);
EXPORT_SYMBOL(sba_alloc_coherent);
EXPORT_SYMBOL(sba_free_coherent);