/**
 * iommu_fill_pdir - Insert coalesced scatter/gather chunks into the I/O Pdir.
 * @ioc: The I/O Controller.
 * @startsg: The scatter/gather list of coalesced chunks.
 * @nents: The number of entries in the scatter/gather list.
 * @hint: The DMA Hint.
 * @iommu_io_pdir_entry: IOMMU-specific callback that writes a single
 *	I/O Pdir entry.
 *
 * This function inserts the coalesced scatter/gather list chunks into the
 * I/O Controller's I/O Pdir.
 */
static inline unsigned int
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
		unsigned long hint,
		void (*iommu_io_pdir_entry)(u64 *, space_t, unsigned long,
					    unsigned long))
{
	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
	unsigned int n_mappings = 0;
	unsigned long dma_offset = 0, dma_len = 0;
	u64 *pdirp = NULL;
	/* Horrible hack. For efficiency's sake, dma_sg starts one
	 * entry below the true start (it is immediately incremented
	 * in the loop) */
	dma_sg--;

	while (nents-- > 0) {
		unsigned long vaddr;
		long size;

		DBG_RUN_SG(" %d : %08lx/%05x %08lx/%05x\n", nents,
			   (unsigned long)sg_dma_address(startsg),
			   sg_dma_len(startsg),
			   sg_virt_addr(startsg), startsg->length);
		/*
		** Look for the start of a new DMA stream
		*/
		if (sg_dma_address(startsg) & PIDE_FLAG) {
			u32 pide = sg_dma_address(startsg) & ~PIDE_FLAG;

			BUG_ON(pdirp && (dma_len != sg_dma_len(dma_sg)));

			dma_sg++;

			dma_len = sg_dma_len(startsg);
			sg_dma_len(startsg) = 0;
			dma_offset = (unsigned long) pide & ~IOVP_MASK;
			n_mappings++;
#if defined(ZX1_SUPPORT)
			/* Pluto IOMMU IO Virt Address is not zero based */
			sg_dma_address(dma_sg) = pide | ioc->ibase;
#else
			/* SBA, ccio, and dino are zero based.
			 * Trying to save a few CPU cycles for most users.
			 */
			sg_dma_address(dma_sg) = pide;
#endif
			pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
		}
		BUG_ON(pdirp == NULL);

		vaddr = sg_virt_addr(startsg);
		sg_dma_len(dma_sg) += startsg->length;
		size = startsg->length + dma_offset;
		dma_offset = 0;
#ifdef IOMMU_MAP_STATS
		ioc->msg_pages += startsg->length >> IOVP_SHIFT;
#endif
		do {
			/* one pdir entry per IO virtual page */
			iommu_io_pdir_entry(pdirp, KERNEL_SPACE,
					    vaddr, hint);
			vaddr += IOVP_SIZE;
			size -= IOVP_SIZE;
			pdirp++;
		} while (unlikely(size > 0));
		startsg++;
	}

	return n_mappings;
}
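
/*
 * A minimal usage sketch (illustration only, not part of this file):
 * a driver's map_sg path runs the two helpers back to back, first
 * coalescing the list and reserving pdir space, then programming the
 * entries. example_alloc_range and example_io_pdir_entry are
 * hypothetical stand-ins for the IOMMU-specific callbacks that the
 * SBA and CCIO drivers pass in.
 */
#if 0
static int example_map_sg(struct ioc *ioc, struct scatterlist *sglist,
			  int nents, unsigned long hint)
{
	unsigned int coalesced, filled;

	/* pass 1: merge virtually contiguous entries, allocate pdir space */
	coalesced = iommu_coalesce_chunks(ioc, sglist, nents,
					  example_alloc_range);

	/* pass 2: write one pdir entry per IO virtual page */
	filled = iommu_fill_pdir(ioc, sglist, nents, hint,
				 example_io_pdir_entry);

	/* every coalesced DMA chunk must have been programmed */
	BUG_ON(coalesced != filled);

	return filled;
}
#endif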
/*
** First pass is to walk the SG list and determine where the breaks are
** in the DMA stream. Allocates PDIR entries but does not fill them.
** Returns the number of DMA chunks.
**
** Doing the fill separate from the coalescing/allocation keeps the
** code simpler. Future enhancement could make one pass through
** the sglist do both.
*/
static inline unsigned int
iommu_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, int nents,
		      int (*iommu_alloc_range)(struct ioc *, size_t))
{
	struct scatterlist *contig_sg;	   /* contig chunk head */
	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
	unsigned int n_mappings = 0;
	while (nents > 0) {

		/*
		** Prepare for first/next DMA stream
		*/
		contig_sg = startsg;
		dma_len = startsg->length;
		dma_offset = sg_virt_addr(startsg) & ~IOVP_MASK;

		/* PARANOID: clear entries */
		sg_dma_address(startsg) = 0;
		sg_dma_len(startsg) = 0;
		/*
		** This loop terminates one iteration "early" since
		** it's always looking one "ahead".
		*/
		while (--nents > 0) {
			unsigned long prevstartsg_end, startsg_end;

			prevstartsg_end = sg_virt_addr(startsg) +
				startsg->length;

			startsg++;
			startsg_end = sg_virt_addr(startsg) +
				startsg->length;

			/* PARANOID: clear entries */
			sg_dma_address(startsg) = 0;
			sg_dma_len(startsg) = 0;
			/*
			** First make sure current dma stream won't
			** exceed DMA_CHUNK_SIZE if we coalesce the
			** next entry.
			*/
			if (unlikely(ALIGN(dma_len + dma_offset + startsg->length,
					   IOVP_SIZE) > DMA_CHUNK_SIZE))
				break;

			/*
			** Next see if we can append the next chunk (i.e.
			** the previous entry must end exactly on a page
			** boundary and the next must begin on one; the
			** chunks themselves need not be contiguous since
			** the IOMMU remaps page by page).
			*/
			if (unlikely(((prevstartsg_end | sg_virt_addr(startsg)) & ~PAGE_MASK) != 0))
				break;

			dma_len += startsg->length;
		}
		/*
		** End of DMA Stream
		** Terminate last VCONTIG block.
		** Allocate space for DMA stream.
		*/
		sg_dma_len(contig_sg) = dma_len;
		dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE);
		sg_dma_address(contig_sg) =
			PIDE_FLAG
			| (iommu_alloc_range(ioc, dma_len) << IOVP_SHIFT)
			| dma_offset;
		n_mappings++;
	}

	return n_mappings;
}
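
/*
 * A minimal sketch of the PIDE encoding contract shared by the two
 * helpers above (illustration only; the local names here are
 * hypothetical): iommu_alloc_range() returns the index of the first
 * reserved pdir entry, iommu_coalesce_chunks() stores that index
 * shifted up by IOVP_SHIFT, tagged with PIDE_FLAG and OR'd with the
 * sub-page offset, and iommu_fill_pdir() reverses the encoding.
 */
#if 0
	/* encode (done by iommu_coalesce_chunks) */
	dma_addr_t enc = PIDE_FLAG
		| (iommu_alloc_range(ioc, dma_len) << IOVP_SHIFT)
		| dma_offset;

	/* decode (done by iommu_fill_pdir) */
	u32 pide = enc & ~PIDE_FLAG;
	u64 *entry = &ioc->pdir_base[pide >> IOVP_SHIFT]; /* first pdir slot */
	unsigned long off = pide & ~IOVP_MASK;		  /* sub-page offset */
#endif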