/*
 * IOMMU implementation for Cell Broadband Processor Architecture
 * We just establish a linear mapping at boot by setting all the
 * IOPT cache entries in the CPU.
 * The mapping functions should be identical to pci_direct_iommu,
 * except for the handling of the high order bit that is required
 * by the Spider bridge. These should be split into a separate
 * file at the point where we get a different bridge chip.
 *
 * Copyright (C) 2005 IBM Deutschland Entwicklung GmbH,
 *		       Arnd Bergmann <arndb@de.ibm.com>
 *
 * Based on linear mapping
 * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/compiler.h>

#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/abs_addr.h>
#include <asm/system.h>
#include <asm/ppc-pci.h>

#include "iommu.h"
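/*
 * All DMA addresses carry the high order "valid" bit required by the
 * Spider bridge. cell_init_iommu() clears this mask again when it
 * finds an Axon bridge instead.
 */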
static dma_addr_t cell_dma_valid = SPIDER_DMA_VALID;
static inline unsigned long
get_iopt_entry(unsigned long real_address, unsigned long ioid,
	       unsigned long prot)
{
	return (prot & IOPT_PROT_MASK)
	     | (IOPT_COHERENT)
	     | (IOPT_ORDER_VC)
	     | (real_address & IOPT_RPN_MASK)
	     | (ioid & IOPT_IOID_MASK);
}
typedef struct {
	unsigned long val;
} ioste;

static inline ioste
mk_ioste(unsigned long val)
{
	ioste ioste = { .val = val, };
	return ioste;
}
static inline ioste
get_iost_entry(unsigned long iopt_base, unsigned long io_address, unsigned page_size)
{
	unsigned long ps;
	unsigned long iostep;
	unsigned long nnpt;
	unsigned long shift;

	switch (page_size) {
	case 0x1000000:
		ps = IOST_PS_16M;
		nnpt = 0;  /* one page per segment */
		shift = 5; /* segment has 16 iopt entries */
		break;

	case 0x100000:
		ps = IOST_PS_1M;
		nnpt = 0;  /* one page per segment */
		shift = 1; /* segment has 256 iopt entries */
		break;

	case 0x10000:
		ps = IOST_PS_64K;
		nnpt = 0x07; /* 8 pages per io page table */
		shift = 0;   /* all entries are used */
		break;

	case 0x1000:
		ps = IOST_PS_4K;
		nnpt = 0x7f; /* 128 pages per io page table */
		shift = 0;   /* all entries are used */
		break;

	default: /* not a known compile time constant */
		{
			/* BUILD_BUG_ON() is not usable here */
			extern void __get_iost_entry_bad_page_size(void);
			__get_iost_entry_bad_page_size();
		}
		break;
	}

	iostep = iopt_base +
			/* need 8 bytes per iopte */
			(((io_address / page_size * 8)
			/* align io page tables on 4k page boundaries */
				 << shift)
			/* nnpt+1 pages go into each iopt */
				 & ~(nnpt << 12));

	nnpt++; /* this seems to work, but the documentation is not clear
		   about whether we put nnpt or nnpt-1 into the ioste bits.
		   In theory, this can't work for 4k pages. */
	return mk_ioste(IOST_VALID_MASK
			| (iostep & IOST_PT_BASE_MASK)
			| ((nnpt << 5) & IOST_NNPT_MASK)
			| (ps & IOST_PS_MASK));
}
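/*
 * Worked example for the 16M case used by cell_do_map_iommu() below:
 * with page_size 0x1000000, nnpt = 0 and shift = 5, an io_address of
 * 0x20000000 gives iostep = iopt_base + ((0x20 * 8) << 5)
 * = iopt_base + 0x2000, i.e. the 16 ioptes of each 256M segment start
 * on their own 4k boundary.
 */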
/* compute the address of an io pte */
static inline unsigned long
get_ioptep(ioste iost_entry, unsigned long io_address)
{
	unsigned long iopt_base;
	unsigned long page_size;
	unsigned long page_number;
	unsigned long iopt_offset;

	iopt_base = iost_entry.val & IOST_PT_BASE_MASK;
	page_size = iost_entry.val & IOST_PS_MASK;

	/* decode page size to compute page number */
	page_number = (io_address & 0x0fffffff) >> (10 + 2 * page_size);
	/* page number is an offset into the io page table */
	iopt_offset = (page_number << 3) & 0x7fff8ul;
	return iopt_base + iopt_offset;
}
/* compute the tag field of the iopt cache entry */
static inline unsigned long
get_ioc_tag(ioste iost_entry, unsigned long io_address)
{
	unsigned long iopte = get_ioptep(iost_entry, io_address);

	return IOPT_VALID_MASK
	     | ((iopte & 0x00000000000000ff8ul) >> 3)
	     | ((iopte & 0x0000003fffffc0000ul) >> 9);
}
/* compute the hashed 6 bit index for the 4-way associative pte cache */
static inline unsigned long
get_ioc_hash(ioste iost_entry, unsigned long io_address)
{
	unsigned long iopte = get_ioptep(iost_entry, io_address);

	return ((iopte & 0x000000000000001f8ul) >> 3)
	     ^ ((iopte & 0x00000000000020000ul) >> 17)
	     ^ ((iopte & 0x00000000000010000ul) >> 15)
	     ^ ((iopte & 0x00000000000008000ul) >> 13)
	     ^ ((iopte & 0x00000000000004000ul) >> 11)
	     ^ ((iopte & 0x00000000000002000ul) >> 9)
	     ^ ((iopte & 0x00000000000001000ul) >> 7);
}
/* same as above, but pretend that we have a simpler 1-way associative
   pte cache with an 8 bit index: the extra term below reuses iopte
   bits 14-15 as index bits 6-7 */
static inline unsigned long
get_ioc_hash_1way(ioste iost_entry, unsigned long io_address)
{
	unsigned long iopte = get_ioptep(iost_entry, io_address);

	return ((iopte & 0x000000000000001f8ul) >> 3)
	     ^ ((iopte & 0x00000000000020000ul) >> 17)
	     ^ ((iopte & 0x00000000000010000ul) >> 15)
	     ^ ((iopte & 0x00000000000008000ul) >> 13)
	     ^ ((iopte & 0x00000000000004000ul) >> 11)
	     ^ ((iopte & 0x00000000000002000ul) >> 9)
	     ^ ((iopte & 0x00000000000001000ul) >> 7)
	     ^ ((iopte & 0x0000000000000c000ul) >> 8);
}
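/*
 * Accessors for the IOST and IOPT caches in the IOC: the segment table
 * cache is read and written directly through IOC_ST_CACHE_DIR, while
 * the pte cache has a tag directory (IOC_PT_CACHE_DIR) and a separate
 * data register (IOC_PT_CACHE_REG).
 */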
static inline ioste
get_iost_cache(void __iomem *base, unsigned long index)
{
	unsigned long __iomem *p = (base + IOC_ST_CACHE_DIR);
	return mk_ioste(in_be64(&p[index]));
}
static inline void
set_iost_cache(void __iomem *base, unsigned long index, ioste ste)
{
	unsigned long __iomem *p = (base + IOC_ST_CACHE_DIR);
	pr_debug("ioste %02lx was %016lx, store %016lx", index,
		 get_iost_cache(base, index).val, ste.val);
	out_be64(&p[index], ste.val);
	pr_debug(" now %016lx\n", get_iost_cache(base, index).val);
}
static inline unsigned long
get_iopt_cache(void __iomem *base, unsigned long index, unsigned long *tag)
{
	unsigned long __iomem *tags = (void *)(base + IOC_PT_CACHE_DIR);
	unsigned long __iomem *p = (void *)(base + IOC_PT_CACHE_REG);

	*tag = tags[index];
	rmb();
	return *p;
}
static inline void
set_iopt_cache(void __iomem *base, unsigned long index,
	       unsigned long tag, unsigned long val)
{
	unsigned long __iomem *tags = base + IOC_PT_CACHE_DIR;
	unsigned long __iomem *p = base + IOC_PT_CACHE_REG;

	out_be64(p, val);
	out_be64(&tags[index], tag);
}
static inline void
set_iost_origin(void __iomem *base)
{
	unsigned long __iomem *p = base + IOC_ST_ORIGIN;
	unsigned long origin = IOSTO_ENABLE | IOSTO_SW;

	pr_debug("iost_origin %016lx, now %016lx\n", in_be64(p), origin);
	out_be64(p, origin);
}
static inline void
set_iocmd_config(void __iomem *base)
{
	unsigned long __iomem *p = base + 0xc00; /* IOCMD configuration register */
	unsigned long conf;

	conf = in_be64(p);
	pr_debug("iost_conf %016lx, now %016lx\n", conf, conf | IOCMD_CONF_TE);
	out_be64(p, conf | IOCMD_CONF_TE);
}
static void enable_mapping(void __iomem *base, void __iomem *mmio_base)
{
	set_iocmd_config(base);
	set_iost_origin(mmio_base);
}
struct cell_iommu {
	unsigned long base;
	unsigned long mmio_base;
	void __iomem *mapped_base;
	void __iomem *mapped_mmio_base;
};

static struct cell_iommu cell_iommus[NR_CPUS];
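/*
 * Note that the io page table itself is never written: cell_do_map_iommu()
 * below only primes the IOST and IOPT caches, pretending the table lives
 * at an unused address (fake_iopt). The linear mapping therefore relies
 * on every translation staying resident in the caches.
 */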
/* initialize the iommu to support a simple linear mapping
 * for each DMA window used by any device. For now, we
 * happen to know that there is only one DMA window in use,
 * starting at iopt_phys_offset. */
static void cell_do_map_iommu(struct cell_iommu *iommu,
			      unsigned int ioid,
			      unsigned long map_start,
			      unsigned long map_size)
{
	unsigned long io_address, real_address;
	void __iomem *ioc_base, *ioc_mmio_base;
	ioste ioste;
	unsigned long index;

	/* we pretend the io page table was at a very high address */
	const unsigned long fake_iopt = 0x10000000000ul;
	const unsigned long io_page_size = 0x1000000; /* use 16M pages */
	const unsigned long io_segment_size = 0x10000000; /* 256M */

	ioc_base = iommu->mapped_base;
	ioc_mmio_base = iommu->mapped_mmio_base;

	for (real_address = 0, io_address = map_start;
	     io_address <= map_start + map_size;
	     real_address += io_page_size, io_address += io_page_size) {
		ioste = get_iost_entry(fake_iopt, io_address, io_page_size);
		if ((real_address % io_segment_size) == 0) /* segment start */
			set_iost_cache(ioc_mmio_base,
				       io_address >> 28, ioste);
		index = get_ioc_hash_1way(ioste, io_address);
		pr_debug("addr %08lx, index %02lx, ioste %016lx\n",
			 io_address, index, ioste.val);
		set_iopt_cache(ioc_mmio_base, index,
			       get_ioc_tag(ioste, io_address),
			       get_iopt_entry(real_address, ioid, IOPT_PROT_RW));
	}
}
static void pci_dma_cell_bus_setup(struct pci_bus *b)
{
	const unsigned int *ioid;
	unsigned long map_start, map_size, token;
	const unsigned long *dma_window;
	struct cell_iommu *iommu;
	struct device_node *d;

	d = pci_bus_to_OF_node(b);

	ioid = get_property(d, "ioid", NULL);
	if (!ioid)
		pr_debug("No ioid entry found !\n");

	dma_window = get_property(d, "ibm,dma-window", NULL);
	if (!dma_window)
		pr_debug("No ibm,dma-window entry found !\n");

	/* the high 32 bits of the first cell select the iommu,
	 * the following cells are the window start and size */
	map_start = dma_window[1];
	map_size = dma_window[2];
	token = dma_window[0] >> 32;

	iommu = &cell_iommus[token];

	cell_do_map_iommu(iommu, *ioid, map_start, map_size);
}
static int cell_map_iommu_hardcoded(int num_nodes)
{
	struct cell_iommu *iommu = NULL;

	pr_debug("%s(%d): Using hardcoded defaults\n", __FUNCTION__, __LINE__);

	/* node 0 */
	iommu = &cell_iommus[0];
	iommu->mapped_base = ioremap(0x20000511000ul, 0x1000);
	iommu->mapped_mmio_base = ioremap(0x20000510000ul, 0x1000);

	enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base);

	cell_do_map_iommu(iommu, 0x048a,
			  0x20000000ul, 0x20000000ul);

	if (num_nodes < 2)
		return 0;

	/* node 1 */
	iommu = &cell_iommus[1];
	iommu->mapped_base = ioremap(0x30000511000ul, 0x1000);
	iommu->mapped_mmio_base = ioremap(0x30000510000ul, 0x1000);

	enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base);

	cell_do_map_iommu(iommu, 0x048a,
			  0x20000000ul, 0x20000000ul);

	return 0;
}
static int cell_map_iommu(void)
{
	unsigned int num_nodes = 0;
	const unsigned int *node_id;
	const unsigned long *base, *mmio_base;
	struct device_node *dn;
	struct cell_iommu *iommu = NULL;

	/* determine number of nodes (=iommus) */
	pr_debug("%s(%d): determining number of nodes...", __FUNCTION__, __LINE__);
	for (dn = of_find_node_by_type(NULL, "cpu");
	     dn;
	     dn = of_find_node_by_type(dn, "cpu")) {
		node_id = get_property(dn, "node-id", NULL);

		if (num_nodes < *node_id)
			num_nodes = *node_id;
	}

	num_nodes++;
	pr_debug("%i found.\n", num_nodes);

	/* map the iommu registers for each node */
	pr_debug("%s(%d): Looping through nodes\n", __FUNCTION__, __LINE__);
	for (dn = of_find_node_by_type(NULL, "cpu");
	     dn;
	     dn = of_find_node_by_type(dn, "cpu")) {

		node_id = get_property(dn, "node-id", NULL);
		base = get_property(dn, "ioc-cache", NULL);
		mmio_base = get_property(dn, "ioc-translation", NULL);

		if (!base || !mmio_base || !node_id)
			return cell_map_iommu_hardcoded(num_nodes);

		iommu = &cell_iommus[*node_id];
		iommu->base = *base;
		iommu->mmio_base = *mmio_base;

		iommu->mapped_base = ioremap(*base, 0x1000);
		iommu->mapped_mmio_base = ioremap(*mmio_base, 0x1000);

		enable_mapping(iommu->mapped_base,
			       iommu->mapped_mmio_base);

		/* everything else will be done in iommu_bus_setup */
	}

	/* tell cell_init_iommu() that per-bus setup is still needed;
	 * the hardcoded fallback returns 0 because it has already
	 * mapped everything */
	return 1;
}
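/*
 * The dma_mapping_ops below implement a simple direct mapping, identical
 * to pci_direct_iommu except that cell_dma_valid (the high order bit
 * required by the Spider bridge) is ORed into every bus address, as
 * described at the top of this file.
 */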
static void *cell_alloc_coherent(struct device *hwdev, size_t size,
				 dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret;

	ret = (void *)__get_free_pages(flag, get_order(size));
	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_abs(ret) | cell_dma_valid;
	}
	return ret;
}
static void cell_free_coherent(struct device *hwdev, size_t size,
			       void *vaddr, dma_addr_t dma_handle)
{
	free_pages((unsigned long)vaddr, get_order(size));
}
static dma_addr_t cell_map_single(struct device *hwdev, void *ptr,
		size_t size, enum dma_data_direction direction)
{
	return virt_to_abs(ptr) | cell_dma_valid;
}
static void cell_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction direction)
{
	/* nothing to undo: the mapping is static */
}
static int cell_map_sg(struct device *hwdev, struct scatterlist *sg,
		int nents, enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		sg->dma_address = (page_to_phys(sg->page) + sg->offset)
					| cell_dma_valid;
		sg->dma_length = sg->length;
	}

	return nents;
}
static void cell_unmap_sg(struct device *hwdev, struct scatterlist *sg,
		int nents, enum dma_data_direction direction)
{
	/* nothing to undo: the mapping is static */
}
static int cell_dma_supported(struct device *dev, u64 mask)
{
	/* only DMA masks below 4GB are accepted */
	return mask < 0x100000000ull;
}
static struct dma_mapping_ops cell_iommu_ops = {
	.alloc_coherent = cell_alloc_coherent,
	.free_coherent = cell_free_coherent,
	.map_single = cell_map_single,
	.unmap_single = cell_unmap_single,
	.map_sg = cell_map_sg,
	.unmap_sg = cell_unmap_sg,
	.dma_supported = cell_dma_supported,
};
void cell_init_iommu(void)
{
	int setup_bus = 0;

	/* If we have an Axon bridge, clear the DMA valid mask. This is fairly
	 * hackish but will work well enough until we have proper iommu code. */
	if (of_find_node_by_name(NULL, "axon"))
		cell_dma_valid = 0;

	if (of_find_node_by_path("/mambo")) {
		pr_info("Not using iommu on systemsim\n");
	} else {

		if (!(of_chosen &&
		      get_property(of_chosen, "linux,iommu-off", NULL)))
			setup_bus = cell_map_iommu();

		if (setup_bus) {
			pr_debug("%s: IOMMU mapping activated\n",
				 __FUNCTION__);
			ppc_md.pci_dma_bus_setup = pci_dma_cell_bus_setup;
		} else {
			/* either cell_map_iommu_hardcoded() already set up
			 * the mapping, or the IOMMU is off and we do
			 * direct I/O */
			pr_debug("%s: IOMMU mapping activated, "
				 "no device action necessary\n", __FUNCTION__);
		}
	}

	pci_dma_ops = &cell_iommu_ops;
}