/*
 * PS3 address space management.
 *
 * Copyright (C) 2006 Sony Computer Entertainment Inc.
 * Copyright 2006 Sony Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/firmware.h>
#include <asm/lv1call.h>

#define DBG udbg_printf

enum {
#if defined(CONFIG_PS3_DYNAMIC_DMA)
	USE_DYNAMIC_DMA = 1,
#else
	USE_DYNAMIC_DMA = 0,
#endif
};

static unsigned long make_page_sizes(unsigned long a, unsigned long b)
{
	return (a << 56) | (b << 48);
}
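
/*
 * A worked example of the encoding above (a sketch, assuming the usual
 * PS3 platform values PAGE_SHIFT_16M = 24 and PAGE_SHIFT_64K = 16):
 *
 *	make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K)
 *		= (24UL << 56) | (16UL << 48)
 *		= 0x1810000000000000UL
 *
 * The two page size shifts land in the top two bytes of the value passed
 * to lv1_construct_virtual_address_space().
 */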
enum {
	ALLOCATE_MEMORY_TRY_ALT_UNIT = 0x04,
	ALLOCATE_MEMORY_ADDR_ZERO = 0x08,
};

/* valid htab sizes are {18,19,20} = 256K, 512K, 1M */

enum {
	HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */
	HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */
};

/*============================================================================*/
/* virtual address space routines */
/*============================================================================*/

/**
 * struct mem_region - memory region structure
 * @base: base address
 * @size: size in bytes
 * @offset: difference between base and rm.size
 */

struct mem_region {
	unsigned long base;
	unsigned long size;
	unsigned long offset;
};

/**
 * struct map - address space state variables holder
 * @total: total memory available as reported by HV
 * @vas_id: HV virtual address space id
 * @htab_size: htab size in bytes
 *
 * The HV virtual address space (vas) allows for hotplug memory regions.
 * Memory regions can be created and destroyed in the vas at runtime.
 * @rm: real mode (bootmem) region
 * @r1: hotplug memory region(s)
 *
 * ps3 addresses:
 * virt_addr: a cpu 'translated' effective address
 * phys_addr: an address in what Linux thinks is the physical address space
 * lpar_addr: an address in the HV virtual address space
 * bus_addr: an io controller 'translated' address on a device bus
 */

struct map {
	unsigned long total;
	unsigned long vas_id;
	unsigned long htab_size;
	struct mem_region rm;
	struct mem_region r1;
};

#define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
static void __maybe_unused _debug_dump_map(const struct map *m,
	const char *func, int line)
{
	DBG("%s:%d: map.total = %lxh\n", func, line, m->total);
	DBG("%s:%d: map.rm.size = %lxh\n", func, line, m->rm.size);
	DBG("%s:%d: map.vas_id = %lu\n", func, line, m->vas_id);
	DBG("%s:%d: map.htab_size = %lxh\n", func, line, m->htab_size);
	DBG("%s:%d: map.r1.base = %lxh\n", func, line, m->r1.base);
	DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
	DBG("%s:%d: map.r1.size = %lxh\n", func, line, m->r1.size);
}

static struct map map;

/**
 * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address
 * @phys_addr: linux physical address
 */

unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
{
	BUG_ON(is_kernel_addr(phys_addr));
	return (phys_addr < map.rm.size || phys_addr >= map.total)
		? phys_addr : phys_addr + map.r1.offset;
}

EXPORT_SYMBOL(ps3_mm_phys_to_lpar);
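
/*
 * A sketch of the translation, using a hypothetical layout where
 * map.rm.size = 0x8000000 (128M of real mode memory), map.r1.offset =
 * 0x10000000, and map.total = 0xd800000:
 *
 *	ps3_mm_phys_to_lpar(0x1000000) -> 0x1000000   (in rm, unchanged)
 *	ps3_mm_phys_to_lpar(0x9000000) -> 0x19000000  (in r1, shifted up
 *						       by map.r1.offset)
 *
 * Addresses below map.rm.size are identity mapped; addresses in the
 * hotplug region are relocated into the HV lpar address space.
 */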
/**
 * ps3_mm_vas_create - create the virtual address space
 */

void __init ps3_mm_vas_create(unsigned long *htab_size)
{
	int result;
	unsigned long start_address;
	unsigned long size;
	unsigned long access_right;
	unsigned long max_page_size;
	unsigned long flags;

	result = lv1_query_logical_partition_address_region_info(0,
		&start_address, &size, &access_right, &max_page_size,
		&flags);

	if (result) {
		DBG("%s:%d: lv1_query_logical_partition_address_region_info "
			"failed: %s\n", __func__, __LINE__,
			ps3_result(result));
		goto fail;
	}

	if (max_page_size < PAGE_SHIFT_16M) {
		DBG("%s:%d: bad max_page_size %lxh\n", __func__, __LINE__,
			max_page_size);
		goto fail;
	}

	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX);
	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN);

	result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
		2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
		&map.vas_id, &map.htab_size);

	if (result) {
		DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	result = lv1_select_virtual_address_space(map.vas_id);

	if (result) {
		DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	*htab_size = map.htab_size;

	debug_dump_map(&map);

	return;

fail:
	panic("ps3_mm_vas_create failed");
}

/**
 * ps3_mm_vas_destroy - destroy the current virtual address space
 */

void ps3_mm_vas_destroy(void)
{
	int result;

	DBG("%s:%d: map.vas_id = %lu\n", __func__, __LINE__, map.vas_id);

	if (map.vas_id) {
		result = lv1_select_virtual_address_space(0);
		result = lv1_destruct_virtual_address_space(map.vas_id);
		map.vas_id = 0;
	}
}

/*============================================================================*/
/* memory hotplug routines */
/*============================================================================*/

/**
 * ps3_mm_region_create - create a memory region in the vas
 * @r: pointer to a struct mem_region to accept initialized values
 * @size: requested region size
 *
 * This implementation creates the region with the vas large page size.
 * @size is rounded down to a multiple of the vas large page size.
 */

static int ps3_mm_region_create(struct mem_region *r, unsigned long size)
{
	int result;
	unsigned long muid;

	r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);

	DBG("%s:%d requested %lxh\n", __func__, __LINE__, size);
	DBG("%s:%d actual %lxh\n", __func__, __LINE__, r->size);
	DBG("%s:%d difference %lxh (%luMB)\n", __func__, __LINE__,
		(unsigned long)(size - r->size),
		(size - r->size) / 1024 / 1024);

	if (r->size == 0) {
		DBG("%s:%d: size == 0\n", __func__, __LINE__);
		result = -1;
		goto zero_region;
	}

	result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
		ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);

	if (result || r->base < map.rm.size) {
		DBG("%s:%d: lv1_allocate_memory failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto zero_region;
	}

	r->offset = r->base - map.rm.size;
	return result;

zero_region:
	r->size = r->base = r->offset = 0;
	return result;
}
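
/*
 * For example (a sketch): a request of size = 0x1700000 (23M) is rounded
 * down by _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M) to r->size = 0x1000000
 * (16M), and the difference of 0x700000 (7MB) is what the DBG output
 * above reports.
 */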
/**
 * ps3_mm_region_destroy - destroy a memory region
 * @r: pointer to struct mem_region
 */

static void ps3_mm_region_destroy(struct mem_region *r)
{
	int result;

	DBG("%s:%d: r->base = %lxh\n", __func__, __LINE__, r->base);
	if (r->base) {
		result = lv1_release_memory(r->base);
		BUG_ON(result);
		r->size = r->base = r->offset = 0;
		map.total = map.rm.size;
	}
}

/**
 * ps3_mm_add_memory - hot add memory
 */

static int __init ps3_mm_add_memory(void)
{
	int result;
	unsigned long start_addr;
	unsigned long start_pfn;
	unsigned long nr_pages;

	if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
		return -ENODEV;

	BUG_ON(!mem_init_done);

	start_addr = map.rm.size;
	start_pfn = start_addr >> PAGE_SHIFT;
	nr_pages = (map.r1.size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	DBG("%s:%d: start_addr %lxh, start_pfn %lxh, nr_pages %lxh\n",
		__func__, __LINE__, start_addr, start_pfn, nr_pages);

	result = add_memory(0, start_addr, map.r1.size);

	if (result) {
		DBG("%s:%d: add_memory failed: (%d)\n",
			__func__, __LINE__, result);
		return result;
	}

	result = online_pages(start_pfn, nr_pages);

	if (result) {
		DBG("%s:%d: online_pages failed: (%d)\n",
			__func__, __LINE__, result);
		return result;
	}

	return result;
}

core_initcall(ps3_mm_add_memory);
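
/*
 * A worked example with made-up sizes: with map.rm.size = 0x8000000 and a
 * 16M aligned map.r1.size = 0xd000000, the hotplugged region begins at
 * start_pfn = 0x8000000 >> PAGE_SHIFT (0x8000 with 4K pages) and spans
 * nr_pages = 0xd000000 >> PAGE_SHIFT = 0xd000 pages.
 */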
/*============================================================================*/
/* dma routines */
/*============================================================================*/

/**
 * dma_sb_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
 * @r: pointer to dma region structure
 * @lpar_addr: HV lpar address
 */

static unsigned long dma_sb_lpar_to_bus(struct ps3_dma_region *r,
	unsigned long lpar_addr)
{
	if (lpar_addr >= map.rm.size)
		lpar_addr -= map.r1.offset;
	BUG_ON(lpar_addr < r->offset);
	BUG_ON(lpar_addr >= r->offset + r->len);
	return r->bus_addr + lpar_addr - r->offset;
}
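
/*
 * A sketch of the reverse translation, reusing the hypothetical layout
 * from the ps3_mm_phys_to_lpar() example (map.rm.size = 0x8000000,
 * map.r1.offset = 0x10000000) with a region where r->offset = 0 and
 * r->bus_addr = 0x80000000:
 *
 *	dma_sb_lpar_to_bus(r, 0x19000000)
 *		lpar_addr -= map.r1.offset	-> 0x9000000
 *		r->bus_addr + 0x9000000 - 0	-> 0x89000000
 *
 * The lpar address is first folded back to a linux physical address,
 * then rebased into the ioc bus window.
 */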
#define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
static void __maybe_unused _dma_dump_region(const struct ps3_dma_region *r,
	const char *func, int line)
{
	DBG("%s:%d: dev %lu:%lu\n", func, line, r->dev->bus_id,
		r->dev->dev_id);
	DBG("%s:%d: page_size %u\n", func, line, r->page_size);
	DBG("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr);
	DBG("%s:%d: len %lxh\n", func, line, r->len);
	DBG("%s:%d: offset %lxh\n", func, line, r->offset);
}

/**
 * dma_chunk - A chunk of dma pages mapped by the io controller.
 * @region: The dma region that owns this chunk.
 * @lpar_addr: Starting lpar address of the area to map.
 * @bus_addr: Starting ioc bus address of the area to map.
 * @len: Length in bytes of the area to map.
 * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
 * list of all chunks owned by the region.
 *
 * This implementation uses a very simple dma page manager
 * based on the dma_chunk structure. This scheme assumes
 * that all drivers use very well behaved dma ops.
 */

struct dma_chunk {
	struct ps3_dma_region *region;
	unsigned long lpar_addr;
	unsigned long bus_addr;
	unsigned long len;
	struct list_head link;
	unsigned int usage_count;
};

#define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)
static void _dma_dump_chunk(const struct dma_chunk *c, const char *func,
	int line)
{
	DBG("%s:%d: r.dev %lu:%lu\n", func, line,
		c->region->dev->bus_id, c->region->dev->dev_id);
	DBG("%s:%d: r.bus_addr %lxh\n", func, line, c->region->bus_addr);
	DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size);
	DBG("%s:%d: r.len %lxh\n", func, line, c->region->len);
	DBG("%s:%d: r.offset %lxh\n", func, line, c->region->offset);
	DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr);
	DBG("%s:%d: c.bus_addr %lxh\n", func, line, c->bus_addr);
	DBG("%s:%d: c.len %lxh\n", func, line, c->len);
}
static struct dma_chunk *dma_find_chunk(struct ps3_dma_region *r,
	unsigned long bus_addr, unsigned long len)
{
	struct dma_chunk *c;
	unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + bus_addr - aligned_bus,
		1 << r->page_size);

	list_for_each_entry(c, &r->chunk_list.head, link) {
		/* intersection */
		if (aligned_bus >= c->bus_addr &&
		    aligned_bus + aligned_len <= c->bus_addr + c->len)
			return c;

		/* below */
		if (aligned_bus + aligned_len <= c->bus_addr)
			continue;

		/* above */
		if (aligned_bus >= c->bus_addr + c->len)
			continue;

		/* we don't handle the multi-chunk case for now */
		dma_dump_chunk(c);
		BUG();
	}
	return NULL;
}

static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r,
	unsigned long lpar_addr, unsigned long len)
{
	struct dma_chunk *c;
	unsigned long aligned_lpar = _ALIGN_DOWN(lpar_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + lpar_addr - aligned_lpar,
		1 << r->page_size);

	list_for_each_entry(c, &r->chunk_list.head, link) {
		/* intersection */
		if (c->lpar_addr <= aligned_lpar &&
		    aligned_lpar < c->lpar_addr + c->len) {
			if (aligned_lpar + aligned_len <=
			    c->lpar_addr + c->len) {
				return c;
			} else {
				dma_dump_chunk(c);
				BUG();
			}
		}

		/* below */
		if (aligned_lpar + aligned_len <= c->lpar_addr)
			continue;

		/* above */
		if (c->lpar_addr + c->len <= aligned_lpar)
			continue;
	}
	return NULL;
}

static int dma_sb_free_chunk(struct dma_chunk *c)
{
	int result = 0;

	if (c->bus_addr) {
		result = lv1_unmap_device_dma_region(c->region->dev->bus_id,
			c->region->dev->dev_id, c->bus_addr, c->len);
		BUG_ON(result);
	}
	kfree(c);
	return result;
}

static int dma_ioc0_free_chunk(struct dma_chunk *c)
{
	int result = 0;
	int iopage;
	unsigned long offset;
	struct ps3_dma_region *r = c->region;

	DBG("%s:start\n", __func__);
	for (iopage = 0; iopage < (c->len >> r->page_size); iopage++) {
		offset = (1 << r->page_size) * iopage;
		/* put INVALID entry */
		result = lv1_put_iopte(0,
			c->bus_addr + offset,
			c->lpar_addr + offset,
			r->ioid,
			0);
		DBG("%s: bus=%#lx, lpar=%#lx, ioid=%d\n", __func__,
			c->bus_addr + offset,
			c->lpar_addr + offset,
			r->ioid);

		if (result) {
			DBG("%s:%d: lv1_put_iopte failed: %s\n", __func__,
				__LINE__, ps3_result(result));
		}
	}
	kfree(c);
	DBG("%s:end\n", __func__);
	return result;
}
/**
 * dma_sb_map_pages - Maps dma pages into the io controller bus address space.
 * @r: Pointer to a struct ps3_dma_region.
 * @phys_addr: Starting physical address of the area to map.
 * @len: Length in bytes of the area to map.
 * @c_out: A pointer to receive an allocated struct dma_chunk for this area.
 *
 * This is the lowest level dma mapping routine, and is the one that will
 * make the HV call to add the pages into the io controller address space.
 */

static int dma_sb_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
	unsigned long len, struct dma_chunk **c_out, u64 iopte_flag)
{
	int result;
	struct dma_chunk *c;

	c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);

	if (!c) {
		result = -ENOMEM;
		goto fail_alloc;
	}

	c->region = r;
	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
	c->bus_addr = dma_sb_lpar_to_bus(r, c->lpar_addr);
	c->len = len;

	BUG_ON(iopte_flag != 0xf800000000000000UL);
	result = lv1_map_device_dma_region(c->region->dev->bus_id,
		c->region->dev->dev_id, c->lpar_addr,
		c->bus_addr, c->len, iopte_flag);

	if (result) {
		DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail_map;
	}

	list_add(&c->link, &r->chunk_list.head);

	*c_out = c;
	return 0;

fail_map:
	kfree(c);
fail_alloc:
	*c_out = NULL;
	DBG(" <- %s:%d\n", __func__, __LINE__);
	return result;
}
static int dma_ioc0_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
	unsigned long len, struct dma_chunk **c_out,
	u64 iopte_flag)
{
	int result;
	struct dma_chunk *c, *last;
	int iopage, pages;
	unsigned long offset;

	DBG("%s: phy=%#lx, lpar=%#lx, len=%#lx\n", __func__,
		phys_addr, ps3_mm_phys_to_lpar(phys_addr), len);
	c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);

	if (!c) {
		result = -ENOMEM;
		goto fail_alloc;
	}

	c->region = r;
	c->len = len;
	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
	/* allocate IO address */
	if (list_empty(&r->chunk_list.head)) {
		/* first one */
		c->bus_addr = r->bus_addr;
	} else {
		/* derive from last bus addr */
		last = list_entry(r->chunk_list.head.next,
			struct dma_chunk, link);
		c->bus_addr = last->bus_addr + last->len;
		DBG("%s: last bus=%#lx, len=%#lx\n", __func__,
			last->bus_addr, last->len);
	}

	/* FIXME: check whether length exceeds region size */

	/* build ioptes for the area */
	pages = len >> r->page_size;
	DBG("%s: pgsize=%#x len=%#lx pages=%#x iopteflag=%#lx\n", __func__,
		r->page_size, r->len, pages, iopte_flag);
	for (iopage = 0; iopage < pages; iopage++) {
		offset = (1 << r->page_size) * iopage;
		result = lv1_put_iopte(0,
			c->bus_addr + offset,
			c->lpar_addr + offset,
			r->ioid,
			iopte_flag);
		if (result) {
			printk(KERN_WARNING "%s:%d: lv1_put_iopte "
				"failed: %s\n", __func__, __LINE__,
				ps3_result(result));
			goto fail_map;
		}
		DBG("%s: pg=%d bus=%#lx, lpar=%#lx, ioid=%#x\n", __func__,
			iopage, c->bus_addr + offset, c->lpar_addr + offset,
			r->ioid);
	}

	/* be sure that last allocated one is inserted at head */
	list_add(&c->link, &r->chunk_list.head);

	*c_out = c;
	DBG("%s: end\n", __func__);
	return 0;

fail_map:
	for (iopage--; 0 <= iopage; iopage--) {
		/* recompute offset for each page being invalidated */
		offset = (1 << r->page_size) * iopage;
		lv1_put_iopte(0,
			c->bus_addr + offset,
			c->lpar_addr + offset,
			r->ioid,
			0);
	}
	kfree(c);
fail_alloc:
	*c_out = NULL;
	return result;
}
/**
 * dma_sb_region_create - Create a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region create routine, and is the one that
 * will make the HV call to create the region.
 */

static int dma_sb_region_create(struct ps3_dma_region *r)
{
	int result;

	pr_info(" -> %s:%d:\n", __func__, __LINE__);

	BUG_ON(!r);

	if (!r->dev->bus_id) {
		pr_info("%s:%d: %lu:%lu no dma\n", __func__, __LINE__,
			r->dev->bus_id, r->dev->dev_id);
		return 0;
	}

	DBG("%s:%u: len = 0x%lx, page_size = %u, offset = 0x%lx\n", __func__,
		__LINE__, r->len, r->page_size, r->offset);

	BUG_ON(!r->len);
	BUG_ON(!r->page_size);
	BUG_ON(!r->region_ops);

	INIT_LIST_HEAD(&r->chunk_list.head);
	spin_lock_init(&r->chunk_list.lock);

	result = lv1_allocate_device_dma_region(r->dev->bus_id, r->dev->dev_id,
		roundup_pow_of_two(r->len), r->page_size, r->region_type,
		&r->bus_addr);

	if (result) {
		DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		r->len = r->bus_addr = 0;
	}

	return result;
}

static int dma_ioc0_region_create(struct ps3_dma_region *r)
{
	int result;

	INIT_LIST_HEAD(&r->chunk_list.head);
	spin_lock_init(&r->chunk_list.lock);

	result = lv1_allocate_io_segment(0,
		r->len,
		r->page_size,
		&r->bus_addr);

	if (result) {
		DBG("%s:%d: lv1_allocate_io_segment failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		r->len = r->bus_addr = 0;
	}
	DBG("%s: len=%#lx, pg=%d, bus=%#lx\n", __func__,
		r->len, r->page_size, r->bus_addr);
	return result;
}
/**
 * dma_sb_region_free - Free a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region free routine, and is the one that
 * will make the HV call to free the region.
 */

static int dma_sb_region_free(struct ps3_dma_region *r)
{
	int result;
	struct dma_chunk *c;
	struct dma_chunk *tmp;

	BUG_ON(!r);

	if (!r->dev->bus_id) {
		pr_info("%s:%d: %lu:%lu no dma\n", __func__, __LINE__,
			r->dev->bus_id, r->dev->dev_id);
		return 0;
	}

	list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
		list_del(&c->link);
		dma_sb_free_chunk(c);
	}

	result = lv1_free_device_dma_region(r->dev->bus_id, r->dev->dev_id,
		r->bus_addr);

	if (result)
		DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	r->bus_addr = 0;

	return result;
}

static int dma_ioc0_region_free(struct ps3_dma_region *r)
{
	int result;
	struct dma_chunk *c, *n;

	DBG("%s: start\n", __func__);
	list_for_each_entry_safe(c, n, &r->chunk_list.head, link) {
		list_del(&c->link);
		dma_ioc0_free_chunk(c);
	}

	result = lv1_release_io_segment(0, r->bus_addr);

	if (result)
		DBG("%s:%d: lv1_release_io_segment failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	r->bus_addr = 0;
	DBG("%s: end\n", __func__);

	return result;
}
/**
 * dma_sb_map_area - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This is the common dma mapping routine.
 */

static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, unsigned long *bus_addr,
	u64 iopte_flag)
{
	int result;
	unsigned long flags;
	struct dma_chunk *c;
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;
	unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
		1 << r->page_size);

	*bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));

	if (!USE_DYNAMIC_DMA) {
		unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
		DBG(" -> %s:%d\n", __func__, __LINE__);
		DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__,
			virt_addr);
		DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__,
			phys_addr);
		DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
			lpar_addr);
		DBG("%s:%d len %lxh\n", __func__, __LINE__, len);
		DBG("%s:%d bus_addr %lxh (%lxh)\n", __func__, __LINE__,
			*bus_addr, len);
	}

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, *bus_addr, len);

	if (c) {
		DBG("%s:%d: reusing mapped chunk", __func__, __LINE__);
		dma_dump_chunk(c);
		c->usage_count++;
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return 0;
	}

	result = dma_sb_map_pages(r, aligned_phys, aligned_len, &c, iopte_flag);

	if (result) {
		*bus_addr = 0;
		DBG("%s:%d: dma_sb_map_pages failed (%d)\n",
			__func__, __LINE__, result);
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return result;
	}

	c->usage_count = 1;

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return 0;
}
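
/*
 * A sketch of the chunk reuse implemented above (addresses made up):
 * mappings that fall within one aligned window share a single chunk, and
 * the chunk is only torn down when its usage_count drops to zero in
 * dma_sb_unmap_area():
 *
 *	dma_sb_map_area(r, va, 0x100, &b1, f);       // new chunk, count = 1
 *	dma_sb_map_area(r, va + 0x10, 0x20, &b2, f); // reused,    count = 2
 *	dma_sb_unmap_area(r, b2, 0x20);              //            count = 1
 *	dma_sb_unmap_area(r, b1, 0x100);             // count = 0, freed
 */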
static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, unsigned long *bus_addr,
	u64 iopte_flag)
{
	int result;
	unsigned long flags;
	struct dma_chunk *c;
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;
	unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
		1 << r->page_size);

	DBG("%s: vaddr=%#lx, len=%#lx\n", __func__,
		virt_addr, len);
	DBG("%s: ph=%#lx a_ph=%#lx a_l=%#lx\n", __func__,
		phys_addr, aligned_phys, aligned_len);

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk_lpar(r, ps3_mm_phys_to_lpar(phys_addr), len);

	if (c) {
		/* reuse an existing mapping */
		*bus_addr = c->bus_addr + phys_addr - aligned_phys;
		c->usage_count++;
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return 0;
	}

	result = dma_ioc0_map_pages(r, aligned_phys, aligned_len, &c,
		iopte_flag);

	if (result) {
		*bus_addr = 0;
		DBG("%s:%d: dma_ioc0_map_pages failed (%d)\n",
			__func__, __LINE__, result);
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return result;
	}
	*bus_addr = c->bus_addr + phys_addr - aligned_phys;
	DBG("%s: va=%#lx pa=%#lx a_pa=%#lx bus=%#lx\n", __func__,
		virt_addr, phys_addr, aligned_phys, *bus_addr);
	c->usage_count = 1;

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return 0;
}
/**
 * dma_sb_unmap_area - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This is the common dma unmap routine.
 */

static int dma_sb_unmap_area(struct ps3_dma_region *r, unsigned long bus_addr,
	unsigned long len)
{
	unsigned long flags;
	struct dma_chunk *c;

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, bus_addr, len);

	if (!c) {
		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
			1 << r->page_size);
		unsigned long aligned_len = _ALIGN_UP(len + bus_addr
			- aligned_bus, 1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %lxh\n",
			__func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
			__func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
			__func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
			__func__, __LINE__, aligned_len);
		BUG();
	}

	c->usage_count--;

	if (!c->usage_count) {
		list_del(&c->link);
		dma_sb_free_chunk(c);
	}

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return 0;
}

static int dma_ioc0_unmap_area(struct ps3_dma_region *r,
	unsigned long bus_addr, unsigned long len)
{
	unsigned long flags;
	struct dma_chunk *c;

	DBG("%s: start a=%#lx l=%#lx\n", __func__, bus_addr, len);
	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, bus_addr, len);

	if (!c) {
		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
			1 << r->page_size);
		unsigned long aligned_len = _ALIGN_UP(len + bus_addr
			- aligned_bus, 1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %lxh\n",
			__func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
			__func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
			__func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
			__func__, __LINE__, aligned_len);
		BUG();
	}

	c->usage_count--;

	if (!c->usage_count) {
		list_del(&c->link);
		dma_ioc0_free_chunk(c);
	}

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	DBG("%s: end\n", __func__);
	return 0;
}
/**
 * dma_sb_region_create_linear - Setup a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine creates an HV dma region for the device and maps all available
 * ram into the io controller bus address space.
 */

static int dma_sb_region_create_linear(struct ps3_dma_region *r)
{
	int result;
	unsigned long virt_addr, len, tmp;

	if (r->len > 16*1024*1024) { /* FIXME: need proper fix */
		/* force 16M dma pages for linear mapping */
		if (r->page_size != PS3_DMA_16M) {
			pr_info("%s:%d: forcing 16M pages for linear map\n",
				__func__, __LINE__);
			r->page_size = PS3_DMA_16M;
			r->len = _ALIGN_UP(r->len, 1 << r->page_size);
		}
	}

	result = dma_sb_region_create(r);
	BUG_ON(result);

	if (r->offset < map.rm.size) {
		/* Map (part of) 1st RAM chunk */
		virt_addr = map.rm.base + r->offset;
		len = map.rm.size - r->offset;
		if (len > r->len)
			len = r->len;
		result = dma_sb_map_area(r, virt_addr, len, &tmp,
			IOPTE_PP_W | IOPTE_PP_R | IOPTE_SO_RW | IOPTE_M);
		BUG_ON(result);
	}

	if (r->offset + r->len > map.rm.size) {
		/* Map (part of) 2nd RAM chunk */
		virt_addr = map.rm.size;
		len = r->len;
		if (r->offset >= map.rm.size)
			virt_addr += r->offset - map.rm.size;
		else
			len -= map.rm.size - r->offset;
		result = dma_sb_map_area(r, virt_addr, len, &tmp,
			IOPTE_PP_W | IOPTE_PP_R | IOPTE_SO_RW | IOPTE_M);
		BUG_ON(result);
	}

	return result;
}
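
/*
 * A sketch of the two-chunk split above, with the same hypothetical
 * layout as earlier (map.rm.size = 0x8000000) and a region with
 * r->offset = 0 and r->len = 0xd800000:
 *
 *	1st chunk: virt_addr = map.rm.base, len = 0x8000000
 *	2nd chunk: virt_addr = map.rm.size, len = 0xd800000 - 0x8000000
 *						= 0x5800000
 *
 * A region that starts above map.rm.size skips the first call and maps
 * only from the second RAM chunk.
 */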
/**
 * dma_sb_region_free_linear - Free a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine will unmap all mapped areas and free the HV dma region.
 */

static int dma_sb_region_free_linear(struct ps3_dma_region *r)
{
	int result;
	unsigned long bus_addr, len, lpar_addr;

	if (r->offset < map.rm.size) {
		/* Unmap (part of) 1st RAM chunk */
		lpar_addr = map.rm.base + r->offset;
		len = map.rm.size - r->offset;
		if (len > r->len)
			len = r->len;
		bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
		result = dma_sb_unmap_area(r, bus_addr, len);
		BUG_ON(result);
	}

	if (r->offset + r->len > map.rm.size) {
		/* Unmap (part of) 2nd RAM chunk */
		lpar_addr = map.r1.base;
		len = r->len;
		if (r->offset >= map.rm.size)
			lpar_addr += r->offset - map.rm.size;
		else
			len -= map.rm.size - r->offset;
		bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
		result = dma_sb_unmap_area(r, bus_addr, len);
		BUG_ON(result);
	}

	result = dma_sb_region_free(r);
	BUG_ON(result);

	return result;
}

/**
 * dma_sb_map_area_linear - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This routine just returns the corresponding bus address. Actual mapping
 * occurs in dma_sb_region_create_linear().
 */

static int dma_sb_map_area_linear(struct ps3_dma_region *r,
	unsigned long virt_addr, unsigned long len, unsigned long *bus_addr,
	u64 iopte_flag)
{
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;
	*bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
	return 0;
}

/**
 * dma_sb_unmap_area_linear - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This routine does nothing. Unmapping occurs in dma_sb_region_free_linear().
 */

static int dma_sb_unmap_area_linear(struct ps3_dma_region *r,
	unsigned long bus_addr, unsigned long len)
{
	return 0;
}
static const struct ps3_dma_region_ops ps3_dma_sb_region_ops = {
	.create = dma_sb_region_create,
	.free = dma_sb_region_free,
	.map = dma_sb_map_area,
	.unmap = dma_sb_unmap_area
};

static const struct ps3_dma_region_ops ps3_dma_sb_region_linear_ops = {
	.create = dma_sb_region_create_linear,
	.free = dma_sb_region_free_linear,
	.map = dma_sb_map_area_linear,
	.unmap = dma_sb_unmap_area_linear
};

static const struct ps3_dma_region_ops ps3_dma_ioc0_region_ops = {
	.create = dma_ioc0_region_create,
	.free = dma_ioc0_region_free,
	.map = dma_ioc0_map_area,
	.unmap = dma_ioc0_unmap_area
};
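
/*
 * A sketch of typical driver usage of these ops tables (the device, the
 * buffer, and the page size and region type values are made up for
 * illustration; real callers pass their own struct ps3_system_bus_device
 * and iopte flags):
 *
 *	struct ps3_dma_region dma_region;
 *	unsigned long bus_addr;
 *
 *	ps3_dma_region_init(dev, &dma_region, PS3_DMA_64K, PS3_DMA_OTHER,
 *			    NULL, 0);
 *	ps3_dma_region_create(&dma_region);
 *	ps3_dma_map(&dma_region, (unsigned long)buf, buf_len, &bus_addr,
 *		    iopte_flag);
 *	...
 *	ps3_dma_unmap(&dma_region, bus_addr, buf_len);
 *	ps3_dma_region_free(&dma_region);
 *
 * The ops table selected in ps3_dma_region_init() below routes each call
 * to the sb, sb linear, or ioc0 variant.
 */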
int ps3_dma_region_init(struct ps3_system_bus_device *dev,
	struct ps3_dma_region *r, enum ps3_dma_page_size page_size,
	enum ps3_dma_region_type region_type, void *addr, unsigned long len)
{
	unsigned long lpar_addr;

	lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0;

	r->dev = dev;
	r->page_size = page_size;
	r->region_type = region_type;
	r->offset = lpar_addr;
	if (r->offset >= map.rm.size)
		r->offset -= map.r1.offset;
	r->len = len ? len : _ALIGN_UP(map.total, 1 << r->page_size);

	switch (dev->dev_type) {
	case PS3_DEVICE_TYPE_SB:
		r->region_ops = (USE_DYNAMIC_DMA)
			? &ps3_dma_sb_region_ops
			: &ps3_dma_sb_region_linear_ops;
		break;
	case PS3_DEVICE_TYPE_IOC0:
		r->region_ops = &ps3_dma_ioc0_region_ops;
		break;
	default:
		BUG();
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(ps3_dma_region_init);

int ps3_dma_region_create(struct ps3_dma_region *r)
{
	BUG_ON(!r);
	BUG_ON(!r->region_ops);
	BUG_ON(!r->region_ops->create);
	return r->region_ops->create(r);
}
EXPORT_SYMBOL(ps3_dma_region_create);

int ps3_dma_region_free(struct ps3_dma_region *r)
{
	BUG_ON(!r);
	BUG_ON(!r->region_ops);
	BUG_ON(!r->region_ops->free);
	return r->region_ops->free(r);
}
EXPORT_SYMBOL(ps3_dma_region_free);

int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, unsigned long *bus_addr,
	u64 iopte_flag)
{
	return r->region_ops->map(r, virt_addr, len, bus_addr, iopte_flag);
}
EXPORT_SYMBOL(ps3_dma_map);

int ps3_dma_unmap(struct ps3_dma_region *r, unsigned long bus_addr,
	unsigned long len)
{
	return r->region_ops->unmap(r, bus_addr, len);
}
EXPORT_SYMBOL(ps3_dma_unmap);
/*============================================================================*/
/* system startup routines */
/*============================================================================*/

/**
 * ps3_mm_init - initialize the address space state variables
 */

void __init ps3_mm_init(void)
{
	int result;

	DBG(" -> %s:%d\n", __func__, __LINE__);

	result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,
		&map.total);

	if (result)
		panic("ps3_repository_read_mm_info() failed");

	map.rm.offset = map.rm.base;
	map.vas_id = map.htab_size = 0;

	/* this implementation assumes map.rm.base is zero */

	BUG_ON(map.rm.base);
	BUG_ON(!map.rm.size);

	/* arrange to do this in ps3_mm_add_memory */
	ps3_mm_region_create(&map.r1, map.total - map.rm.size);

	/* correct map.total for the real total amount of memory we use */
	map.total = map.rm.size + map.r1.size;

	DBG(" <- %s:%d\n", __func__, __LINE__);
}

/**
 * ps3_mm_shutdown - final cleanup of address space
 */

void ps3_mm_shutdown(void)
{
	ps3_mm_region_destroy(&map.r1);
}