/*
 * Copyright (C) 2001-2006 Silicon Graphics, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * A simple uncached page allocator using the generic allocator. This
 * allocator first utilizes the spare (spill) pages found in the EFI
 * memmap and will then start converting cached pages to uncached ones
 * a granule at a time. Node awareness is implemented by having a
 * pool of pages per node.
 */
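
/*
 * The interface to the rest of the kernel consists of
 * uncached_alloc_page(), which hands out one uncached page at a time,
 * and uncached_free_page(), which returns a page to its node's pool
 * (both defined below).
 */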

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/efi.h>
#include <linux/genalloc.h>
#include <asm/page.h>
#include <asm/pal.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/sn/arch.h>

extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);

#define MAX_UNCACHED_GRANULES	5
static int allocated_granules;

struct gen_pool *uncached_pool[MAX_NUMNODES];
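

/*
 * Run on every CPU (via smp_call_function() from uncached_add_chunk())
 * as part of the architected sequence for changing memory from cacheable
 * to uncacheable: each CPU switches its prefetch visibility to physical
 * mode before the new uncached mapping is used.
 */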
static void uncached_ipi_visibility(void *data)
{
        int status;

        status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
        if ((status != PAL_VISIBILITY_OK) &&
            (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
                printk(KERN_DEBUG "pal_prefetch_visibility() returns %i on "
                       "CPU %i\n", status, raw_smp_processor_id());
}
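
/*
 * Run on every CPU (again via smp_call_function() from
 * uncached_add_chunk()) to drain that CPU's outstanding memory operations
 * with PAL_MC_DRAIN once the granule's cache lines have been flushed.
 */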
static void uncached_ipi_mc_drain(void *data)
{
        int status;

        status = ia64_pal_mc_drain();
        if (status)
                printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on "
                       "CPU %i\n", status, raw_smp_processor_id());
}


/*
 * Add a new chunk of uncached memory pages to the specified pool.
 *
 * @pool: pool to add new chunk of uncached memory to
 * @nid: node id of node to allocate memory from, or -1
 *
 * This is accomplished by first allocating a granule of cached memory pages
 * and then converting them to uncached memory pages.
 */
static int uncached_add_chunk(struct gen_pool *pool, int nid)
{
        struct page *page;
        int status, i;
        unsigned long c_addr, uc_addr;

        if (allocated_granules >= MAX_UNCACHED_GRANULES)
                return -1;

        /* attempt to allocate a granule's worth of cached memory pages */

        page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO,
                                IA64_GRANULE_SHIFT-PAGE_SHIFT);
        if (!page)
                return -1;

        /* convert the memory pages from cached to uncached */

        c_addr = (unsigned long)page_address(page);
        uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;
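        /*
         * c_addr is the granule seen through the normal cached kernel
         * mapping; uc_addr is the same physical memory seen through the
         * kernel's uncached identity mapping (__IA64_UNCACHED_OFFSET).
         */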

        /*
         * There's a small race here where it's possible for someone to
         * access the page through /dev/mem halfway through the conversion
         * to uncached - not sure it's really worth bothering about
         */
        for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
                SetPageUncached(&page[i]);

        flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);
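
        /*
         * Switch prefetch visibility to physical mode on this CPU; if PAL
         * reports that the remote CPUs need the same call, make it on
         * every other CPU as well before the uncached mapping is used.
         */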
        status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
        if (!status) {
                status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
                if (status)
                        goto failed;
        }
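
        /*
         * Flush the granule out of the processor caches (on SN2 this also
         * covers remote caches) so that the uncached view never aliases
         * dirty cache lines.
         */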
        preempt_disable();

        if (ia64_platform_is("sn2"))
                sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE);
        else
                flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

        /* flush the just introduced uncached translation from the TLB */
        local_flush_tlb_all();

        preempt_enable();
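
        /*
         * Drain outstanding memory operations with PAL_MC_DRAIN, first
         * locally and then on every other CPU, before the granule is
         * handed out as uncached memory.
         */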
        ia64_pal_mc_drain();
        status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1);
        if (status)
                goto failed;

        /*
         * The chunk of memory pages has been converted to uncached so now we
         * can add it to the pool.
         */
        status = gen_pool_add(pool, uc_addr, IA64_GRANULE_SIZE, nid);
        if (status)
                goto failed;

        allocated_granules++;
        return 0;

        /* failed to convert or add the chunk so give it back to the kernel */
failed:
        for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
                ClearPageUncached(&page[i]);

        free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);

        return -1;
}


/*
 * uncached_alloc_page
 *
 * @starting_nid: node id of node to start with, or -1
 *
 * Allocate 1 uncached page. Allocates on the requested node. If no
 * uncached pages are available on the requested node, round-robin starting
 * with the next higher node.
 */
unsigned long uncached_alloc_page(int starting_nid)
{
        unsigned long uc_addr;
        struct gen_pool *pool;
        int nid;

        if (unlikely(starting_nid >= MAX_NUMNODES))
                return 0;

        if (starting_nid < 0)
                starting_nid = numa_node_id();
        nid = starting_nid;

        do {
                if (!node_online(nid))
                        continue;
                pool = uncached_pool[nid];
                if (pool == NULL)
                        continue;
                do {
                        uc_addr = gen_pool_alloc(pool, PAGE_SIZE);
                        if (uc_addr != 0)
                                return uc_addr;
                } while (uncached_add_chunk(pool, nid) == 0);

        } while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);

        return 0;
}
EXPORT_SYMBOL(uncached_alloc_page);
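
/*
 * Usage sketch (hypothetical caller): grab one uncached page on the
 * local node and release it again. A return value of 0 means no
 * uncached memory could be found or converted.
 *
 *	unsigned long uc_addr = uncached_alloc_page(-1);
 *	if (uc_addr == 0)
 *		return -ENOMEM;
 *	...
 *	uncached_free_page(uc_addr);
 */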


/*
 * uncached_free_page
 *
 * @uc_addr: uncached address of page to free
 *
 * Free a single uncached page.
 */
void uncached_free_page(unsigned long uc_addr)
{
        int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
        struct gen_pool *pool = uncached_pool[nid];

        if (unlikely(pool == NULL))
                return;
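
        /*
         * Sanity check: the address must lie in the kernel's uncached
         * identity-mapped region (__IA64_UNCACHED_OFFSET); anything else
         * is a caller bug.
         */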
        if ((uc_addr & (0XFUL << 60)) != __IA64_UNCACHED_OFFSET)
                panic("uncached_free_page invalid address %lx\n", uc_addr);

        gen_pool_free(pool, uc_addr, PAGE_SIZE);
}
EXPORT_SYMBOL(uncached_free_page);


/*
 * uncached_build_memmap
 *
 * @uc_start: uncached starting address of a chunk of uncached memory
 * @uc_end: uncached ending address of a chunk of uncached memory
 * @arg: ignored (NULL argument passed in on call to efi_memmap_walk_uc())
 *
 * Called at boot time to build a map of pages that can be used for
 * memory special operations.
 */
static int __init uncached_build_memmap(unsigned long uc_start,
					unsigned long uc_end, void *arg)
{
        int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
        struct gen_pool *pool = uncached_pool[nid];
        size_t size = uc_end - uc_start;

        touch_softlockup_watchdog();

        if (pool != NULL) {
                memset((char *)uc_start, 0, size);
                (void) gen_pool_add(pool, uc_start, size, nid);
        }
        return 0;
}
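

/*
 * Boot-time setup: create one gen_pool per online node, then walk the
 * EFI memmap so that any uncached (spill) memory it describes seeds the
 * pools via uncached_build_memmap() above.
 */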
static int __init uncached_init(void)
{
        int nid;

        for_each_online_node(nid) {
                uncached_pool[nid] = gen_pool_create(PAGE_SHIFT, nid);
        }

        efi_memmap_walk_uc(uncached_build_memmap, NULL);
        return 0;
}

__initcall(uncached_init);