/*
 * Copyright (C) 2001-2008 Silicon Graphics, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * A simple uncached page allocator using the generic allocator. This
 * allocator first utilizes the spare (spill) pages found in the EFI
 * memmap and will then start converting cached pages to uncached ones,
 * a granule at a time. Node awareness is implemented by having a
 * pool of pages per node.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/efi.h>
#include <linux/genalloc.h>
#include <asm/page.h>
#include <asm/pal.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/sn/arch.h>

extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);

struct uncached_pool {
	struct gen_pool *pool;
	struct mutex add_chunk_mutex;	/* serialize adding a converted chunk */
	int nchunks_added;		/* #of converted chunks added to pool */
	atomic_t status;		/* smp called function's return status */
};

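/*
 * At most this many granules of cached memory may be converted to
 * uncached memory per node. The conversion is one-way (converted
 * granules are never handed back to the cached pool), so this caps how
 * much cached memory each node can permanently give up.
 */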
#define MAX_CONVERTED_CHUNKS_PER_NODE	2

struct uncached_pool uncached_pools[MAX_NUMNODES];

static void uncached_ipi_visibility(void *data)
{
	int status;
	struct uncached_pool *uc_pool = (struct uncached_pool *)data;

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if ((status != PAL_VISIBILITY_OK) &&
	    (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
		atomic_inc(&uc_pool->status);
}

static void uncached_ipi_mc_drain(void *data)
{
	int status;
	struct uncached_pool *uc_pool = (struct uncached_pool *)data;

	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		atomic_inc(&uc_pool->status);
}

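/*
 * Both IPI handlers above follow the same calling pattern, used by
 * uncached_add_chunk() below: the caller zeroes uc_pool->status,
 * broadcasts the handler to all other CPUs, and afterwards treats a
 * nonzero status as "at least one CPU failed", e.g.:
 *
 *	atomic_set(&uc_pool->status, 0);
 *	status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
 *	if (status || atomic_read(&uc_pool->status))
 *		goto failed;
 */
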
/*
 * Add a new chunk of uncached memory pages to the specified pool.
 *
 * @uc_pool: pool to add new chunk of uncached memory to
 * @nid: node id of node to allocate memory from, or -1
 *
 * This is accomplished by first allocating a granule of cached memory pages
 * and then converting them to uncached memory pages.
 */
static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
{
	struct page *page;
	int status, i, nchunks_added = uc_pool->nchunks_added;
	unsigned long c_addr, uc_addr;

	if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
		return -1;	/* interrupted by a signal */

	if (uc_pool->nchunks_added > nchunks_added) {
		/* someone added a new chunk while we were waiting */
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return 0;
	}

	if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* attempt to allocate a granule's worth of cached memory pages */

	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				IA64_GRANULE_SHIFT-PAGE_SHIFT);
	if (!page) {
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return -1;
	}

	/* convert the memory pages from cached to uncached */

	c_addr = (unsigned long)page_address(page);
	uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;

	/*
	 * There's a small race here where it's possible for someone to
	 * access the page through /dev/mem halfway through the conversion
	 * to uncached - not sure it's really worth bothering about
	 */
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		SetPageUncached(&page[i]);

	flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
		atomic_set(&uc_pool->status, 0);
		status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
		if (status || atomic_read(&uc_pool->status))
			goto failed;
	} else if (status != PAL_VISIBILITY_OK)
		goto failed;

	preempt_disable();

	if (ia64_platform_is("sn2"))
		sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE);
	else
		flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);

	/* flush the just introduced uncached translation from the TLB */
	local_flush_tlb_all();

	preempt_enable();

	status = ia64_pal_mc_drain();
	if (status != PAL_STATUS_SUCCESS)
		goto failed;
	atomic_set(&uc_pool->status, 0);
	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
	if (status || atomic_read(&uc_pool->status))
		goto failed;

	/*
	 * The chunk of memory pages has been converted to uncached so now we
	 * can add it to the pool.
	 */
	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
	if (status)
		goto failed;

	uc_pool->nchunks_added++;
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return 0;

	/* failed to convert or add the chunk so give it back to the kernel */
failed:
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		ClearPageUncached(&page[i]);
	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
	mutex_unlock(&uc_pool->add_chunk_mutex);
	return -1;
}

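/*
 * To summarize the conversion above: a granule of cached pages is
 * allocated, its pages are flagged PG_uncached, the stale cached
 * translations are flushed, PAL is asked to make prefetches visible
 * (on every CPU if the local call requests it), the caches are
 * flushed, pending transactions are drained on all CPUs, and only
 * then is the granule handed to the generic pool. Any failure unwinds
 * through the 'failed' path, which gives the granule back to the
 * kernel.
 */
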
/*
 * uncached_alloc_page
 *
 * @starting_nid: node id of node to start with, or -1
 * @n_pages: number of contiguous pages to allocate
 *
 * Allocate the specified number of contiguous uncached pages on the
 * requested node. If not enough contiguous uncached pages are available
 * on the requested node, round-robin starting with the next higher node.
 */
unsigned long uncached_alloc_page(int starting_nid, int n_pages)
{
	unsigned long uc_addr;
	struct uncached_pool *uc_pool;
	int nid;

	if (unlikely(starting_nid >= MAX_NUMNODES))
		return 0;

	if (starting_nid < 0)
		starting_nid = numa_node_id();
	nid = starting_nid;

	do {
		if (!node_state(nid, N_HIGH_MEMORY))
			continue;
		uc_pool = &uncached_pools[nid];
		if (uc_pool->pool == NULL)
			continue;
		do {
			uc_addr = gen_pool_alloc(uc_pool->pool,
						 n_pages * PAGE_SIZE);
			if (uc_addr != 0)
				return uc_addr;
		} while (uncached_add_chunk(uc_pool, nid) == 0);
	} while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);

	return 0;
}
EXPORT_SYMBOL(uncached_alloc_page);

/*
 * uncached_free_page
 *
 * @uc_addr: uncached address of first page to free
 * @n_pages: number of contiguous pages to free
 *
 * Free the specified number of uncached pages.
 */
void uncached_free_page(unsigned long uc_addr, int n_pages)
{
	int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
	struct gen_pool *pool = uncached_pools[nid].pool;

	if (unlikely(pool == NULL))
		return;

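	/*
	 * The top nibble of an ia64 address selects the region; a valid
	 * uncached address must carry the region bits of
	 * __IA64_UNCACHED_OFFSET.
	 */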
	if ((uc_addr & (0xfUL << 60)) != __IA64_UNCACHED_OFFSET)
		panic("uncached_free_page invalid address %lx\n", uc_addr);

	gen_pool_free(pool, uc_addr, n_pages * PAGE_SIZE);
}
EXPORT_SYMBOL(uncached_free_page);

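/*
 * Usage sketch (illustrative only; real call sites live elsewhere in
 * the tree, e.g. the SGI mspec driver). A caller wanting a 4-page
 * contiguous uncached buffer on any node might do:
 *
 *	unsigned long uc_addr = uncached_alloc_page(-1, 4);
 *
 *	if (uc_addr == 0)
 *		return -ENOMEM;
 *	...
 *	uncached_free_page(uc_addr, 4);
 */
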
/*
 * uncached_build_memmap
 *
 * @uc_start: uncached starting address of a chunk of uncached memory
 * @uc_end: uncached ending address of a chunk of uncached memory
 * @arg: ignored, (NULL argument passed in on call to efi_memmap_walk_uc())
 *
 * Called at boot time to build a map of pages that can be used for
 * memory special operations.
 */
static int __init uncached_build_memmap(unsigned long uc_start,
					unsigned long uc_end, void *arg)
{
	int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
	struct gen_pool *pool = uncached_pools[nid].pool;
	size_t size = uc_end - uc_start;

	touch_softlockup_watchdog();

	if (pool != NULL) {
		memset((char *)uc_start, 0, size);
		(void) gen_pool_add(pool, uc_start, size, nid);
	}
	return 0;
}

static int __init uncached_init(void)
{
	int nid;

	for_each_node_state(nid, N_ONLINE) {
		uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid);
		mutex_init(&uncached_pools[nid].add_chunk_mutex);
	}

	efi_memmap_walk_uc(uncached_build_memmap, NULL);
	return 0;
}

__initcall(uncached_init);