/*
 * linux/arch/sh/mm/init.c
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2002 - 2007  Paul Mundt
 *
 * Based on linux/arch/i386/mm/init.c:
 *  Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/cache.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];
unsigned long cached_to_uncached = 0;
void show_mem(void)
{
	int total = 0, reserved = 0, free = 0;
	int shared = 0, cached = 0, slab = 0;
	pg_data_t *pgdat;

	printk("Mem-info:\n");
	show_free_areas();

	for_each_online_pgdat(pgdat) {
		unsigned long flags, i;

		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page = pgdat_page_nr(pgdat, i);

			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}

	printk("Free swap:       %6ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
	printk(KERN_INFO "Total of %ld pages in page table cache\n",
	       quicklist_total_size());
}
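
/*
 * Note: show_mem() is the arch-side dump behind paths such as the
 * SysRq-m handler and the out-of-memory killer (hedged: the exact
 * callers depend on the kernel version). It only takes the pgdat
 * resize lock, so the counts above are a best-effort snapshot rather
 * than an exact census.
 */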
#ifdef CONFIG_MMU
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return;
	}

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));

	/* Flush any stale TLB entry for this address so the new PTE is used. */
	if (cached_to_uncached)
		flush_tlb_one(get_asid(), addr);
}
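
/*
 * Illustrative sketch (not part of the original file): set_pte_phys()
 * installs a single kernel-page mapping, so wiring up one hypothetical
 * page-aligned device window would look like
 *
 *	set_pte_phys(MY_DEV_VIRT, MY_DEV_PHYS, PAGE_KERNEL_NOCACHE);
 *
 * where MY_DEV_VIRT and MY_DEV_PHYS are made-up names. Within this
 * file the only caller is __set_fixmap() below, which derives the
 * virtual address from a fixmap index instead.
 */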
/*
 * As a performance optimization, other platforms preserve the fixmap mapping
 * across a context switch. We don't presently do this, but it could be done
 * in a similar fashion to the wired TLB interface that sh64 uses (by way
 * of the memory mapped UTLB configuration) -- this unfortunately forces us to
 * give up a TLB entry for each mapping we want to preserve. While this may be
 * viable for a small number of fixmaps, it's not particularly useful for
 * everything and needs to be carefully evaluated. (ie, we may want this for
 * the vsyscall page).
 *
 * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass
 * in at __set_fixmap() time to determine the appropriate behavior to follow.
 *
 *					 -- PFM.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}
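
/*
 * For reference, the fixmap convenience macros used elsewhere (e.g.
 * set_fixmap_nocache() in paging_init() below) are assumed to expand
 * to calls into __set_fixmap(), per the usual asm/fixmap.h pattern:
 *
 *	set_fixmap(idx, phys)         -> __set_fixmap(idx, phys, PAGE_KERNEL)
 *	set_fixmap_nocache(idx, phys) -> __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
 */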
void __init page_table_range_init(unsigned long start, unsigned long end,
				  pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int pgd_idx;
	unsigned long vaddr;

	vaddr = start & PMD_MASK;
	end = (end + PMD_SIZE - 1) & PMD_MASK;
	pgd_idx = pgd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		BUG_ON(pgd_none(*pgd));
		pud = pud_offset(pgd, 0);
		BUG_ON(pud_none(*pud));
		pmd = pmd_offset(pud, 0);

		if (!pmd_present(*pmd)) {
			pte_t *pte_table;

			pte_table = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
			memset(pte_table, 0, PAGE_SIZE);
			pmd_populate_kernel(&init_mm, pmd, pte_table);
		}

		vaddr += PMD_SIZE;
	}
}
#endif	/* CONFIG_MMU */
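
/*
 * Worked example for page_table_range_init() above, under the
 * assumption of 4 KiB pages with 4-byte PTEs in a folded two-level
 * layout (so PMD_SIZE == PGDIR_SIZE == 4 MiB; halve that for 8-byte
 * X2 TLB PTEs):
 *
 *	vaddr = start & PMD_MASK;		// rounds down to 4 MiB
 *	end = (end + PMD_SIZE - 1) & PMD_MASK;	// rounds up to 4 MiB
 *
 * A fixmap range smaller than one PMD span thus takes a single loop
 * iteration and allocates exactly one pte page from bootmem.
 */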
/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	int nid;

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/* Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap(). */
	page_table_range_init(FIXADDR_START, FIXADDR_TOP, swapper_pg_dir);
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_boot_start >> PAGE_SHIFT;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);
	/* Set up the uncached fixmap */
	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));

	/*
	 * Handle trivial transitions between cached and uncached
	 * segments, making use of the 1:1 mapping relationship in
	 * 512MB lowmem.
	 */
	cached_to_uncached = P2SEG - P1SEG;
}
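
/*
 * Usage sketch for cached_to_uncached (illustrative; no such helper is
 * defined in this file): since P1 (cached) and P2 (uncached) are both
 * identity mappings of the same physical memory, a cached kernel
 * pointer can be converted to its uncached alias by plain arithmetic:
 *
 *	void *uncached = (void *)((unsigned long)cached + cached_to_uncached);
 *
 * A zero offset means no such 1:1 alias exists, and callers must map
 * the page uncached explicitly (e.g. via the uncached fixmap).
 */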
static struct kcore_list kcore_mem, kcore_vmalloc;
int after_bootmem = 0;
void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
	       num_physpages << (PAGE_SHIFT - 10),
	       codesize >> 10,
	       datasize >> 10,
	       initsize >> 10);

	p3_cache_init();

	/* Initialize the vDSO */
	vsyscall_init();
}
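
/*
 * Units note for the printk above: "<< (PAGE_SHIFT - 10)" converts a
 * page count to kilobytes, since a page is 2^PAGE_SHIFT bytes and a
 * kilobyte is 2^10 bytes; with the usual PAGE_SHIFT of 12 this is
 * simply a multiply by four.
 */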
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
		(unsigned long)&__init_begin) >> 10);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(pgdat->node_zones + ZONE_NORMAL, start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);
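
/*
 * For context (assumed from the generic hotplug code of this era, see
 * mm/memory_hotplug.c): arch_add_memory() is not called by drivers
 * directly but via add_memory(), which handles node bookkeeping before
 * handing the pfn range to the architecture:
 *
 *	add_memory(nid, start, size)
 *	    -> arch_add_memory(nid, start, size)
 *	        -> __add_pages(zone, start_pfn, nr_pages)
 */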
#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */