/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/lmb.h>
#include <asm/sections.h>
#include <asm/vdso.h>

#include "mmu_decl.h"
#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif
int init_bootmem_done;
int mem_init_done;
unsigned long memory_limit;
extern void hash_preload(struct mm_struct *mm, unsigned long ea,
			 unsigned long access, unsigned long trap);
int page_is_ram(unsigned long pfn)
{
	unsigned long paddr = (pfn << PAGE_SHIFT);

#ifndef CONFIG_PPC64	/* XXX for now */
	return paddr < __pa(high_memory);
#else
	int i;
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base = lmb.memory.region[i].base;

		if ((paddr >= base) &&
		    (paddr < (base + lmb.memory.region[i].size)))
			return 1;
	}
	return 0;
#endif
}
EXPORT_SYMBOL(page_is_ram);
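/*
 * Worked example (illustrative, assuming 4K pages, PAGE_SHIFT == 12):
 * pfn 0x10000 corresponds to paddr 0x10000000 (256MB).  On 32-bit that
 * is RAM iff it lies below __pa(high_memory); on 64-bit, where memory
 * may be discontiguous, we must scan the lmb regions instead.
 */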
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = __pgprot(pgprot_val(vma_prot)
				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
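/*
 * Illustrative sketch, not built: a hypothetical character driver's
 * mmap hook using phys_mem_access_prot() so that mappings of non-RAM
 * physical ranges come out guarded and non-cacheable.  "foo_mmap" is
 * an invented example, not part of this file.
 */
#if 0
static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size, vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       size, vma->vm_page_prot);
}
#endif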
#ifdef CONFIG_MEMORY_HOTPLUG

void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}
#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif
int __devinit arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	create_section_mapping(start, start + size);

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;

	return __add_pages(zone, start_pfn, nr_pages);
}
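/*
 * Illustrative call chain (assumed, simplified): the generic memory
 * hotplug core invokes the two arch hooks above roughly as
 *
 *   add_memory(nid, start, size)
 *     -> arch_add_memory(nid, start, size)   (map and add struct pages)
 *   online_pages(start_pfn, nr_pages)
 *     -> online_page(page)                   (once per onlined page)
 */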
/*
 * First pass at this code will check to determine if the remove
 * request is within the RMO.  Do not allow removal within the RMO.
 */
int __devinit remove_memory(u64 start, u64 size)
{
	struct zone *zone;
	unsigned long start_pfn, end_pfn, nr_pages;

	start_pfn = start >> PAGE_SHIFT;
	nr_pages = size >> PAGE_SHIFT;
	end_pfn = start_pfn + nr_pages;

	printk("%s(): Attempting to remove memory in range "
		"%lx to %lx\n", __func__, start, start+size);
	/*
	 * check for range within RMO
	 */
	zone = page_zone(pfn_to_page(start_pfn));

	printk("%s(): memory will be removed from "
		"the %s zone\n", __func__, zone->name);

	/*
	 * not handling removing memory ranges that
	 * overlap multiple zones yet
	 */
	if (end_pfn > (zone->zone_start_pfn + zone->spanned_pages))
		goto overlap;

	/* make sure it is NOT in RMO */
	if ((start < lmb.rmo_size) || ((start+size) < lmb.rmo_size)) {
		printk("%s(): range to be removed must NOT be in RMO!\n",
			__func__);
		goto in_rmo;
	}

	return __remove_pages(zone, start_pfn, nr_pages);

overlap:
	printk("%s(): memory range to be removed overlaps "
		"multiple zones!!!\n", __func__);
in_rmo:
	return -1;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	unsigned long highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_online_pgdat(pgdat) {
		unsigned long flags;
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk("%ld pages of RAM\n", total);
#ifdef CONFIG_HIGHMEM
	printk("%ld pages of HIGHMEM\n", highmem);
#endif
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}
/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	int boot_mapsize;

	max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
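	/*
	 * Worked example of the sizing above, assuming 4K pages:
	 * 512MB of memory is 131072 pages, so the bitmap needs
	 * 131072 bits = 16KB, which fits in 4 pages once rounded up
	 * by bootmem_bootmap_pages().
	 */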
	/* Add active regions with valid PFNs */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long start_pfn, end_pfn;
		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}

	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
#ifdef CONFIG_HIGHMEM
	free_bootmem_with_active_regions(0, total_lowmem >> PAGE_SHIFT);
#else
	free_bootmem_with_active_regions(0, max_pfn);
#endif

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++)
		reserve_bootmem(lmb.reserved.region[i].base,
				lmb_size_bytes(&lmb.reserved, i));

	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);

	init_bootmem_done = 1;
}
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k
			(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
	map_page(KMAP_FIX_BEGIN, 0, 0);	/* XXX gross */
	kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k
			(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
	max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
	max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
	free_area_init_nodes(max_zone_pfns);
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
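/*
 * Worked example for paging_init() (illustrative): with CONFIG_HIGHMEM,
 * 1GB of RAM and 768MB of lowmem (4K pages), the code above yields
 *   max_zone_pfns[ZONE_DMA]     = 0x30000   (lowmem, pfns below 768MB)
 *   max_zone_pfns[ZONE_HIGHMEM] = 0x40000   (pfns up to 1GB)
 * and free_area_init_nodes() carves the zones from those bounds.
 */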
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	num_physpages = lmb.memory.size >> PAGE_SHIFT;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %d\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = max_pfn;
	totalram_pages += free_all_bootmem();
#endif
	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = total_lowmem >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = pfn_to_page(pfn);

			ClearPageReserved(page);
			init_page_count(page);
			__free_page(page);
			totalhigh_pages++;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_DEBUG "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT-10));
	}
#endif /* CONFIG_HIGHMEM */
	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
		(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		bsssize >> 10,
		initsize >> 10);

	mem_init_done = 1;

	/* Initialize the vDSO */
	vdso_init();
}
/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
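/*
 * PG_arch_1 implements a lazy i-cache flush protocol:
 *   bit set   -> page is known to be i-cache clean
 *   bit clear -> page may contain stale instructions
 * flush_dcache_page() only clears the bit when the kernel dirties a
 * page; the actual flush is deferred to update_mmu_cache() below, which
 * flushes and re-sets the bit the first time the page is mapped into
 * user space.
 */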
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
	void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
	__flush_dcache_icache(start);
	kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address.  Until recently the GOT was
	 * not marked executable.
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
	/* Only flush the icache for pages that could contain code. */
#endif

	flush_dcache_page(pg);
}
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);
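/*
 * Note: this path is reached via copy_to_user_page(), e.g. when ptrace
 * pokes a breakpoint instruction into another process's text, so the
 * flush is limited to the bytes actually written.
 */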
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
	unsigned long access = 0, trap;
#endif
	unsigned long pfn = pte_pfn(pte);

	/* handle i-cache coherency */
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
	    !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);

#ifdef CONFIG_8xx
		/* On 8xx, cache control instructions (particularly
		 * "dcbst" from flush_dcache_icache) fault as write
		 * operation if there is an unpopulated TLB entry
		 * for the address in question. To workaround that,
		 * we invalidate the TLB here, thus avoiding dcbst
		 * misbehaviour.
		 */
		_tlbie(address);
#endif
		if (!PageReserved(page)
		    && !test_bit(PG_arch_1, &page->flags)) {
			if (vma->vm_mm == current->active_mm) {
				__flush_dcache_icache((void *) address);
			} else
				flush_dcache_icache_page(page);
			set_bit(PG_arch_1, &page->flags);
		}
	}

#ifdef CONFIG_PPC_STD_MMU
	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot.
	 *
	 * We also avoid filling the hash if not coming from a fault.
	 */
	if (current->thread.regs == NULL)
		return;
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)
		access |= _PAGE_EXEC;
	else if (trap != 0x300)
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}
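/*
 * The trap numbers tested above are the classic PowerPC exception
 * vectors: 0x400 is an instruction storage interrupt (ISI), so the
 * faulting access needs execute permission and we add _PAGE_EXEC;
 * 0x300 is a data storage interrupt (DSI).  Anything else means we did
 * not get here from a hash fault, so we skip the preload.
 */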