/*
 *  arch/powerpc/mm/mem.c
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/lmb.h>
#include <asm/sections.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE 0       /* XXX for now */
#define CPU_FTR_NOEXECUTE       0
#endif

int init_bootmem_done;
int mem_init_done;
unsigned long memory_limit;

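/*
 * Return non-zero if the given pfn lies in RAM.  On 32-bit this is a
 * simple comparison against high_memory; on 64-bit we walk the LMB
 * memory regions instead.
 */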
int page_is_ram(unsigned long pfn)
{
        unsigned long paddr = (pfn << PAGE_SHIFT);

#ifndef CONFIG_PPC64    /* XXX for now */
        return paddr < __pa(high_memory);
#else
        int i;
        for (i = 0; i < lmb.memory.cnt; i++) {
                unsigned long base;

                base = lmb.memory.region[i].base;

                if ((paddr >= base) &&
                        (paddr < (base + lmb.memory.region[i].size))) {
                        return 1;
                }
        }

        return 0;
#endif
}

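/*
 * Choose the page protection for a physical memory mapping: defer to
 * the platform hook if one is registered, otherwise map anything that
 * is not RAM as guarded and non-cacheable.
 */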
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (ppc_md.phys_mem_access_prot)
                return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

        if (!page_is_ram(pfn))
                vma_prot = __pgprot(pgprot_val(vma_prot)
                                    | _PAGE_GUARDED | _PAGE_NO_CACHE);
        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

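/*
 * Hand a newly onlined page to the page allocator: clear the reserved
 * bit, reset its refcount, free it and update the global accounting.
 */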
void online_page(struct page *page)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalram_pages++;
        num_physpages++;
}

#ifdef CONFIG_NUMA
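/* Map a hot-added physical address to the NUMA node that should own it. */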
int memory_add_physaddr_to_nid(u64 start)
{
        return hot_add_scn_to_nid(start);
}
#endif

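/*
 * Arch hook for memory hot-add: map the new range into the kernel's
 * linear mapping, then hand the pages to the generic hotplug code.
 */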
int __devinit arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata;
        struct zone *zone;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        pgdata = NODE_DATA(nid);

        start = (unsigned long)__va(start);
        create_section_mapping(start, start + size);

        /* this should work for most non-highmem platforms */
        zone = pgdata->node_zones;

        return __add_pages(zone, start_pfn, nr_pages);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
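/*
 * Arch hook for memory hot-remove: offline the range via the generic
 * hotplug code.  Arch-specific teardown is not wired up yet (see the
 * comment below).
 */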
int remove_memory(u64 start, u64 size)
{
        unsigned long start_pfn, end_pfn;
        int ret;

        start_pfn = start >> PAGE_SHIFT;
        end_pfn = start_pfn + (size >> PAGE_SHIFT);
        ret = offline_pages(start_pfn, end_pfn, 120 * HZ);
        if (ret)
                goto out;
        /* Arch-specific calls go here - next patch */
out:
        return ret;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

/*
 * walk_memory_resource() needs to make sure there are no holes in a
 * given memory range.  On PPC64, since this range comes from /sysfs,
 * it is guaranteed to be valid, non-overlapping and cannot contain
 * any holes.  By the time we get here (memory add or remove),
 * /proc/device-tree is updated and correct.  The only reason we would
 * need to check against the device tree is if we allowed user-land to
 * specify a memory range through a system call/ioctl etc. instead of
 * doing offline/online through /sysfs.
 */
int
walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
                        int (*func)(unsigned long, unsigned long, void *))
{
        return (*func)(start_pfn, nr_pages, arg);
}

#endif /* CONFIG_MEMORY_HOTPLUG */

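/*
 * Dump a summary of memory usage: free areas, free swap, and counts
 * of total, highmem, reserved, shared and swap-cached pages, gathered
 * by walking every online node.
 */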
void show_mem(void)
{
        unsigned long total = 0, reserved = 0;
        unsigned long shared = 0, cached = 0;
        unsigned long highmem = 0;
        struct page *page;
        pg_data_t *pgdat;
        unsigned long i;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        for_each_online_pgdat(pgdat) {
                unsigned long flags;
                pgdat_resize_lock(pgdat, &flags);
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
                        if (!pfn_valid(pgdat->node_start_pfn + i))
                                continue;
                        page = pgdat_page_nr(pgdat, i);
                        total++;
                        if (PageHighMem(page))
                                highmem++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
                pgdat_resize_unlock(pgdat, &flags);
        }
        printk("%ld pages of RAM\n", total);
#ifdef CONFIG_HIGHMEM
        printk("%ld pages of HIGHMEM\n", highmem);
#endif
        printk("%ld reserved pages\n", reserved);
        printk("%ld pages shared\n", shared);
        printk("%ld pages swap cached\n", cached);
}

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
        unsigned long i;
        unsigned long start, bootmap_pages;
        unsigned long total_pages;
        int boot_mapsize;

        max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
        total_pages = total_lowmem >> PAGE_SHIFT;
#endif

        /*
         * Find an area to use for the bootmem bitmap.  Calculate the size of
         * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
         * Add 1 additional page in case the address isn't page-aligned.
         */
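        /*
         * For example, 512MB of RAM with 4KB pages is 131072 pages,
         * needing a 131072/8 = 16KB bitmap, i.e. four 4KB pages.
         */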
        bootmap_pages = bootmem_bootmap_pages(total_pages);

        start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

        boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);

        /* Add active regions with valid PFNs */
        for (i = 0; i < lmb.memory.cnt; i++) {
                unsigned long start_pfn, end_pfn;
                start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
                end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
                add_active_range(0, start_pfn, end_pfn);
        }

        /* Add all physical memory to the bootmem map, mark each area
         * present.
         */
#ifdef CONFIG_HIGHMEM
        free_bootmem_with_active_regions(0, total_lowmem >> PAGE_SHIFT);

        /* reserve the sections we're already using */
        for (i = 0; i < lmb.reserved.cnt; i++) {
                unsigned long addr = lmb.reserved.region[i].base +
                                     lmb_size_bytes(&lmb.reserved, i) - 1;
                if (addr < total_lowmem)
                        reserve_bootmem(lmb.reserved.region[i].base,
                                        lmb_size_bytes(&lmb.reserved, i),
                                        BOOTMEM_DEFAULT);
                else if (lmb.reserved.region[i].base < total_lowmem) {
                        unsigned long adjusted_size = total_lowmem -
                                      lmb.reserved.region[i].base;
                        reserve_bootmem(lmb.reserved.region[i].base,
                                        adjusted_size, BOOTMEM_DEFAULT);
                }
        }
#else
        free_bootmem_with_active_regions(0, max_pfn);

        /* reserve the sections we're already using */
        for (i = 0; i < lmb.reserved.cnt; i++)
                reserve_bootmem(lmb.reserved.region[i].base,
                                lmb_size_bytes(&lmb.reserved, i),
                                BOOTMEM_DEFAULT);

#endif
        /* XXX need to clip this if using highmem? */
        sparse_memory_present_with_active_regions(0);

        init_bootmem_done = 1;
}

/*
 * Mark pages that don't exist (the holes between LMB memory regions)
 * as nosave, so that software suspend does not try to save them.
 */
static int __init mark_nonram_nosave(void)
{
        unsigned long lmb_next_region_start_pfn,
                      lmb_region_max_pfn;
        int i;

        for (i = 0; i < lmb.memory.cnt - 1; i++) {
                lmb_region_max_pfn =
                        (lmb.memory.region[i].base >> PAGE_SHIFT) +
                        (lmb.memory.region[i].size >> PAGE_SHIFT);
                lmb_next_region_start_pfn =
                        lmb.memory.region[i+1].base >> PAGE_SHIFT;

                if (lmb_region_max_pfn < lmb_next_region_start_pfn)
                        register_nosave_region(lmb_region_max_pfn,
                                               lmb_next_region_start_pfn);
        }

        return 0;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
        unsigned long total_ram = lmb_phys_mem_size();
        unsigned long top_of_ram = lmb_end_of_DRAM();
        unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_HIGHMEM
        map_page(PKMAP_BASE, 0, 0);     /* XXX gross */
        pkmap_page_table = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k
                        (PKMAP_BASE), PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
        map_page(KMAP_FIX_BEGIN, 0, 0); /* XXX gross */
        kmap_pte = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k
                        (KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN),
                         KMAP_FIX_BEGIN);
        kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

        printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
               top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
        max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
        max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
        free_area_init_nodes(max_zone_pfns);

        mark_nonram_nosave();
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */

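/*
 * Final memory setup: release the bootmem allocator's pages to the
 * buddy allocator, count reserved pages, free highmem if present, and
 * print the memory summary.
 */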
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
        int nid;
#endif
        pg_data_t *pgdat;
        unsigned long i;
        struct page *page;
        unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

        num_physpages = lmb.memory.size >> PAGE_SHIFT;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
        for_each_online_node(nid) {
                if (NODE_DATA(nid)->node_spanned_pages != 0) {
                        printk("freeing bootmem node %d\n", nid);
                        totalram_pages +=
                                free_all_bootmem_node(NODE_DATA(nid));
                }
        }
#else
        max_mapnr = max_pfn;
        totalram_pages += free_all_bootmem();
#endif
        for_each_online_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
                        if (!pfn_valid(pgdat->node_start_pfn + i))
                                continue;
                        page = pgdat_page_nr(pgdat, i);
                        if (PageReserved(page))
                                reservedpages++;
                }
        }

        codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
        datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
        initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
        bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
        {
                unsigned long pfn, highmem_mapnr;

                highmem_mapnr = total_lowmem >> PAGE_SHIFT;
                for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
                        struct page *page = pfn_to_page(pfn);
                        if (lmb_is_reserved(pfn << PAGE_SHIFT))
                                continue;
                        ClearPageReserved(page);
                        init_page_count(page);
                        __free_page(page);
                        totalhigh_pages++;
                        reservedpages--;
                }
                totalram_pages += totalhigh_pages;
                printk(KERN_DEBUG "High memory: %luk\n",
                       totalhigh_pages << (PAGE_SHIFT-10));
        }
#endif /* CONFIG_HIGHMEM */

        printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
               "%luk reserved, %luk data, %luk bss, %luk init)\n",
                (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                bsssize >> 10,
                initsize >> 10);

        mem_init_done = 1;
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                return;
        /* avoid an atomic op if possible */
        if (test_bit(PG_arch_1, &page->flags))
                clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

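/*
 * Make the given page coherent between the d-cache and i-cache.  How
 * we get at the page differs by platform: Book-E must kmap it (it may
 * be highmem), 8xx and 64-bit can use its linear-mapping address, and
 * classic 32-bit CPUs flush by physical address.
 */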
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
        void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
        __flush_dcache_icache(start);
        kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
        /* No need to kmap: highmem is not supported on 8xx or ppc64 */
        __flush_dcache_icache(page_address(page));
#else
        __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
        clear_page(page);

        /*
         * We shouldn't have to do this, but some versions of glibc
         * require it (ld.so assumes zero-filled pages are icache clean)
         * - Anton
         */
        flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *pg)
{
        copy_page(vto, vfrom);

        /*
         * We should be able to use the following optimisation, however
         * there are two problems.
         * Firstly a bug in some versions of binutils meant PLT sections
         * were not marked executable.
         * Secondly the first word in the GOT section is blrl, used
         * to establish the GOT address. Until recently the GOT was
         * not marked executable.
         * - Anton
         */
#if 0
        if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
                return;
#endif

        flush_dcache_page(pg);
}

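/*
 * Flush the i-cache for a user page range that the kernel has just
 * written to (e.g. when inserting a breakpoint into user text).
 */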
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                             unsigned long addr, int len)
{
        unsigned long maddr;

        maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
        flush_icache_range(maddr, maddr + len);
        kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
        unsigned long access = 0, trap;
#endif
        unsigned long pfn = pte_pfn(pte);

        /* handle i-cache coherency */
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
            !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
            pfn_valid(pfn)) {
                struct page *page = pfn_to_page(pfn);
#ifdef CONFIG_8xx
                /* On 8xx, cache control instructions (particularly
                 * "dcbst" from flush_dcache_icache) fault as a write
                 * operation if there is an unpopulated TLB entry
                 * for the address in question. To work around that,
                 * we invalidate the TLB here, thus avoiding dcbst
                 * misbehaviour.
                 */
                _tlbie(address, 0 /* 8xx doesn't care about PID */);
#endif
                /* The _PAGE_USER test should really be _PAGE_EXEC, but
                 * older glibc versions execute some code from no-exec
                 * pages, which for now we are supporting.  If exec-only
                 * pages are ever implemented, this will have to change.
                 */
                if (!PageReserved(page) && (pte_val(pte) & _PAGE_USER)
                    && !test_bit(PG_arch_1, &page->flags)) {
                        if (vma->vm_mm == current->active_mm) {
                                __flush_dcache_icache((void *) address);
                        } else
                                flush_dcache_icache_page(page);
                        set_bit(PG_arch_1, &page->flags);
                }
        }

#ifdef CONFIG_PPC_STD_MMU
        /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
        if (!pte_young(pte) || address >= TASK_SIZE)
                return;

        /* We try to figure out if we are coming from an instruction
         * access fault and pass that down to __hash_page so we avoid
         * double-faulting on execution of fresh text. We have to test
         * for regs NULL since init will get here first thing at boot.
         *
         * We also avoid filling the hash if not coming from a fault.
         */
        if (current->thread.regs == NULL)
                return;
        trap = TRAP(current->thread.regs);
        if (trap == 0x400)
                access |= _PAGE_EXEC;
        else if (trap != 0x300)
                return;
        hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}