[MIPS] Cache: Provide more information on cache policy on bootup.
[linux-2.6] arch/mips/mm/init.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Atomicity and interruptibility */
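/*
 * On SMTC several virtual processing elements (VPEs) share one core, so
 * masking interrupts on the local TC alone cannot keep the TLB
 * manipulations below atomic; dvpe()/evpe() additionally halt and
 * restart the other VPEs around the critical section.
 */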
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
        { \
        unsigned int mvpflags; \
        local_irq_save(flags);\
        mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
        evpe(mvpflags); \
        local_irq_restore(flags); \
        }
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since the page is never written to after initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;

/*
 * Not static inline because used by IP27 special magic initialization code
 */
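/*
 * Allocate the zero page area: eight pages on CPUs that can take virtual
 * coherency exceptions (R4000SC/MC, R4400SC/MC), so that a zero page of
 * every cache colour is available; a single page everywhere else.
 * zero_page_mask selects the colour bits.  Returns the number of pages
 * consumed.
 */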
unsigned long setup_zero_pages(void)
{
        unsigned int order;
        unsigned long size;
        struct page *page;

        if (cpu_has_vce)
                order = 3;
        else
                order = 0;

        empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!empty_zero_page)
                panic("Oh boy, that early out of memory?");

        page = virt_to_page((void *)empty_zero_page);
        split_page(page, order);
        while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
                SetPageReserved(page);
                page++;
        }

        size = PAGE_SIZE << order;
        zero_page_mask = (size - 1) & PAGE_MASK;

        return 1UL << order;
}

/*
 * These are almost like kmap_atomic / kunmap_atomic except they take an
 * additional address argument as the hint.
 */

#define kmap_get_fixmap_pte(vaddr)                                      \
        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))

#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
{
        unsigned long vaddr;

        /* cache the first coherent kmap pte */
        vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
        kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif

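/*
 * Map a page at a fixmap address whose cache colour matches the user
 * virtual address it will be accessed through, so a virtually indexed
 * D-cache sees only a single alias.  The translation is written straight
 * into the TLB: as a wired entry normally, or preloaded into a per-CPU
 * slot under SMTC.  Undone by kunmap_coherent().
 */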
void *kmap_coherent(struct page *page, unsigned long addr)
{
        enum fixed_addresses idx;
        unsigned long vaddr, flags, entrylo;
        unsigned long old_ctx;
        pte_t pte;
        int tlbidx;

        BUG_ON(Page_dcache_dirty(page));

        inc_preempt_count();
        idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
        idx += FIX_N_COLOURS * smp_processor_id();
#endif
        vaddr = __fix_to_virt(FIX_CMAP_END - idx);
        pte = mk_pte(page, PAGE_KERNEL);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
        entrylo = pte.pte_high;
#else
        entrylo = pte_val(pte) >> 6;
#endif

        ENTER_CRITICAL(flags);
        old_ctx = read_c0_entryhi();
        write_c0_entryhi(vaddr & (PAGE_MASK << 1));
        write_c0_entrylo0(entrylo);
        write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
        set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
        /* preload TLB instead of local_flush_tlb_one() */
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        tlbidx = read_c0_index();
        mtc0_tlbw_hazard();
        if (tlbidx < 0)
                tlb_write_random();
        else
                tlb_write_indexed();
#else
        tlbidx = read_c0_wired();
        write_c0_wired(tlbidx + 1);
        write_c0_index(tlbidx);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
#endif
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        EXIT_CRITICAL(flags);

        return (void*) vaddr;
}

#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))

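/*
 * Undo kmap_coherent(): on non-SMTC, retire the wired TLB entry by
 * rewriting it with a unique, non-matching EntryHi and zeroed EntryLo
 * values, then drop the wired count.  Under SMTC the preloaded entry is
 * simply left to be evicted naturally.
 */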
void kunmap_coherent(void)
{
#ifndef CONFIG_MIPS_MT_SMTC
        unsigned int wired;
        unsigned long flags, old_ctx;

        ENTER_CRITICAL(flags);
        old_ctx = read_c0_entryhi();
        wired = read_c0_wired() - 1;
        write_c0_wired(wired);
        write_c0_index(wired);
        write_c0_entryhi(UNIQUE_ENTRYHI(wired));
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        EXIT_CRITICAL(flags);
#endif
        dec_preempt_count();
        preempt_check_resched();
}

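/*
 * Copy a user highmem page.  When the source may alias in a virtually
 * indexed D-cache, read it through a coherent mapping of the matching
 * colour; afterwards write the destination back whenever it could alias
 * with the user mapping, or when the page is executable on a CPU whose
 * I-cache is not filled from the D-cache.
 */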
void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to, KM_USER1);
        if (cpu_has_dc_aliases && page_mapped(from)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent();
        } else {
                vfrom = kmap_atomic(from, KM_USER0);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom, KM_USER0);
        }
        if (((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc) ||
            pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
                flush_data_cache_page((unsigned long)vto);
        kunmap_atomic(vto, KM_USER1);
        /* Make sure this page is cleared on other CPUs too before using it */
        smp_wmb();
}

EXPORT_SYMBOL(copy_user_highpage);

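/*
 * copy_to_user_page() and copy_from_user_page() back writes into another
 * process' address space (ptrace and friends).  With D-cache aliasing,
 * a mapped page is accessed through a coherent kmap of the matching
 * colour; otherwise the plain kernel address is used and the page is
 * marked dcache-dirty so the stale alias is written back later.
 */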
void copy_to_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
{
        if (cpu_has_dc_aliases && page_mapped(page)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent();
        } else {
                memcpy(dst, src, len);
                if (cpu_has_dc_aliases)
                        SetPageDcacheDirty(page);
        }
        if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}

EXPORT_SYMBOL(copy_to_user_page);

void copy_from_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
{
        if (cpu_has_dc_aliases && page_mapped(page)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent();
        } else {
                memcpy(dst, src, len);
                if (cpu_has_dc_aliases)
                        SetPageDcacheDirty(page);
        }
}

EXPORT_SYMBOL(copy_from_user_page);


#ifdef CONFIG_HIGHMEM
unsigned long highstart_pfn, highend_pfn;

pte_t *kmap_pte;
pgprot_t kmap_prot;

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}
#endif /* CONFIG_HIGHMEM */

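/*
 * Pre-allocate the page tables covering a fixmap range so that fixmap
 * users (kmap_atomic, kmap_coherent) never have to allocate page table
 * pages at run time.  Only compiled in when the fixmap is actually used,
 * i.e. with highmem or SMTC.
 */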
void __init fixrange_init(unsigned long start, unsigned long end,
        pgd_t *pgd_base)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i, j, k;
        unsigned long vaddr;

        vaddr = start;
        i = __pgd_offset(vaddr);
        j = __pud_offset(vaddr);
        k = __pmd_offset(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
                pud = (pud_t *)pgd;
                for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
                        pmd = (pmd_t *)pud;
                        for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
                                if (pmd_none(*pmd)) {
                                        pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                                        set_pmd(pmd, __pmd((unsigned long)pte));
                                        if (pte != pte_offset_kernel(pmd, 0))
                                                BUG();
                                }
                                vaddr += PMD_SIZE;
                        }
                        k = 0;
                }
                j = 0;
        }
#endif
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
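/* Does this PFN fall inside a BOOT_MEM_RAM region of the firmware memory map? */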
static int __init page_is_ram(unsigned long pagenr)
{
        int i;

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long addr, end;

                if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
                        /* not usable memory */
                        continue;

                addr = PFN_UP(boot_mem_map.map[i].addr);
                end = PFN_DOWN(boot_mem_map.map[i].addr +
                               boot_mem_map.map[i].size);

                if (pagenr >= addr && pagenr < end)
                        return 1;
        }

        return 0;
}

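/*
 * Size the memory zones from the PFN limits discovered by the platform
 * code and hand them to the core VM; with a non-flat memory model, holes
 * in the physical map are accounted separately via zholes_size.
 */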
void __init paging_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES] = { 0, };
#ifndef CONFIG_FLATMEM
        unsigned long zholes_size[MAX_NR_ZONES] = { 0, };
        unsigned long i, j, pfn;
#endif

        pagetable_init();

#ifdef CONFIG_HIGHMEM
        kmap_init();
#endif
        kmap_coherent_init();

#ifdef CONFIG_ZONE_DMA
        if (min_low_pfn < MAX_DMA_PFN && MAX_DMA_PFN <= max_low_pfn) {
                zones_size[ZONE_DMA] = MAX_DMA_PFN - min_low_pfn;
                zones_size[ZONE_NORMAL] = max_low_pfn - MAX_DMA_PFN;
        } else if (max_low_pfn < MAX_DMA_PFN)
                zones_size[ZONE_DMA] = max_low_pfn - min_low_pfn;
        else
#endif
        zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;

#ifdef CONFIG_HIGHMEM
        zones_size[ZONE_HIGHMEM] = highend_pfn - highstart_pfn;

        if (cpu_has_dc_aliases && zones_size[ZONE_HIGHMEM]) {
                printk(KERN_WARNING "This processor doesn't support highmem."
                       " %ldk highmem ignored\n", zones_size[ZONE_HIGHMEM]);
                zones_size[ZONE_HIGHMEM] = 0;
        }
#endif

#ifdef CONFIG_FLATMEM
        free_area_init(zones_size);
#else
        pfn = min_low_pfn;
        for (i = 0; i < MAX_NR_ZONES; i++)
                for (j = 0; j < zones_size[i]; j++, pfn++)
                        if (!page_is_ram(pfn))
                                zholes_size[i]++;
        free_area_init_node(0, NODE_DATA(0), zones_size, 0, zholes_size);
#endif
}

static struct kcore_list kcore_mem, kcore_vmalloc;
#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

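/*
 * Late memory setup: release the bootmem pages to the page allocator,
 * free highmem pages by hand, register /proc/kcore segments and print
 * the traditional memory banner.
 */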
void __init mem_init(void)
{
        unsigned long codesize, reservedpages, datasize, initsize;
        unsigned long tmp, ram;

#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
        max_mapnr = highend_pfn;
#else
        max_mapnr = max_low_pfn;
#endif
        high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

        totalram_pages += free_all_bootmem();
        totalram_pages -= setup_zero_pages();   /* Set up zeroed pages.  */

        reservedpages = ram = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                if (page_is_ram(tmp)) {
                        ram++;
                        if (PageReserved(pfn_to_page(tmp)))
                                reservedpages++;
                }
        num_physpages = ram;

#ifdef CONFIG_HIGHMEM
        for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
                struct page *page = mem_map + tmp;

                if (!page_is_ram(tmp)) {
                        SetPageReserved(page);
                        continue;
                }
                ClearPageReserved(page);
                init_page_count(page);
                __free_page(page);
                totalhigh_pages++;
        }
        totalram_pages += totalhigh_pages;
        num_physpages += totalhigh_pages;
#endif

        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
        initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

#ifdef CONFIG_64BIT
        if ((unsigned long) &_text > (unsigned long) CKSEG0)
                /* The -4 is a hack so that user tools don't have to handle
                   the overflow.  */
                kclist_add(&kcore_kseg0, (void *) CKSEG0, 0x80000000 - 4);
#endif
        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
               "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
               ram << (PAGE_SHIFT-10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT-10),
               datasize >> 10,
               initsize >> 10,
               (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

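/*
 * Hand a physical range back to the page allocator, poisoning it first
 * so that use-after-free of init memory is caught.
 */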
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
        unsigned long pfn;

        for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
                struct page *page = pfn_to_page(pfn);
                void *addr = phys_to_virt(PFN_PHYS(pfn));

                ClearPageReserved(page);
                init_page_count(page);
                memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
                __free_page(page);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory",
                        virt_to_phys((void *)start),
                        virt_to_phys((void *)end));
}
#endif

void __init_refok free_initmem(void)
{
        prom_free_prom_memory();
        free_init_pages("unused kernel memory",
                        __pa_symbol(&__init_begin),
                        __pa_symbol(&__init_end));
}

unsigned long pgd_current[NR_CPUS];
/*
 * On 64-bit we've got three-level pagetables with a slightly
 * different layout ...
 */
#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants.  So we use the variants from asm-offsets.h until those
 * compilers are officially retired.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
#ifdef CONFIG_64BIT
#ifdef MODULE_START
pgd_t module_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
#endif
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);