/* linux-2.6: arch/mips/mm/init.c */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags); \
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */
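
/*
 * Illustrative sketch (not part of the original file): ENTER_CRITICAL()
 * and EXIT_CRITICAL() must be used as a lexically matched pair in the
 * same scope, because the SMTC variant opens a block (and declares
 * mvpflags) in ENTER_CRITICAL() and closes it in EXIT_CRITICAL().  The
 * example function below is hypothetical and never compiled.
 */
#if 0	/* usage sketch only */
static void example_tlb_update(void)
{
	unsigned long flags;

	ENTER_CRITICAL(flags);
	/*
	 * CP0 EntryHi/EntryLo/Index and the TLB may be rewritten safely
	 * here: interrupts are off and, on SMTC, the other VPEs are
	 * quiesced by dvpe().
	 */
	EXIT_CRITICAL(flags);
}
#endif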

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since the page is never written to after initialization, we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
unsigned long setup_zero_pages(void)
{
	unsigned int order;
	unsigned long size;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
		SetPageReserved(page);
		page++;
	}

	size = PAGE_SIZE << order;
	zero_page_mask = (size - 1) & PAGE_MASK;

	return 1UL << order;
}
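
/*
 * Sketch of how zero_page_mask is consumed (illustrative; the actual
 * ZERO_PAGE() macro lives in <asm/pgtable.h>): the zero page whose
 * virtual colour matches the faulting address is chosen by roughly
 *
 *	virt_to_page((void *)(empty_zero_page +
 *			      ((unsigned long)(vaddr) & zero_page_mask)))
 *
 * With order 3 there are eight pages and zero_page_mask covers the
 * colour bits; with order 0 the mask is zero and the single page is
 * always used.
 */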

#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
{
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif

void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	inc_preempt_count();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
	idx += FIX_N_COLOURS * smp_processor_id();
#endif
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);
	/* Convert the software PTE into hardware EntryLo format. */
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_val(pte) >> 6;
#endif

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
	/* preload TLB instead of local_flush_tlb_one() */
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	tlbidx = read_c0_index();
	mtc0_tlbw_hazard();
	if (tlbidx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
#else
	tlbidx = read_c0_wired();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
#endif
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);

	return (void *)vaddr;
}

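/*
 * Build an EntryHi value in unmapped CKSEG0 space that is unique per
 * TLB index, so a retired entry can never match a real translation.
 */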
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))

void kunmap_coherent(void)
{
#ifndef CONFIG_MIPS_MT_SMTC
	unsigned int wired;
	unsigned long flags, old_ctx;

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	wired = read_c0_wired() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);
#endif
	dec_preempt_count();
	preempt_check_resched();
}
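
/*
 * kmap_coherent() and kunmap_coherent() must be strictly paired: the
 * map side bumps the preempt count (and, outside SMTC, consumes a wired
 * TLB entry) that only the unmap side releases.  A typical caller, in
 * the style of the copy_*_user_page() helpers below:
 *
 *	void *kaddr = kmap_coherent(page, uvaddr);
 *	memcpy(buf, kaddr + (uvaddr & ~PAGE_MASK), len);
 *	kunmap_coherent();
 */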

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);
	if (cpu_has_dc_aliases &&
	    page_mapped(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is visible to other CPUs before it is used */
	smp_wmb();
}

void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}

void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for ( ; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}
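
/*
 * Example call site (a sketch; the real callers live in pagetable_init()):
 * covering the permanent-kmap window with bootmem page tables would look
 * roughly like
 *
 *	fixrange_init(PKMAP_BASE, PKMAP_BASE + PAGE_SIZE * LAST_PKMAP,
 *		      swapper_pg_dir);
 *
 * Note that end must lie a whole number of PMD_SIZE steps beyond start,
 * or the vaddr != end loop conditions above never become true.
 */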

#ifndef CONFIG_NEED_MULTIPLE_NODES
static int __init page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long addr, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			/* not usable memory */
			continue;

		addr = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
			       boot_mem_map.map[i].size);

		if (pagenr >= addr && pagenr < end)
			return 1;
	}

	return 0;
}
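
/*
 * Note the rounding above: PFN_UP() on the region start and PFN_DOWN()
 * on the region end mean a page frame counts as RAM only when the
 * boot_mem_map entry covers it completely; partial pages at either edge
 * are excluded.
 */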

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long lastpfn;

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
	kmap_coherent_init();

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
	lastpfn = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
		lastpfn = max_low_pfn;
	}
#endif

	free_area_init_nodes(max_zone_pfns);
}

static struct kcore_list kcore_mem, kcore_vmalloc;
#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;
	unsigned long tmp, ram;

#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
	max_mapnr = highend_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	totalram_pages += free_all_bootmem();
	totalram_pages -= setup_zero_pages();	/* Setup zeroed pages.  */

	reservedpages = ram = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		if (page_is_ram(tmp)) {
			ram++;
			if (PageReserved(pfn_to_page(tmp)))
				reservedpages++;
		}
	num_physpages = ram;

#ifdef CONFIG_HIGHMEM
	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!page_is_ram(tmp)) {
			SetPageReserved(page);
			continue;
		}
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		totalhigh_pages++;
	}
	totalram_pages += totalhigh_pages;
	num_physpages += totalhigh_pages;
#endif

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow.  */
		kclist_add(&kcore_kseg0, (void *) CKSEG0, 0x80000000 - 4);
#endif
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
	       "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
	       ram << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10,
	       (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		ClearPageReserved(page);
		init_page_count(page);
		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		__free_page(page);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
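
/*
 * Note that free_init_pages() takes *physical* addresses; both callers
 * below convert first, with virt_to_phys() and __pa_symbol() respectively.
 */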

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory",
			virt_to_phys((void *)start),
			virt_to_phys((void *)end));
}
#endif

void __init_refok free_initmem(void)
{
	prom_free_prom_memory();
	free_init_pages("unused kernel memory",
			__pa_symbol(&__init_begin),
			__pa_symbol(&__init_end));
}

unsigned long pgd_current[NR_CPUS];
/*
 * On 64-bit we've got three-level pagetables with a slightly
 * different layout ...
 */
#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants.  So we use the variants from asm-offsets.h until those
 * compilers are officially retired.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
#ifdef CONFIG_64BIT
#ifdef MODULE_START
pgd_t module_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
#endif
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);