linux-2.6: arch/cris/arch-v32/mm/init.c
/*
 * Set up paging and the MMU.
 *
 * Copyright (C) 2000-2003, Axis Communications AB.
 *
 * Authors:   Bjorn Wesen <bjornw@axis.com>
 *            Tobias Anderberg <tobiasa@axis.com>, CRISv32 port.
 */
#include <linux/config.h>
#include <linux/mmzone.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/arch/hwregs/asm/mmu_defs_asm.h>
#include <asm/arch/hwregs/supp_reg.h>

extern void tlb_init(void);

/*
 * The kernel is already mapped with a linear mapping at kseg_c, so there's
 * no need to map it with a page table. However, head.S also temporarily
 * mapped it at kseg_4, so the kernel segments are set up again here. The
 * TLB is also cleared and the rest of the paging state initialised.
 */
void __init
cris_mmu_init(void)
{
        unsigned long mmu_config;
        unsigned long mmu_kbase_hi;
        unsigned long mmu_kbase_lo;
        unsigned short mmu_page_id;

        /*
         * Make sure the current pgd table points to something sane, even if
         * it is most probably not used until the next switch_mm.
         */
        per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;

#ifdef CONFIG_SMP
        {
                pgd_t **pgd;
                pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id());
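                /*
                 * Give the MMU the address of this CPU's current_pgd
                 * pointer by writing it to the TLB PGD register in both
                 * support register banks, so low-level TLB handling can
                 * locate the right page directory.
                 */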
                SUPP_BANK_SEL(1);
                SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
                SUPP_BANK_SEL(2);
                SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
        }
#endif

        /* Initialise the TLB. Function found in tlb.c. */
        tlb_init();

        /* Enable exceptions and initialize the kernel segments. */
        mmu_config = ( REG_STATE(mmu, rw_mm_cfg, we, on)        |
                       REG_STATE(mmu, rw_mm_cfg, acc, on)       |
                       REG_STATE(mmu, rw_mm_cfg, ex, on)        |
                       REG_STATE(mmu, rw_mm_cfg, inv, on)       |
                       REG_STATE(mmu, rw_mm_cfg, seg_f, linear) |
                       REG_STATE(mmu, rw_mm_cfg, seg_e, linear) |
                       REG_STATE(mmu, rw_mm_cfg, seg_d, page)   |
                       REG_STATE(mmu, rw_mm_cfg, seg_c, linear) |
                       REG_STATE(mmu, rw_mm_cfg, seg_b, linear) |
#ifndef CONFIG_ETRAXFS_SIM
                       REG_STATE(mmu, rw_mm_cfg, seg_a, page)   |
#else
                       REG_STATE(mmu, rw_mm_cfg, seg_a, linear) |
#endif
                       REG_STATE(mmu, rw_mm_cfg, seg_9, page)   |
                       REG_STATE(mmu, rw_mm_cfg, seg_8, page)   |
                       REG_STATE(mmu, rw_mm_cfg, seg_7, page)   |
                       REG_STATE(mmu, rw_mm_cfg, seg_6, page)   |
                       REG_STATE(mmu, rw_mm_cfg, seg_5, page)   |
                       REG_STATE(mmu, rw_mm_cfg, seg_4, page)   |
                       REG_STATE(mmu, rw_mm_cfg, seg_3, page)   |
                       REG_STATE(mmu, rw_mm_cfg, seg_2, page)   |
                       REG_STATE(mmu, rw_mm_cfg, seg_1, page)   |
                       REG_STATE(mmu, rw_mm_cfg, seg_0, page));

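        /*
         * The kbase_hi/lo registers hold one 4-bit field per kernel segment,
         * giving the most significant nibble of the physical address that a
         * linearly mapped segment is translated to. Segments handled through
         * page tables are left at base 0.
         */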
        mmu_kbase_hi = ( REG_FIELD(mmu, rw_mm_kbase_hi, base_f, 0x0) |
                         REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 0x8) |
                         REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 0x0) |
#ifndef CONFIG_ETRAXFS_SIM
                         REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0x4) |
#else
                         REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0x0) |
#endif
                         REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb) |
#ifndef CONFIG_ETRAXFS_SIM
                         REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0x0) |
#else
                         REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0xa) |
#endif
                         REG_FIELD(mmu, rw_mm_kbase_hi, base_9, 0x0) |
                         REG_FIELD(mmu, rw_mm_kbase_hi, base_8, 0x0));

        mmu_kbase_lo = ( REG_FIELD(mmu, rw_mm_kbase_lo, base_7, 0x0) |
                         REG_FIELD(mmu, rw_mm_kbase_lo, base_6, 0x0) |
                         REG_FIELD(mmu, rw_mm_kbase_lo, base_5, 0x0) |
                         REG_FIELD(mmu, rw_mm_kbase_lo, base_4, 0x0) |
                         REG_FIELD(mmu, rw_mm_kbase_lo, base_3, 0x0) |
                         REG_FIELD(mmu, rw_mm_kbase_lo, base_2, 0x0) |
                         REG_FIELD(mmu, rw_mm_kbase_lo, base_1, 0x0) |
                         REG_FIELD(mmu, rw_mm_kbase_lo, base_0, 0x0));

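        /* The kernel's own mappings use page ID 0. */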
        mmu_page_id = REG_FIELD(mmu, rw_mm_tlb_hi, pid, 0);

        /* Update the instruction MMU. */
        SUPP_BANK_SEL(BANK_IM);
        SUPP_REG_WR(RW_MM_CFG, mmu_config);
        SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
        SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
        SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);

        /* Update the data MMU. */
        SUPP_BANK_SEL(BANK_DM);
        SUPP_REG_WR(RW_MM_CFG, mmu_config);
        SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
        SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
        SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);

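        /* Start out with page ID 0 in the PID special register as well. */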
        SPEC_REG_WR(SPEC_REG_PID, 0);

        /*
         * The MMU has been enabled ever since head.S, but just to make it
         * totally obvious, enable it here as well.
         */
        SUPP_BANK_SEL(BANK_GC);
        SUPP_REG_WR(RW_GC_CFG, 0xf); /* IMMU, DMMU, ICache, DCache on */
}

void __init
paging_init(void)
{
        int i;
        unsigned long zones_size[MAX_NR_ZONES];

        printk("Setting up paging and the MMU.\n");

        /* Clear out the init_mm.pgd that will contain the kernel's mappings. */
        for (i = 0; i < PTRS_PER_PGD; i++)
                swapper_pg_dir[i] = __pgd(0);

        cris_mmu_init();

        /* Allocate and clear the shared zero page (empty_zero_page). */
        empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
        memset((void *) empty_zero_page, 0, PAGE_SIZE);

        /* All pages are DMA'able in Etrax, so put them all in the DMA'able zone. */
        zones_size[0] = ((unsigned long) high_memory - PAGE_OFFSET) >> PAGE_SHIFT;

        for (i = 1; i < MAX_NR_ZONES; i++)
                zones_size[i] = 0;

        /*
         * Use free_area_init_node instead of free_area_init, because it is
         * designed for systems where the DRAM starts at an address
         * substantially higher than 0, like ours (we start at PAGE_OFFSET).
         * This saves space in the mem_map page array.
         */
        free_area_init_node(0, &contig_page_data, zones_size,
                            PAGE_OFFSET >> PAGE_SHIFT, 0);

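        /* contig_page_data is the single flat-memory node; export its page array. */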
        mem_map = contig_page_data.node_mem_map;
}