Commit | Line | Data |
---|---|---|
1da177e4 LT | 1 | /* |
2 | * linux/arch/arm/mm/proc-arm920.S: MMU functions for ARM920 | |
3 | * | |
4 | * Copyright (C) 1999,2000 ARM Limited | |
5 | * Copyright (C) 2000 Deep Blue Solutions Ltd. | |
d090ddda | 6 | * hacked for non-paged-MM by Hyok S. Choi, 2003. |
1da177e4 LT | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License as published by | |
10 | * the Free Software Foundation; either version 2 of the License, or | |
11 | * (at your option) any later version. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, | |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | * GNU General Public License for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU General Public License | |
19 | * along with this program; if not, write to the Free Software | |
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
21 | * | |
22 | * | |
23 | * This is the low-level assembly code for performing the cache and TLB | |
24 | * functions on the ARM920. | |
25 | * | |
26 | * CONFIG_CPU_ARM920_CPU_IDLE -> nohlt | |
27 | */ | |
28 | #include <linux/linkage.h> | |
1da177e4 LT | 29 | #include <linux/init.h> |
30 | #include <asm/assembler.h> | |
5ec9407d | 31 | #include <asm/hwcap.h> |
74945c86 | 32 | #include <asm/pgtable-hwdef.h> |
1da177e4 | 33 | #include <asm/pgtable.h> |
1da177e4 LT | 34 | #include <asm/page.h> |
35 | #include <asm/ptrace.h> | |
36 | #include "proc-macros.S" | |
37 | ||
38 | /* | |
39 | * The size of one data cache line. | |
40 | */ | |
41 | #define CACHE_DLINESIZE 32 | |
42 | ||
43 | /* | |
44 | * The number of data cache segments. | |
45 | */ | |
46 | #define CACHE_DSEGMENTS 8 | |
47 | ||
48 | /* | |
49 | * The number of lines in a cache segment. | |
50 | */ | |
51 | #define CACHE_DENTRIES 64 | |
52 | ||
53 | /* | |
54 | * This is the size at which it becomes more efficient to | |
55 | * clean the whole cache, rather than using the individual | |
56 | * cache line maintenance instructions. | |
57 | */ | |
58 | #define CACHE_DLIMIT 65536 | |
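
Taken together, these constants describe a 16 KB data cache: 8 segments of 64 lines of 32 bytes each. A minimal sketch of the arithmetic, including how `CACHE_DLIMIT` relates to the cache size (the ratio remark is an interpretation, not taken from the source):

```c
#include <stdio.h>

/* Cache geometry as defined above for the ARM920T data cache. */
#define CACHE_DLINESIZE  32     /* bytes per cache line */
#define CACHE_DSEGMENTS  8      /* number of segments   */
#define CACHE_DENTRIES   64     /* lines per segment    */
#define CACHE_DLIMIT     65536  /* range size above which a whole-cache
                                   clean is used instead of per-line ops */

int main(void)
{
    unsigned total = CACHE_DSEGMENTS * CACHE_DENTRIES * CACHE_DLINESIZE;

    /* 8 * 64 * 32 = 16384 bytes, i.e. a 16 KB data cache. */
    printf("D-cache size: %u bytes\n", total);

    /* CACHE_DLIMIT (64 KB) is four times the cache size: a range that
       large would need far more per-line operations than the cache
       has lines, so the whole-cache path wins. */
    printf("limit/size ratio: %u\n", CACHE_DLIMIT / total);
    return 0;
}
```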
59 | ||
60 | ||
61 | .text | |
62 | /* | |
63 | * cpu_arm920_proc_init() | |
64 | */ | |
65 | ENTRY(cpu_arm920_proc_init) | |
66 | mov pc, lr | |
67 | ||
68 | /* | |
69 | * cpu_arm920_proc_fin() | |
70 | */ | |
71 | ENTRY(cpu_arm920_proc_fin) | |
72 | stmfd sp!, {lr} | |
73 | mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE | |
74 | msr cpsr_c, ip | |
75 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | |
76 | bl arm920_flush_kern_cache_all | |
77 | #else | |
78 | bl v4wt_flush_kern_cache_all | |
79 | #endif | |
80 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register | |
81 | bic r0, r0, #0x1000 @ ...i............ | |
82 | bic r0, r0, #0x000e @ ............wca. | |
83 | mcr p15, 0, r0, c1, c0, 0 @ disable caches | |
84 | ldmfd sp!, {pc} | |
85 | ||
86 | /* | |
87 | * cpu_arm920_reset(loc) | |
88 | * | |
89 | * Perform a soft reset of the system. Put the CPU into the | |
90 | * same state as it would be if it had been reset, and branch | |
91 | * to what would be the reset vector. | |
92 | * | |
93 | * loc: location to jump to for soft reset | |
94 | */ | |
95 | .align 5 | |
96 | ENTRY(cpu_arm920_reset) | |
97 | mov ip, #0 | |
98 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches | |
99 | mcr p15, 0, ip, c7, c10, 4 @ drain WB | |
d090ddda | 100 | #ifdef CONFIG_MMU |
1da177e4 | 101 | mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs |
d090ddda | 102 | #endif |
1da177e4 LT | 103 | mrc p15, 0, ip, c1, c0, 0 @ ctrl register |
104 | bic ip, ip, #0x000f @ ............wcam | |
105 | bic ip, ip, #0x1100 @ ...i...s........ | |
106 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register | |
107 | mov pc, r0 | |
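
Both cpu_arm920_proc_fin above and cpu_arm920_reset use immediate masks on the CP15 control register; the dot-pattern comments name the affected bits. A small C sketch that spells those masks out (the enum names are illustrative, not kernel identifiers; bit positions follow the ARMv4 control register layout):

```c
/* Illustrative names for the CP15 c1 control register bits cleared
 * above; not kernel identifiers, just the ARMv4 layout. */
enum {
    CR_M = 1 << 0,   /* MMU enable            ("m") */
    CR_A = 1 << 1,   /* alignment fault check ("a") */
    CR_C = 1 << 2,   /* D-cache enable        ("c") */
    CR_W = 1 << 3,   /* write buffer enable   ("w") */
    CR_S = 1 << 8,   /* system protection     ("s") */
    CR_I = 1 << 12,  /* I-cache enable        ("i") */
};

/* cpu_arm920_proc_fin clears 0x1000 and 0x000e: */
_Static_assert(CR_I == 0x1000, "i bit");
_Static_assert((CR_W | CR_C | CR_A) == 0x000e, "wca bits");

/* cpu_arm920_reset clears 0x000f and 0x1100: */
_Static_assert((CR_W | CR_C | CR_A | CR_M) == 0x000f, "wcam bits");
_Static_assert((CR_I | CR_S) == 0x1100, "i and s bits");
```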
108 | ||
109 | /* | |
110 | * cpu_arm920_do_idle() | |
111 | */ | |
112 | .align 5 | |
113 | ENTRY(cpu_arm920_do_idle) | |
114 | mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt | |
115 | mov pc, lr | |
116 | ||
117 | ||
118 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | |
119 | ||
120 | /* | |
121 | * flush_user_cache_all() | |
122 | * | |
123 | * Invalidate all cache entries in a particular address | |
124 | * space. | |
125 | */ | |
126 | ENTRY(arm920_flush_user_cache_all) | |
127 | /* FALLTHROUGH */ | |
128 | ||
129 | /* | |
130 | * flush_kern_cache_all() | |
131 | * | |
132 | * Clean and invalidate the entire cache. | |
133 | */ | |
134 | ENTRY(arm920_flush_kern_cache_all) | |
135 | mov r2, #VM_EXEC | |
136 | mov ip, #0 | |
137 | __flush_whole_cache: | |
138 | mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 8 segments | |
139 | 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries | |
140 | 2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index | |
141 | subs r3, r3, #1 << 26 | |
142 | bcs 2b @ entries 63 to 0 | |
143 | subs r1, r1, #1 << 5 | |
144 | bcs 1b @ segments 7 to 0 | |
145 | tst r2, #VM_EXEC | |
146 | mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache | |
147 | mcrne p15, 0, ip, c7, c10, 4 @ drain WB | |
148 | mov pc, lr | |
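
The nested loop above walks the data cache by segment and index rather than by address: r1 counts segments down from 7 in bits [7:5], r3 adds the line index counting down from 63 in bits [31:26], and one clean+invalidate-by-index MCR is issued per line. A rough C model of that traversal (an illustration of the encoding used by the shifts above, not kernel code):

```c
#include <stdint.h>

#define CACHE_DSEGMENTS  8
#define CACHE_DENTRIES   64

/* For the ARM920T index-based c7 operations, the segment sits in
 * bits [7:5] and the line index within the segment in bits [31:26],
 * matching the "<< 5" and "<< 26" shifts in the assembly above. */
static inline uint32_t arm920_index_word(unsigned seg, unsigned idx)
{
    return (idx << 26) | (seg << 5);
}

/* Equivalent of the two nested loops: entries 63..0 within each of
 * segments 7..0, one "clean+invalidate D index" operation per line. */
static void model_flush_whole_dcache(void (*op)(uint32_t))
{
    for (int seg = CACHE_DSEGMENTS - 1; seg >= 0; seg--)
        for (int idx = CACHE_DENTRIES - 1; idx >= 0; idx--)
            op(arm920_index_word(seg, idx));  /* MCR p15,0,Rd,c7,c14,2 */
}
```

That is 8 × 64 = 512 index operations, covering every line of the 16 KB data cache exactly once.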
149 | ||
150 | /* | |
151 | * flush_user_cache_range(start, end, flags) | |
152 | * | |
153 | * Invalidate a range of cache entries in the specified | |
154 | * address space. | |
155 | * | |
156 | * - start - start address (inclusive) | |
157 | * - end - end address (exclusive) | |
158 | * - flags - vm_flags for address space | |
159 | */ | |
160 | ENTRY(arm920_flush_user_cache_range) | |
161 | mov ip, #0 | |
162 | sub r3, r1, r0 @ calculate total size | |
163 | cmp r3, #CACHE_DLIMIT | |
164 | bhs __flush_whole_cache | |
165 | ||
166 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry | |
167 | tst r2, #VM_EXEC | |
168 | mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry | |
169 | add r0, r0, #CACHE_DLINESIZE | |
170 | cmp r0, r1 | |
171 | blo 1b | |
172 | tst r2, #VM_EXEC | |
173 | mcrne p15, 0, ip, c7, c10, 4 @ drain WB | |
174 | mov pc, lr | |
175 | ||
176 | /* | |
177 | * coherent_kern_range(start, end) | |
178 | * | |
179 | * Ensure coherency between the Icache and the Dcache in the | |
180 | * region described by start, end. If you have non-snooping | |
181 | * Harvard caches, you need to implement this function. | |
182 | * | |
183 | * - start - virtual start address | |
184 | * - end - virtual end address | |
185 | */ | |
186 | ENTRY(arm920_coherent_kern_range) | |
187 | /* FALLTHROUGH */ | |
188 | ||
189 | /* | |
190 | * coherent_user_range(start, end) | |
191 | * | |
192 | * Ensure coherency between the Icache and the Dcache in the | |
193 | * region described by start, end. If you have non-snooping | |
194 | * Harvard caches, you need to implement this function. | |
195 | * | |
196 | * - start - virtual start address | |
197 | * - end - virtual end address | |
198 | */ | |
199 | ENTRY(arm920_coherent_user_range) | |
200 | bic r0, r0, #CACHE_DLINESIZE - 1 | |
201 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | |
202 | mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry | |
203 | add r0, r0, #CACHE_DLINESIZE | |
204 | cmp r0, r1 | |
205 | blo 1b | |
206 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | |
207 | mov pc, lr | |
208 | ||
209 | /* | |
210 | * flush_kern_dcache_page(void *page) | |
211 | * | |
212 | * Ensure no D cache aliasing occurs, either with itself or | |
213 | * the I cache | |
214 | * | |
215 | * - page - page aligned address | |
216 | */ | |
217 | ENTRY(arm920_flush_kern_dcache_page) | |
218 | add r1, r0, #PAGE_SZ | |
219 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry | |
220 | add r0, r0, #CACHE_DLINESIZE | |
221 | cmp r0, r1 | |
222 | blo 1b | |
223 | mov r0, #0 | |
224 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache | |
225 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | |
226 | mov pc, lr | |
227 | ||
228 | /* | |
229 | * dma_inv_range(start, end) | |
230 | * | |
231 | * Invalidate (discard) the specified virtual address range. | |
232 | * May not write back any entries. If 'start' or 'end' | |
233 | * are not cache line aligned, those lines must be written | |
234 | * back. | |
235 | * | |
236 | * - start - virtual start address | |
237 | * - end - virtual end address | |
238 | * | |
239 | * (same as v4wb) | |
240 | */ | |
241 | ENTRY(arm920_dma_inv_range) | |
242 | tst r0, #CACHE_DLINESIZE - 1 | |
243 | bic r0, r0, #CACHE_DLINESIZE - 1 | |
244 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry | |
245 | tst r1, #CACHE_DLINESIZE - 1 | |
246 | mcrne p15, 0, r1, c7, c10, 1 @ clean D entry | |
247 | 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry | |
248 | add r0, r0, #CACHE_DLINESIZE | |
249 | cmp r0, r1 | |
250 | blo 1b | |
251 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | |
252 | mov pc, lr | |
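
The two mcrne instructions before the loop handle the partially covered lines mentioned in the comment: an invalidate would otherwise throw away dirty data that shares a cache line with, but lies outside, the requested range. A hedged C model of that boundary handling (the helper names are hypothetical stand-ins for the coprocessor operations above):

```c
#include <stdint.h>

#define CACHE_DLINESIZE 32

/* Hypothetical stubs standing in for the MCR operations used above. */
static void clean_dcache_line(uintptr_t a)      { (void)a; /* p15, c7, c10, 1 */ }
static void invalidate_dcache_line(uintptr_t a) { (void)a; /* p15, c7, c6, 1  */ }
static void drain_write_buffer(void)            {          /* p15, c7, c10, 4 */ }

/* Model of arm920_dma_inv_range(start, end). */
static void model_dma_inv_range(uintptr_t start, uintptr_t end)
{
    /* A partial line at 'start' also holds bytes before the range;
     * clean it so they reach memory before the line is discarded. */
    if (start & (CACHE_DLINESIZE - 1))
        clean_dcache_line(start);
    /* Likewise for a partial line at 'end'. */
    if (end & (CACHE_DLINESIZE - 1))
        clean_dcache_line(end);

    /* Invalidate every line covering [start, end). */
    for (uintptr_t p = start & ~(uintptr_t)(CACHE_DLINESIZE - 1);
         p < end; p += CACHE_DLINESIZE)
        invalidate_dcache_line(p);

    drain_write_buffer();
}
```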
253 | ||
254 | /* | |
255 | * dma_clean_range(start, end) | |
256 | * | |
257 | * Clean the specified virtual address range. | |
258 | * | |
259 | * - start - virtual start address | |
260 | * - end - virtual end address | |
261 | * | |
262 | * (same as v4wb) | |
263 | */ | |
264 | ENTRY(arm920_dma_clean_range) | |
265 | bic r0, r0, #CACHE_DLINESIZE - 1 | |
266 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | |
267 | add r0, r0, #CACHE_DLINESIZE | |
268 | cmp r0, r1 | |
269 | blo 1b | |
270 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | |
271 | mov pc, lr | |
272 | ||
273 | /* | |
274 | * dma_flush_range(start, end) | |
275 | * | |
276 | * Clean and invalidate the specified virtual address range. | |
277 | * | |
278 | * - start - virtual start address | |
279 | * - end - virtual end address | |
280 | */ | |
281 | ENTRY(arm920_dma_flush_range) | |
282 | bic r0, r0, #CACHE_DLINESIZE - 1 | |
283 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry | |
284 | add r0, r0, #CACHE_DLINESIZE | |
285 | cmp r0, r1 | |
286 | blo 1b | |
287 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | |
288 | mov pc, lr | |
289 | ||
290 | ENTRY(arm920_cache_fns) | |
291 | .long arm920_flush_kern_cache_all | |
292 | .long arm920_flush_user_cache_all | |
293 | .long arm920_flush_user_cache_range | |
294 | .long arm920_coherent_kern_range | |
295 | .long arm920_coherent_user_range | |
296 | .long arm920_flush_kern_dcache_page | |
297 | .long arm920_dma_inv_range | |
298 | .long arm920_dma_clean_range | |
299 | .long arm920_dma_flush_range | |
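
This jump table is consumed through the struct cpu_cache_fns function-pointer structure from asm/cacheflush.h, so the .long entries must stay in the same order as that structure's fields. A rough sketch of the correspondence (the prototypes are approximations, not the exact kernel declarations):

```c
/* Approximate shape of the structure the .long entries above fill in;
 * the real definition lives in asm/cacheflush.h and its signatures
 * may differ in detail. */
struct cpu_cache_fns_sketch {
    void (*flush_kern_all)(void);
    void (*flush_user_all)(void);
    void (*flush_user_range)(unsigned long start, unsigned long end,
                             unsigned int vm_flags);
    void (*coherent_kern_range)(unsigned long start, unsigned long end);
    void (*coherent_user_range)(unsigned long start, unsigned long end);
    void (*flush_kern_dcache_page)(void *page);
    void (*dma_inv_range)(unsigned long start, unsigned long end);
    void (*dma_clean_range)(unsigned long start, unsigned long end);
    void (*dma_flush_range)(unsigned long start, unsigned long end);
};
```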
300 | ||
301 | #endif | |
302 | ||
303 | ||
304 | ENTRY(cpu_arm920_dcache_clean_area) | |
305 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | |
306 | add r0, r0, #CACHE_DLINESIZE | |
307 | subs r1, r1, #CACHE_DLINESIZE | |
308 | bhi 1b | |
309 | mov pc, lr | |
310 | ||
311 | /* =============================== PageTable ============================== */ | |
312 | ||
313 | /* | |
314 | * cpu_arm920_switch_mm(pgd) | |
315 | * | |
316 | * Set the translation base pointer to be as described by pgd. | |
317 | * | |
318 | * pgd: new page tables | |
319 | */ | |
320 | .align 5 | |
321 | ENTRY(cpu_arm920_switch_mm) | |
d090ddda | 322 | #ifdef CONFIG_MMU |
1da177e4 LT | 323 | mov ip, #0 |
324 | #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH | |
325 | mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache | |
326 | #else | |
327 | @ && 'Clean & Invalidate whole DCache' | |
328 | @ && Re-written to use Index Ops. | |
329 | @ && Uses registers r1, r3 and ip | |
330 | ||
331 | mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 8 segments | |
332 | 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries | |
333 | 2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index | |
334 | subs r3, r3, #1 << 26 | |
335 | bcs 2b @ entries 63 to 0 | |
336 | subs r1, r1, #1 << 5 | |
337 | bcs 1b @ segments 7 to 0 | |
338 | #endif | |
339 | mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache | |
340 | mcr p15, 0, ip, c7, c10, 4 @ drain WB | |
341 | mcr p15, 0, r0, c2, c0, 0 @ load page table pointer | |
342 | mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs | |
d090ddda | 343 | #endif |
1da177e4 LT | 344 | mov pc, lr |
345 | ||
346 | /* | |
ad1ae2fe | 347 | * cpu_arm920_set_pte_ext(ptep, pte, ext)
1da177e4 LT | 348 | * |
349 | * Set a PTE and flush it out | |
350 | */ | |
351 | .align 5 | |
ad1ae2fe | 352 | ENTRY(cpu_arm920_set_pte_ext) |
d090ddda | 353 | #ifdef CONFIG_MMU |
da091653 | 354 | armv3_set_pte_ext |
1da177e4 LT | 355 | mov r0, r0 |
356 | mcr p15, 0, r0, c7, c10, 1 @ clean D entry | |
357 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | |
da091653 | 358 | #endif |
1da177e4 LT | 359 | mov pc, lr |
360 | ||
361 | __INIT | |
362 | ||
363 | .type __arm920_setup, #function | |
364 | __arm920_setup: | |
365 | mov r0, #0 | |
366 | mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4 | |
367 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4 | |
d090ddda | 368 | #ifdef CONFIG_MMU |
1da177e4 | 369 | mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4 |
d090ddda | 370 | #endif |
22b19086 RK | 371 | adr r5, arm920_crval |
372 | ldmia r5, {r5, r6} | |
1da177e4 | 373 | mrc p15, 0, r0, c1, c0 @ get control register v4 |
1da177e4 | 374 | bic r0, r0, r5 |
22b19086 | 375 | orr r0, r0, r6 |
1da177e4 LT | 376 | mov pc, lr |
377 | .size __arm920_setup, . - __arm920_setup | |
378 | ||
379 | /* | |
380 | * R | |
381 | * .RVI ZFRS BLDP WCAM | |
382 | * ..11 0001 ..11 0101 | |
383 | * | |
384 | */ | |
22b19086 RK | 385 | .type arm920_crval, #object |
386 | arm920_crval: | |
387 | crval clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130 | |
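
The crval macro (from proc-macros.S) emits a pair of words: a mask of control-register bits to force and a value to set, choosing mmuset or ucset depending on CONFIG_MMU. __arm920_setup then applies them with bic/orr before the caller writes CP15 c1. A small C sketch of that computation (values copied from the line above):

```c
#include <stdint.h>
#include <stdio.h>

/* Values from the crval line above. */
#define ARM920_CR_CLEAR   0x00003f3fu  /* bits forced to a known state */
#define ARM920_CR_MMUSET  0x00003135u  /* value used when CONFIG_MMU=y */

/* What __arm920_setup computes: bic r0, r0, r5 ; orr r0, r0, r6 */
static uint32_t arm920_setup_cr(uint32_t cr)
{
    return (cr & ~ARM920_CR_CLEAR) | ARM920_CR_MMUSET;
}

int main(void)
{
    /* Whatever the boot-time value was, the masked bits end up in the
     * ..11 0001 ..11 0101 pattern documented in the comment above. */
    printf("0x%08x\n", arm920_setup_cr(0xdeadbeef));
    return 0;
}
```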
1da177e4 LT | 388 |
389 | __INITDATA | |
390 | ||
391 | /* | |
392 | * Purpose : Function pointers used to access above functions - all calls | |
393 | * come through these | |
394 | */ | |
395 | .type arm920_processor_functions, #object | |
396 | arm920_processor_functions: | |
397 | .word v4t_early_abort | |
4a1fd556 | 398 | .word pabort_noifar |
1da177e4 LT | 399 | .word cpu_arm920_proc_init |
400 | .word cpu_arm920_proc_fin | |
401 | .word cpu_arm920_reset | |
402 | .word cpu_arm920_do_idle | |
403 | .word cpu_arm920_dcache_clean_area | |
404 | .word cpu_arm920_switch_mm | |
ad1ae2fe | 405 | .word cpu_arm920_set_pte_ext |
1da177e4 LT | 406 | .size arm920_processor_functions, . - arm920_processor_functions |
407 | ||
408 | .section ".rodata" | |
409 | ||
410 | .type cpu_arch_name, #object | |
411 | cpu_arch_name: | |
412 | .asciz "armv4t" | |
413 | .size cpu_arch_name, . - cpu_arch_name | |
414 | ||
415 | .type cpu_elf_name, #object | |
416 | cpu_elf_name: | |
417 | .asciz "v4" | |
418 | .size cpu_elf_name, . - cpu_elf_name | |
419 | ||
420 | .type cpu_arm920_name, #object | |
421 | cpu_arm920_name: | |
264edb35 | 422 | .asciz "ARM920T" |
1da177e4 LT | 423 | .size cpu_arm920_name, . - cpu_arm920_name |
424 | ||
425 | .align | |
426 | ||
02b7dd12 | 427 | .section ".proc.info.init", #alloc, #execinstr |
1da177e4 LT | 428 |
429 | .type __arm920_proc_info,#object | |
430 | __arm920_proc_info: | |
431 | .long 0x41009200 | |
432 | .long 0xff00fff0 | |
433 | .long PMD_TYPE_SECT | \ | |
434 | PMD_SECT_BUFFERABLE | \ | |
435 | PMD_SECT_CACHEABLE | \ | |
436 | PMD_BIT4 | \ | |
437 | PMD_SECT_AP_WRITE | \ | |
438 | PMD_SECT_AP_READ | |
8799ee9f RK | 439 | .long PMD_TYPE_SECT | \ |
440 | PMD_BIT4 | \ | |
441 | PMD_SECT_AP_WRITE | \ | |
442 | PMD_SECT_AP_READ | |
1da177e4 LT | 443 | b __arm920_setup |
444 | .long cpu_arch_name | |
445 | .long cpu_elf_name | |
446 | .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | |
447 | .long cpu_arm920_name | |
448 | .long arm920_processor_functions | |
449 | .long v4wbi_tlb_fns | |
450 | .long v4wb_user_fns | |
451 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | |
452 | .long arm920_cache_fns | |
453 | #else | |
454 | .long v4wt_cache_fns | |
455 | #endif | |
456 | .size __arm920_proc_info, . - __arm920_proc_info |
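
The first two words of this record are the CPU ID match value and mask used when the kernel scans .proc.info.init: an entry is chosen when the main ID register ANDed with the mask equals the value, so 0xff00fff0 checks the implementer (0x41, ARM) and part number (0x920) while ignoring the variant and revision fields. A small sketch of that match (values copied from above; the function name and the sample ID values are illustrative):

```c
#include <stdint.h>
#include <stdio.h>

/* Match value and mask from __arm920_proc_info above. */
#define ARM920_CPU_VAL  0x41009200u
#define ARM920_CPU_MASK 0xff00fff0u

/* The lookup over .proc.info.init records, in spirit:
 * an entry matches when (cpuid & mask) == value. */
static int arm920_matches(uint32_t main_id)
{
    return (main_id & ARM920_CPU_MASK) == ARM920_CPU_VAL;
}

int main(void)
{
    printf("%d\n", arm920_matches(0x41129200u)); /* e.g. an ARM920T ID: match    */
    printf("%d\n", arm920_matches(0x41069260u)); /* an ARM926 part: no match     */
    return 0;
}
```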