/* tlb-miss.S: TLB miss handlers
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sys.h>
#include <linux/config.h>
#include <linux/linkage.h>
#include <asm/pgtable.h>
#include <asm/highmem.h>
#include <asm/spr-regs.h>
        .globl          __entry_insn_mmu_miss
__entry_insn_mmu_miss:

        .globl          __entry_insn_mmu_exception
__entry_insn_mmu_exception:

        .globl          __entry_data_mmu_miss
__entry_data_mmu_miss:

        .globl          __entry_data_mmu_exception
__entry_data_mmu_exception:
###############################################################################
#
# handle a lookup failure of one sort or another in a kernel TLB handler
#  GR29 - faulting address
#
###############################################################################
        .type           __tlb_kernel_fault,@function
__tlb_kernel_fault:
        # see if we're supposed to re-enable single-step mode upon return
        sethi.p         %hi(__break_tlb_miss_return_break),gr30
        setlo           %lo(__break_tlb_miss_return_break),gr30

        subcc           gr31,gr30,gr0,icc0
        beq             icc0,#0,__tlb_kernel_fault_sstep
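        # (gr31 is expected to hold the exception return PC at this point; if it
        #  matches __break_tlb_miss_return_break, the miss was taken while
        #  single-stepping and the single-step variant of the fault path is used)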
        movgs           gr29,scr2                       /* save EAR0 value */
        sethi.p         %hi(__kernel_current_task),gr29
        setlo           %lo(__kernel_current_task),gr29
        ldi.p           @(gr29,#0),gr29                 /* restore GR29 */
        bra             __entry_kernel_handle_mmu_fault

        # we've got to re-enable single-stepping
__tlb_kernel_fault_sstep:
        sethi.p         %hi(__break_tlb_miss_real_return_info),gr30
        setlo           %lo(__break_tlb_miss_real_return_info),gr30

        movgs           gr29,scr2                       /* save EAR0 value */
        sethi.p         %hi(__kernel_current_task),gr29
        setlo           %lo(__kernel_current_task),gr29
        ldi.p           @(gr29,#0),gr29                 /* restore GR29 */
        bra             __entry_kernel_handle_mmu_fault_sstep

        .size           __tlb_kernel_fault, .-__tlb_kernel_fault
###############################################################################
#
# handle a lookup failure of one sort or another in a user TLB handler
#  GR28 - faulting address
#
###############################################################################
        .type           __tlb_user_fault,@function
__tlb_user_fault:
        # see if we're supposed to re-enable single-step mode upon return
        sethi.p         %hi(__break_tlb_miss_return_break),gr30
        setlo           %lo(__break_tlb_miss_return_break),gr30

        subcc           gr31,gr30,gr0,icc0
        beq             icc0,#0,__tlb_user_fault_sstep
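        # (same return-PC check against __break_tlb_miss_return_break as in
        #  __tlb_kernel_fault above)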
        bra             __entry_uspace_handle_mmu_fault

        # we've got to re-enable single-stepping
__tlb_user_fault_sstep:
        sethi.p         %hi(__break_tlb_miss_real_return_info),gr30
        setlo           %lo(__break_tlb_miss_real_return_info),gr30

        bra             __entry_uspace_handle_mmu_fault_sstep

        .size           __tlb_user_fault, .-__tlb_user_fault
###############################################################################
#
# Kernel instruction TLB miss handler
#  GR1   - kernel stack pointer
#  GR28  - saved exception frame pointer
#  GR29  - faulting address
#  SCR0  - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
#  DAMR3 - mapped page directory
#  DAMR4 - mapped page table as matched by SCR0
#
###############################################################################
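# Rough C-level sketch of the fast path below (illustrative only - it does not
# reproduce the exact register choreography, and pge_cache_covers() is just a
# hypothetical name for the SCR0 coverage test):
#
#       if (!pge_cache_covers(scr0, ear0))      /* cached PGE covers a 64MB block */
#               goto __itlb_k_PTD_miss;         /* walk the PGD, remap DAMR4/SCR0 */
#       pte = ptd[(ear0 >> 14) & 0xfff];        /* PTD mapped through DAMLR4, 4096 PTEs */
#       if (!(pte & _PAGE_PRESENT))
#               goto __tlb_kernel_fault;
#       write back pte | _PAGE_ACCESSED, punt any valid IAMR1/DAMR1 entry into
#       the TLB, then load the new mapping into IAMR1 and return
#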
        .globl          __entry_kernel_insn_tlb_miss
        .type           __entry_kernel_insn_tlb_miss,@function
__entry_kernel_insn_tlb_miss:
        sethi.p         %hi(0xe1200004),gr30
        setlo           %lo(0xe1200004),gr30
        sethi.p         %hi(0xffc00100),gr30
        setlo           %lo(0xffc00100),gr30

        movsg           ccr,gr30                        /* save CCR */

        # see if the cached page table mapping is appropriate
        srlicc.p        gr31,#26,gr0,icc0
        srli.p          gr29,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bne             icc0,#0,__itlb_k_PTD_miss

        # access the PTD with EAR0[25:14]
        # - DAMLR4 points to the virtual address of the appropriate page table
        # - the PTD holds 4096 PTEs
        # - the PTD must be accessed uncached
        # - the PTE must be marked accessed if it was valid
        #
        ldi             @(gr31,#0),gr30                 /* fetch the PTE */
        andicc          gr30,#_PAGE_PRESENT,gr0,icc0
        ori.p           gr30,#_PAGE_ACCESSED,gr30
        beq             icc0,#0,__tlb_kernel_fault      /* jump if PTE invalid */
        sti.p           gr30,@(gr31,#0)                 /* update the PTE */
        andi            gr30,#~_PAGE_ACCESSED,gr30
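        # (the accessed bit is recorded in the in-memory PTE above but stripped
        #  from the register copy again, presumably because gr30 goes on to be
        #  used as the xAMPR value for the new TLB entry, where that software
        #  bit must not be set)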
        # we're using IAMR1 as an extra TLB entry
        # - punt the entry here (if valid) to the real TLB and then replace with the new PTE
        # - need to check DAMR1 lest we cause a multiple-DAT-hit exception
        # - IAMPR1 has no WP bit, and we mustn't lose WP information
        andicc          gr31,#xAMPRx_V,gr0,icc0
        setlos.p        0xfffff000,gr31
        beq             icc0,#0,__itlb_k_nopunt         /* punt not required */

        movgs           gr31,tplr                       /* set TPLR.CXN */
        tlbpr           gr31,gr0,#4,#0                  /* delete matches from TLB, IAMR1, DAMR1 */

        ori             gr31,#xAMPRx_V,gr31             /* entry was invalidated by tlbpr #4 */
        movsg           iamlr1,gr31                     /* set TPLR.CXN */
        tlbpr           gr31,gr0,#2,#0                  /* save to the TLB */
        movsg           tpxr,gr31                       /* check the TLB write error flag */
        andicc.p        gr31,#TPXR_E,gr0,icc0
        setlos          #0xfffff000,gr31
        bne             icc0,#0,__tlb_kernel_fault

        # assemble the new TLB entry
        movgs           gr29,iamlr1                     /* xAMLR = address | context number */
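        # (the new entry appears to be assembled as
        #       xAMLR = (faulting address & 0xfffff000) | context number
        #       xAMPR = the PTE value fetched above
        #  with the elided instructions filling in the corresponding xAMPR register)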
        # return, restoring registers
        sethi.p         %hi(__kernel_current_task),gr29
        setlo           %lo(__kernel_current_task),gr29

        beq             icc0,#3,0                       /* prevent icache prefetch */

        # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
        # appropriate page table and map that instead
        # - access the PGD with EAR0[31:26]
        # - DAMLR3 points to the virtual address of the page directory
        # - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
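        # A minimal sketch of the PGE lookup that follows (illustrative only;
        # the <<8 spacing suggests 256-byte PGE slots, of which only the first
        # word is loaded here):
        #       pge = *(u32 *)(damlr3 + ((ear0 >> 26) << 8));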
        srli            gr29,#26,gr31                   /* calculate PGE offset */
        slli            gr31,#8,gr31                    /* and clear bottom bits */

        ld              @(gr31,gr30),gr30               /* access the PGE */

        andicc.p        gr30,#_PAGE_PRESENT,gr0,icc0
        andicc          gr30,#xAMPRx_SS,gr0,icc1

        # map this PTD instead and record coverage address
        ori.p           gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
        beq             icc0,#0,__tlb_kernel_fault      /* jump if PGE not present */
        bne             icc1,#0,__itlb_k_bigpage

        # we can now resume normal service
        srli.p          gr29,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bra             __itlb_k_PTD_mapped

        .size           __entry_kernel_insn_tlb_miss, .-__entry_kernel_insn_tlb_miss
###############################################################################
#
# Kernel data TLB miss handler
#  GR1   - kernel stack pointer
#  GR28  - saved exception frame pointer
#  GR29  - faulting address
#  SCR1  - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
#  DAMR3 - mapped page directory
#  DAMR5 - mapped page table as matched by SCR1
#
###############################################################################
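# The flow here mirrors __entry_kernel_insn_tlb_miss above (see the sketch
# there), with SCR1/DAMR5 playing the roles of SCR0/DAMR4; lookup failures
# still go to __tlb_kernel_fault.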
        .globl          __entry_kernel_data_tlb_miss
        .type           __entry_kernel_data_tlb_miss,@function
__entry_kernel_data_tlb_miss:
        sethi.p         %hi(0xe1200004),gr30
        setlo           %lo(0xe1200004),gr30
        sethi.p         %hi(0xffc00100),gr30
        setlo           %lo(0xffc00100),gr30

        movsg           ccr,gr30                        /* save CCR */

        # see if the cached page table mapping is appropriate
        srlicc.p        gr31,#26,gr0,icc0
        srli.p          gr29,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bne             icc0,#0,__dtlb_k_PTD_miss

        # access the PTD with EAR0[25:14]
        # - DAMLR5 points to the virtual address of the appropriate page table
        # - the PTD holds 4096 PTEs
        # - the PTD must be accessed uncached
        # - the PTE must be marked accessed if it was valid
        #
        ldi             @(gr31,#0),gr30                 /* fetch the PTE */
        andicc          gr30,#_PAGE_PRESENT,gr0,icc0
        ori.p           gr30,#_PAGE_ACCESSED,gr30
        beq             icc0,#0,__tlb_kernel_fault      /* jump if PTE invalid */
        sti.p           gr30,@(gr31,#0)                 /* update the PTE */
        andi            gr30,#~_PAGE_ACCESSED,gr30

        # we're using DAMR1 as an extra TLB entry
        # - punt the entry here (if valid) to the real TLB and then replace with the new PTE
        # - need to check IAMR1 lest we cause a multiple-DAT-hit exception
        andicc          gr31,#xAMPRx_V,gr0,icc0
        setlos.p        0xfffff000,gr31
        beq             icc0,#0,__dtlb_k_nopunt         /* punt not required */

        movgs           gr31,tplr                       /* set TPLR.CXN */
        tlbpr           gr31,gr0,#4,#0                  /* delete matches from TLB, IAMR1, DAMR1 */

        ori             gr31,#xAMPRx_V,gr31             /* entry was invalidated by tlbpr #4 */
        movsg           damlr1,gr31                     /* set TPLR.CXN */
        tlbpr           gr31,gr0,#2,#0                  /* save to the TLB */
        movsg           tpxr,gr31                       /* check the TLB write error flag */
        andicc.p        gr31,#TPXR_E,gr0,icc0
        setlos          #0xfffff000,gr31
        bne             icc0,#0,__tlb_kernel_fault

        # assemble the new TLB entry
        movgs           gr29,iamlr1                     /* xAMLR = address | context number */

        # return, restoring registers
        sethi.p         %hi(__kernel_current_task),gr29
        setlo           %lo(__kernel_current_task),gr29

        beq             icc0,#3,0                       /* prevent icache prefetch */

        # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
        # appropriate page table and map that instead
        # - access the PGD with EAR0[31:26]
        # - DAMLR3 points to the virtual address of the page directory
        # - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables

        srli            gr29,#26,gr31                   /* calculate PGE offset */
        slli            gr31,#8,gr31                    /* and clear bottom bits */

        ld              @(gr31,gr30),gr30               /* access the PGE */

        andicc.p        gr30,#_PAGE_PRESENT,gr0,icc0
        andicc          gr30,#xAMPRx_SS,gr0,icc1

        # map this PTD instead and record coverage address
        ori.p           gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
        beq             icc0,#0,__tlb_kernel_fault      /* jump if PGE not present */
        bne             icc1,#0,__dtlb_k_bigpage

        # we can now resume normal service
        srli.p          gr29,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bra             __dtlb_k_PTD_mapped

        .size           __entry_kernel_data_tlb_miss, .-__entry_kernel_data_tlb_miss
###############################################################################
#
# Userspace instruction TLB miss handler (with PGE prediction)
#  GR28  - faulting address
#  SCR0  - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
#  DAMR3 - mapped page directory
#  DAMR4 - mapped page table as matched by SCR0
#
###############################################################################
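# This is the userspace counterpart of __entry_kernel_insn_tlb_miss: the
# faulting address arrives in GR28 rather than GR29, GR29 is left alone here
# (so there is no __kernel_current_task reload on the way out), and lookup
# failures are routed to __tlb_user_fault instead of __tlb_kernel_fault.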
        .globl          __entry_user_insn_tlb_miss
        .type           __entry_user_insn_tlb_miss,@function
__entry_user_insn_tlb_miss:
        sethi.p         %hi(0xe1200004),gr30
        setlo           %lo(0xe1200004),gr30
        sethi.p         %hi(0xffc00100),gr30
        setlo           %lo(0xffc00100),gr30

        movsg           ccr,gr30                        /* save CCR */

        # see if the cached page table mapping is appropriate
        srlicc.p        gr31,#26,gr0,icc0
        srli.p          gr28,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bne             icc0,#0,__itlb_u_PTD_miss

        # access the PTD with EAR0[25:14]
        # - DAMLR4 points to the virtual address of the appropriate page table
        # - the PTD holds 4096 PTEs
        # - the PTD must be accessed uncached
        # - the PTE must be marked accessed if it was valid
        #
        ldi             @(gr31,#0),gr30                 /* fetch the PTE */
        andicc          gr30,#_PAGE_PRESENT,gr0,icc0
        ori.p           gr30,#_PAGE_ACCESSED,gr30
        beq             icc0,#0,__tlb_user_fault        /* jump if PTE invalid */
        sti.p           gr30,@(gr31,#0)                 /* update the PTE */
        andi            gr30,#~_PAGE_ACCESSED,gr30

        # we're using IAMR1/DAMR1 as an extra TLB entry
        # - punt the entry here (if valid) to the real TLB and then replace with the new PTE
        andicc          gr31,#xAMPRx_V,gr0,icc0
        setlos.p        0xfffff000,gr31
        beq             icc0,#0,__itlb_u_nopunt         /* punt not required */

        movsg           damlr1,gr31                     /* set TPLR.CXN */
        tlbpr           gr31,gr0,#2,#0                  /* save to the TLB */
        movsg           tpxr,gr31                       /* check the TLB write error flag */
        andicc.p        gr31,#TPXR_E,gr0,icc0
        setlos          #0xfffff000,gr31
        bne             icc0,#0,__tlb_user_fault

        # assemble the new TLB entry
        movgs           gr28,iamlr1                     /* xAMLR = address | context number */

        # return, restoring registers
        beq             icc0,#3,0                       /* prevent icache prefetch */

        # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
        # appropriate page table and map that instead
        # - access the PGD with EAR0[31:26]
        # - DAMLR3 points to the virtual address of the page directory
        # - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables

        srli            gr28,#26,gr31                   /* calculate PGE offset */
        slli            gr31,#8,gr31                    /* and clear bottom bits */

        ld              @(gr31,gr30),gr30               /* access the PGE */

        andicc.p        gr30,#_PAGE_PRESENT,gr0,icc0
        andicc          gr30,#xAMPRx_SS,gr0,icc1

        # map this PTD instead and record coverage address
        ori.p           gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
        beq             icc0,#0,__tlb_user_fault        /* jump if PGE not present */
        bne             icc1,#0,__itlb_u_bigpage

        # we can now resume normal service
        srli.p          gr28,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bra             __itlb_u_PTD_mapped

        .size           __entry_user_insn_tlb_miss, .-__entry_user_insn_tlb_miss
###############################################################################
#
# Userspace data TLB miss handler
#  GR28  - faulting address
#  SCR1  - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
#  DAMR3 - mapped page directory
#  DAMR5 - mapped page table as matched by SCR1
#
###############################################################################
        .globl          __entry_user_data_tlb_miss
        .type           __entry_user_data_tlb_miss,@function
__entry_user_data_tlb_miss:
        sethi.p         %hi(0xe1200004),gr30
        setlo           %lo(0xe1200004),gr30
        sethi.p         %hi(0xffc00100),gr30
        setlo           %lo(0xffc00100),gr30

        movsg           ccr,gr30                        /* save CCR */

        # see if the cached page table mapping is appropriate
        srlicc.p        gr31,#26,gr0,icc0
        srli.p          gr28,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bne             icc0,#0,__dtlb_u_PTD_miss

        # access the PTD with EAR0[25:14]
        # - DAMLR5 points to the virtual address of the appropriate page table
        # - the PTD holds 4096 PTEs
        # - the PTD must be accessed uncached
        # - the PTE must be marked accessed if it was valid
        #
        ldi             @(gr31,#0),gr30                 /* fetch the PTE */
        andicc          gr30,#_PAGE_PRESENT,gr0,icc0
        ori.p           gr30,#_PAGE_ACCESSED,gr30
        beq             icc0,#0,__tlb_user_fault        /* jump if PTE invalid */
        sti.p           gr30,@(gr31,#0)                 /* update the PTE */
        andi            gr30,#~_PAGE_ACCESSED,gr30

        # we're using DAMR1 as an extra TLB entry
        # - punt the entry here (if valid) to the real TLB and then replace with the new PTE
        andicc          gr31,#xAMPRx_V,gr0,icc0
        setlos.p        0xfffff000,gr31
        beq             icc0,#0,__dtlb_u_nopunt         /* punt not required */

        movsg           damlr1,gr31                     /* set TPLR.CXN */
        tlbpr           gr31,gr0,#2,#0                  /* save to the TLB */
        movsg           tpxr,gr31                       /* check the TLB write error flag */
        andicc.p        gr31,#TPXR_E,gr0,icc0
        setlos          #0xfffff000,gr31
        bne             icc0,#0,__tlb_user_fault

        # assemble the new TLB entry
        movgs           gr28,iamlr1                     /* xAMLR = address | context number */

        # return, restoring registers
        beq             icc0,#3,0                       /* prevent icache prefetch */

        # the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
        # appropriate page table and map that instead
        # - first of all, check the insn PGE cache - we may well get a hit there
        # - access the PGD with EAR0[31:26]
        # - DAMLR3 points to the virtual address of the page directory
        # - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
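        # (fallback order, roughly: if the insn PGE cache (SCR0) already covers
        #  EAR0, reuse the insn-side PTD via __dtlb_u_using_iPTD; only otherwise
        #  walk the PGD through DAMLR3 and remap the data-side PTD and coverage
        #  address, i.e. DAMR5/SCR1)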
        movsg           scr0,gr31                       /* consult the insn-PGE-cache key */

        srlicc          gr31,#26,gr0,icc0
        srli            gr28,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bne             icc0,#0,__dtlb_u_iPGE_miss

        # what we're looking for is covered by the insn-PGE-cache
        bra             __dtlb_u_using_iPTD

        srli            gr28,#26,gr31                   /* calculate PGE offset */
        slli            gr31,#8,gr31                    /* and clear bottom bits */

        ld              @(gr31,gr30),gr30               /* access the PGE */

        andicc.p        gr30,#_PAGE_PRESENT,gr0,icc0
        andicc          gr30,#xAMPRx_SS,gr0,icc1

        # map this PTD instead and record coverage address
        ori.p           gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
        beq             icc0,#0,__tlb_user_fault        /* jump if PGE not present */
        bne             icc1,#0,__dtlb_u_bigpage

        # we can now resume normal service
        srli.p          gr28,#12,gr31                   /* use EAR0[25:14] as PTE index */
        bra             __dtlb_u_PTD_mapped

        .size           __entry_user_data_tlb_miss, .-__entry_user_data_tlb_miss