/*
 * Original code from fault.c
 * Copyright (C) 2000, 2001  Paolo Alberelli
 *
 * Fast PTE->TLB refill path
 * Copyright (C) 2003 Richard.Curnow@superh.com
 *
 * IMPORTANT NOTES :
 * The do_fast_page_fault function is called from a context in entry.S
 * where very few registers have been saved.  In particular, the code in
 * this file must be compiled not to use ANY caller-save registers that
 * are not part of the restricted save set.  Also, it means that code in
 * this file must not make calls to functions elsewhere in the kernel, or
 * else the excepting context will see corruption in its caller-save
 * registers.  Plus, the entry.S save area is non-reentrant, so this code
 * has to run with SR.BL==1, i.e. no interrupts taken inside it and panic
 * on any exception.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <cpu/registers.h>
/* Callable from fault.c, so not static */
inline void __do_tlb_refill(unsigned long address,
			    unsigned long long is_text_not_data, pte_t *pte)
{
	unsigned long long ptel;
	unsigned long long pteh = 0;
	struct tlb_info *tlbp;
	unsigned long long next;

	/* Get PTEL first */
	ptel = pte_val(*pte);

	/*
	 * Set PTEH register
	 */
	pteh = address & MMU_VPN_MASK;

	/* Sign extend based on neff. */
#if (NEFF == 32)
	/* Faster sign extension */
	pteh = (unsigned long long)(signed long long)(signed long)pteh;
#else
	/* General case */
	pteh = (pteh & NEFF_SIGN) ? (pteh | NEFF_MASK) : pteh;
#endif

	/* Set the ASID. */
	pteh |= get_asid() << PTEH_ASID_SHIFT;
	pteh |= PTEH_VALID;

	/* Set PTEL register, set_pte has performed the sign extension */
	ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */

	tlbp = is_text_not_data ? &(cpu_data->itlb) : &(cpu_data->dtlb);
	next = tlbp->next;
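	/*
	 * The slot pointed at by tlbp->next is overwritten in round-robin
	 * fashion: each refill steps 'next' on by TLB_STEP and wraps back
	 * to tlbp->first once tlbp->last is passed, so victim selection is
	 * effectively FIFO rather than any hardware replacement policy.
	 */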
	__flush_tlb_slot(next);
	/* Write PTEL (cfg space 1) before PTEH (cfg space 0): PTEH carries
	   the valid bit, so the entry only becomes live on the second write. */
	asm volatile ("putcfg %0,1,%2\n\n\t"
		      "putcfg %0,0,%1\n"
		      :  : "r" (next), "r" (pteh), "r" (ptel) );

	next += TLB_STEP;
	if (next > tlbp->last)
		next = tlbp->first;
	tlbp->next = next;
}
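/*
 * Refill a TLB entry for a vmalloc/IO-range address by walking the kernel's
 * own page tables: pgd_offset_k() starts from init_mm, so the mm argument
 * is not actually consulted here.
 */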
static int handle_vmalloc_fault(struct mm_struct *mm,
				unsigned long protection_flags,
				unsigned long long textaccess,
				unsigned long address)
{
	pgd_t *dir;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	dir = pgd_offset_k(address);

	pud = pud_offset(dir, address);
	if (pud_none_or_clear_bad(pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;

	if (pte_none(entry) || !pte_present(entry))
		return 0;
	if ((pte_val(entry) & protection_flags) != protection_flags)
		return 0;

	__do_tlb_refill(address, textaccess, pte);

	return 1;
}
static int handle_tlbmiss(struct mm_struct *mm,
			  unsigned long long protection_flags,
			  unsigned long long textaccess,
			  unsigned long address)
{
	pgd_t *dir;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	/* NB. The PGD currently only contains a single entry - there is no
	   page table tree stored for the top half of the address space since
	   virtual pages in that region should never be mapped in user mode.
	   (In kernel mode, the only things in that region are the 512Mb super
	   page (locked in), and vmalloc (modules) + I/O device pages (handled
	   by handle_vmalloc_fault), so no PGD for the upper half is required
	   by kernel mode either).

	   See how mm->pgd is allocated and initialised in pgd_alloc to see why
	   the next test is necessary. - RPC */
	if (address >= (unsigned long) TASK_SIZE)
		/* upper half - never has page table entries. */
		return 0;

	dir = pgd_offset(mm, address);
	if (pgd_none(*dir) || !pgd_present(*dir))
		return 0;

	pud = pud_offset(dir, address);
	if (pud_none(*pud) || !pud_present(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || !pmd_present(*pmd))
		return 0;

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;

	if (pte_none(entry) || !pte_present(entry))
		return 0;

	/*
	 * If the page doesn't have sufficient protection bits set to
	 * service the kind of fault being handled, there's not much
	 * point doing the TLB refill.  Punt the fault to the general
	 * handler.
	 */
	if ((pte_val(entry) & protection_flags) != protection_flags)
		return 0;

	__do_tlb_refill(address, textaccess, pte);

	return 1;
}
/* Put all this information into one structure so that everything is just
   arithmetic relative to a single base address.  This reduces the number
   of movi/shori pairs needed just to load addresses of static data. */
struct expevt_lookup {
	unsigned short protection_flags[8];
	unsigned char  is_text_access[8];
	unsigned char  is_write_access[8];
};

#define PRU (1<<9)
#define PRW (1<<8)
#define PRX (1<<7)
#define PRR (1<<6)

#define DIRTY (_PAGE_DIRTY | _PAGE_ACCESSED)
#define YOUNG (_PAGE_ACCESSED)

/* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against whether
   the fault happened in user mode or privileged mode. */
static struct expevt_lookup expevt_lookup_table = {
	.protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW},
	.is_text_access   = {1,   1,   0, 0, 0,   0,   0,   0}
};
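/*
 * The PR* values above mirror the protection bit positions of the hardware
 * PTEL format; since ptel is masked with _PAGE_FLAGS_HARDWARE_MASK before
 * use, the handlers can test permissions with a plain mask against
 * pte_val().
 */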
/*
   This routine handles page faults that can be serviced just by refilling a
   TLB entry from an existing page table entry.  (This case represents a very
   large majority of page faults.)  Return 1 if the fault was successfully
   handled.  Return 0 if the fault could not be handled.  (This leads into the
   general fault handling in fault.c which deals with mapping file-backed
   pages, stack growth, segmentation faults, swapping etc.)
 */
asmlinkage int do_fast_page_fault(unsigned long long ssr_md,
				  unsigned long long expevt,
				  unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	unsigned long long textaccess;
	unsigned long long protection_flags;
	unsigned long long index;
	unsigned long long expevt4;

	/* The next few lines implement a way of hashing EXPEVT into a
	 * small array index which can be used to lookup parameters
	 * specific to the type of TLBMISS being handled.
	 *
	 * Note:
	 *	ITLBMISS has EXPEVT==0xa40
	 *	RTLBMISS has EXPEVT==0x040
	 *	WTLBMISS has EXPEVT==0x060
	 */
	expevt4 = (expevt >> 4);
	/* TODO : xor ssr_md into this expression too. Then we can check
	 * that PRU is set when it needs to be. */
	index = expevt4 ^ (expevt4 >> 5);
	index &= 7;

	protection_flags = expevt_lookup_table.protection_flags[index];
	textaccess       = expevt_lookup_table.is_text_access[index];
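	/*
	 * Worked example of the hash: for ITLBMISS, expevt4 = 0xa40 >> 4 =
	 * 0xa4 and 0xa4 ^ (0xa4 >> 5) = 0xa4 ^ 0x05 = 0xa1, masked to
	 * index 1 (PRX, text).  RTLBMISS gives 0x04 ^ 0x00 = index 4 (PRR)
	 * and WTLBMISS gives 0x06 ^ 0x00 = index 6 (PRW), matching the
	 * expevt_lookup_table initialisers above.
	 */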
	/* SIM
	 * Note this is now called with interrupts still disabled
	 * This is to cope with being called for a missing IO port
	 * address with interrupts disabled. This should be fixed as
	 * soon as we have a better 'fast path' miss handler.
	 *
	 * Plus take care how you try and debug this stuff.
	 * For example, writing debug data to a port which you
	 * have just faulted on is not going to work.
	 */

	tsk = current;
	mm = tsk->mm;
	if ((address >= VMALLOC_START && address < VMALLOC_END) ||
	    (address >= IOBASE_VADDR  && address < IOBASE_END)) {
		if (ssr_md)
			/*
			 * Process-contexts can never have this address
			 * range mapped
			 */
			if (handle_vmalloc_fault(mm, protection_flags,
						 textaccess, address))
				return 1;
	} else if (!in_interrupt() && mm) {
		if (handle_tlbmiss(mm, protection_flags, textaccess, address))
			return 1;
	}

	return 0;
}