/*
 * arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
 *
 * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
 */
#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/percpu.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/asi.h>
#include <asm/lsu.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
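
/* Give kprobes the first chance at a kernel-mode fault, e.g. one taken
 * while single-stepping a probed instruction; returns nonzero when the
 * registered kprobe fault handler consumed the event.
 */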
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs)
{
        int ret = 0;

        /* kprobe_running() needs smp_processor_id() */
        if (!user_mode(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, 0))
                        ret = 1;
                preempt_enable();
        }
        return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs)
{
        return 0;
}
#endif

static void __kprobes unhandled_fault(unsigned long address,
                                      struct task_struct *tsk,
                                      struct pt_regs *regs)
{
        if ((unsigned long) address < PAGE_SIZE) {
                printk(KERN_ALERT "Unable to handle kernel NULL "
                       "pointer dereference\n");
        } else {
                printk(KERN_ALERT "Unable to handle kernel paging request "
                       "at virtual address %016lx\n", (unsigned long)address);
        }
        printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
               (tsk->mm ?
                CTX_HWBITS(tsk->mm->context) :
                CTX_HWBITS(tsk->active_mm->context)));
        printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n",
               (tsk->mm ? (unsigned long) tsk->mm->pgd :
                (unsigned long) tsk->active_mm->pgd));
        die_if_kernel("Oops", regs);
}

static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
{
        printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
               regs->tpc);
        printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
        printk(KERN_CRIT "OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
        printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
        dump_stack();
        unhandled_fault(regs->tpc, current, regs);
}

/* We now make sure that mmap_sem is held in all paths that call
 * this.  Additionally, to prevent kswapd from ripping ptes from
 * under us, we disable interrupts around the time that we look at
 * the pte; kswapd will then have to wait for its SMP IPI response
 * from us before it can rip anything out.  vmtruncate likewise.
 * This saves us having to take the pte lock.
 */
static unsigned int get_user_insn(unsigned long tpc)
{
        pgd_t *pgdp = pgd_offset(current->mm, tpc);
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep, pte;
        unsigned long pa;
        u32 insn = 0;
        unsigned long pstate;

        if (pgd_none(*pgdp))
                goto outret;
        pudp = pud_offset(pgdp, tpc);
        if (pud_none(*pudp))
                goto outret;
        pmdp = pmd_offset(pudp, tpc);
        if (pmd_none(*pmdp))
                goto outret;

        /* This disables preemption for us as well. */
        __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
        __asm__ __volatile__("wrpr %0, %1, %%pstate"
                             : : "r" (pstate), "i" (PSTATE_IE));
        ptep = pte_offset_map(pmdp, tpc);
        pte = *ptep;
        if (!pte_present(pte))
                goto out;

        pa  = (pte_pfn(pte) << PAGE_SHIFT);
        pa += (tpc & ~PAGE_MASK);

        /* Use phys bypass so we don't pollute dtlb/dcache. */
        __asm__ __volatile__("lduwa [%1] %2, %0"
                             : "=r" (insn)
                             : "r" (pa), "i" (ASI_PHYS_USE_EC));

out:
        pte_unmap(ptep);
        __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
outret:
        return insn;
}
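
/* Defined in arch/sparc64/kernel/unaligned.c: decodes a load/store
 * instruction and reconstructs the virtual address it referenced.
 */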
extern unsigned long compute_effective_address(struct pt_regs *, unsigned int, unsigned int);

static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
                             unsigned int insn, int fault_code)
{
        siginfo_t info;

        info.si_code = code;
        info.si_signo = sig;
        info.si_errno = 0;
        if (fault_code & FAULT_CODE_ITLB)
                info.si_addr = (void __user *) regs->tpc;
        else
                info.si_addr = (void __user *)
                        compute_effective_address(regs, insn, 0);
        info.si_trapno = 0;
        force_sig_info(sig, &info, current);
}
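
/* Both live in arch/sparc64/kernel/unaligned.c and emulate the faulting
 * instruction: handle_ldf_stq() covers floating-point loads/stores and
 * handle_ld_nf() covers integer non-faulting loads.
 */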
extern int handle_ldf_stq(u32, struct pt_regs *);
extern int handle_ld_nf(u32, struct pt_regs *);
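
/* Return the faulting instruction, fetching it from regs->tpc unless the
 * caller already has it.  A zero or misaligned TPC yields 0; kernel text
 * can be read directly, user text goes through get_user_insn().
 */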
static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
{
        if (!insn) {
                if (!regs->tpc || (regs->tpc & 0x3))
                        return 0;
                if (regs->tstate & TSTATE_PRIV) {
                        insn = *(unsigned int *) regs->tpc;
                } else {
                        insn = get_user_insn(regs->tpc);
                }
        }
        return insn;
}

static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
                            unsigned int insn, unsigned long address)
{
        unsigned char asi = ASI_P;

        if ((!insn) && (regs->tstate & TSTATE_PRIV))
                goto cannot_handle;

        /* If the user insn could not be read (thus insn is zero), that
         * is fine.  We will just gun down the process with a signal
         * in that case.
         */
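
        /* Format-3 loads and stores have bits 31:30 set, and bit 23
         * (op3 bit 4) selects the alternate-space forms, so the mask
         * below matches the lda/sta family.  Bit 13 is the i-bit: when
         * it is set the ASI comes from the %asi register (saved in
         * TSTATE), otherwise from the immediate asi field in bits 12:5.
         * (asi & 0xf2) == 0x82 then picks out the no-fault ASIs
         * (ASI_PNF/ASI_SNF and their little-endian forms), and bit 24
         * (op3 bit 5) separates the FP cases, handled by
         * handle_ldf_stq(), from the integer ones, handled by
         * handle_ld_nf().
         */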
        if (!(fault_code & (FAULT_CODE_WRITE|FAULT_CODE_ITLB)) &&
            (insn & 0xc0800000) == 0xc0800000) {
                if (insn & 0x2000)
                        asi = (regs->tstate >> 24);
                else
                        asi = (insn >> 5);
                if ((asi & 0xf2) == 0x82) {
                        if (insn & 0x1000000) {
                                handle_ldf_stq(insn, regs);
                        } else {
                                /* This was a non-faulting load. Just clear the
                                 * destination register(s) and continue with the next
                                 * instruction. -jj
                                 */
                                handle_ld_nf(insn, regs);
                        }
                        return;
                }
        }

        /* Is this in ex_table? */
        if (regs->tstate & TSTATE_PRIV) {
                const struct exception_table_entry *entry;

                entry = search_exception_tables(regs->tpc);
                if (entry) {
                        regs->tpc = entry->fixup;
                        regs->tnpc = regs->tpc + 4;
                        return;
                }
        } else {
                /* The si_code was set to make clear whether
                 * this was a SEGV_MAPERR or SEGV_ACCERR fault.
                 */
                do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code);
                return;
        }

cannot_handle:
        unhandled_fault(address, current, regs);
}
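
/* Entry point for ITLB and DTLB misses and protection faults that the
 * low-level trap handlers could not service; fault_code was stashed by
 * the trap entry code and says which TLB missed and whether the access
 * was a write.
 */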
asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned int insn = 0;
        int si_code, fault_code, fault;
        unsigned long address, mm_rss;

        fault_code = get_thread_fault_code();

        if (notify_page_fault(regs))
                return;

        si_code = SEGV_MAPERR;
        address = current_thread_info()->fault_address;

        if ((fault_code & FAULT_CODE_ITLB) &&
            (fault_code & FAULT_CODE_DTLB))
                BUG();
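
        /* A 32-bit task only runs in the low 4GB of the address space;
         * strip any stale upper bits from the user TPC and the fault
         * address.
         */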
        if (test_thread_flag(TIF_32BIT)) {
                if (!(regs->tstate & TSTATE_PRIV))
                        regs->tpc &= 0xffffffff;
                address &= 0xffffffff;
        }

        if (regs->tstate & TSTATE_PRIV) {
                unsigned long eaddr, tpc = regs->tpc;

                /* Sanity check the PC. */
                if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) ||
                    (tpc >= MODULES_VADDR && tpc < MODULES_END)) {
                        /* Valid, no problems... */
                } else {
                        bad_kernel_pc(regs, address);
                        return;
                }
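
                /* Cross-check the hardware-reported fault address
                 * against the effective address we get by decoding the
                 * instruction at TPC; a mismatch means the fault state
                 * is inconsistent, so treat it as an unhandled kernel
                 * fault.
                 */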
                insn = get_fault_insn(regs, insn);
                eaddr = compute_effective_address(regs, insn, 0);
                if (WARN_ON_ONCE((eaddr & PAGE_MASK) != (address & PAGE_MASK))) {
                        printk(KERN_ERR "FAULT: Mismatch kernel fault "
                               "address: addr[%lx] eaddr[%lx] TPC[%lx]\n",
                               address, eaddr, tpc);
                        goto handle_kernel_fault;
                }
        }

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto intr_or_no_mm;
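
        /* If mmap_sem is contended and this is a kernel-mode fault with
         * no exception-table fixup, don't risk sleeping (or deadlocking
         * against our own mmap_sem); report the kernel fault right away.
         * Otherwise it is safe to block until the semaphore is free.
         */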
        if (!down_read_trylock(&mm->mmap_sem)) {
                if ((regs->tstate & TSTATE_PRIV) &&
                    !search_exception_tables(regs->tpc)) {
                        insn = get_fault_insn(regs, insn);
                        goto handle_kernel_fault;
                }
                down_read(&mm->mmap_sem);
        }

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;

        /* Pure DTLB misses do not tell us whether the fault causing
         * load/store/atomic was a write or not, it only says that there
         * was no match.  So in such a case we (carefully) read the
         * instruction to try and figure this out.  It's an optimization
         * so it's ok if we can't do this.
         *
         * Special hack, window spill/fill knows the exact fault type.
         */
        if (((fault_code &
              (FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&
            (vma->vm_flags & VM_WRITE) != 0) {
                insn = get_fault_insn(regs, 0);
                if (!insn)
                        goto continue_fault;
                /* All loads, stores and atomics have bits 30 and 31 both set
                 * in the instruction.  Bit 21 is set in all stores, but we
                 * have to avoid prefetches which also have bit 21 set.
                 */
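                /* (The second test below excludes PREFETCH, op3 0x2d,
                 * and PREFETCHA, op3 0x3d, which would otherwise match.)
                 */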
                if ((insn & 0xc0200000) == 0xc0200000 &&
                    (insn & 0x01780000) != 0x01680000) {
                        /* Don't bother updating thread struct value,
                         * because update_mmu_cache only cares which tlb
                         * the access came from.
                         */
                        fault_code |= FAULT_CODE_WRITE;
                }
        }
continue_fault:

        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (!(fault_code & FAULT_CODE_WRITE)) {
                /* Non-faulting loads shouldn't expand stack. */
                insn = get_fault_insn(regs, insn);
                if ((insn & 0xc0800000) == 0xc0800000) {
                        unsigned char asi;

                        if (insn & 0x2000)
                                asi = (regs->tstate >> 24);
                        else
                                asi = (insn >> 5);
                        if ((asi & 0xf2) == 0x82)
                                goto bad_area;
                }
        }
        if (expand_stack(vma, address))
                goto bad_area;
        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
good_area:
        si_code = SEGV_ACCERR;

        /* If we took an ITLB miss on a non-executable page, catch
         * that here.
         */
        if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
                BUG_ON(address != regs->tpc);
                BUG_ON(regs->tstate & TSTATE_PRIV);
                goto bad_area;
        }

        if (fault_code & FAULT_CODE_WRITE) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;

                /* Spitfire has an icache which does not snoop
                 * processor stores.  Later processors do...
                 */
                if (tlb_type == spitfire &&
                    (vma->vm_flags & VM_EXEC) != 0 &&
                    vma->vm_file != NULL)
                        set_thread_fault_code(fault_code |
                                              FAULT_CODE_BLKCOMMIT);
        } else {
                /* Allow reads even for write-only mappings */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }

        fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE));
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }
        if (fault & VM_FAULT_MAJOR)
                current->maj_flt++;
        else
                current->min_flt++;

        up_read(&mm->mmap_sem);
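
        /* Grow the TSB (the software TLB-miss translation table) once
         * the resident set crosses the current table's RSS limit, so
         * that the miss rate stays bounded; huge pages live in their
         * own TSB and are accounted separately.
         */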
        mm_rss = get_mm_rss(mm);
#ifdef CONFIG_HUGETLB_PAGE
        mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
#endif
        if (unlikely(mm_rss >
                     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
                tsb_grow(mm, MM_TSB_BASE, mm_rss);
#ifdef CONFIG_HUGETLB_PAGE
        mm_rss = mm->context.huge_pte_count;
        if (unlikely(mm_rss >
                     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))
                tsb_grow(mm, MM_TSB_HUGE, mm_rss);
#endif
        return;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
bad_area:
        insn = get_fault_insn(regs, insn);
        up_read(&mm->mmap_sem);

handle_kernel_fault:
        do_kernel_fault(regs, si_code, fault_code, insn, address);
        return;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        insn = get_fault_insn(regs, insn);
        up_read(&mm->mmap_sem);
        printk("VM: killing process %s\n", current->comm);
        if (!(regs->tstate & TSTATE_PRIV))
                do_group_exit(SIGKILL);
        goto handle_kernel_fault;

intr_or_no_mm:
        insn = get_fault_insn(regs, 0);
        goto handle_kernel_fault;

do_sigbus:
        insn = get_fault_insn(regs, insn);
        up_read(&mm->mmap_sem);

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code);

        /* Kernel mode? Handle exceptions or die */
        if (regs->tstate & TSTATE_PRIV)
                goto handle_kernel_fault;
}