/*
 *  linux/arch/x86-64/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>              /* For unblank_screen() */
#include <linux/compiler.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>

#include <asm/system.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm-generic/sections.h>
/* Page fault error code bits */
#define PF_PROT         (1<<0)          /* 0: no page found       1: protection fault */
#define PF_WRITE        (1<<1)          /* 0: read access         1: write access */
#define PF_USER         (1<<2)          /* 0: kernel-mode access  1: user-mode access */
#define PF_RSVD         (1<<3)          /* 1: reserved bit set in a page table entry */
#define PF_INSTR        (1<<4)          /* 1: fault was an instruction fetch */
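
/*
 * Page faults are reported to interested subsystems (e.g. kprobes) through
 * an atomic notifier chain before normal handling; notify_page_fault()
 * below calls the chain with DIE_PAGE_FAULT.
 */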
static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);

/* Hook to register for page fault notifications */
int register_page_fault_notifier(struct notifier_block *nb)
{
        vmalloc_sync_all();     /* so notifier callbacks never vmalloc-fault recursively */
        return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
}
EXPORT_SYMBOL_GPL(register_page_fault_notifier);

int unregister_page_fault_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
static inline int notify_page_fault(struct pt_regs *regs, long err)
{
        struct die_args args = {
                .regs = regs, .str = "page fault", .err = err,
                .trapnr = 14, .signr = SIGSEGV
        };
        return atomic_notifier_call_chain(&notify_page_fault_chain,
                                          DIE_PAGE_FAULT, &args);
}
/* Sometimes the CPU reports invalid exceptions on prefetch.
   Check that here and ignore it.
   Opcode checker based on code by Richard Brunner. */
static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
                                unsigned long error_code)
{
        unsigned char *instr;
        int scan_more = 1;
        int prefetch = 0;
        unsigned char *max_instr;

        /* If it was an instruction fetch fault, ignore it */
        if (error_code & PF_INSTR)
                return 0;

        instr = (unsigned char __user *)convert_rip_to_linear(current, regs);
        max_instr = instr + 15;

        if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
                return 0;

        while (scan_more && instr < max_instr) {
                unsigned char opcode;
                unsigned char instr_hi;
                unsigned char instr_lo;

                if (probe_kernel_address(instr, opcode))
                        break;

                instr_hi = opcode & 0xf0;
                instr_lo = opcode & 0x0f;
                instr++;

                switch (instr_hi) {
                case 0x20:
                case 0x30:
                        /* Values 0x26,0x2E,0x36,0x3E are valid x86
                           prefixes.  In long mode, the CPU will signal
                           invalid opcode if some of these prefixes are
                           present so we will never get here anyway */
                        scan_more = ((instr_lo & 7) == 0x6);
                        break;
                case 0x40:
                        /* In AMD64 long mode, 0x40 to 0x4F are valid REX prefixes.
                           Need to figure out under what instruction mode the
                           instruction was issued ... */
                        /* Could check the LDT for lm, but for now it's good
                           enough to assume that long mode only uses well known
                           segments or kernel. */
                        scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
                        break;
                case 0x60:
                        /* 0x64 thru 0x67 are valid prefixes in all modes. */
                        scan_more = (instr_lo & 0xC) == 0x4;
                        break;
                case 0xF0:
                        /* 0xF0, 0xF2, and 0xF3 are valid prefixes in all modes. */
                        scan_more = !instr_lo || (instr_lo>>1) == 1;
                        break;
                case 0x00:
                        /* Prefetch instruction is 0x0F0D or 0x0F18 */
                        scan_more = 0;
                        if (probe_kernel_address(instr, opcode))
                                break;
                        prefetch = (instr_lo == 0xF) &&
                                (opcode == 0x0D || opcode == 0x18);
                        break;
                default:
                        scan_more = 0;
                        break;
                }
        }
        return prefetch;
}
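
/*
 * Check whether a page-table entry pointer can safely be dereferenced:
 * returns non-zero if reading it would itself fault.
 */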
static int bad_address(void *p)
{
        unsigned long dummy;
        return probe_kernel_address((unsigned long *)p, dummy);
}

void dump_pagetable(unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = (pgd_t *)read_cr3();
        pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
        pgd += pgd_index(address);
        if (bad_address(pgd)) goto bad;
        printk("PGD %lx ", pgd_val(*pgd));
        if (!pgd_present(*pgd)) goto ret;

        pud = pud_offset(pgd, address);
        if (bad_address(pud)) goto bad;
        printk("PUD %lx ", pud_val(*pud));
        if (!pud_present(*pud)) goto ret;

        pmd = pmd_offset(pud, address);
        if (bad_address(pmd)) goto bad;
        printk("PMD %lx ", pmd_val(*pmd));
        if (!pmd_present(*pmd)) goto ret;

        pte = pte_offset_kernel(pmd, address);
        if (bad_address(pte)) goto bad;
        printk("PTE %lx", pte_val(*pte));
ret:
        printk("\n");
        return;
bad:
        printk("BAD\n");
}
static const char errata93_warning[] =
KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
KERN_ERR "******* Please consider a BIOS update.\n"
KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
/* Workaround for K8 erratum #93 & buggy BIOS.
   BIOS SMM functions are required to use a specific workaround
   to avoid corruption of the 64-bit RIP register on C stepping K8.
   A lot of BIOSes that didn't get tested properly miss this.
   The OS sees this as a page fault with the upper 32 bits of RIP cleared.
   Try to work around it here.
   Note we only handle faults in kernel mode here. */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
        if (address != regs->rip)
                return 0;
        if ((address >> 32) != 0)
                return 0;
        address |= 0xffffffffUL << 32;
        if ((address >= (u64)_stext && address <= (u64)_etext) ||
            (address >= MODULES_VADDR && address <= MODULES_END)) {
                printk(errata93_warning);
                regs->rip = address;
                return 1;
        }
        return 0;
}
static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
                                 unsigned long error_code)
{
        unsigned long flags = oops_begin();
        struct task_struct *tsk;

        printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
               current->comm, address);
        dump_pagetable(address);
        tsk = current;
        tsk->thread.cr2 = address;
        tsk->thread.trap_no = 14;
        tsk->thread.error_code = error_code;
        __die("Bad pagetable", regs, error_code);
        oops_end(flags);
        do_exit(SIGKILL);
}
/*
 * Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static int vmalloc_fault(unsigned long address)
{
        pgd_t *pgd, *pgd_ref;
        pud_t *pud, *pud_ref;
        pmd_t *pmd, *pmd_ref;
        pte_t *pte, *pte_ref;

        /* Copy kernel mappings over when needed. This can also
           happen within a race in page table update. In the latter
           case just flush. */

        pgd = pgd_offset(current->mm ?: &init_mm, address);
        pgd_ref = pgd_offset_k(address);
        if (pgd_none(*pgd_ref))
                return -1;
        if (pgd_none(*pgd))
                set_pgd(pgd, *pgd_ref);
        else
                BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

        /* Below here mismatches are bugs because these lower tables
           are shared */

        pud = pud_offset(pgd, address);
        pud_ref = pud_offset(pgd_ref, address);
        if (pud_none(*pud_ref))
                return -1;
        if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
                BUG();
        pmd = pmd_offset(pud, address);
        pmd_ref = pmd_offset(pud_ref, address);
        if (pmd_none(*pmd_ref))
                return -1;
        if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
                BUG();
        pte_ref = pte_offset_kernel(pmd_ref, address);
        if (!pte_present(*pte_ref))
                return -1;
        pte = pte_offset_kernel(pmd, address);

        /* Don't use pte_page here, because the mappings can point
           outside mem_map, and the NUMA hash lookup cannot handle
           that. */
        if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
                BUG();
        return 0;
}
static int page_fault_trace;    /* set via the "pagefaulttrace" boot option below */
int show_unhandled_signals = 1;
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
                                        unsigned long error_code)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long address;
        const struct exception_table_entry *fixup;
        int write, fault;
        unsigned long flags;
        siginfo_t info;

        tsk = current;
        mm = tsk->mm;
        prefetchw(&mm->mmap_sem);
        /* get the address */
        address = read_cr2();

        info.si_code = SEGV_MAPERR;
        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         *
         * This verifies that the fault happens in kernel space
         * (error_code & 4) == 0, and that the fault was not a
         * protection error (error_code & 9) == 0.
         */
        if (unlikely(address >= TASK_SIZE64)) {
                /*
                 * Don't check for the module range here: its PML4
                 * is always initialized because it's shared with the main
                 * kernel text. Only vmalloc may need PML4 syncups.
                 */
                if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
                    ((address >= VMALLOC_START && address < VMALLOC_END))) {
                        if (vmalloc_fault(address) >= 0)
                                return;
                }
                if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
                        return;
                /*
                 * Don't take the mm semaphore here. If we fixup a prefetch
                 * fault we could otherwise deadlock.
                 */
                goto bad_area_nosemaphore;
        }
        if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
                return;

        if (likely(regs->eflags & X86_EFLAGS_IF))
                local_irq_enable();

        if (unlikely(page_fault_trace))
                printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
                       regs->rip, regs->rsp, regs->cs, regs->ss, address, error_code);

        /* A set reserved bit in a present page table entry means the
           page tables are corrupted. */
        if (unlikely(error_code & PF_RSVD))
                pgtable_bad(address, regs, error_code);
        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (unlikely(in_atomic() || !mm))
                goto bad_area_nosemaphore;

        /*
         * User-mode registers count as a user access even for any
         * potential system fault or CPU buglet.
         */
        if (user_mode_vm(regs))
                error_code |= PF_USER;

 again:
        /* When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
         * kernel and should generate an OOPS.  Unfortunately, in the case of an
         * erroneous fault occurring in a code path which already holds mmap_sem
         * we will deadlock attempting to validate the fault against the
         * address space.  Luckily the kernel only validly references user
         * space from well defined areas of code, which are listed in the
         * exceptions table.
         *
         * As the vast majority of faults will be valid we will only perform
         * the source reference check when there is a possibility of a deadlock.
         * Attempt to lock the address space, if we cannot we then validate the
         * source.  If this is invalid we can skip the address space check,
         * thus avoiding the deadlock.
         */
        if (!down_read_trylock(&mm->mmap_sem)) {
                if ((error_code & PF_USER) == 0 &&
                    !search_exception_tables(regs->rip))
                        goto bad_area_nosemaphore;
                down_read(&mm->mmap_sem);
        }
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (likely(vma->vm_start <= address))
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (error_code & PF_USER) {
                /* Allow userspace just enough access below the stack pointer
                 * to let the 'enter' instruction work.
                 */
                if (address + 65536 + 32 * sizeof(unsigned long) < regs->rsp)
                        goto bad_area;
        }
        if (expand_stack(vma, address))
                goto bad_area;
        /* Ok, we have a good vm_area for this memory access, so we can handle it. */
good_area:
        info.si_code = SEGV_ACCERR;
        write = 0;
        switch (error_code & (PF_PROT|PF_WRITE)) {
        default:                /* 3: write, present */
                /* fall through */
        case PF_WRITE:          /* write, not present */
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
                write++;
                break;
        case PF_PROT:           /* read, present */
                goto bad_area;
        case 0:                 /* read, not present */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                        goto bad_area;
        }
        /*
         * If for any reason at all we couldn't handle the fault, make sure
         * we exit gracefully rather than endlessly redo the fault.
         */
        fault = handle_mm_fault(mm, vma, address, write);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }
        if (fault & VM_FAULT_MAJOR)
                tsk->maj_flt++;
        else
                tsk->min_flt++;
        up_read(&mm->mmap_sem);
        return;
/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if (error_code & PF_USER) {
                /*
                 * It's possible to have interrupts off here.
                 */
                local_irq_enable();

                if (is_prefetch(regs, address, error_code))
                        return;

                /* Work around K8 erratum #100: K8 in compat mode
                   occasionally jumps to illegal addresses >4GB.  We
                   catch this here in the page fault handler because
                   these addresses are not reachable. Just detect this
                   case and return.  Any code segment in LDT is
                   compatibility mode. */
                if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
                    (address >> 32))
                        return;

                if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
                    printk_ratelimit()) {
                        printk(
                       "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
                                tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
                                tsk->comm, tsk->pid, address, regs->rip,
                                regs->rsp, error_code);
                }

                tsk->thread.cr2 = address;
                /* Kernel addresses are always protection faults */
                tsk->thread.error_code = error_code | (address >= TASK_SIZE);
                tsk->thread.trap_no = 14;
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                /* info.si_code has been set above */
                info.si_addr = (void __user *)address;
                force_sig_info(SIGSEGV, &info, tsk);
                return;
        }
no_context:
        /* Are we prepared to handle this kernel fault? */
        fixup = search_exception_tables(regs->rip);
        if (fixup) {
                regs->rip = fixup->fixup;
                return;
        }

        /* Hall of shame of CPU/BIOS bugs. */
        if (is_prefetch(regs, address, error_code))
                return;
        if (is_errata93(regs, address))
                return;
        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        flags = oops_begin();

        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
        else
                printk(KERN_ALERT "Unable to handle kernel paging request");
        printk(" at %016lx RIP: \n" KERN_ALERT, address);
        printk_address(regs->rip);
        dump_pagetable(address);
        tsk->thread.cr2 = address;
        tsk->thread.trap_no = 14;
        tsk->thread.error_code = error_code;
        __die("Oops", regs, error_code);
        /* Executive summary in case the body of the oops scrolled away */
        printk(KERN_EMERG "CR2: %016lx\n", address);
        oops_end(flags);
/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (is_init(current)) {
                yield();
                goto again;
        }
        printk("VM: killing process %s\n", tsk->comm);
        if (error_code & PF_USER)
                do_group_exit(SIGKILL);
        goto no_context;
do_sigbus:
        up_read(&mm->mmap_sem);

        /* Kernel mode? Handle exceptions or die */
        if (!(error_code & PF_USER))
                goto no_context;

        tsk->thread.cr2 = address;
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = 14;
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (void __user *)address;
        force_sig_info(SIGBUS, &info, tsk);
        return;
}
DEFINE_SPINLOCK(pgd_lock);
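
/*
 * pgd_lock protects pgd_list, the list of page-table root pages maintained
 * as process page tables are allocated and freed.  vmalloc_sync_all() walks
 * this list and copies any missing kernel-space PGD entries from the
 * init_mm reference page table into each process's PGD.
 */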
void vmalloc_sync_all(void)
{
        /* Note that races in the updates of insync and start aren't
           problematic:
           insync can only get set bits added, and updates to start are only
           improving performance (without affecting correctness if undone). */
        static DECLARE_BITMAP(insync, PTRS_PER_PGD);
        static unsigned long start = VMALLOC_START & PGDIR_MASK;
        unsigned long address;

        for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
                if (!test_bit(pgd_index(address), insync)) {
                        const pgd_t *pgd_ref = pgd_offset_k(address);
                        struct page *page;

                        if (pgd_none(*pgd_ref))
                                continue;
                        spin_lock(&pgd_lock);
                        list_for_each_entry(page, &pgd_list, lru) {
                                pgd_t *pgd;
                                pgd = (pgd_t *)page_address(page) + pgd_index(address);
                                if (pgd_none(*pgd))
                                        set_pgd(pgd, *pgd_ref);
                                else
                                        BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
                        }
                        spin_unlock(&pgd_lock);
                        set_bit(pgd_index(address), insync);
                }
                if (address == start)
                        start = address + PGDIR_SIZE;
        }
        /* Check that there is no need to do the same for the modules area. */
        BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
        BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
                       (__START_KERNEL & PGDIR_MASK)));
}
static int __init enable_pagefaulttrace(char *str)
{
        page_fault_trace = 1;
        return 1;
}
__setup("pagefaulttrace", enable_pagefaulttrace);