/*
 *  linux/arch/i386/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/bootmem.h>		/* for max_low_pfn */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <asm/system.h>
#include <asm/segment.h>

extern void die(const char *, struct pt_regs *, long);
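/*
 * Page-fault notifier chain: lets code such as kprobes have a look at every
 * fault (via notify_page_fault() below) before the normal handling runs.
 */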
static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);

int register_page_fault_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
}
EXPORT_SYMBOL_GPL(register_page_fault_notifier);

int unregister_page_fault_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
static inline int notify_page_fault(struct pt_regs *regs, long err)
{
	struct die_args args = { .regs = regs, .str = "page fault", .err = err,
				 .trapnr = 14, .signr = SIGSEGV };
	return atomic_notifier_call_chain(&notify_page_fault_chain,
					  DIE_PAGE_FAULT, &args);
}
/*
 * Return EIP plus the CS segment base.  The segment limit is also
 * adjusted, clamped to the kernel/user address space (whichever is
 * appropriate), and returned in *eip_limit.
 *
 * The segment is checked, because it might have been changed by another
 * task between the original faulting instruction and here.
 *
 * If CS is no longer a valid code segment, or if EIP is beyond the
 * limit, or if it is a kernel address when CS is not a kernel segment,
 * then the returned value will be greater than *eip_limit.
 *
 * This is slow, but is very rarely executed.
 */
static inline unsigned long get_segment_eip(struct pt_regs *regs,
					    unsigned long *eip_limit)
{
	unsigned long eip = regs->eip;
	unsigned seg = regs->xcs & 0xffff;
	u32 seg_ar, seg_limit, base, *desc;

	/* Unlikely, but must come before segment checks. */
	if (unlikely(regs->eflags & VM_MASK)) {
		base = seg << 4;
		*eip_limit = base + 0xffff;
		return base + (eip & 0xffff);
	}
	/* The standard kernel/user address space limit. */
	*eip_limit = user_mode(regs) ? USER_DS.seg : KERNEL_DS.seg;

	/* By far the most common cases. */
	if (likely(SEGMENT_IS_FLAT_CODE(seg)))
		return eip;
	/* Check the segment exists, is within the current LDT/GDT size,
	   that kernel/user (ring 0..3) has the appropriate privilege,
	   that it's a code segment, and get the limit. */
	__asm__ ("larl %3,%0; lsll %3,%1"
		 : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
103 if ((~seg_ar & 0x9800) || eip > seg_limit) {
105 return 1; /* So that returned eip > *eip_limit. */
	/* Get the GDT/LDT descriptor base.
	   When you look for races in this code remember that
	   LDT and other horrors are only used in user space. */
	if (seg & (1 << 2)) {
		/* Must lock the LDT while reading it. */
		down(&current->mm->context.sem);
		desc = current->mm->context.ldt;
		desc = (void *)desc + (seg & ~7);
	} else {
		/* Must disable preemption while reading the GDT. */
		desc = (u32 *)get_cpu_gdt_table(get_cpu());
		desc = (void *)desc + (seg & ~7);
	}

	/* Decode the code segment base from the descriptor */
	base = get_desc_base((unsigned long *)desc);

	if (seg & (1 << 2))
		up(&current->mm->context.sem);
	else
		put_cpu();
	/* Adjust EIP and segment limit, and clamp at the kernel limit.
	   It's legitimate for segments to wrap at 0xffffffff. */
	seg_limit += base;
	if (seg_limit < *eip_limit && seg_limit >= base)
		*eip_limit = seg_limit;
	return eip + base;
}
/*
 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 * Check that here and ignore it.
 */
static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
{
	unsigned long limit;
	unsigned char *instr = (unsigned char *)get_segment_eip(regs, &limit);
	int scan_more = 1;
	int prefetch = 0;
	int i;

	for (i = 0; scan_more && i < 15; i++) {
		unsigned char opcode;
		unsigned char instr_hi;
		unsigned char instr_lo;

		if (instr > (unsigned char *)limit)
			break;
		if (probe_kernel_address(instr, opcode))
			break;

		instr_hi = opcode & 0xf0;
		instr_lo = opcode & 0x0f;

		/* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
		scan_more = ((instr_lo & 7) == 0x6);

		/* 0x64 thru 0x67 are valid prefixes in all modes. */
		scan_more = (instr_lo & 0xC) == 0x4;

		/* 0xF0, 0xF2, and 0xF3 are valid prefixes */
		scan_more = !instr_lo || (instr_lo>>1) == 1;

		/* Prefetch instruction is 0x0F0D or 0x0F18 */
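		/*
		 * 0x0F 0x0D is the AMD 3DNow! PREFETCH/PREFETCHW opcode and
		 * 0x0F 0x18 is the SSE PREFETCHh family; these are the
		 * instructions whose spurious faults this function tries to
		 * recognise.
		 */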
		if (instr > (unsigned char *)limit)
			break;
		if (probe_kernel_address(instr, opcode))
			break;
		prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
			      unsigned long error_code)
{
	if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		     boot_cpu_data.x86 >= 6)) {
		/* Catch an obscure case of prefetch inside an NX page. */
		if (nx_enabled && (error_code & 16))
			return 0;
		return __is_prefetch(regs, addr);
	}
	return 0;
}
static noinline void force_sig_info_fault(int si_signo, int si_code,
					  unsigned long address, struct task_struct *tsk)
{
	struct siginfo info;

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	force_sig_info(si_signo, &info, tsk);
}

fastcall void do_invalid_op(struct pt_regs *, unsigned long);
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd_k = init_mm.pgd + index;
	if (!pgd_present(*pgd_k))
		return NULL;
	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;
	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;
	if (!pmd_present(*pmd)) {
		set_pmd(pmd, *pmd_k);
		arch_flush_lazy_mmu_mode();
	} else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
	return pmd_k;
}
/*
 * Handle a fault on the vmalloc or module mapping area
 *
 * This assumes no large pages in there.
 */
static inline int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
}

int show_unhandled_signals = 1;
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code:
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *	bit 2 == 0 means kernel, 1 means user-mode
 *	bit 3 == 1 means use of reserved bit detected
 *	bit 4 == 1 means fault was an instruction fetch
 */
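/*
 * For example, error_code == 6 (binary 110) is a user-mode write to a
 * not-present page, while error_code == 5 (binary 101) is a user-mode read
 * that hit a protection fault.
 */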
fastcall void __kprobes do_page_fault(struct pt_regs *regs,
				      unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long address;
	int write, si_code;
	int fault;

	/* get the address */
	address = read_cr2();

	tsk = current;

	si_code = SEGV_MAPERR;
	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(address >= TASK_SIZE)) {
		if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
			return;
		if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		goto bad_area_nosemaphore;
	}
	if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
		return;

	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
	   fault has been handled. */
	if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
		local_irq_enable();

	mm = tsk->mm;

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;
	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & 4) == 0 &&
		    !search_exception_tables(regs->eip))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}
	vma = find_vma(mm, address);
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (error_code & 4) {
		/*
		 * Accessing the stack below %esp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535,$31" pushes
		 * 32 pointers and then decrements %esp by 65535.)
		 */
		if (address + 65536 + 32 * sizeof(unsigned long) < regs->esp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	write = 0;
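	/*
	 * Check the access type encoded in error_code against the vma's
	 * protection bits; 'write' is what gets passed down to
	 * handle_mm_fault() below.
	 */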
408 switch (error_code & 3) {
409 default: /* 3: write, present */
411 case 2: /* write, not present */
412 if (!(vma->vm_flags & VM_WRITE))
416 case 1: /* read, present */
418 case 0: /* read, not present */
419 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	/*
	 * Did it hit the DOS screen memory VA from vm86 mode?
	 */
	if (regs->eflags & VM_MASK) {
		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
		if (bit < 32)
			tsk->thread.screen_bitmap |= 1 << bit;
	}
	up_read(&mm->mmap_sem);
	return;
/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & 4) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space.
		 */
		if (is_prefetch(regs, address, error_code))
			return;

		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
		    printk_ratelimit()) {
			printk("%s%s[%d]: segfault at %08lx eip %08lx "
			    "esp %08lx error %lx\n",
			    tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
			    tsk->comm, tsk->pid, address, regs->eip,
			    regs->esp, error_code);
		}

		tsk->thread.cr2 = address;
		/* Kernel addresses are always protection faults */
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
		return;
	}
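	/*
	 * The F00F workaround maps the IDT through a read-only page, so the
	 * erratum surfaces here as a page fault whose address falls inside
	 * the IDT.  Descriptors are 8 bytes, so the shift below recovers the
	 * exception vector; vector 6 (invalid opcode) is handed over to
	 * do_invalid_op().
	 */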
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * Pentium F0 0F C7 C8 bug workaround.
	 */
	if (boot_cpu_data.f00f_bug) {
		nr = (address - idt_descr.address) >> 3;
		if (nr == 6)
			do_invalid_op(regs, 0);
	}
#endif
no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;
	/*
	 * Valid to do another page fault here, because if this fault
	 * had been triggered by is_prefetch fixup_exception would have
	 * handled it.
	 */
	if (is_prefetch(regs, address, error_code))
		return;
/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	if (oops_may_print()) {
		__typeof__(pte_val(__pte(0))) page;

#ifdef CONFIG_X86_PAE
		if (error_code & 16) {
			pte_t *pte = lookup_address(address);

			if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
				printk(KERN_CRIT "kernel tried to execute "
					"NX-protected page - exploit attempt? "
					"(uid: %d)\n", current->uid);
		}
#endif
		if (address < PAGE_SIZE)
			printk(KERN_ALERT "BUG: unable to handle kernel NULL "
					"pointer dereference");
		else
			printk(KERN_ALERT "BUG: unable to handle kernel paging"
					" request");
		printk(" at virtual address %08lx\n", address);
		printk(KERN_ALERT "printing eip:\n");
		printk("%08lx\n", regs->eip);
		page = read_cr3();
		page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
#ifdef CONFIG_X86_PAE
		printk(KERN_ALERT "*pdpt = %016Lx\n", page);
		if ((page >> PAGE_SHIFT) < max_low_pfn
		    && page & _PAGE_PRESENT) {
			page &= PAGE_MASK;
			page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
			                                         & (PTRS_PER_PMD - 1)];
			printk(KERN_ALERT "*pde = %016Lx\n", page);
			page &= ~_PAGE_NX;
		}
#else
		printk(KERN_ALERT "*pde = %08lx\n", page);
#endif

		/*
		 * We must not directly access the pte in the highpte
		 * case if the page table is located in highmem.
		 * And let's rather not kmap-atomic the pte, just in case
		 * it's allocated already.
		 */
		if ((page >> PAGE_SHIFT) < max_low_pfn
		    && (page & _PAGE_PRESENT)) {
			page &= PAGE_MASK;
			page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
			                                         & (PTRS_PER_PTE - 1)];
			printk(KERN_ALERT "*pte = %0*Lx\n", sizeof(page)*2, (u64)page);
		}
	}
	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	die("Oops", regs, error_code);
/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	down_read(&mm->mmap_sem);
	printk("VM: killing process %s\n", tsk->comm);
605 up_read(&mm->mmap_sem);
607 /* Kernel mode? Handle exceptions or die */
608 if (!(error_code & 4))
611 /* User space => ok to do another page fault */
612 if (is_prefetch(regs, address, error_code))
615 tsk->thread.cr2 = address;
616 tsk->thread.error_code = error_code;
617 tsk->thread.trap_no = 14;
618 force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
void vmalloc_sync_all(void)
{
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * at all).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = TASK_SIZE;
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
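	/*
	 * TASK_SIZE must be PGDIR-aligned (the BUILD_BUG_ON above enforces
	 * this) so the loop below can walk the kernel part of the address
	 * space one pgd entry, i.e. PGDIR_SIZE bytes, per iteration.
	 */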
	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			unsigned long flags;
			struct page *page;

			spin_lock_irqsave(&pgd_lock, flags);
			for (page = pgd_list; page; page =
			     (struct page *)page->index)
				if (!vmalloc_sync_one(page_address(page),
				                      address)) {
					BUG_ON(page != pgd_list);
					break;
				}
			spin_unlock_irqrestore(&pgd_lock, flags);
			if (!page)
				set_bit(pgd_index(address), insync);
		}
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}
}