/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/kdebug.h>

extern void die (char *, struct pt_regs *, long);

/*
 * This routine is analogous to expand_stack() but instead grows the
 * register backing store (which grows towards higher addresses).
 * Since the register backing store is accessed sequentially, we
 * disallow growing the RBS by more than a page at a time.  Note that
 * the VM_GROWSUP flag can be set on any VM area but that's fine
 * because the total process size is still limited by RLIMIT_STACK and
 * RLIMIT_AS.
 */
static inline long
expand_backing_store (struct vm_area_struct *vma, unsigned long address)
{
	unsigned long grow;

	grow = PAGE_SIZE >> PAGE_SHIFT;
	if (address - vma->vm_start > current->signal->rlim[RLIMIT_STACK].rlim_cur
	    || (((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->signal->rlim[RLIMIT_AS].rlim_cur))
		return -ENOMEM;
	vma->vm_end += PAGE_SIZE;
	vma->vm_mm->total_vm += grow;
	if (vma->vm_flags & VM_LOCKED)
		vma->vm_mm->locked_vm += grow;
	__vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, grow);
	return 0;
}
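
/*
 * Worked example (illustrative, not in the original source): grow is
 * PAGE_SIZE >> PAGE_SHIFT == 1, i.e. the accounting above is done in units
 * of pages, and the RLIMIT_AS comparison shifts the page count back to
 * bytes with (total_vm + grow) << PAGE_SHIFT.  Because the backing store is
 * written sequentially, each fault extends vm_end by exactly one page.
 */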

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}
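
/*
 * Note (illustrative, not in the original source): REGION_NUMBER() is just
 * the top three bits of the virtual address, so region 5 covers
 * 0xa000000000000000-0xbfffffffffffffff.  A kernel fault on an address in
 * that range which already has a valid translation makes this helper report
 * the page as present, and the fault handler below returns quietly instead
 * of oopsing.
 */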

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	struct siginfo si;
	unsigned long mask;

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	/*
	 * If the fault is in region 5 and we are in the kernel, we may already
	 * hold the mmap_sem (the pfn_valid macro is called during mmap).  There
	 * is no vma for region 5 addresses anyway, so skip taking the semaphore
	 * and go directly to the exception handling code.
	 */
	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
		goto bad_area_no_up;
#endif

	/*
	 * This is to handle kprobes on user-space access instructions.
	 */
	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT,
		       SIGSEGV) == NOTIFY_STOP)
		return;

	down_read(&mm->mmap_sem);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma)
		goto bad_area;

	/* find_vma_prev() returns a vma such that address < vma->vm_end (or NULL) */
	if (address < vma->vm_start)
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif

	mask = (  (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT)
		| (((isr >> IA64_ISR_R_BIT) & 1UL) << VM_READ_BIT));

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;
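
	/*
	 * Worked example (illustrative, not in the original source): a
	 * faulting store has ISR.w set and ISR.r/ISR.x clear, so
	 *
	 *	mask = (0 << VM_EXEC_BIT) | (1 << VM_WRITE_BIT) | (0 << VM_READ_BIT)
	 *	     = VM_WRITE
	 *
	 * and a vma mapped PROT_READ only (VM_READ set, VM_WRITE clear) fails
	 * the check above, taking the bad_area path with code == SEGV_ACCERR.
	 */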

  survive:
	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the fault.
	 */
	switch (handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0)) {
	      case VM_FAULT_MINOR:
		++current->min_flt;
		break;
	      case VM_FAULT_MAJOR:
		++current->maj_flt;
		break;
	      case VM_FAULT_SIGBUS:
		/*
		 * We ran out of memory, or some other thing happened to us
		 * that made us unable to handle the page fault gracefully.
		 */
		signal = SIGBUS;
		goto bad_area;
	      case VM_FAULT_OOM:
		goto out_of_memory;
	      default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return;

  check_expansion:
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_backing_store(vma, address))
			goto bad_area;
	}
	goto good_area;
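
	/*
	 * Illustrative note (not in the original source): the two branches
	 * above separate ordinary stack growth from register-backing-store
	 * growth.  A memory-stack access just below a VM_GROWSDOWN vma takes
	 * the expand_stack() path, while an RSE spill landing exactly at
	 * prev_vma->vm_end of a VM_GROWSUP vma grows the backing store by one
	 * page via expand_backing_store() instead.
	 */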

  bad_area:
	up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s, and the lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
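
	/*
	 * Example (illustrative, not in the original source; register names
	 * are arbitrary): for "ld8.s r30 = [r29]" on an unmapped address no
	 * signal is wanted; with psr.ed set the deferred exception simply
	 * leaves a NaT in r30, which a later chk.s can detect and recover
	 * from.  An lfetch.fault to such an address is silently dropped.
	 */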

	if (user_mode(regs)) {
		si.si_signo = signal;
		si.si_errno = 0;
		si.si_code = code;
		si.si_addr = (void __user *) address;
		si.si_isr = isr;
		si.si_flags = __ISR_VALID;
		force_sig_info(signal, &si, current);
		return;
	}
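
	/*
	 * Illustrative note (not in the original source): the delivery above
	 * means a reference to an unmapped user address arrives as SIGSEGV
	 * with si_code == SEGV_MAPERR, a permission violation on an existing
	 * mapping as SEGV_ACCERR, and si_addr always holds the faulting
	 * address for the signal handler or a core dump to inspect.
	 */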

  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s, and the lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Since we have no vma's for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a non present translation that becomes
	 * stale.  If that happens, the non present fault handler already purged the stale
	 * translation, which fixed the problem.  So, we check to see if the translation is
	 * valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

	/*
	 * Oops.  The kernel tried to access some bad page.  We'll have to terminate
	 * things with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	die("Oops", regs, isr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
	return;

  out_of_memory:
	up_read(&mm->mmap_sem);
	if (current->pid == 1) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk(KERN_CRIT "VM: killing process %s\n", current->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;
}