/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * Based on linux/arch/sh/mm/fault.c:
 *   Copyright (C) 1999 Niibe Yutaka
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>

#include <linux/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/sysreg.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>

#ifdef CONFIG_KPROBES
ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);

/* Hook to register for page fault notifications */
int register_page_fault_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
}

int unregister_page_fault_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
}

static inline int notify_page_fault(enum die_val val, struct pt_regs *regs,
                                    int trap, int sig)
{
        struct die_args args = {
                .regs = regs,
                .trapnr = trap,
        };

        return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
}
#else
static inline int notify_page_fault(enum die_val val, struct pt_regs *regs,
                                    int trap, int sig)
{
        return NOTIFY_DONE;
}
#endif
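
/* exception_trace != 0 => log user-space segfaults and bus errors below. */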
int exception_trace = 1;

/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 *
 * ecr is the Exception Cause Register. Possible values are:
 * 6:  Protection fault (instruction access)
 * 15: Protection fault (read access)
 * 16: Protection fault (write access)
 * 20: Page not found (instruction access)
 * 24: Page not found (read access)
 * 28: Page not found (write access)
 */
asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        const struct exception_table_entry *fixup;
        unsigned long address;
        unsigned long page;
        int writeaccess;
        long signr;
        int code;

        if (notify_page_fault(DIE_PAGE_FAULT, regs,
                              ecr, SIGSEGV) == NOTIFY_STOP)
                return;

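        /* TLBEAR holds the virtual address that triggered this exception. */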
        address = sysreg_read(TLBEAR);

        tsk = current;
        mm = tsk->mm;

        signr = SIGSEGV;
        code = SEGV_MAPERR;

        /*
         * If we're in an interrupt or have no user context, we must
         * not take the fault...
         */
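        /*
         * SYSREG_BIT(GM) in the saved status register means the fault
         * happened while interrupts were globally masked.
         */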
        if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
                goto no_context;

        down_read(&mm->mmap_sem);

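        /*
         * Find the VMA covering the faulting address; expand the stack
         * VMA downwards if the address lies just below it.
         */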
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;

        /*
         * Ok, we have a good vm_area for this memory access, so we
         * can handle it...
         */
good_area:
        code = SEGV_ACCERR;
        writeaccess = 0;

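        /* Check the access type encoded in ecr against the VMA permissions. */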
        switch (ecr) {
        case ECR_PROTECTION_X:
        case ECR_TLB_MISS_X:
                if (!(vma->vm_flags & VM_EXEC))
                        goto bad_area;
                break;
        case ECR_PROTECTION_R:
        case ECR_TLB_MISS_R:
                if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
                        goto bad_area;
                break;
        case ECR_PROTECTION_W:
        case ECR_TLB_MISS_W:
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
                writeaccess = 1;
                break;
        default:
                panic("Unhandled case %lu in do_page_fault!", ecr);
        }

        /*
         * If for any reason at all we couldn't handle the fault, make
         * sure we exit gracefully rather than endlessly redo the
         * fault.
         */
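        /*
         * handle_mm_fault() returns a VM_FAULT_* code; account the fault
         * here and bail out to the error paths below on failure.
         */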
survive:
        switch (handle_mm_fault(mm, vma, address, writeaccess)) {
        case VM_FAULT_MINOR:
                tsk->min_flt++;
                break;
        case VM_FAULT_MAJOR:
                tsk->maj_flt++;
                break;
        case VM_FAULT_SIGBUS:
                goto do_sigbus;
        case VM_FAULT_OOM:
                goto out_of_memory;
        default:
                BUG();
        }

        up_read(&mm->mmap_sem);
        return;

        /*
         * Something tried to access memory that isn't in our memory
         * map. Fix it, but check if it's kernel or user first...
         */
bad_area:
        up_read(&mm->mmap_sem);

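        /*
         * User-mode faults get a SIGSEGV; kernel-mode faults fall through
         * to the exception-fixup / oops path below.
         */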
        if (user_mode(regs)) {
                if (exception_trace && printk_ratelimit())
                        printk("%s%s[%d]: segfault at %08lx pc %08lx "
                               "sp %08lx ecr %lu\n",
                               is_init(tsk) ? KERN_EMERG : KERN_INFO,
                               tsk->comm, tsk->pid, address, regs->pc,
                               regs->sp, ecr);
                _exception(SIGSEGV, regs, code, address);
                return;
        }

no_context:
        /* Are we prepared to handle this kernel fault? */
        fixup = search_exception_tables(regs->pc);
        if (fixup) {
                regs->pc = fixup->fixup;
                return;
        }

        /*
         * Oops. The kernel tried to access some bad page. We'll have
         * to terminate things with extreme prejudice.
         */
        if (address < PAGE_SIZE)
                printk(KERN_ALERT
                       "Unable to handle kernel NULL pointer dereference");
        else
                printk(KERN_ALERT
                       "Unable to handle kernel paging request");
        printk(" at virtual address %08lx\n", address);

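        /*
         * Dump the page-table entries for the faulting address: PTBR holds
         * the page table base, address bits 31:22 index the first level
         * (pgd) and bits 21:12 index the second level (pte).
         */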
        page = sysreg_read(PTBR);
        printk(KERN_ALERT "ptbr = %08lx", page);
        if (page) {
                page = ((unsigned long *)page)[address >> 22];
                printk(" pgd = %08lx", page);
                if (page & _PAGE_PRESENT) {
                        page &= PAGE_MASK;
                        address &= 0x003ff000;
                        page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
                        printk(" pte = %08lx", page);
                }
        }
        printk("\n");
        die("Kernel access of bad area", regs, signr);
        return;

        /*
         * We ran out of memory, or some other thing happened to us
         * that made us unable to handle the page fault gracefully.
         */
out_of_memory:
        up_read(&mm->mmap_sem);
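        /* Give init a chance to retry after yielding; anyone else is killed. */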
        if (is_init(current)) {
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
        }
        printk("VM: Killing process %s\n", tsk->comm);
        if (user_mode(regs))
                do_exit(SIGKILL);
        goto no_context;

do_sigbus:
        up_read(&mm->mmap_sem);

        /* Kernel mode? Handle exceptions or die */
        signr = SIGBUS;
        code = BUS_ADRERR;
        if (!user_mode(regs))
                goto no_context;

        if (exception_trace)
                printk("%s%s[%d]: bus error at %08lx pc %08lx "
                       "sp %08lx ecr %lu\n",
                       is_init(tsk) ? KERN_EMERG : KERN_INFO,
                       tsk->comm, tsk->pid, address, regs->pc,
                       regs->sp, ecr);

        _exception(SIGBUS, regs, BUS_ADRERR, address);
}

asmlinkage void do_bus_error(unsigned long addr, int write_access,
                             struct pt_regs *regs)
{
        printk(KERN_ALERT
               "Bus error at physical address 0x%08lx (%s access)\n",
               addr, write_access ? "write" : "read");
        printk(KERN_INFO "DTLB dump:\n");
        dump_dtlb();
        die("Bus Error", regs, SIGKILL);
}

/*
 * This functionality is currently not possible to implement because
 * we're using segmentation to ensure a fixed mapping of the kernel
 * virtual address space.
 *
 * It would be possible to implement this, but it would require us to
 * disable segmentation at startup and load the kernel mappings into
 * the TLB like any other pages. There will be lots of trickery to
 * avoid recursive invocation of the TLB miss handler, though...
 */
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{

}
EXPORT_SYMBOL(kernel_map_pages);
#endif