/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * Based on linux/arch/sh/mm/fault.c:
 *   Copyright (C) 1999  Niibe Yutaka
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>

#include <asm/mmu_context.h>
#include <asm/sysreg.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	/* Give kprobes a chance to handle faults taken in kernel mode. */
	if (!user_mode(regs)) {
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			return 1;
	}
	return 0;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	return 0;
}
#endif
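/*
 * When nonzero, user-visible faults (segfaults and bus errors) are
 * logged to the console before the signal is delivered.
 */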
int exception_trace = 1;
/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 *
 * ecr is the Exception Cause Register. Possible values are:
 *   6:  Protection fault (instruction access)
 *   15: Protection fault (read access)
 *   16: Protection fault (write access)
 *   20: Page not found (instruction access)
 *   24: Page not found (read access)
 *   28: Page not found (write access)
 */
asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	const struct exception_table_entry *fixup;
	unsigned long address;
	unsigned long page;
	int writeaccess;
	long signr;
	int code;

	if (notify_page_fault(regs, ecr))
		return;

	/* TLBEAR holds the virtual address that caused the fault. */
	address = sysreg_read(TLBEAR);

	tsk = current;
	mm = tsk->mm;

	signr = SIGSEGV;
	code = SEGV_MAPERR;
	/*
	 * If we're in an interrupt or have no user context, we must
	 * not take the fault...
	 */
	if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
		goto no_context;

	down_read(&mm->mmap_sem);
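	/*
	 * Find the VMA covering the faulting address. A VMA starting
	 * above the address is only acceptable if it is a stack that
	 * can be grown down to cover it.
	 */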
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so we
	 * can handle it...
	 */
good_area:
	code = SEGV_ACCERR;
	writeaccess = 0;

	switch (ecr) {
	case ECR_PROTECTION_X:
	case ECR_TLB_MISS_X:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case ECR_PROTECTION_R:
	case ECR_TLB_MISS_R:
		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
			goto bad_area;
		break;
	case ECR_PROTECTION_W:
	case ECR_TLB_MISS_W:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		writeaccess = 1;
		break;
	default:
		panic("Unhandled case %lu in do_page_fault!", ecr);
	}
	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
survive:
	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
	case VM_FAULT_MINOR:
		tsk->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		tsk->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}

	up_read(&mm->mmap_sem);
	return;
	/*
	 * Something tried to access memory that isn't in our memory
	 * map. Fix it, but check if it's kernel or user first...
	 */
bad_area:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		if (exception_trace && printk_ratelimit())
			printk("%s%s[%d]: segfault at %08lx pc %08lx "
			       "sp %08lx ecr %lu\n",
			       is_init(tsk) ? KERN_EMERG : KERN_INFO,
			       tsk->comm, tsk->pid, address, regs->pc,
			       regs->sp, ecr);
		_exception(SIGSEGV, regs, code, address);
		return;
	}
no_context:
	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;
		return;
	}
	/*
	 * Oops. The kernel tried to access some bad page. We'll have
	 * to terminate things with extreme prejudice.
	 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT
		       "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT
		       "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
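	/*
	 * Dump the page table entries for the faulting address by
	 * walking the two-level page table by hand: PTBR points to the
	 * page directory, the top 10 address bits index the pgd, and
	 * the next 10 bits index the pte page (4 KiB pages).
	 */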
	page = sysreg_read(PTBR);
	printk(KERN_ALERT "ptbr = %08lx", page);
	if (page) {
		page = ((unsigned long *)page)[address >> 22];
		printk(" pgd = %08lx", page);
		if (page & _PAGE_PRESENT) {
			page &= PAGE_MASK;
			address &= 0x003ff000;
			page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
			printk(" pte = %08lx", page);
		}
	}
	printk("\n");
	die("Kernel access of bad area", regs, signr);
	/*
	 * We ran out of memory, or some other thing happened to us
	 * that made us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_init(current)) {
		/* init must not be killed; back off and retry the fault. */
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: Killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	/* Kernel mode? Handle exceptions or die */
	signr = SIGBUS;
	code = BUS_ADRERR;
	if (!user_mode(regs))
		goto no_context;

	if (exception_trace)
		printk("%s%s[%d]: bus error at %08lx pc %08lx "
		       "sp %08lx ecr %lu\n",
		       is_init(tsk) ? KERN_EMERG : KERN_INFO,
		       tsk->comm, tsk->pid, address, regs->pc,
		       regs->sp, ecr);

	_exception(SIGBUS, regs, BUS_ADRERR, address);
}
asmlinkage void do_bus_error(unsigned long addr, int write_access,
			     struct pt_regs *regs)
{
	printk(KERN_ALERT
	       "Bus error at physical address 0x%08lx (%s access)\n",
	       addr, write_access ? "write" : "read");
	printk(KERN_INFO "DTLB dump:\n");
	dump_dtlb();
	die("Bus Error", regs, SIGKILL);
}
/*
 * This functionality is currently not possible to implement because
 * we're using segmentation to ensure a fixed mapping of the kernel
 * virtual address space.
 *
 * It would be possible to implement this, but it would require us to
 * disable segmentation at startup and load the kernel mappings into
 * the TLB like any other pages. There will be lots of trickery to
 * avoid recursive invocation of the TLB miss handler, though...
 */
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
}
EXPORT_SYMBOL(kernel_map_pages);
#endif