/*
 * Page fault handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2008  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/fault.c:
 *   Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <asm/io_trapped.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

#ifdef CONFIG_KPROBES
	if (!user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}
#endif

	return ret;
}
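/*
 * Two handlers live in this file: __do_page_fault() further down is a
 * lightweight fast path, run with interrupts disabled, that tries to
 * satisfy a TLB miss from the page tables alone, while do_page_fault()
 * below is the full slow path that takes mmap_sem and may sleep.  The
 * fast path is presumably entered first from the low-level TLB miss
 * handler, with the slow path used only when it cannot fix things up.
 */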
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long writeaccess,
					unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	int si_code;
	int fault;
	siginfo_t info;
	/*
	 * We don't bother with any notifier callbacks here, as they are
	 * all handled through the __do_page_fault() fast-path.
	 */

	tsk = current;
	si_code = SEGV_MAPERR;
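	/*
	 * Kernel-space faults (the vmalloc/ioremap area) are handled first:
	 * the fix is simply to copy the missing top-level entry from the
	 * reference page table, swapper_pg_dir, into the current page table
	 * and restart the access, since the lower-level tables for kernel
	 * mappings are shared rather than per-process.
	 */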
	if (unlikely(address >= TASK_SIZE)) {
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;

		pgd = get_TTB() + offset;
		pgd_k = swapper_pg_dir + offset;
		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			set_pgd(pgd, *pgd_k);
			return;
		}
		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);

		if (!pud_present(*pud)) {
			if (!pud_present(*pud_k))
				goto bad_area_nosemaphore;
			set_pud(pud, *pud_k);
			return;
		}
		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		set_pmd(pmd, *pmd_k);

		return;
	}
	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK) {
		trace_hardirqs_on();
		local_irq_enable();
	}
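	/*
	 * SR_IMASK is the interrupt mask field of the SH status register;
	 * reading it back as all ones means the fault was taken with
	 * interrupts fully blocked, in which case they are deliberately
	 * left off here.
	 */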
	mm = tsk->mm;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	if (writeaccess) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
	fault = handle_mm_fault(mm, vma, address, writeaccess);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	up_read(&mm->mmap_sem);
	return;
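	/*
	 * (VM_FAULT_MAJOR above indicates the fault needed blocking I/O to
	 * bring the page in; the per-task maj_flt/min_flt counters feed the
	 * usual /proc and getrusage() fault accounting.)
	 */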
/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);
bad_area_nosemaphore:
	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}
no_context:
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return;

	if (handle_trapped_io(regs, address))
		return;
/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

	bust_spinlocks(1);

	if (oops_may_print()) {
		unsigned long page;
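		/*
		 * Dump the faulting address, the pc, and a hand-rolled walk
		 * of the top-level (pde) and pte entries.  The 0x003ff000
		 * mask below picks out the pte index, which assumes 4 KiB
		 * pages and a two-level page table layout.
		 */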
		if (address < PAGE_SIZE)
			printk(KERN_ALERT "Unable to handle kernel NULL "
					  "pointer dereference");
		else
			printk(KERN_ALERT "Unable to handle kernel paging "
					  "request");
		printk(" at virtual address %08lx\n", address);
		printk(KERN_ALERT "pc = %08lx\n", regs->pc);
		page = (unsigned long)get_TTB();
		if (page) {
			page = ((__typeof__(page) *)page)[address >> PGDIR_SHIFT];
			printk(KERN_ALERT "*pde = %08lx\n", page);
			if (page & _PAGE_PRESENT) {
				page &= PAGE_MASK;
				address &= 0x003ff000;
				page = ((__typeof__(page) *)
						__va(page))[address >>
							    PAGE_SHIFT];
				printk(KERN_ALERT "*pte = %08lx\n", page);
			}
		}
	}
	die("Oops", regs, writeaccess);
	bust_spinlocks(0);
	do_exit(SIGKILL);
/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_group_exit(SIGKILL);
	goto no_context;
do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);
	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}
#ifdef CONFIG_SH_STORE_QUEUES
/*
 * This is a special case for the SH-4 store queues, as pages for this
 * space still need to be faulted in before it's possible to flush the
 * store queue cache for writeout to the remapped region.
 */
#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
#else
#define P3_ADDR_MAX		P4SEG
#endif
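/*
 * P3_ADDR_MAX bounds the kernel-mapped range that the fast path below
 * will resolve from the kernel page tables; with the store queues
 * enabled it is pushed out past P4SEG to also cover the 64MB store
 * queue remap area.
 */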
/*
 * Called with interrupts disabled.
 */
asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
					 unsigned long writeaccess,
					 unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	if (notify_page_fault(regs, lookup_exception_vector()))
		return 0;
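	/*
	 * Returning 0 tells the caller the fault needs no further handling
	 * (as here, when kprobes claims it); a nonzero return presumably
	 * sends the caller on to the do_page_fault() slow path above.
	 */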
#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif
	/*
	 * We don't take page faults for P1, P2, and parts of P4, these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
	} else {
		if (unlikely(address >= TASK_SIZE || !current->mm))
			return 1;

		pgd = pgd_offset(current->mm, address);
	}
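	/*
	 * Kernel P3 addresses are looked up in init_mm's page table via
	 * pgd_offset_k(); anything else must be a sane user address with a
	 * live mm, otherwise the fast path gives up and lets the slow path
	 * sort it out.
	 */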
	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		return 1;
	if (unlikely(writeaccess && !pte_write(entry)))
		return 1;
	if (writeaccess)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);
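	/*
	 * Accessed/dirty state is tracked in software: the young bit (and,
	 * for writes, the dirty bit) is folded into the pte here before it
	 * is written back and loaded into the TLB below.
	 */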
#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
	/*
	 * ITLB is not affected by "ldtlb" instruction.
	 * So, we need to flush the entry by ourselves.
	 */
	local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif
	set_pte(pte, entry);
	update_mmu_cache(NULL, address, entry);

	return 0;
}