/*
 * Page fault handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2007  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/fault.c:
 *   Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <asm/io_trapped.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/kgdb.h>

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long writeaccess,
					unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int si_code;
	int fault;
	siginfo_t info;
#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

	tsk = current;
	si_code = SEGV_MAPERR;
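
	/*
	 * si_code: SEGV_MAPERR ("address not mapped") is the default,
	 * promoted to SEGV_ACCERR further down once a mapping is found
	 * whose permissions do not allow the attempted access.
	 */
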
	if (unlikely(address >= TASK_SIZE)) {
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;

		pgd = get_TTB() + offset;
		pgd_k = swapper_pg_dir + offset;
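
		/*
		 * get_TTB() reads the TTB register, which holds the base
		 * of the page table currently live on this CPU, while
		 * swapper_pg_dir is the kernel's reference page table
		 * that we synchronize against.
		 */
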
		/* This will never happen with the folded page table. */
		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			set_pgd(pgd, *pgd_k);
			return;
		}

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (pud_present(*pud) || !pud_present(*pud_k))
			goto bad_area_nosemaphore;
		set_pud(pud, *pud_k);

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		set_pmd(pmd, *pmd_k);

		return;
	}
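
	/*
	 * From here on we are dealing with a fault on a userspace
	 * address, which may require sleeping (taking mmap_sem, paging
	 * data in), so interrupts can safely be re-enabled.
	 */
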
	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK) {
		trace_hardirqs_on();
		local_irq_enable();
	}

	mm = tsk->mm;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);
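
	/*
	 * find_vma() returns the first VMA ending above the address; if
	 * that VMA does not also start at or below the address, it only
	 * covers this fault when it is a stack (VM_GROWSDOWN) that can
	 * be grown downwards to include it.
	 */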
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	if (writeaccess) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
	fault = handle_mm_fault(mm, vma, address, writeaccess);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
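
	/*
	 * A major fault is one that needed I/O (e.g. reading the page
	 * in from the backing store); a minor fault was satisfied
	 * without it. Account whichever one this was to the task.
	 */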
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = (void *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	if (handle_trapped_io(regs, address))
		return;
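
	/*
	 * fixup_exception() looks regs->pc up in the exception table
	 * (the mechanism behind copy_{to,from}_user() and friends),
	 * while handle_trapped_io() gives the trapped I/O emulation
	 * layer a chance to claim the access. If neither claims the
	 * fault, we have a genuine kernel bug.
	 */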
/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	bust_spinlocks(1);

	if (oops_may_print()) {
		unsigned long page;

		if (address < PAGE_SIZE)
			printk(KERN_ALERT "Unable to handle kernel NULL "
			       "pointer dereference");
		else
			printk(KERN_ALERT "Unable to handle kernel paging "
			       "request");
		printk(" at virtual address %08lx\n", address);
		printk(KERN_ALERT "pc = %08lx\n", regs->pc);
		page = (unsigned long)get_TTB();
		if (page) {
			page = ((__typeof__(page) *)page)[address >> PGDIR_SHIFT];
			printk(KERN_ALERT "*pde = %08lx\n", page);
			if (page & _PAGE_PRESENT) {
				page &= PAGE_MASK;
				address &= 0x003ff000;
				page = ((__typeof__(page) *)
						__va(page))[address >>
							    PAGE_SHIFT];
				printk(KERN_ALERT "*pte = %08lx\n", page);
			}
		}
	}

	die("Oops", regs, writeaccess);
	bust_spinlocks(0);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}

#ifdef CONFIG_SH_STORE_QUEUES
/*
 * This is a special case for the SH-4 store queues, as pages for this
 * space still need to be faulted in before it's possible to flush the
 * store queue cache for writeout to the remapped region.
 */
#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
#else
#define P3_ADDR_MAX		P4SEG
#endif
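
/*
 * Rough sketch of the sh32 virtual address space assumed by the checks
 * below (the legacy 29-bit split; PMB in 32-bit mode changes the
 * physical backing, not this virtual split):
 *
 *	P0/U0	0x00000000 - 0x7fffffff		TLB-mapped user space
 *	P1	0x80000000 - 0x9fffffff		identity-mapped, cacheable
 *	P2	0xa0000000 - 0xbfffffff		identity-mapped, uncached
 *	P3	0xc0000000 - 0xdfffffff		TLB-mapped kernel space
 *	P4	0xe0000000 - 0xffffffff		control space (store queues, etc.)
 *
 * Only P0/U0, P3, and the store queue window in P4 can legitimately
 * take a TLB miss.
 */
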
/*
 * Called with interrupts disabled.
 */
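/*
 * The low level exception entry code is assumed to treat a zero return
 * as "handled by refilling the TLB" and anything else as "punt to the
 * full do_page_fault() slow path above".
 */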
asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
					 unsigned long writeaccess,
					 unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

	/*
	 * We don't take page faults for P1, P2, and parts of P4, these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
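	/*
	 * Kernel-space addresses (P3, plus the store queue window) are
	 * resolved against the reference page table via pgd_offset_k();
	 * anything else is looked up in the faulting task's own page
	 * table.
	 */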
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
	} else {
		if (unlikely(address >= TASK_SIZE || !current->mm))
			return 1;

		pgd = pgd_offset(current->mm, address);
	}

	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		return 1;
	if (unlikely(writeaccess && !pte_write(entry)))
		return 1;

	if (writeaccess)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);
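
	/*
	 * There is no hardware page table walk on SH, so the dirty and
	 * accessed (young) bits only exist in the PTE and are updated
	 * by software, right here on the fault path, before the entry
	 * is loaded back into the TLB.
	 */
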
#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
	/*
	 * ITLB is not affected by "ldtlb" instruction.
	 * So, we need to flush the entry by ourselves.
	 */
	local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

	set_pte(pte, entry);
	update_mmu_cache(NULL, address, entry);

	return 0;
}