/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2003  Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 *  Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/kgdb.h>
extern void die(const char *, struct pt_regs *, long);
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			      unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long page;
#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

	tsk = current;
	mm = tsk->mm;
	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	if (writeaccess) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
	case VM_FAULT_MINOR:
		tsk->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		tsk->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}
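	/*
	 * A "minor" fault was satisfied without I/O (the page was already
	 * resident, e.g. in the page cache), while a "major" fault had to
	 * wait for the page to be read in; the counters bumped above are
	 * what getrusage() reports as ru_minflt/ru_majflt.
	 */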
	up_read(&mm->mmap_sem);
	return;
/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		tsk->thread.address = address;
		tsk->thread.error_code = writeaccess;
		force_sig(SIGSEGV, tsk);
		return;
	}
no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;
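	/*
	 * fixup_exception() searches the exception table built from
	 * __ex_table entries (as emitted by e.g. copy_{to,from}_user())
	 * and, on a match, rewrites regs->pc to the fixup handler
	 * instead of oopsing.
	 */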
/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT "pc = %08lx\n", regs->pc);
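	/*
	 * Walk the page tables by hand for the diagnostic dump below:
	 * MMU_TTB holds the page table base, bits 31..22 of the address
	 * index the 1024-entry page directory (each entry maps 4MB), and
	 * bits 21..12 (the 0x003ff000 mask) index the page table page.
	 */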
	asm volatile("mov.l	%1, %0"
		     : "=r" (page)
		     : "m" (__m(MMU_TTB)));
	if (page) {
		page = ((unsigned long *) page)[address >> 22];
		printk(KERN_ALERT "*pde = %08lx\n", page);
		if (page & _PAGE_PRESENT) {
			page &= PAGE_MASK;
			address &= 0x003ff000;
			page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
			printk(KERN_ALERT "*pte = %08lx\n", page);
		}
	}
	die("Oops", regs, writeaccess);
	do_exit(SIGKILL);
/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;
do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.address = address;
	tsk->thread.error_code = writeaccess;
	tsk->thread.trap_no = 14;
	force_sig(SIGBUS, tsk);
	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}
#ifdef CONFIG_SH_STORE_QUEUES
/*
 * This is a special case for the SH-4 store queues, as pages for this
 * space still need to be faulted in before it's possible to flush the
 * store queue cache for writeout to the remapped region.
 */
#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
#else
#define P3_ADDR_MAX		P4SEG
#endif
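/*
 * (0x04000000 is the size of the store queue application area itself,
 * 0xE0000000 - 0xE3FFFFFF on SH-4, so P3_ADDR_MAX extends just far
 * enough beyond P4SEG to cover it.)
 */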
/*
 * Called with interrupts disabled.
 */
asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
					 unsigned long writeaccess,
					 unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	struct mm_struct *mm = current->mm;
	spinlock_t *ptl;
	int ret = 1;
#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif
	/*
	 * We don't take page faults for P1, P2, and parts of P4, these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
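	/*
	 * (For reference: P1 = 0x80000000 is the cached identity mapping,
	 * P2 = 0xA0000000 the uncached one, P3 = 0xC0000000 the
	 * page-mapped kernel area, and P4 = 0xE0000000 the control space.)
	 */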
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
		mm = NULL;
	} else {
		if (unlikely(address >= TASK_SIZE || !mm))
			return 1;

		pgd = pgd_offset(mm, address);
	}
	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;
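	/*
	 * (On 32-bit SH the pud and pmd levels are folded, so the
	 * offsets above collapse back onto the pgd entry; the
	 * *_none_or_clear_bad() checks still catch a missing or
	 * corrupted entry.)
	 */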
	if (mm)
		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	else
		pte = pte_offset_kernel(pmd, address);

	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		goto unlock;
	if (unlikely(writeaccess && !pte_write(entry)))
		goto unlock;
	if (writeaccess)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);
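	/*
	 * Dirty and accessed state is maintained in software on SH: the
	 * hardware only raised the TLB fault, so mark the PTE here
	 * before it gets loaded into the TLB.
	 */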
#ifdef CONFIG_CPU_SH4
	/*
	 * ITLB is not affected by "ldtlb" instruction.
	 * So, we need to flush the entry by ourselves.
	 */
	__flush_tlb_page(get_asid(), address & PAGE_MASK);
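	/*
	 * ("ldtlb" only reloads the UTLB from PTEH/PTEL; the ITLB is
	 * refilled from the UTLB on an instruction fetch miss, so a
	 * stale ITLB entry must be invalidated by hand.)
	 */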
#endif

	set_pte(pte, entry);
	update_mmu_cache(NULL, address, entry);

	ret = 0;
unlock:
	if (mm)
		pte_unmap_unlock(pte, ptl);

	return ret;
}