// TODO VM_EXEC flag work-around, cache aliasing
/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */
#include <linux/mm.h>		/* find_vma(), handle_mm_fault(), expand_stack() */
#include <linux/module.h>
#include <linux/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/pgalloc.h>
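
/* Next ASID to hand out; <asm/mmu_context.h> defines ASID_USER_FIRST and
 * presumably advances this counter when a new address space needs an ASID.
 */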
unsigned long asid_cache = ASID_USER_FIRST;

void bad_page_fault(struct pt_regs *, unsigned long, int);

#undef DEBUG_PAGE_FAULT
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * Note: does not handle Miss and MultiHit.
 */
void do_page_fault(struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int exccause = regs->exccause;
	unsigned int address = regs->excvaddr;
	siginfo_t info;

	int is_write, is_exec;
	int fault;

	info.si_code = SEGV_MAPERR;
	/* We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 */
	if (address >= TASK_SIZE && !user_mode(regs))
		goto vmalloc_fault;
	/* If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm) {
		bad_page_fault(regs, address, SIGSEGV);
		return;
	}
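
	/* Classify the fault from the Xtensa exception cause: a store that
	 * fails its cache-attribute check is a write fault; the ITLB causes
	 * are raised on instruction fetches, so they mark execute faults.
	 * Everything else is treated as a read.
	 */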
	is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
	is_exec  = (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
		    exccause == EXCCAUSE_ITLB_MISS ||
		    exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
#ifdef DEBUG_PAGE_FAULT
	printk("[%s:%d:%08x:%d:%08lx:%s%s]\n", current->comm, current->pid,
	       address, exccause, regs->pc,
	       is_write ? "w" : "", is_exec ? "x" : "");
#endif
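
	/* find_vma() returns the first VMA with vm_end > address, or NULL.
	 * vm_start <= address means the address lies inside that VMA;
	 * otherwise the access may still be a legitimate grow-down stack
	 * reference just below the stack VMA, handled by expand_stack().
	 */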
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);

	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/* Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */

good_area:
	info.si_code = SEGV_ACCERR;
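
	/* A mapping exists from here on, so any failure below is a
	 * permission problem (SEGV_ACCERR) rather than a missing mapping
	 * (SEGV_MAPERR, set at entry).
	 */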
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else if (is_exec) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else	/* Allow read even from write-only pages. */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;
	/* If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
	fault = handle_mm_fault(mm, vma, address, is_write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		current->maj_flt++;	/* had to do I/O to satisfy the fault */
	else
		current->min_flt++;

	up_read(&mm->mmap_sem);
	return;
	/* Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);
	if (user_mode(regs)) {
		current->thread.bad_vaddr = address;
		current->thread.error_code = is_write;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, current);
		return;
	}
	bad_page_fault(regs, address, SIGSEGV);
	return;
	/* We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(current)) {
		/* Never kill init: yield and retry the fault instead. */
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", current->comm);
	if (user_mode(regs))
		do_group_exit(SIGKILL);
	bad_page_fault(regs, address, SIGKILL);
	return;
do_sigbus:
	up_read(&mm->mmap_sem);

	/* Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	current->thread.bad_vaddr = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *) address;
	force_sig_info(SIGBUS, &info, current);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGBUS);
	return;
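
	/* Kernel mappings for the vmalloc area are created only in the
	 * init_mm 'reference' page table.  A fault on such an address in
	 * another context is resolved lazily here by copying the relevant
	 * top-level entry from init_mm; the lower-level tables are thereby
	 * shared, so individual PTEs never need to be copied.
	 */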
vmalloc_fault:
	{
		/* Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		struct mm_struct *act_mm = current->active_mm;
		int index = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		if (act_mm == NULL)
			goto bad_page_fault;
		pgd = act_mm->pgd + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto bad_page_fault;

		pgd_val(*pgd) = pgd_val(*pgd_k);
		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);
		if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_page_fault;

		pmd_val(*pmd) = pmd_val(*pmd_k);
		pte_k = pte_offset_kernel(pmd_k, address);

		if (!pte_present(*pte_k))
			goto bad_page_fault;
		return;
	}

bad_page_fault:
	bad_page_fault(regs, address, SIGKILL);
	return;
}
void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	extern void die(const char*, struct pt_regs*, long);
	const struct exception_table_entry *entry;
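
	/* search_exception_tables() looks regs->pc up in the kernel's
	 * __ex_table: uaccess helpers such as copy_from_user() register a
	 * fixup address for each instruction that may legitimately fault
	 * on a user pointer, so the fault is recovered by branching to the
	 * fixup instead of oopsing.
	 */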
	/* Are we prepared to handle this kernel fault?  */
	if ((entry = search_exception_tables(regs->pc)) != NULL) {
#ifdef DEBUG_PAGE_FAULT
		printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n",
		       current->comm, regs->pc, entry->fixup);
#endif
		current->thread.bad_uaddr = address;
		regs->pc = entry->fixup;
		return;
	}
	/* Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	printk(KERN_ALERT "Unable to handle kernel paging request at virtual "
	       "address %08lx\n pc = %08lx, ra = %08lx\n",
	       address, regs->pc, regs->areg[0]);
	die("Oops", regs, sig);

	/* die() should not return, but make sure we don't either. */
	do_exit(sig);
}