/*
 * arch/sh/mm/tlb-flush_64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Richard Curnow (/proc/tlb, bug fixes)
 * Copyright (C) 2003 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/signal.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_counter.h>
#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>

extern void die(const char *,struct pt_regs *,long);
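
/*
 * Helpers for dumping protection bits by name: PFLAG() expands to the
 * stringized flag when the bit is set in 'val', and to "" otherwise.
 */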
#define PFLAG(val,flag)   (( (val) & (flag) ) ? #flag : "" )
#define PPROT(flag) PFLAG(pgprot_val(prot),flag)

static inline void print_prots(pgprot_t prot)
{
	printk("prot is 0x%08lx\n", pgprot_val(prot));

	printk("%s %s %s %s %s\n", PPROT(_PAGE_SHARED), PPROT(_PAGE_READ),
	       PPROT(_PAGE_EXECUTE), PPROT(_PAGE_WRITE), PPROT(_PAGE_USER));
}

static inline void print_vma(struct vm_area_struct *vma)
{
	printk("vma start 0x%08lx\n", vma->vm_start);
	printk("vma end   0x%08lx\n", vma->vm_end);

	print_prots(vma->vm_page_prot);
	printk("vm_flags 0x%08lx\n", vma->vm_flags);
}

static inline void print_task(struct task_struct *tsk)
{
	printk("Task pid %d\n", task_pid_nr(tsk));
}
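
/*
 * Walk the page tables for 'address' in 'mm'.  Returns a pointer to the
 * PTE when a present mapping exists at every level, or NULL as soon as
 * the pgd/pud/pmd is absent or the final PTE is not present.
 */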
static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address)
{
	pgd_t *dir;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	dir = pgd_offset(mm, address);
	if (pgd_none(*dir))
		return NULL;

	pud = pud_offset(dir, address);
	if (pud_none(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (pte_none(entry) || !pte_present(entry))
		return NULL;

	return pte;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			      unsigned long textaccess, unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	const struct exception_table_entry *fixup;
	pte_t *pte;
	int fault;

	/*
	 * Note this is now called with interrupts still disabled
	 * This is to cope with being called for a missing IO port
	 * address with interrupts disabled. This should be fixed as
	 * soon as we have a better 'fast path' miss handler.
	 *
	 * Plus take care how you try and debug this stuff.
	 * For example, writing debug data to a port which you
	 * have just faulted on is not going to work.
	 */
	tsk = current;
	mm = tsk->mm;

	/* Not an IO address, so reenable interrupts */
	local_irq_enable();

	perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	/* TLB misses upon some cache flushes get done under cli() */
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08lx PC %016Lx textaccess %ld writeaccess %ld\n",
		       __func__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		show_regs(regs);
#endif
		goto bad_area;
	}

	/* Check if page is the right type of access */
	if (vma->vm_start <= address) {
		goto good_area;
	}

	if (!(vma->vm_flags & VM_GROWSDOWN)) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08lx PC %016Lx textaccess %ld writeaccess %ld\n",
		       __func__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		show_regs(regs);
		print_vma(vma);
#endif
		goto bad_area;
	}
	if (expand_stack(vma, address)) {
#ifdef DEBUG_FAULT
		print_task(tsk);
		printk("%s:%d fault, address is 0x%08lx PC %016Lx textaccess %ld writeaccess %ld\n",
		       __func__, __LINE__,
		       address, regs->pc, textaccess, writeaccess);
		show_regs(regs);
#endif
		goto bad_area;
	}

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	if (textaccess) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else {
		if (writeaccess) {
			if (!(vma->vm_flags & VM_WRITE))
				goto bad_area;
		} else {
			if (!(vma->vm_flags & VM_READ))
				goto bad_area;
		}
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
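	/*
	 * handle_mm_fault() returns a VM_FAULT_* bitmask: fatal conditions
	 * are flagged by VM_FAULT_ERROR, while VM_FAULT_MAJOR marks faults
	 * that needed I/O (and are accounted separately below).
	 */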
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (fault & VM_FAULT_MAJOR) {
		tsk->maj_flt++;
		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
				     regs, address);
	} else {
		tsk->min_flt++;
		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
				     regs, address);
	}

	/* If we get here, the page fault has been handled.  Do the TLB refill
	   now from the newly-setup PTE, to avoid having to fault again right
	   away on the same instruction. */
	pte = lookup_pte(mm, address);
	if (!pte) {
		/* From empirical evidence, we can get here, due to
		   !pte_present(pte).  (e.g. if a swap-in occurs, and the page
		   is swapped back out again before the process that wanted it
		   gets rescheduled?) */
		goto no_pte;
	}
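
	/*
	 * Preload the freshly established translation into the relevant
	 * TLB; textaccess selects the instruction TLB over the data TLB.
	 */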
	__do_tlb_refill(address, textaccess, pte);

no_pte:

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
#ifdef DEBUG_FAULT
	printk("fault:bad area\n");
#endif
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		static int count = 0;
		siginfo_t info;
		if (count < 4) {
			/* This is really to help debug faults when starting
			 * usermode, so only need a few */
			count++;
			printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n",
			       address, task_pid_nr(current), current->comm,
			       (unsigned long) regs->pc);
#if 0
			show_regs(regs);
#endif
		}
		if (is_global_init(tsk)) {
			panic("INIT had user mode bad_area\n");
		}
		tsk->thread.address = address;
		tsk->thread.error_code = writeaccess;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
#ifdef DEBUG_FAULT
	printk("fault:No context\n");
#endif
	/* Are we prepared to handle this kernel fault? */
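	/*
	 * search_exception_tables() maps a faulting kernel PC onto its
	 * registered fixup address (as used by e.g. the user-copy
	 * routines), letting the access fail gracefully instead of
	 * taking the machine down.
	 */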
	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;
		return;
	}

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 *
 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT "pc = %08Lx%08Lx\n", regs->pc >> 32, regs->pc & 0xffffffff);
	die("Oops", regs, writeaccess);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	if (is_global_init(current)) {
		panic("INIT out of memory\n");
		yield();
		goto survive;
	}
	printk("fault:Out of memory\n");
	up_read(&mm->mmap_sem);
	if (is_global_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	printk("fault:Do sigbus\n");
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.address = address;
	tsk->thread.error_code = writeaccess;
	tsk->thread.trap_no = 14;
	force_sig(SIGBUS, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}

void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t pte)
{
	/*
	 * This appears to get called once for every pte entry that gets
	 * established => I don't think it's efficient to try refilling the
	 * TLBs with the pages - some may not get accessed even.  Also, for
	 * executable pages, it is impossible to determine reliably here which
	 * TLB they should be mapped into (or both even).
	 *
	 * So, just do nothing here and handle faults on demand.  In the
	 * TLBMISS handling case, the refill is now done anyway after the pte
	 * has been fixed up, so that deals with most useful cases.
	 */
}

void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
	unsigned long long match, pteh = 0, lpage;
	unsigned long tlb;

	/*
	 * Sign-extend based on neff.
	 */
	lpage = (page & NEFF_SIGN) ? (page | NEFF_MASK) : page;
	match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
	match |= lpage;
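
	/*
	 * The whole PTEH word of each slot, read back via getcfg, must
	 * equal 'match': valid bit, ASID field and sign-extended EPN all
	 * have to agree before the slot is invalidated.
	 */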
	for_each_itlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		if (pteh == match) {
			__flush_tlb_slot(tlb);
			break;
		}
	}

	for_each_dtlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		if (pteh == match) {
			__flush_tlb_slot(tlb);
			break;
		}
	}
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned long flags;

	if (vma->vm_mm) {
		page &= PAGE_MASK;
		local_irq_save(flags);
		local_flush_tlb_one(get_asid(), page);
		local_irq_restore(flags);
	}
}

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	unsigned long flags;
	unsigned long long match, pteh = 0, pteh_epn, pteh_low;
	unsigned long tlb;
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm;

	mm = vma->vm_mm;
	if (cpu_context(cpu, mm) == NO_CONTEXT)
		return;

	local_irq_save(flags);

	start &= PAGE_MASK;
	end &= PAGE_MASK;

	match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID;
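
	/*
	 * Unlike the single-page case above, only the low PTEH bits
	 * (ASID + valid) are matched exactly; the EPN half is instead
	 * compared against the [start, end] range.
	 */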
	/* Flush ITLB */
	for_each_itlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		pteh_epn = pteh & PAGE_MASK;
		pteh_low = pteh & ~PAGE_MASK;

		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
			__flush_tlb_slot(tlb);
	}

	/* Flush DTLB */
	for_each_dtlb_entry(tlb) {
		asm volatile ("getcfg	%1, 0, %0"
			      : "=r" (pteh)
			      : "r" (tlb) );

		pteh_epn = pteh & PAGE_MASK;
		pteh_low = pteh & ~PAGE_MASK;

		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
			__flush_tlb_slot(tlb);
	}

	local_irq_restore(flags);
}

void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) == NO_CONTEXT)
		return;

	local_irq_save(flags);
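
	/*
	 * Invalidate by dropping the mm's context rather than walking
	 * TLB slots: once marked NO_CONTEXT, a fresh ASID is allocated
	 * on the next activation, so stale entries can no longer match.
	 */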
	cpu_context(cpu, mm) = NO_CONTEXT;
	if (mm == current->mm)
		activate_context(mm, cpu);

	local_irq_restore(flags);
}

void local_flush_tlb_all(void)
{
	/* Invalidate all, including shared pages, excluding fixed TLBs */
	unsigned long flags, tlb;

	local_irq_save(flags);

	/* Flush each ITLB entry */
	for_each_itlb_entry(tlb)
		__flush_tlb_slot(tlb);

	/* Flush each DTLB entry */
	for_each_dtlb_entry(tlb)
		__flush_tlb_slot(tlb);

	local_irq_restore(flags);
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* FIXME: Optimize this later.. */
	flush_tlb_all();
}