/*
 * SN Platform GRU Driver
 *
 *              FAULT HANDLER FOR GRU DETECTED TLB MISSES
 *
 * This file contains code that handles TLB misses within the GRU.
 * These misses are reported either via interrupts or user polling of
 * the user CB.
 *
 *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <asm/pgtable.h>
#include "gru.h"
#include "grutables.h"
#include "grulib.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

/*
 * Test if a physical address is a valid GRU GSEG address
 */
static inline int is_gru_paddr(unsigned long paddr)
{
	return paddr >= gru_start_paddr && paddr < gru_end_paddr;
}

/*
 * Find the vma of a GRU segment. Caller must hold mmap_sem.
 */
struct vm_area_struct *gru_find_vma(unsigned long vaddr)
{
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, vaddr);
	if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops)
		return vma;
	return NULL;
}

/*
 * Find and lock the gts that contains the specified user vaddr.
 *
 * Returns:
 *	- *gts with the mmap_sem locked for read and the GTS locked.
 *	- NULL if vaddr invalid OR is not a valid GSEG vaddr.
 */
static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = NULL;

	down_read(&mm->mmap_sem);
	vma = gru_find_vma(vaddr);
	if (vma)
		gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (gts)
		mutex_lock(&gts->ts_ctxlock);
	else
		up_read(&mm->mmap_sem);
	return gts;
}
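
/*
 * Like gru_find_lock_gts(), but allocates the thread state if it does not
 * already exist. On success the gts is locked and the mmap_sem has been
 * downgraded to a read lock; on failure all locks are released.
 */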
static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = NULL;

	down_write(&mm->mmap_sem);
	vma = gru_find_vma(vaddr);
	if (vma)
		gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
	if (gts) {
		mutex_lock(&gts->ts_ctxlock);
		downgrade_write(&mm->mmap_sem);
	} else {
		up_write(&mm->mmap_sem);
	}
	return gts;
}

/*
 * Unlock a GTS that was previously locked with gru_find_lock_gts().
 */
static void gru_unlock_gts(struct gru_thread_state *gts)
{
	mutex_unlock(&gts->ts_ctxlock);
	up_read(&current->mm->mmap_sem);
}

/*
 * Set a CB.istatus to active using a user virtual address. This must be done
 * just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY.
 * If the line is evicted, the status may be lost. The in-cache update
 * is necessary to prevent the user from seeing a stale cb.istatus that will
 * change as soon as the TFH restart is complete. Races may cause an
 * occasional failure to clear the cb.istatus, but that is ok.
 *
 * If the cb address is not valid (should not happen, but...), nothing
 * bad will happen. The get_user()/put_user() will fail but there
 * are no bad side-effects.
 */
static void gru_cb_set_istatus_active(unsigned long __user *cb)
{
	union {
		struct gru_instruction_bits bits;
		unsigned long dw;
	} u;

	if (cb) {
		get_user(u.dw, cb);
		u.bits.istatus = CBS_ACTIVE;
		put_user(u.dw, cb);
	}
}

/*
 * Convert an interrupt IRQ to a pointer to the GRU GTS that caused the
 * interrupt. Interrupts are always sent to a cpu on the blade that contains the
 * GRU (except for headless blades which are not currently supported). A blade
 * has N grus; a block of N consecutive IRQs is assigned to the GRUs. The IRQ
 * number uniquely identifies the GRU chiplet on the local blade that caused the
 * interrupt. Always called in interrupt context.
 */
static inline struct gru_state *irq_to_gru(int irq)
{
	return &gru_base[uv_numa_blade_id()]->bs_grus[irq - IRQ_GRU];
}

/*
 * The GRU has an array of fault maps. A map is private to a cpu.
 * Only one cpu will be accessing a cpu's fault map.
 *
 * This function scans the cpu-private fault map & clears all bits that
 * are set. The function returns a bitmap that indicates the bits that
 * were cleared. Note that since the maps may be updated asynchronously by
 * the GRU, atomic operations must be used to clear bits.
 */
static void get_clear_fault_map(struct gru_state *gru,
				struct gru_tlb_fault_map *map)
{
	unsigned long i, k;
	struct gru_tlb_fault_map *tfm;

	tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id());
	prefetchw(tfm);		/* Helps on hardware, required for emulator */
	for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) {
		k = tfm->fault_bits[i];
		if (k)
			k = xchg(&tfm->fault_bits[i], 0UL);
		map->fault_bits[i] = k;
	}

	/*
	 * Not functionally required but helps performance. (Required
	 * on emulator)
	 */
	gru_flush_cache(tfm);
}

/*
 * Atomic (interrupt context) & non-atomic (user context) functions to
 * convert a vaddr into a physical address. The size of the page
 * is returned in pageshift.
 *	returns:
 *		  0 - successful
 *		< 0 - error code
 *		  1 - (atomic only) try again in non-atomic context
 */
static int non_atomic_pte_lookup(struct vm_area_struct *vma,
				 unsigned long vaddr, int write,
				 unsigned long *paddr, int *pageshift)
{
	struct page *page;

	/* ZZZ Need to handle HUGE pages */
	if (is_vm_hugetlb_page(vma))
		return -EFAULT;
	*pageshift = PAGE_SHIFT;
	if (get_user_pages
	    (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
		return -EFAULT;
	*paddr = page_to_phys(page);
	put_page(page);
	return 0;
}

/*
 * atomic_pte_lookup
 *
 * Convert a user virtual address to a physical address.
 * Only supports Intel large pages (2MB only) on x86_64.
 *	ZZZ - hugepage support is incomplete
 *
 * NOTE: mmap_sem is already held on entry to this function. This
 * guarantees existence of the page tables.
 */
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
	int write, unsigned long *paddr, int *pageshift)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pud_t *pudp;
	pte_t pte;

	pgdp = pgd_offset(vma->vm_mm, vaddr);
	if (unlikely(pgd_none(*pgdp)))
		goto err;

	pudp = pud_offset(pgdp, vaddr);
	if (unlikely(pud_none(*pudp)))
		goto err;

	pmdp = pmd_offset(pudp, vaddr);
	if (unlikely(pmd_none(*pmdp)))
		goto err;
#ifdef CONFIG_X86_64
	if (unlikely(pmd_large(*pmdp)))
		pte = *(pte_t *) pmdp;
	else
#endif
		pte = *pte_offset_kernel(pmdp, vaddr);

	if (unlikely(!pte_present(pte) ||
		     (write && (!pte_write(pte) || !pte_dirty(pte)))))
		return 1;

	*paddr = pte_pfn(pte) << PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
	*pageshift = PAGE_SHIFT;
#endif
	return 0;

err:
	return 1;
}
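
/*
 * Convert a user vaddr into a GRU physical address suitable for a TLB
 * dropin. Tries the atomic lookup first and falls back to the non-atomic
 * lookup when permitted. Returns 0 on success; nonzero return values ask
 * the caller to fail the dropin or retry it in non-atomic (UPM) context.
 */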
static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
		    int write, int atomic, unsigned long *gpa, int *pageshift)
{
	struct mm_struct *mm = gts->ts_mm;
	struct vm_area_struct *vma;
	unsigned long paddr;
	int ret, ps;

	vma = find_vma(mm, vaddr);
	if (!vma)
		goto inval;

	/*
	 * Atomic lookup is faster & usually works even if called in non-atomic
	 * context.
	 */
	rmb();	/* Must/check ms_range_active before loading PTEs */
	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
	if (ret) {
		if (atomic)
			goto upm;
		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
			goto inval;
	}
	if (is_gru_paddr(paddr))
		goto inval;
	paddr = paddr & ~((1UL << ps) - 1);
	*gpa = uv_soc_phys_ram_to_gpa(paddr);
	*pageshift = ps;
	return 0;

inval:
	return -1;
upm:
	return -2;
}

/*
 * Drop a TLB entry into the GRU. The fault is described by info in a TFH.
 *	Input:
 *		cb    Address of user CBR. Null if not running in user context
 *	Return:
 *		  0 = dropin, exception, or switch to UPM successful
 *		  1 = range invalidate active
 *		< 0 = error code
 */
static int gru_try_dropin(struct gru_thread_state *gts,
			  struct gru_tlb_fault_handle *tfh,
			  unsigned long __user *cb)
{
	int pageshift = 0, asid, write, ret, atomic = !cb;
	unsigned long gpa = 0, vaddr = 0;

	/*
	 * NOTE: The GRU contains magic hardware that eliminates races between
	 * TLB invalidates and TLB dropins. If an invalidate occurs
	 * in the window between reading the TFH and the subsequent TLB dropin,
	 * the dropin is ignored. This eliminates the need for additional locks.
	 */

	/*
	 * Error if TFH state is IDLE or FMM mode & the user issuing a UPM call.
	 * Might be a hardware race OR a stupid user. Ignore FMM because FMM
	 * is a transient state.
	 */
	if (tfh->state == TFHSTATE_IDLE)
		goto failidle;
	if (tfh->state == TFHSTATE_MISS_FMM && cb)
		goto failfmm;

	write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
	vaddr = tfh->missvaddr;
	asid = tfh->missasid;
	if (asid == 0)
		goto failnoasid;

	rmb();	/* TFH must be cache resident before reading ms_range_active */

	/*
	 * TFH is cache resident - at least briefly. Fail the dropin
	 * if a range invalidate is active.
	 */
	if (atomic_read(&gts->ts_gms->ms_range_active))
		goto failactive;

	ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
	if (ret == -1)
		goto failinval;
	if (ret == -2)
		goto failupm;

	if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
		gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
		if (atomic || !gru_update_cch(gts, 0)) {
			gts->ts_force_cch_reload = 1;
			goto failupm;
		}
	}
	gru_cb_set_istatus_active(cb);
	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
			  GRU_PAGESIZE(pageshift));
	STAT(tlb_dropin);
	gru_dbg(grudev,
		"%s: tfh 0x%p, vaddr 0x%lx, asid 0x%x, ps %d, gpa 0x%lx\n",
		ret ? "non-atomic" : "atomic", tfh, vaddr, asid,
		pageshift, gpa);
	return 0;

failnoasid:
	/* No asid (delayed unload). */
	STAT(tlb_dropin_fail_no_asid);
	gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	if (!cb)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	return -EAGAIN;

failupm:
	/* Atomic failure switch CBR to UPM */
	tfh_user_polling_mode(tfh);
	STAT(tlb_dropin_fail_upm);
	gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return 1;

failfmm:
	/* FMM state on UPM call */
	gru_flush_cache(tfh);
	STAT(tlb_dropin_fail_fmm);
	gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failidle:
	/* TFH was idle - no miss pending */
	gru_flush_cache(tfh);
	STAT(tlb_dropin_fail_idle);
	gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failinval:
	/* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
	tfh_exception(tfh);
	STAT(tlb_dropin_fail_invalid);
	gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return -EFAULT;

failactive:
	/* Range invalidate active. Switch to UPM iff atomic */
	if (!cb)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	STAT(tlb_dropin_fail_range_active);
	gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
		tfh, vaddr);
	return 1;
}

/*
 * Process an external interrupt from the GRU. This interrupt is
 * caused by a TLB miss.
 * Note that this is the interrupt handler that is registered with linux
 * interrupt handlers.
 */
irqreturn_t gru_intr(int irq, void *dev_id)
{
	struct gru_state *gru;
	struct gru_tlb_fault_map map;
	struct gru_thread_state *gts;
	struct gru_tlb_fault_handle *tfh = NULL;
	int cbrnum, ctxnum;

	STAT(intr);

	gru = irq_to_gru(irq);
	if (!gru) {
		dev_err(grudev, "GRU: invalid interrupt: cpu %d, irq %d\n",
			raw_smp_processor_id(), irq);
		return IRQ_NONE;
	}
	get_clear_fault_map(gru, &map);
	gru_dbg(grudev, "irq %d, gru %x, map 0x%lx\n", irq, gru->gs_gid,
		map.fault_bits[0]);

	for_each_cbr_in_tfm(cbrnum, map.fault_bits) {
		tfh = get_tfh_by_index(gru, cbrnum);
		prefetchw(tfh);	/* Helps on hdw, required for emulator */

		/*
		 * When hardware sets a bit in the faultmap, it implicitly
		 * locks the GRU context so that it cannot be unloaded.
		 * The gts cannot change until a TFH start/writestart command
		 * is received.
		 */
		ctxnum = tfh->ctxnum;
		gts = gru->gs_gts[ctxnum];

		/*
		 * This is running in interrupt context. Trylock the mmap_sem.
		 * If it fails, retry the fault in user context.
		 */
		if (down_read_trylock(&gts->ts_mm->mmap_sem)) {
			gru_try_dropin(gts, tfh, NULL);
			up_read(&gts->ts_mm->mmap_sem);
		} else {
			tfh_user_polling_mode(tfh);
			STAT(intr_mm_lock_failed);
		}
	}
	return IRQ_HANDLED;
}
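
/*
 * User-context dropin path: wait for any active range invalidate to
 * finish, then retry gru_try_dropin() until it succeeds or fails with an
 * error.
 */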
static int gru_user_dropin(struct gru_thread_state *gts,
			   struct gru_tlb_fault_handle *tfh,
			   unsigned long __user *cb)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	int ret;

	while (1) {
		wait_event(gms->ms_wait_queue,
			   atomic_read(&gms->ms_range_active) == 0);
		prefetchw(tfh);	/* Helps on hdw, required for emulator */
		ret = gru_try_dropin(gts, tfh, cb);
		if (ret <= 0)
			return ret;
		STAT(call_os_wait_queue);
	}
}

/*
 * This interface is called as a result of a user detecting a "call OS" bit
 * in a user CB. Normally means that a TLB fault has occurred.
 *	cb - user virtual address of the CB
 */
int gru_handle_user_call_os(unsigned long cb)
{
	struct gru_tlb_fault_handle *tfh;
	struct gru_thread_state *gts;
	unsigned long __user *cbp;
	int ucbnum, cbrnum, ret = -EINVAL;

	STAT(call_os);
	gru_dbg(grudev, "address 0x%lx\n", cb);

	/* sanity check the cb pointer */
	ucbnum = get_cb_number((void *)cb);
	if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
		return -EINVAL;
	cbp = (unsigned long *)cb;

	gts = gru_find_lock_gts(cb);
	if (!gts)
		return -EINVAL;

	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
		goto exit;

	/*
	 * If force_unload is set, the UPM TLB fault is phony. The task
	 * has migrated to another node and the GSEG must be moved. Just
	 * unload the context. The task will page fault and assign a new
	 * context.
	 */
	if (gts->ts_tgid_owner == current->tgid && gts->ts_blade >= 0 &&
				gts->ts_blade != uv_numa_blade_id()) {
		STAT(call_os_offnode_reference);
		gts->ts_force_unload = 1;
	}

	/*
	 * CCH may contain stale data if ts_force_cch_reload is set.
	 */
	if (gts->ts_gru && gts->ts_force_cch_reload) {
		gru_update_cch(gts, 0);
		gts->ts_force_cch_reload = 0;
	}

	ret = -EAGAIN;
	cbrnum = thread_cbr_number(gts, ucbnum);
	if (gts->ts_force_unload) {
		gru_unload_context(gts, 1);
	} else if (gts->ts_gru) {
		tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
		ret = gru_user_dropin(gts, tfh, cbp);
	}
exit:
	gru_unlock_gts(gts);
	return ret;
}

/*
 * Fetch the exception detail information for a CB that terminated with
 * an exception.
 */
int gru_get_exception_detail(unsigned long arg)
{
	struct control_block_extended_exc_detail excdet;
	struct gru_control_block_extended *cbe;
	struct gru_thread_state *gts;
	int ucbnum, cbrnum, ret;

	STAT(user_exception);
	if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
		return -EFAULT;

	gru_dbg(grudev, "address 0x%lx\n", excdet.cb);
	gts = gru_find_lock_gts(excdet.cb);
	if (!gts)
		return -EINVAL;

	ucbnum = get_cb_number((void *)excdet.cb);
	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
		ret = -EINVAL;
	} else if (gts->ts_gru) {
		cbrnum = thread_cbr_number(gts, ucbnum);
		cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
		prefetchw(cbe);	/* Harmless on hardware, required for emulator */
		excdet.opc = cbe->opccpy;
		excdet.exopc = cbe->exopccpy;
		excdet.ecause = cbe->ecause;
		excdet.exceptdet0 = cbe->idef1upd;
		excdet.exceptdet1 = cbe->idef3upd;
		ret = 0;
	} else {
		ret = -EAGAIN;
	}
	gru_unlock_gts(gts);

	gru_dbg(grudev, "address 0x%lx, ecause 0x%x\n", excdet.cb,
		excdet.ecause);
	if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
		ret = -EFAULT;
	return ret;
}

/*
 * User request to unload a context. Content is saved for possible reload.
 */
static int gru_unload_all_contexts(void)
{
	struct gru_thread_state *gts;
	struct gru_state *gru;
	int gid, ctxnum;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	foreach_gid(gid) {
		gru = GID_TO_GRU(gid);
		spin_lock(&gru->gs_lock);
		for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
			gts = gru->gs_gts[ctxnum];
			if (gts && mutex_trylock(&gts->ts_ctxlock)) {
				spin_unlock(&gru->gs_lock);
				gru_unload_context(gts, 1);
				gru_unlock_gts(gts);
				spin_lock(&gru->gs_lock);
			}
		}
		spin_unlock(&gru->gs_lock);
	}
	return 0;
}
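
/*
 * Unload the context owning the GSEG specified in the user request. A
 * request with no GSEG unloads all contexts (requires CAP_SYS_ADMIN).
 */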
int gru_user_unload_context(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_unload_context_req req;

	STAT(user_unload_context);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);

	if (!req.gseg)
		return gru_unload_all_contexts();

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	if (gts->ts_gru)
		gru_unload_context(gts, 1);
	gru_unlock_gts(gts);
	return 0;
}

/*
 * User request to flush a range of virtual addresses from the GRU TLB
 * (Mainly for testing).
 */
int gru_user_flush_tlb(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_flush_tlb_req req;

	STAT(user_flush_tlb);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg,
		req.vaddr, req.len);

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	gru_flush_tlb_range(gts->ts_gms, req.vaddr, req.len);
	gru_unlock_gts(gts);
	return 0;
}

/*
 * Register the current task as the user of the GSEG slice.
 * Needed for TLB fault interrupt targeting.
 */
int gru_set_task_slice(long address)
{
	struct gru_thread_state *gts;

	STAT(set_task_slice);
	gru_dbg(grudev, "address 0x%lx\n", address);
	gts = gru_alloc_locked_gts(address);
	if (!gts)
		return -EINVAL;

	gts->ts_tgid_owner = current->tgid;
	gru_unlock_gts(gts);
	return 0;
}