2 * arch/ia64/kernel/ivt.S
4 * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co
5 * Stephane Eranian <eranian@hpl.hp.com>
6 * David Mosberger <davidm@hpl.hp.com>
7 * Copyright (C) 2000, 2002-2003 Intel Co
8 * Asit Mallick <asit.k.mallick@intel.com>
9 * Suresh Siddha <suresh.b.siddha@intel.com>
10 * Kenneth Chen <kenneth.w.chen@intel.com>
11 * Fenghua Yu <fenghua.yu@intel.com>
13 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
14 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
17 * This file defines the interruption vector table used by the CPU.
18 * It does not include one entry per possible cause of interruption.
20 * The first 20 entries of the table contain 64 bundles each while the
21 * remaining 48 entries contain only 16 bundles each.
23 * The 64 bundles are used to allow inlining the whole handler for critical
24 * interruptions like TLB misses.
26 * For each entry, the comment is as follows:
28 * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
29 * entry offset ----/ / / / /
30 * entry number ---------/ / / /
31 * size of the entry -------------/ / /
32 * vector name -------------------------------------/ /
33 * interruptions triggering this vector ----------------------/
35 * The table is 32KB in size and must be aligned on 32KB boundary.
36 * (The CPU ignores the 15 lower bits of the address)
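 *
 * The layout arithmetic checks out: 20 long entries * 64 bundles * 16 bytes
 * per bundle = 0x5000 bytes (which is why the first short entry below sits at
 * ia64_ivt+0x5000), and 48 short entries * 16 bundles * 16 bytes = 0x3000
 * bytes, for 0x8000 bytes = 32KB in total.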
38 * Table is based upon EAS2.6 (Oct 1999)
42 #include <asm/asmmacro.h>
43 #include <asm/break.h>
45 #include <asm/kregs.h>
46 #include <asm/asm-offsets.h>
47 #include <asm/pgtable.h>
48 #include <asm/processor.h>
49 #include <asm/ptrace.h>
50 #include <asm/system.h>
51 #include <asm/thread_info.h>
52 #include <asm/unistd.h>
53 #include <asm/errno.h>
56 # define PSR_DEFAULT_BITS psr.ac
58 # define PSR_DEFAULT_BITS 0
63 * This lets you track the last eight faults that occurred on the CPU. Make sure ar.k2 isn't
64 * needed for something else before enabling this...
66 # define DBG_FAULT(i) mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;;mov ar.k2=r16
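/*
 * With DBG_FAULT() enabled, ar.k2 acts as an eight-entry shift register: every
 * fault pushes the previous history up by one byte and records the new vector
 * number in the low byte.  A debugger could decode it roughly like this
 * (hypothetical helper, shown only to illustrate the encoding):
 *
 *	void decode_fault_history(unsigned long k2)
 *	{
 *		int i;
 *		for (i = 7; i >= 0; i--)	// oldest of the last 8 first
 *			printk(" %lu", (k2 >> (8 * i)) & 0xff);
 *	}
 */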
75 mov r19=n;; /* prepare to save predicates */ \
76 br.sptk.many dispatch_to_fault_handler
78 .section .text.ivt,"ax"
80 .align 32768 // align on 32KB boundary
83 /////////////////////////////////////////////////////////////////////////////////////////
84 // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
88 * The VHPT vector is invoked when the TLB entry for the virtual page table
89 * is missing. This happens only as a result of a previous
90 * (the "original") TLB miss, which may either be caused by an instruction
91 * fetch or a data access (or non-access).
93 * What we do here is normal TLB miss handling for the _original_ miss,
94 * followed by inserting the TLB entry for the virtual page table page
95 * that the VHPT walker was attempting to access. The latter gets
96 * inserted as long as the page table entries above the pte level have valid
97 * mappings for the faulting address. The TLB entry for the original
98 * miss gets inserted only if the pte entry indicates that the page is present.
101 * do_page_fault gets invoked in the following cases:
102 * - the faulting virtual address uses unimplemented address bits
103 * - the faulting virtual address has no valid page table mapping
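 *
 * In rough C terms, the fast path below does the following (a sketch only;
 * the real code walks the table with physical addressing and predication, and
 * walk_page_table(), insert_tlb() and page_of() are illustrative names, not
 * kernel APIs):
 *
 *	pte_t *pte = walk_page_table(ifa);	// pgd -> [pud ->] pmd -> pte
 *	if (!pte || !pte_present(*pte))
 *		return page_fault();
 *	insert_tlb(ifa, *pte);			// itc.i or itc.d, chosen via cr.isr
 *	insert_tlb(cr_iha, page_of(pte));	// map the PT page the VHPT walker wanted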
105 mov r16=cr.ifa // get address that caused the TLB miss
106 #ifdef CONFIG_HUGETLB_PAGE
111 rsm psr.dt // use physical addressing for data
112 mov r31=pr // save the predicate registers
113 mov r19=IA64_KR(PT_BASE) // get page table base address
114 shl r21=r16,3 // shift bit 60 into sign bit
115 shr.u r17=r16,61 // get the region number into r17
118 #ifdef CONFIG_HUGETLB_PAGE
124 (p8) dep r25=r18,r25,2,6
128 cmp.eq p6,p7=5,r17 // is IFA pointing into region 5?
129 shr.u r18=r22,PGDIR_SHIFT // get bottom portion of pgd index bit
131 (p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
134 LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
136 .pred.rel "mutex", p6, p7
137 (p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
138 (p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
140 (p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=pgd_offset for region 5
141 (p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=pgd_offset for region[0-4]
142 cmp.eq p7,p6=0,r21 // unused address bits all zeroes?
143 #ifdef CONFIG_PGTABLE_4
144 shr.u r28=r22,PUD_SHIFT // shift pud index into position
146 shr.u r18=r22,PMD_SHIFT // shift pmd index into position
149 ld8 r17=[r17] // get *pgd (may be 0)
151 (p7) cmp.eq p6,p7=r17,r0 // was pgd_present(*pgd) == NULL?
152 #ifdef CONFIG_PGTABLE_4
153 dep r28=r28,r17,3,(PAGE_SHIFT-3) // r28=pud_offset(pgd,addr)
155 shr.u r18=r22,PMD_SHIFT // shift pmd index into position
156 (p7) ld8 r29=[r28] // get *pud (may be 0)
158 (p7) cmp.eq.or.andcm p6,p7=r29,r0 // was pud_present(*pud) == NULL?
159 dep r17=r18,r29,3,(PAGE_SHIFT-3) // r17=pmd_offset(pud,addr)
161 dep r17=r18,r17,3,(PAGE_SHIFT-3) // r17=pmd_offset(pgd,addr)
164 (p7) ld8 r20=[r17] // get *pmd (may be 0)
165 shr.u r19=r22,PAGE_SHIFT // shift pte index into position
167 (p7) cmp.eq.or.andcm p6,p7=r20,r0 // was pmd_present(*pmd) == NULL?
168 dep r21=r19,r20,3,(PAGE_SHIFT-3) // r21=pte_offset(pmd,addr)
170 (p7) ld8 r18=[r21] // read *pte
171 mov r19=cr.isr // cr.isr bit 32 tells us if this is an insn miss
173 (p7) tbit.z p6,p7=r18,_PAGE_P_BIT // page present bit cleared?
174 mov r22=cr.iha // get the VHPT address that caused the TLB miss
175 ;; // avoid RAW on p7
176 (p7) tbit.nz.unc p10,p11=r19,32 // is it an instruction TLB miss?
177 dep r23=0,r20,0,PAGE_SHIFT // clear low bits to get page address
179 (p10) itc.i r18 // insert the instruction TLB entry
180 (p11) itc.d r18 // insert the data TLB entry
181 (p6) br.cond.spnt.many page_fault // handle bad address/page not present (page fault)
184 #ifdef CONFIG_HUGETLB_PAGE
185 (p8) mov cr.itir=r25 // change to default page-size for VHPT
189 * Now compute and insert the TLB entry for the virtual page table. We never
190 * execute in a page table page so there is no need to set the exception deferral bit.
193 adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
199 * Tell the assembler's dependency-violation checker that the above "itc" instructions
200 * cannot possibly affect the following loads:
205 * Re-check the pagetable entries. If they changed, we may have received a ptc.g
206 * between reading the pagetable and the "itc". If so, flush the entry we
207 * inserted and retry. At this point, we have:
209 * r28 = equivalent of pud_offset(pgd, ifa)
210 * r17 = equivalent of pmd_offset(pud, ifa)
211 * r21 = equivalent of pte_offset(pmd, ifa)
217 ld8 r25=[r21] // read *pte again
218 ld8 r26=[r17] // read *pmd again
219 #ifdef CONFIG_PGTABLE_4
220 ld8 r19=[r28] // read *pud again
224 cmp.ne.or.andcm p6,p7=r26,r20 // did *pmd change
225 #ifdef CONFIG_PGTABLE_4
226 cmp.ne.or.andcm p6,p7=r19,r29 // did *pud change
228 mov r27=PAGE_SHIFT<<2
230 (p6) ptc.l r22,r27 // purge PTE page translation
231 (p7) cmp.ne.or.andcm p6,p7=r25,r18 // did *pte change
233 (p6) ptc.l r16,r27 // purge translation
236 mov pr=r31,-1 // restore predicate registers
241 /////////////////////////////////////////////////////////////////////////////////////////
242 // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
246 * The ITLB handler accesses the PTE via the virtually mapped linear
247 * page table. If a nested TLB miss occurs, we switch into physical
248 * mode, walk the page table, and then re-execute the PTE read and
249 * go on normally after that.
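 *
 * Schematically (illustrative C; vhpt_entry() stands for the cr.iha-based
 * lookup and itc_i() for the itc.i below):
 *
 *	pte_t pte = *vhpt_entry(ifa);	// this load may itself take a nested miss
 *	if (!pte_present(pte))
 *		return page_fault();
 *	itc_i(pte);			// install the instruction translation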
251 mov r16=cr.ifa // get virtual address
252 mov r29=b0 // save b0
253 mov r31=pr // save predicates
255 mov r17=cr.iha // get virtual address of PTE
256 movl r30=1f // load nested fault continuation point
258 1: ld8 r18=[r17] // read *pte
261 tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
262 (p6) br.cond.spnt page_fault
268 * Tell the assembler's dependency-violation checker that the above "itc" instructions
269 * cannot possibly affect the following loads:
273 ld8 r19=[r17] // read *pte again and see if same
274 mov r20=PAGE_SHIFT<<2 // setup page size for purge
285 /////////////////////////////////////////////////////////////////////////////////////////
286 // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
290 * The DTLB handler accesses the PTE via the virtually mapped linear
291 * page table. If a nested TLB miss occurs, we switch into physical
292 * mode, walk the page table, and then re-execute the PTE read and
293 * go on normally after that.
295 mov r16=cr.ifa // get virtual address
296 mov r29=b0 // save b0
297 mov r31=pr // save predicates
299 mov r17=cr.iha // get virtual address of PTE
300 movl r30=1f // load nested fault continuation point
302 1: ld8 r18=[r17] // read *pte
305 tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
306 (p6) br.cond.spnt page_fault
312 * Tell the assembler's dependency-violation checker that the above "itc" instructions
313 * cannot possibly affect the following loads:
317 ld8 r19=[r17] // read *pte again and see if same
318 mov r20=PAGE_SHIFT<<2 // setup page size for purge
329 /////////////////////////////////////////////////////////////////////////////////////////
330 // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
333 mov r16=cr.ifa // get address that caused the TLB miss
336 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
339 #ifdef CONFIG_DISABLE_VHPT
340 shr.u r22=r16,61 // get the region number into r22
342 cmp.gt p8,p0=6,r22 // access to region 0-5 (user space)?
347 (p8) mov r29=b0 // save b0
348 (p8) br.cond.dptk .itlb_fault
350 extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
351 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
352 shr.u r18=r16,57 // move address bit 61 to bit 4
354 andcm r18=0x10,r18 // bit 4=~address-bit(61)
355 cmp.ne p8,p0=r0,r23 // psr.cpl != 0?
356 or r19=r17,r19 // insert PTE control bits into r19
358 or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
359 (p8) br.cond.spnt page_fault
361 itc.i r19 // insert the TLB entry
367 /////////////////////////////////////////////////////////////////////////////////////////
368 // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
371 mov r16=cr.ifa // get address that caused the TLB miss
374 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
379 #ifdef CONFIG_DISABLE_VHPT
380 shr.u r22=r16,61 // get the region number into r22
382 cmp.gt p8,p0=6,r22 // access to region 0-5
387 (p8) mov r29=b0 // save b0
388 (p8) br.cond.dptk dtlb_fault
390 cmp.ge p10,p11=r16,r24 // access to per_cpu_data?
391 tbit.z p12,p0=r16,61 // access to region 6?
392 mov r25=PERCPU_PAGE_SHIFT << 2
393 mov r26=PERCPU_PAGE_SIZE
397 (p10) mov r19=IA64_KR(PER_CPU_DATA)
398 (p11) and r19=r19,r16 // clear non-ppn fields
399 extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
400 and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
401 tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
402 tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
404 (p10) sub r19=r19,r26
405 (p10) mov cr.itir=r25
407 (p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
408 (p12) dep r17=-1,r17,4,1 // set ma=UC for region 6 addr
409 (p8) br.cond.spnt page_fault
411 dep r21=-1,r21,IA64_PSR_ED_BIT,1
413 or r19=r19,r17 // insert PTE control bits into r19
416 (p7) itc.d r19 // insert the TLB entry
422 /////////////////////////////////////////////////////////////////////////////////////////
423 // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
424 ENTRY(nested_dtlb_miss)
426 * In the absence of kernel bugs, we get here when the virtually mapped linear
427 * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction
428 * Access-bit, or Data Access-bit faults). If the DTLB entry for the virtual page
429 * table is missing, a nested TLB miss fault is triggered and control is
430 * transferred to this point. When this happens, we lookup the pte for the
431 * faulting address by walking the page table in physical mode and return to the
432 * continuation point passed in register r30 (or call page_fault if the address is not mapped).
435 * Input: r16: faulting address
437 * r30: continuation address
440 * Output: r17: physical address of PTE of faulting address
442 * r30: continuation address
445 * Clobbered: b0, r18, r19, r21, r22, psr.dt (cleared)
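 *
 * A caller therefore follows the pattern used by the ITLB/DTLB handlers above
 * (the "1:" label is the continuation point passed in r30; on return r17 holds
 * the physical address of the PTE and psr.dt is still off):
 *
 *	mov r16=cr.ifa		// faulting address
 *	mov r29=b0		// b0 is clobbered; the caller restores it from r29 later
 *	movl r30=1f		// where to resume
 *	...
 * 1:	ld8 r18=[r17]		// re-read *pte, now via the physical address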
447 rsm psr.dt // switch to using physical data addressing
448 mov r19=IA64_KR(PT_BASE) // get the page table base address
449 shl r21=r16,3 // shift bit 60 into sign bit
452 shr.u r17=r16,61 // get the region number into r17
453 extr.u r18=r18,2,6 // get the faulting page size
455 cmp.eq p6,p7=5,r17 // is faulting address in region 5?
456 add r22=-PAGE_SHIFT,r18 // adjustment for hugetlb address
457 add r18=PGDIR_SHIFT-PAGE_SHIFT,r18
461 (p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
464 LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
466 .pred.rel "mutex", p6, p7
467 (p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
468 (p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
470 (p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=pgd_offset for region 5
471 (p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=pgd_offset for region[0-4]
472 cmp.eq p7,p6=0,r21 // unused address bits all zeroes?
473 #ifdef CONFIG_PGTABLE_4
474 shr.u r18=r22,PUD_SHIFT // shift pud index into position
476 shr.u r18=r22,PMD_SHIFT // shift pmd index into position
479 ld8 r17=[r17] // get *pgd (may be 0)
481 (p7) cmp.eq p6,p7=r17,r0 // was pgd_present(*pgd) == NULL?
482 dep r17=r18,r17,3,(PAGE_SHIFT-3) // r17=p[u|m]d_offset(pgd,addr)
484 #ifdef CONFIG_PGTABLE_4
485 (p7) ld8 r17=[r17] // get *pud (may be 0)
486 shr.u r18=r22,PMD_SHIFT // shift pmd index into position
488 (p7) cmp.eq.or.andcm p6,p7=r17,r0 // was pud_present(*pud) == NULL?
489 dep r17=r18,r17,3,(PAGE_SHIFT-3) // r17=pmd_offset(pud,addr)
492 (p7) ld8 r17=[r17] // get *pmd (may be 0)
493 shr.u r19=r22,PAGE_SHIFT // shift pte index into position
495 (p7) cmp.eq.or.andcm p6,p7=r17,r0 // was pmd_present(*pmd) == NULL?
496 dep r17=r19,r17,3,(PAGE_SHIFT-3) // r17=pte_offset(pmd,addr);
497 (p6) br.cond.spnt page_fault
499 br.sptk.many b0 // return to continuation point
500 END(nested_dtlb_miss)
503 /////////////////////////////////////////////////////////////////////////////////////////
504 // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
510 //-----------------------------------------------------------------------------------
511 // call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
518 alloc r15=ar.pfs,0,0,3,0
521 adds r3=8,r2 // set up second base pointer
523 ssm psr.ic | PSR_DEFAULT_BITS
525 srlz.i // guarantee that interruption collection is on
527 (p15) ssm psr.i // restore psr.i
528 movl r14=ia64_leave_kernel
533 adds out2=16,r12 // out2 = pointer to pt_regs
534 br.call.sptk.many b6=ia64_do_page_fault // ignore return address
538 /////////////////////////////////////////////////////////////////////////////////////////
539 // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
546 /////////////////////////////////////////////////////////////////////////////////////////
547 // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
551 * What we do here is to simply turn on the dirty bit in the PTE. We need to
552 * update both the page-table and the TLB entry. To efficiently access the PTE,
553 * we address it through the virtual page table. Most likely, the TLB entry for
554 * the relevant virtual page table page is still present in the TLB so we can
555 * normally do this without additional TLB misses. In case the necessary virtual
556 * page table TLB entry isn't present, we take a nested TLB miss hit where we look
557 * up the physical address of the L3 PTE and then continue at label 1 below.
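 *
 * On SMP the update below is a single compare-and-swap attempt, roughly
 * (illustrative C; cmpxchg() stands in for cmpxchg8.acq and itc_d() for itc.d):
 *
 *	old = *pte_vaddr;			// via the virtual page table
 *	if (pte_present(old)) {
 *		new = old | _PAGE_D | _PAGE_A;
 *		if (cmpxchg(pte_vaddr, old, new) == old)
 *			itc_d(new);		// install the updated translation
 *	}
 *	// if the PTE changed underneath us, the re-read further down purges the
 *	// stale translation and the access simply re-faults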
559 mov r16=cr.ifa // get the address that caused the fault
560 movl r30=1f // load continuation point in case of nested fault
562 thash r17=r16 // compute virtual address of L3 PTE
563 mov r29=b0 // save b0 in case of nested fault
564 mov r31=pr // save pr
566 mov r28=ar.ccv // save ar.ccv
569 ;; // avoid RAW on r18
570 mov ar.ccv=r18 // set compare value for cmpxchg
571 or r25=_PAGE_D|_PAGE_A,r18 // set the dirty and accessed bits
572 tbit.z p7,p6 = r18,_PAGE_P_BIT // Check present bit
574 (p6) cmpxchg8.acq r26=[r17],r25,ar.ccv // Only update if page is present
575 mov r24=PAGE_SHIFT<<2
577 (p6) cmp.eq p6,p7=r26,r18 // Only compare if page is present
579 (p6) itc.d r25 // install updated PTE
582 * Tell the assembler's dependency-violation checker that the above "itc" instructions
583 * cannot possibly affect the following loads:
587 ld8 r18=[r17] // read PTE again
589 cmp.eq p6,p7=r18,r25 // is it same as the newly installed
592 mov b0=r29 // restore b0
597 ;; // avoid RAW on r18
598 or r18=_PAGE_D|_PAGE_A,r18 // set the dirty and accessed bits
599 mov b0=r29 // restore b0
601 st8 [r17]=r18 // store back updated PTE
602 itc.d r18 // install updated PTE
604 mov pr=r31,-1 // restore pr
609 /////////////////////////////////////////////////////////////////////////////////////////
610 // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
613 // Like Entry 8, except for instruction access
614 mov r16=cr.ifa // get the address that caused the fault
615 movl r30=1f // load continuation point in case of nested fault
616 mov r31=pr // save predicates
617 #ifdef CONFIG_ITANIUM
619 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
624 tbit.z p6,p0=r17,IA64_PSR_IS_BIT // IA64 instruction set?
626 (p6) mov r16=r18 // if so, use cr.iip instead of cr.ifa
627 #endif /* CONFIG_ITANIUM */
629 thash r17=r16 // compute virtual address of L3 PTE
630 mov r29=b0 // save b0 in case of nested fault
632 mov r28=ar.ccv // save ar.ccv
636 mov ar.ccv=r18 // set compare value for cmpxchg
637 or r25=_PAGE_A,r18 // set the accessed bit
638 tbit.z p7,p6 = r18,_PAGE_P_BIT // Check present bit
640 (p6) cmpxchg8.acq r26=[r17],r25,ar.ccv // Only if page present
641 mov r24=PAGE_SHIFT<<2
643 (p6) cmp.eq p6,p7=r26,r18 // Only if page present
645 (p6) itc.i r25 // install updated PTE
648 * Tell the assembler's dependency-violation checker that the above "itc" instructions
649 * cannot possibly affect the following loads:
653 ld8 r18=[r17] // read PTE again
655 cmp.eq p6,p7=r18,r25 // is it same as the newly installed
658 mov b0=r29 // restore b0
660 #else /* !CONFIG_SMP */
664 or r18=_PAGE_A,r18 // set the accessed bit
665 mov b0=r29 // restore b0
667 st8 [r17]=r18 // store back updated PTE
668 itc.i r18 // install updated PTE
669 #endif /* !CONFIG_SMP */
675 /////////////////////////////////////////////////////////////////////////////////////////
676 // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
679 // Like Entry 8, except for data access
680 mov r16=cr.ifa // get the address that caused the fault
681 movl r30=1f // load continuation point in case of nested fault
683 thash r17=r16 // compute virtual address of L3 PTE
685 mov r29=b0 // save b0 in case of nested fault
687 mov r28=ar.ccv // save ar.ccv
690 ;; // avoid RAW on r18
691 mov ar.ccv=r18 // set compare value for cmpxchg
692 or r25=_PAGE_A,r18 // set the accessed bit
693 tbit.z p7,p6 = r18,_PAGE_P_BIT // Check present bit
695 (p6) cmpxchg8.acq r26=[r17],r25,ar.ccv // Only if page is present
696 mov r24=PAGE_SHIFT<<2
698 (p6) cmp.eq p6,p7=r26,r18 // Only if page is present
700 (p6) itc.d r25 // install updated PTE
702 * Tell the assembler's dependency-violation checker that the above "itc" instructions
703 * cannot possibly affect the following loads:
707 ld8 r18=[r17] // read PTE again
709 cmp.eq p6,p7=r18,r25 // is it same as the newly installed
716 ;; // avoid RAW on r18
717 or r18=_PAGE_A,r18 // set the accessed bit
719 st8 [r17]=r18 // store back updated PTE
720 itc.d r18 // install updated PTE
722 mov b0=r29 // restore b0
728 /////////////////////////////////////////////////////////////////////////////////////////
729 // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
732 * The streamlined system call entry/exit paths only save/restore the initial part
733 * of pt_regs. This implies that the callers of system-calls must adhere to the
734 * normal procedure calling conventions.
736 * Registers to be saved & restored:
737 * CR registers: cr.ipsr, cr.iip, cr.ifs
738 * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
739 * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
740 * Registers to be restored only:
741 * r8-r11: output value from the system call.
743 * During system call exit, scratch registers (including r15) are modified/cleared
744 * to prevent leaking bits from kernel to user level.
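 *
 * The dispatch logic below boils down to this (illustrative C; is_nat() and
 * the RBS/stack switch helper are hypothetical, the remaining names appear in
 * the code that follows):
 *
 *	if (cr_iim != __IA64_BREAK_SYSCALL)
 *		goto non_syscall;
 *	nr = r15 - 1024;		// unsigned, so r15 < 1024 also falls out of range
 *	handler = (nr <= NR_syscalls - 1 && !is_nat(r15))
 *			? sys_call_table[nr] : sys_ni_syscall;
 *	switch_to_kernel_rbs_and_stack();
 *	if (current_thread_info()->flags & _TIF_SYSCALL_TRACEAUDIT)
 *		ia64_trace_syscall();
 *	else
 *		(*handler)();		// returns via ia64_ret_from_syscall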
747 mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc)
748 mov r29=cr.ipsr // M2 (12 cyc)
749 mov r31=pr // I0 (2 cyc)
751 mov r17=cr.iim // M2 (2 cyc)
752 mov.m r27=ar.rsc // M2 (12 cyc)
753 mov r18=__IA64_BREAK_SYSCALL // A
756 mov.m r21=ar.fpsr // M2 (12 cyc)
757 mov r19=b6 // I0 (2 cyc)
759 mov.m r23=ar.bspstore // M2 (12 cyc)
760 mov.m r24=ar.rnat // M2 (5 cyc)
761 mov.i r26=ar.pfs // I0 (2 cyc)
765 mov r20=r1 // A save r1
768 movl r30=sys_call_table // X
770 mov r28=cr.iip // M2 (2 cyc)
771 cmp.eq p0,p7=r18,r17 // I0 is this a system call?
772 (p7) br.cond.spnt non_syscall // B no ->
774 // From this point on, we are definitely on the syscall-path
775 // and we can use (non-banked) scratch registers.
777 ///////////////////////////////////////////////////////////////////////
778 mov r1=r16 // A move task-pointer to "addl"-addressable reg
779 mov r2=r16 // A setup r2 for ia64_syscall_setup
780 add r9=TI_FLAGS+IA64_TASK_SIZE,r16 // A r9 = &current_thread_info()->flags
782 adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
783 adds r15=-1024,r15 // A subtract 1024 from syscall number
784 mov r3=NR_syscalls - 1
786 ld1.bias r17=[r16] // M0|1 r17 = current->thread.on_ustack flag
787 ld4 r9=[r9] // M0|1 r9 = current_thread_info()->flags
788 extr.u r8=r29,41,2 // I0 extract ei field from cr.ipsr
790 shladd r30=r15,3,r30 // A r30 = sys_call_table + 8*(syscall-1024)
791 addl r22=IA64_RBS_OFFSET,r1 // A compute base of RBS
792 cmp.leu p6,p7=r15,r3 // A syscall number in range?
795 lfetch.fault.excl.nt1 [r22] // M0|1 prefetch RBS
796 (p6) ld8 r30=[r30] // M0|1 load address of syscall entry point
797 tnat.nz.or p7,p0=r15 // I0 is syscall nr a NaT?
799 mov.m ar.bspstore=r22 // M2 switch to kernel RBS
800 cmp.eq p8,p9=2,r8 // A ipsr.ei==2?
803 (p8) mov r8=0 // A clear ei to 0
804 (p7) movl r30=sys_ni_syscall // X
806 (p8) adds r28=16,r28 // A switch cr.iip to next bundle
807 (p9) adds r8=1,r8 // A increment ei to next slot
808 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
810 mov b6=r30 // I0 setup syscall handler branch reg early
816 mov.m r25=ar.unat // M2 (5 cyc)
817 dep r29=r8,r29,41,2 // I0 insert new ei into cr.ipsr
818 adds r15=1024,r15 // A restore original syscall number
820 // If any of the above loads miss in L1D, we'll stall here until the data becomes available.
823 ///////////////////////////////////////////////////////////////////////
824 st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag
825 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
826 mov.m r30=ar.itc // M get cycle for accounting
828 mov b6=r30 // I0 setup syscall handler branch reg early
830 cmp.eq pKStk,pUStk=r0,r17 // A were we on kernel stacks already?
832 and r9=_TIF_SYSCALL_TRACEAUDIT,r9 // A mask trace or audit
833 mov r18=ar.bsp // M2 (12 cyc)
834 (pKStk) br.cond.spnt .break_fixup // B we're already in kernel-mode -- fix up RBS
836 .back_from_break_fixup:
837 (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A compute base of memory stack
838 cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited?
839 br.call.sptk.many b7=ia64_syscall_setup // B
841 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
842 // mov.m r30=ar.itc is called in advance, and r13 is current
843 add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13 // A
844 add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13 // A
845 (pKStk) br.cond.spnt .skip_accounting // B unlikely skip
847 ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP // M get last stamp
848 ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE // M time at leave
850 ld8 r20=[r16],TI_AC_STAMP-TI_AC_STIME // M cumulated stime
851 ld8 r21=[r17] // M cumulated utime
852 sub r22=r19,r18 // A stime before leave
854 st8 [r16]=r30,TI_AC_STIME-TI_AC_STAMP // M update stamp
855 sub r18=r30,r19 // A elapsed time in user
857 add r20=r20,r22 // A sum stime
858 add r21=r21,r18 // A sum utime
860 st8 [r16]=r20 // M update stime
861 st8 [r17]=r21 // M update utime
865 mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0
867 bsw.1 // B (6 cyc) regs are saved, switch to bank 1
870 ssm psr.ic | PSR_DEFAULT_BITS // M2 now it's safe to re-enable intr.-collection
871 movl r3=ia64_ret_from_syscall // X
874 srlz.i // M0 ensure interruption collection is on
875 mov rp=r3 // I0 set the real return addr
876 (p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT
878 (p15) ssm psr.i // M2 restore psr.i
879 (p14) br.call.sptk.many b6=b6 // B invoke syscall-handler (ignore return addr)
880 br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamagic
882 ///////////////////////////////////////////////////////////////////////
883 // On entry, we optimistically assumed that we're coming from user-space.
884 // For the rare cases where a system-call is done from within the kernel,
885 // we fix things up at this point:
887 add r1=-IA64_PT_REGS_SIZE,sp // A allocate space for pt_regs structure
888 mov ar.rnat=r24 // M2 restore kernel's AR.RNAT
890 mov ar.bspstore=r23 // M2 restore kernel's AR.BSPSTORE
891 br.cond.sptk .back_from_break_fixup
895 /////////////////////////////////////////////////////////////////////////////////////////
896 // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
899 mov r31=pr // prepare to save predicates
901 SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
902 ssm psr.ic | PSR_DEFAULT_BITS
904 adds r3=8,r2 // set up second base pointer for SAVE_REST
905 srlz.i // ensure everybody knows psr.ic is back on
909 MCA_RECOVER_RANGE(interrupt)
910 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
911 mov out0=cr.ivr // pass cr.ivr as first arg
912 add out1=16,sp // pass pointer to pt_regs as second arg
914 srlz.d // make sure we see the effect of cr.ivr
915 movl r14=ia64_leave_kernel
918 br.call.sptk.many b6=ia64_handle_irq
922 /////////////////////////////////////////////////////////////////////////////////////////
923 // 0x3400 Entry 13 (size 64 bundles) Reserved
928 /////////////////////////////////////////////////////////////////////////////////////////
929 // 0x3800 Entry 14 (size 64 bundles) Reserved
934 * There is no particular reason for this code to be here, other than that
935 * there happens to be space here that would go unused otherwise. If this
936 * fault ever gets "unreserved", simply move the following code to a more suitable spot...
939 * ia64_syscall_setup() is a separate subroutine so that it can
940 * allocate stacked registers and thus safely demine any
941 * potential NaT values from the input registers.
944 * - executing on bank 0 or bank 1 register set (doesn't matter)
945 * - r1: stack pointer
946 * - r2: current task pointer
948 * - r11: original contents (saved ar.pfs to be saved)
949 * - r12: original contents (sp to be saved)
950 * - r13: original contents (tp to be saved)
951 * - r15: original contents (syscall # to be saved)
952 * - r18: saved bsp (after switching to kernel stack)
954 * - r20: saved r1 (gp)
955 * - r21: saved ar.fpsr
956 * - r22: kernel's register backing store base (krbs_base)
957 * - r23: saved ar.bspstore
958 * - r24: saved ar.rnat
959 * - r25: saved ar.unat
960 * - r26: saved ar.pfs
961 * - r27: saved ar.rsc
962 * - r28: saved cr.iip
963 * - r29: saved cr.ipsr
964 * - r30: ar.itc for accounting (don't touch)
966 * - b0: original contents (to be saved)
968 * - p10: TRUE if syscall is invoked with more than 8 out
969 * registers or r15's NaT is true
971 * - r3: preserved (same as on entry)
972 * - r8: -EINVAL if p10 is true
973 * - r12: points to kernel stack
974 * - r13: points to current task
975 * - r14: preserved (same as on entry)
977 * - p15: TRUE if interrupts need to be re-enabled
978 * - ar.fpsr: set to kernel settings
979 * - b6: preserved (same as on entry)
981 GLOBAL_ENTRY(ia64_syscall_setup)
983 # error This code assumes that b6 is the first field in pt_regs.
985 st8 [r1]=r19 // save b6
986 add r16=PT(CR_IPSR),r1 // initialize first base pointer
987 add r17=PT(R11),r1 // initialize second base pointer
989 alloc r19=ar.pfs,8,0,0,0 // ensure in0-in7 are writable
990 st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR) // save cr.ipsr
993 st8.spill [r17]=r11,PT(CR_IIP)-PT(R11) // save r11
995 (pKStk) mov r18=r0 // make sure r18 isn't NaT
998 st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs
999 st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP) // save cr.iip
1000 mov r28=b0 // save b0 (2 cyc)
1003 st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT) // save ar.unat
1004 dep r19=0,r19,38,26 // clear all bits but 0..37 [I0]
1008 st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS) // store ar.pfs.pfm in cr.ifs
1009 extr.u r11=r19,7,7 // I0 // get sol of ar.pfs
1010 and r8=0x7f,r19 // A // get sof of ar.pfs
1012 st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
1013 tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
1017 (pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8
1021 (pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field
1022 (pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field
1026 tnat.nz p12,p0=in4 // [I0]
1029 (pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
1030 (pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
1031 shl r18=r18,16 // compute ar.rsc to be used for "loadrs"
1033 st8 [r16]=r31,PT(LOADRS)-PT(PR) // save predicates
1034 st8 [r17]=r28,PT(R1)-PT(B0) // save b0
1035 tnat.nz p13,p0=in5 // [I0]
1037 st8 [r16]=r18,PT(R12)-PT(LOADRS) // save ar.rsc value for "loadrs"
1038 st8.spill [r17]=r20,PT(R13)-PT(R1) // save original r1
1042 .mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12) // save r12
1043 .mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13) // save r13
1046 st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr
1048 cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8
1051 (p9) tnat.nz p10,p0=r15
1052 adds r12=-16,r1 // switch to kernel memory stack (with 16 bytes of scratch)
1054 st8.spill [r17]=r15 // save r15
1058 mov r13=r2 // establish `current'
1059 movl r1=__gp // establish kernel global pointer
1061 st8 [r16]=r8 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
1065 cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
1066 movl r17=FPSR_DEFAULT
1068 mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
1069 (p10) mov r8=-EINVAL
1071 END(ia64_syscall_setup)
1073 .org ia64_ivt+0x3c00
1074 /////////////////////////////////////////////////////////////////////////////////////////
1075 // 0x3c00 Entry 15 (size 64 bundles) Reserved
1079 .org ia64_ivt+0x4000
1080 /////////////////////////////////////////////////////////////////////////////////////////
1081 // 0x4000 Entry 16 (size 64 bundles) Reserved
1085 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
1087 * There is no particular reason for this code to be here, other than
1088 * that there happens to be space here that would go unused otherwise.
1089 * If this fault ever gets "unreserved", simply move the following
1090 * code to a more suitable spot...
1092 * account_sys_enter is called from SAVE_MIN* macros if accounting is
1093 * enabled and if the macro is entered from user mode.
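 *
 * In plain C the bookkeeping amounts to (sketch; ac_stamp/ac_leave are the
 * TI_AC_STAMP/TI_AC_LEAVE fields of thread_info, and "now" was read from
 * ar.itc earlier by the SAVE_MIN* macro):
 *
 *	stime += ac_leave - ac_stamp;	// time spent in the kernel last time round
 *	utime += now - ac_leave;	// time spent in user mode since we left
 *	ac_stamp = now;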
1095 ENTRY(account_sys_enter)
1096 // mov.m r20=ar.itc is called in advance, and r13 is current
1097 add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13
1098 add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13
1100 ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP // time at last check in kernel
1101 ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE // time at leave from kernel
1103 ld8 r23=[r16],TI_AC_STAMP-TI_AC_STIME // cumulated stime
1104 ld8 r21=[r17] // cumulated utime
1105 sub r22=r19,r18 // stime before leave kernel
1107 st8 [r16]=r20,TI_AC_STIME-TI_AC_STAMP // update stamp
1108 sub r18=r20,r19 // elapsed time in user mode
1110 add r23=r23,r22 // sum stime
1111 add r21=r21,r18 // sum utime
1113 st8 [r16]=r23 // update stime
1114 st8 [r17]=r21 // update utime
1117 END(account_sys_enter)
1120 .org ia64_ivt+0x4400
1121 /////////////////////////////////////////////////////////////////////////////////////////
1122 // 0x4400 Entry 17 (size 64 bundles) Reserved
1127 mov ar.rsc=r27 // restore ar.rsc before SAVE_MIN_WITH_COVER
1131 // There is no particular reason for this code to be here, other than that
1132 // there happens to be space here that would go unused otherwise. If this
1133 // fault ever gets "unreserved", simply move the following code to a more suitable spot...
1136 alloc r14=ar.pfs,0,0,2,0
1139 adds r3=8,r2 // set up second base pointer for SAVE_REST
1141 ssm psr.ic | PSR_DEFAULT_BITS
1143 srlz.i // guarantee that interruption collection is on
1145 (p15) ssm psr.i // restore psr.i
1146 movl r15=ia64_leave_kernel
1151 br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and ignore return addr
1154 .org ia64_ivt+0x4800
1155 /////////////////////////////////////////////////////////////////////////////////////////
1156 // 0x4800 Entry 18 (size 64 bundles) Reserved
1161 * There is no particular reason for this code to be here, other than that
1162 * there happens to be space here that would go unused otherwise. If this
1163 * fault ever gets "unreserved", simply move the following code to a more suitable spot...
1167 ENTRY(dispatch_unaligned_handler)
1170 alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
1174 ssm psr.ic | PSR_DEFAULT_BITS
1176 srlz.i // guarantee that interruption collection is on
1178 (p15) ssm psr.i // restore psr.i
1179 adds r3=8,r2 // set up second base pointer
1182 movl r14=ia64_leave_kernel
1185 br.sptk.many ia64_prepare_handle_unaligned
1186 END(dispatch_unaligned_handler)
1188 .org ia64_ivt+0x4c00
1189 /////////////////////////////////////////////////////////////////////////////////////////
1190 // 0x4c00 Entry 19 (size 64 bundles) Reserved
1195 * There is no particular reason for this code to be here, other than that
1196 * there happens to be space here that would go unused otherwise. If this
1197 * fault ever gets "unreserved", simply move the following code to a more suitable spot...
1201 ENTRY(dispatch_to_fault_handler)
1205 * r19: fault vector number (e.g., 24 for General Exception)
1206 * r31: contains saved predicates (pr)
1208 SAVE_MIN_WITH_COVER_R19
1209 alloc r14=ar.pfs,0,0,5,0
1216 ssm psr.ic | PSR_DEFAULT_BITS
1218 srlz.i // guarantee that interruption collection is on
1220 (p15) ssm psr.i // restore psr.i
1221 adds r3=8,r2 // set up second base pointer for SAVE_REST
1224 movl r14=ia64_leave_kernel
1227 br.call.sptk.many b6=ia64_fault
1228 END(dispatch_to_fault_handler)
1231 // --- End of long entries, Beginning of short entries
1234 .org ia64_ivt+0x5000
1235 /////////////////////////////////////////////////////////////////////////////////////////
1236 // 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
1237 ENTRY(page_not_present)
1242 * The Linux page fault handler doesn't expect non-present pages to be in
1243 * the TLB. Flush the existing entry now, so we meet that expectation.
1245 mov r17=PAGE_SHIFT<<2
1251 br.sptk.many page_fault
1252 END(page_not_present)
1254 .org ia64_ivt+0x5100
1255 /////////////////////////////////////////////////////////////////////////////////////////
1256 // 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
1257 ENTRY(key_permission)
1264 br.sptk.many page_fault
1267 .org ia64_ivt+0x5200
1268 /////////////////////////////////////////////////////////////////////////////////////////
1269 // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
1270 ENTRY(iaccess_rights)
1277 br.sptk.many page_fault
1280 .org ia64_ivt+0x5300
1281 /////////////////////////////////////////////////////////////////////////////////////////
1282 // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
1283 ENTRY(daccess_rights)
1290 br.sptk.many page_fault
1293 .org ia64_ivt+0x5400
1294 /////////////////////////////////////////////////////////////////////////////////////////
1295 // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
1296 ENTRY(general_exception)
1302 (p6) br.sptk.many dispatch_illegal_op_fault
1304 mov r19=24 // fault number
1305 br.sptk.many dispatch_to_fault_handler
1306 END(general_exception)
1308 .org ia64_ivt+0x5500
1309 /////////////////////////////////////////////////////////////////////////////////////////
1310 // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
1311 ENTRY(disabled_fp_reg)
1313 rsm psr.dfh // ensure we can access fph
1318 br.sptk.many dispatch_to_fault_handler
1319 END(disabled_fp_reg)
1321 .org ia64_ivt+0x5600
1322 /////////////////////////////////////////////////////////////////////////////////////////
1323 // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
1324 ENTRY(nat_consumption)
1329 mov r31=pr // save PR
1331 and r18=0xf,r17 // r18 = cr.isr.code{3:0}
1332 tbit.z p6,p0=r17,IA64_ISR_NA_BIT
1334 cmp.ne.or p6,p0=IA64_ISR_CODE_LFETCH,r18
1335 dep r16=-1,r16,IA64_PSR_ED_BIT,1
1336 (p6) br.cond.spnt 1f // branch if (cr.isr.na == 0 || cr.isr.code{3:0} != LFETCH)
1338 mov cr.ipsr=r16 // set cr.ipsr.ed
1346 END(nat_consumption)
1348 .org ia64_ivt+0x5700
1349 /////////////////////////////////////////////////////////////////////////////////////////
1350 // 0x5700 Entry 27 (size 16 bundles) Speculation (40)
1351 ENTRY(speculation_vector)
1354 * A [f]chk.[as] instruction needs to take the branch to the recovery code but
1355 * this part of the architecture is not implemented in hardware on some CPUs, such
1356 * as Itanium. Thus, in general we need to emulate the behavior. IIM contains
1357 * the relative target (not yet sign extended). So after sign extending it we
1358 * simply add it to IIP. We also need to reset the EI field of the IPSR to zero,
1359 * i.e., the slot to restart into.
1361 * cr.iim contains zero_ext(imm21)
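 *
 * Numerically, IIM holds a 21-bit bundle displacement, so the code below
 * computes  target = IIP + sign_extend(imm21) * 16  (16 bytes per bundle):
 * shifting left by 43 moves the sign bit of imm21 into bit 63, and the
 * arithmetic shift right by 39 = 43 - 4 both sign-extends the value and
 * leaves it pre-multiplied by 16.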
1366 shl r18=r18,43 // put sign bit in position (43=64-21)
1370 shr r18=r18,39 // sign extend (39=43-4)
1373 add r17=r17,r18 // now add the offset
1376 dep r16=0,r16,41,2 // clear EI
1383 END(speculation_vector)
1385 .org ia64_ivt+0x5800
1386 /////////////////////////////////////////////////////////////////////////////////////////
1387 // 0x5800 Entry 28 (size 16 bundles) Reserved
1391 .org ia64_ivt+0x5900
1392 /////////////////////////////////////////////////////////////////////////////////////////
1393 // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
1399 .org ia64_ivt+0x5a00
1400 /////////////////////////////////////////////////////////////////////////////////////////
1401 // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
1402 ENTRY(unaligned_access)
1404 mov r31=pr // prepare to save predicates
1406 br.sptk.many dispatch_unaligned_handler
1407 END(unaligned_access)
1409 .org ia64_ivt+0x5b00
1410 /////////////////////////////////////////////////////////////////////////////////////////
1411 // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
1412 ENTRY(unsupported_data_reference)
1415 END(unsupported_data_reference)
1417 .org ia64_ivt+0x5c00
1418 /////////////////////////////////////////////////////////////////////////////////////////
1419 // 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
1420 ENTRY(floating_point_fault)
1423 END(floating_point_fault)
1425 .org ia64_ivt+0x5d00
1426 /////////////////////////////////////////////////////////////////////////////////////////
1427 // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
1428 ENTRY(floating_point_trap)
1431 END(floating_point_trap)
1433 .org ia64_ivt+0x5e00
1434 /////////////////////////////////////////////////////////////////////////////////////////
1435 // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
1436 ENTRY(lower_privilege_trap)
1439 END(lower_privilege_trap)
1441 .org ia64_ivt+0x5f00
1442 /////////////////////////////////////////////////////////////////////////////////////////
1443 // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
1444 ENTRY(taken_branch_trap)
1447 END(taken_branch_trap)
1449 .org ia64_ivt+0x6000
1450 /////////////////////////////////////////////////////////////////////////////////////////
1451 // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
1452 ENTRY(single_step_trap)
1455 END(single_step_trap)
1457 .org ia64_ivt+0x6100
1458 /////////////////////////////////////////////////////////////////////////////////////////
1459 // 0x6100 Entry 37 (size 16 bundles) Reserved
1463 .org ia64_ivt+0x6200
1464 /////////////////////////////////////////////////////////////////////////////////////////
1465 // 0x6200 Entry 38 (size 16 bundles) Reserved
1469 .org ia64_ivt+0x6300
1470 /////////////////////////////////////////////////////////////////////////////////////////
1471 // 0x6300 Entry 39 (size 16 bundles) Reserved
1475 .org ia64_ivt+0x6400
1476 /////////////////////////////////////////////////////////////////////////////////////////
1477 // 0x6400 Entry 40 (size 16 bundles) Reserved
1481 .org ia64_ivt+0x6500
1482 /////////////////////////////////////////////////////////////////////////////////////////
1483 // 0x6500 Entry 41 (size 16 bundles) Reserved
1487 .org ia64_ivt+0x6600
1488 /////////////////////////////////////////////////////////////////////////////////////////
1489 // 0x6600 Entry 42 (size 16 bundles) Reserved
1493 .org ia64_ivt+0x6700
1494 /////////////////////////////////////////////////////////////////////////////////////////
1495 // 0x6700 Entry 43 (size 16 bundles) Reserved
1499 .org ia64_ivt+0x6800
1500 /////////////////////////////////////////////////////////////////////////////////////////
1501 // 0x6800 Entry 44 (size 16 bundles) Reserved
1505 .org ia64_ivt+0x6900
1506 /////////////////////////////////////////////////////////////////////////////////////////
1507 // 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
1508 ENTRY(ia32_exception)
1513 .org ia64_ivt+0x6a00
1514 /////////////////////////////////////////////////////////////////////////////////////////
1515 // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
1516 ENTRY(ia32_intercept)
1518 #ifdef CONFIG_IA32_SUPPORT
1522 extr.u r17=r16,16,8 // get ISR.code
1524 mov r19=cr.iim // old eflag value
1527 (p6) br.cond.spnt 1f // not a system flag fault
1530 extr.u r17=r16,18,1 // get the eflags.ac bit
1533 (p6) br.cond.spnt 1f // eflags.ac bit didn't change
1535 mov pr=r31,-1 // restore predicate registers
1539 #endif // CONFIG_IA32_SUPPORT
1543 .org ia64_ivt+0x6b00
1544 /////////////////////////////////////////////////////////////////////////////////////////
1545 // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
1546 ENTRY(ia32_interrupt)
1548 #ifdef CONFIG_IA32_SUPPORT
1550 br.sptk.many dispatch_to_ia32_handler
1556 .org ia64_ivt+0x6c00
1557 /////////////////////////////////////////////////////////////////////////////////////////
1558 // 0x6c00 Entry 48 (size 16 bundles) Reserved
1562 .org ia64_ivt+0x6d00
1563 /////////////////////////////////////////////////////////////////////////////////////////
1564 // 0x6d00 Entry 49 (size 16 bundles) Reserved
1568 .org ia64_ivt+0x6e00
1569 /////////////////////////////////////////////////////////////////////////////////////////
1570 // 0x6e00 Entry 50 (size 16 bundles) Reserved
1574 .org ia64_ivt+0x6f00
1575 /////////////////////////////////////////////////////////////////////////////////////////
1576 // 0x6f00 Entry 51 (size 16 bundles) Reserved
1580 .org ia64_ivt+0x7000
1581 /////////////////////////////////////////////////////////////////////////////////////////
1582 // 0x7000 Entry 52 (size 16 bundles) Reserved
1586 .org ia64_ivt+0x7100
1587 /////////////////////////////////////////////////////////////////////////////////////////
1588 // 0x7100 Entry 53 (size 16 bundles) Reserved
1592 .org ia64_ivt+0x7200
1593 /////////////////////////////////////////////////////////////////////////////////////////
1594 // 0x7200 Entry 54 (size 16 bundles) Reserved
1598 .org ia64_ivt+0x7300
1599 /////////////////////////////////////////////////////////////////////////////////////////
1600 // 0x7300 Entry 55 (size 16 bundles) Reserved
1604 .org ia64_ivt+0x7400
1605 /////////////////////////////////////////////////////////////////////////////////////////
1606 // 0x7400 Entry 56 (size 16 bundles) Reserved
1610 .org ia64_ivt+0x7500
1611 /////////////////////////////////////////////////////////////////////////////////////////
1612 // 0x7500 Entry 57 (size 16 bundles) Reserved
1616 .org ia64_ivt+0x7600
1617 /////////////////////////////////////////////////////////////////////////////////////////
1618 // 0x7600 Entry 58 (size 16 bundles) Reserved
1622 .org ia64_ivt+0x7700
1623 /////////////////////////////////////////////////////////////////////////////////////////
1624 // 0x7700 Entry 59 (size 16 bundles) Reserved
1628 .org ia64_ivt+0x7800
1629 /////////////////////////////////////////////////////////////////////////////////////////
1630 // 0x7800 Entry 60 (size 16 bundles) Reserved
1634 .org ia64_ivt+0x7900
1635 /////////////////////////////////////////////////////////////////////////////////////////
1636 // 0x7900 Entry 61 (size 16 bundles) Reserved
1640 .org ia64_ivt+0x7a00
1641 /////////////////////////////////////////////////////////////////////////////////////////
1642 // 0x7a00 Entry 62 (size 16 bundles) Reserved
1646 .org ia64_ivt+0x7b00
1647 /////////////////////////////////////////////////////////////////////////////////////////
1648 // 0x7b00 Entry 63 (size 16 bundles) Reserved
1652 .org ia64_ivt+0x7c00
1653 /////////////////////////////////////////////////////////////////////////////////////////
1654 // 0x7c00 Entry 64 (size 16 bundles) Reserved
1658 .org ia64_ivt+0x7d00
1659 /////////////////////////////////////////////////////////////////////////////////////////
1660 // 0x7d00 Entry 65 (size 16 bundles) Reserved
1664 .org ia64_ivt+0x7e00
1665 /////////////////////////////////////////////////////////////////////////////////////////
1666 // 0x7e00 Entry 66 (size 16 bundles) Reserved
1670 .org ia64_ivt+0x7f00
1671 /////////////////////////////////////////////////////////////////////////////////////////
1672 // 0x7f00 Entry 67 (size 16 bundles) Reserved
1677 * Squatting in this space ...
1679 * This special case dispatcher for illegal operation faults allows preserved
1680 * registers to be modified through a callback function (asm only) that is handed
1681 * back from the fault handler in r8. Up to three arguments can be passed to the
1682 * callback function by returning an aggregate with the callback as its first
1683 * element, followed by the arguments.
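 *
 * In C terms the fault handler returns something like the following aggregate,
 * which the ia64 ABI hands back in r8-r11 (sketch; the field names here are
 * illustrative):
 *
 *	struct illegal_op_return {
 *		unsigned long fkt;		// callback to invoke, 0 if none
 *		unsigned long arg1, arg2, arg3;	// become out0-out2 of the callback
 *	};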
1685 ENTRY(dispatch_illegal_op_fault)
1689 ssm psr.ic | PSR_DEFAULT_BITS
1691 srlz.i // guarantee that interruption collection is on
1693 (p15) ssm psr.i // restore psr.i
1694 adds r3=8,r2 // set up second base pointer for SAVE_REST
1696 alloc r14=ar.pfs,0,0,1,0 // must be first in insn group
1700 PT_REGS_UNWIND_INFO(0)
1702 br.call.sptk.many rp=ia64_illegal_op_fault
1704 alloc r14=ar.pfs,0,0,3,0 // must be first in insn group
1708 movl r15=ia64_leave_kernel
1714 (p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel
1715 br.sptk.many ia64_leave_kernel
1716 END(dispatch_illegal_op_fault)
1718 #ifdef CONFIG_IA32_SUPPORT
1721 * There is no particular reason for this code to be here, other than that
1722 * there happens to be space here that would go unused otherwise. If this
1723 * fault ever gets "unreserved", simply move the following code to a more suitable spot...
1727 // IA32 interrupt entry point
1729 ENTRY(dispatch_to_ia32_handler)
1733 ssm psr.ic | PSR_DEFAULT_BITS
1735 srlz.i // guarantee that interruption collection is on
1738 adds r3=8,r2 // Base pointer for SAVE_REST
1743 shr r14=r14,16 // Get interrupt number
1745 cmp.ne p6,p0=r14,r15
1746 (p6) br.call.dpnt.many b6=non_ia32_syscall
1748 adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp // 16 byte hole per SW conventions
1749 adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
1751 cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
1752 ld8 r8=[r14] // get r8
1754 st8 [r15]=r8 // save original EAX in r1 (IA32 procs don't use the GP)
1756 alloc r15=ar.pfs,0,0,6,0 // must be first in an insn group
1758 ld4 r8=[r14],8 // r8 == eax (syscall number)
1759 mov r15=IA32_NR_syscalls
1761 cmp.ltu.unc p6,p7=r8,r15
1762 ld4 out1=[r14],8 // r9 == ecx
1764 ld4 out2=[r14],8 // r10 == edx
1766 ld4 out0=[r14] // r11 == ebx
1767 adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp
1769 ld4 out5=[r14],PT(R14)-PT(R13) // r13 == ebp
1771 ld4 out3=[r14],PT(R15)-PT(R14) // r14 == esi
1772 adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
1774 ld4 out4=[r14] // r15 == edi
1775 movl r16=ia32_syscall_table
1777 (p6) shladd r16=r8,3,r16 // force ni_syscall if not valid syscall number
1778 ld4 r2=[r2] // r2 = current_thread_info()->flags
1781 and r2=_TIF_SYSCALL_TRACEAUDIT,r2 // mask trace or audit
1784 movl r15=ia32_ret_from_syscall
1788 (p8) br.call.sptk.many b6=b6
1789 br.cond.sptk ia32_trace_syscall
1792 alloc r15=ar.pfs,0,0,2,0
1793 mov out0=r14 // interrupt #
1794 add out1=16,sp // pointer to pt_regs
1795 ;; // avoid WAW on CFM
1796 br.call.sptk.many rp=ia32_bad_interrupt
1797 .ret1: movl r15=ia64_leave_kernel
1801 END(dispatch_to_ia32_handler)
1803 #endif /* CONFIG_IA32_SUPPORT */