2 // assembly portion of the IA64 MCA handling
4 // Mods by cfleck to integrate into kernel build
5 // 00/03/15 davidm Added various stop bits to get a clean compile
7 // 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp
8 // kstack, switch modes, jump to C INIT handler
10 // 02/01/04 J.Hall <jenna.s.hall@intel.com>
11 // Before entering virtual mode code:
12 // 1. Check for TLB CPU error
13 // 2. Restore current thread pointer to kr6
14 // 3. Move stack ptr 16 bytes to conform to C calling convention
16 // 04/11/12 Russ Anderson <rja@sgi.com>
17 // Added per cpu MCA/INIT stack save areas.
19 #include <linux/config.h>
20 #include <linux/threads.h>
22 #include <asm/asmmacro.h>
23 #include <asm/pgtable.h>
24 #include <asm/processor.h>
25 #include <asm/mca_asm.h>
29 * When we get a machine check, the kernel stack pointer is no longer
30 * valid, so we need to set a new stack pointer.
32 #define MINSTATE_PHYS /* Make sure stack access is physical for MINSTATE */
35 * Needed for return context to SAL
// Status codes handed back to SAL in the OS-to-SAL handoff area:
// resume in the same context, or request a cold reboot of the machine.
37 #define IA64_MCA_SAME_CONTEXT 0
38 #define IA64_MCA_COLD_BOOT -2
43 * SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec)
45 * 2. GR8 = PAL_PROC physical address
46 * 3. GR9 = SAL_PROC physical address
47 * 4. GR10 = SAL GP (physical)
48 * 5. GR11 = Rendez state
49 * 6. GR12 = Return address to location within SAL_CHECK
// Save the SAL-to-OS handoff registers (r1, r8-r12, r17) as consecutive
// 8-byte slots in ia64_sal_to_os_handoff_state, using physical addressing
// (the kernel stack/VM state is not trusted at this point). _tmp is
// clobbered. The store order must match ia64_mca_sal_to_os_state_t in
// include/asm/mca.h (see the note at the use site in ia64_os_mca_dispatch).
// NOTE(review): the macro continues past this excerpt (trailing '\');
// confirm the remaining stores against the full source.
51 #define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp) \
52 LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);; \
53 st8 [_tmp]=r1,0x08;; \
54 st8 [_tmp]=r8,0x08;; \
55 st8 [_tmp]=r9,0x08;; \
56 st8 [_tmp]=r10,0x08;; \
57 st8 [_tmp]=r11,0x08;; \
58 st8 [_tmp]=r12,0x08;; \
59 st8 [_tmp]=r17,0x08;; \
63 * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec)
64 * (p6) is executed if we never entered virtual mode (TLB error)
65 * (p7) is executed if we entered virtual mode as expected (normal case)
66 * 1. GR8 = OS_MCA return status
67 * 2. GR9 = SAL GP (physical)
68 * 3. GR10 = 0/1 returning same/new context
69 * 4. GR22 = New min state save area pointer
70 * returns ptr to SAL rtn save loc in _tmp
// Reload the SAL return registers (r8, r9, r10 — and, per the list above,
// r22, whose load is not visible in this excerpt) from
// ia64_os_to_sal_handoff_state, with the pointer converted virtual->physical
// first. On exit _tmp points at the SAL return-address save slot.
72 #define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp) \
73 movl _tmp=ia64_os_to_sal_handoff_state;; \
74 DATA_VA_TO_PA(_tmp);; \
75 ld8 r8=[_tmp],0x08;; \
76 ld8 r9=[_tmp],0x08;; \
77 ld8 r10=[_tmp],0x08;; \
79 // now _tmp is pointing to SAL rtn save location
82 * COLD_BOOT_HANDOFF_STATE() sets ia64_mca_os_to_sal_state
83 * imots_os_status=IA64_MCA_COLD_BOOT
85 * imots_context=IA64_MCA_SAME_CONTEXT
86 * imots_new_min_state=Min state save area pointer
87 * imots_sal_check_ra=Return address to location within SAL_CHECK
// Fill in ia64_os_to_sal_handoff_state to ask SAL for a cold boot:
// status = IA64_MCA_COLD_BOOT, then a field copied from offset 0 of the
// sal_to_os area, context = IA64_MCA_SAME_CONTEXT, then the fields at
// offsets 48 and 40 of the sal_to_os area (min-state pointer and SAL_CHECK
// return address, per the layout stored by the SAVE macro).
// All three register arguments are clobbered; both areas are accessed
// through physical addresses (__pa).
90 #define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
91 movl tmp=IA64_MCA_COLD_BOOT; \
92 movl sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state); \
93 movl os_to_sal_handoff=__pa(ia64_os_to_sal_handoff_state);; \
94 st8 [os_to_sal_handoff]=tmp,8;; \
95 ld8 tmp=[sal_to_os_handoff],48;; \
96 st8 [os_to_sal_handoff]=tmp,8;; \
97 movl tmp=IA64_MCA_SAME_CONTEXT;; \
98 st8 [os_to_sal_handoff]=tmp,8;; \
99 ld8 tmp=[sal_to_os_handoff],-8;; \
100 st8 [os_to_sal_handoff]=tmp,8;; \
101 ld8 tmp=[sal_to_os_handoff];; \
102 st8 [os_to_sal_handoff]=tmp;;
// Load the physical address of this CPU's ia64_mca_data save area into reg.
// NOTE(review): the macro continues past this excerpt (trailing '\') —
// presumably dereferencing the per-cpu pointer; confirm with full source.
104 #define GET_IA64_MCA_DATA(reg) \
105 GET_THIS_PADDR(reg, ia64_mca_data) \
109 .global ia64_os_mca_dispatch
110 .global ia64_os_mca_dispatch_end
111 .global ia64_sal_to_os_handoff_state
112 .global ia64_os_to_sal_handoff_state
113 .global ia64_do_tlb_purge
119 * Just the TLB purge part is moved to a separate function
120 * so we can re-use the code for cpu hotplug code as well
121 * Caller should now setup b1, so we can branch once the
122 * tlb flush is complete.
// Shorthand for struct cpuinfo_ia64 field offsets used below.
126 #define O(member) IA64_CPUINFO_##member##_OFFSET
// Fetch the ptc.e purge parameters (base, count[0..1], stride[0..1]) from
// this CPU's cpu_info, addressed physically, for the purge loops below.
128 GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
130 addl r17=O(PTCE_STRIDE),r2
131 addl r2=O(PTCE_BASE),r2
133 ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));; // r18=ptce_base
134 ld4 r19=[r2],4 // r19=ptce_count[0]
135 ld4 r21=[r17],4 // r21=ptce_stride[0]
137 ld4 r20=[r2] // r20=ptce_count[1]
138 ld4 r22=[r17] // r22=ptce_stride[1]
// NOTE(review): the nested ptc.e purge loop is largely elided in this
// excerpt; r24 appears to be the outer loop counter checked against
// ptce_count[0] — confirm against the full source.
146 cmp.ltu p6,p7=r24,r19
147 (p7) br.cond.dpnt.few 4f
// Serialize after the purges before touching the TRs.
160 srlz.i // srlz.i implies srlz.d
163 // Now purge addresses formerly mapped by TR registers
164 // 1. Purge ITR&DTR for kernel.
165 movl r16=KERNEL_START
166 mov r18=KERNEL_TR_PAGE_SHIFT<<2
175 // 2. Purge DTR for PERCPU data.
177 mov r18=PERCPU_PAGE_SHIFT<<2
183 // 3. Purge ITR for PAL code.
184 GET_THIS_PADDR(r2, ia64_mca_pal_base)
187 mov r18=IA64_GRANULE_SHIFT<<2
193 // 4. Purge DTR for stack.
// The current kernel stack's granule number is kept in kr(CURRENT_STACK).
194 mov r16=IA64_KR(CURRENT_STACK)
196 shl r16=r16,IA64_GRANULE_SHIFT
200 mov r18=IA64_GRANULE_SHIFT<<2
// Caller set up b1 beforehand (see header comment above).
206 // Now branch away to caller.
// OS entry point for a Machine Check Abort, registered with SAL.
// Runs in physical mode on an untrusted stack: serializes MCA handling
// across CPUs, saves the SAL handoff state, dumps processor state,
// optionally purges/reloads the TLB (on a TLB check), switches to the
// per-cpu MCA stack, enters virtual mode to run the C handler, then
// returns to SAL.
210 ia64_os_mca_dispatch:
212 // Serialize all MCA processing
214 LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
// NOTE(review): the cmpxchg/test on the serialize word is elided here;
// losers of the race spin in ia64_os_mca_spin.
218 (p6) br ia64_os_mca_spin
220 // Save the SAL to OS MCA handoff state as defined
222 // NOTE : The order in which the state gets saved
223 // is dependent on the way the C-structure
224 // for ia64_mca_sal_to_os_state_t has been
225 // defined in include/asm/mca.h
226 SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
229 // LOG PROCESSOR STATE INFO FROM HERE ON..
231 br ia64_os_mca_proc_state_dump;;
233 ia64_os_mca_done_dump:
// Offset 56 in the handoff area is the processor state parameter (r18
// from PALE_CHECK); the elided test decides whether a TLB purge is needed.
235 LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
237 ld8 r18=[r16] // Get processor state parameter on existing PALE_CHECK.
240 (p7) br.spnt done_tlb_purge_and_reload
242 // The following code purges TC and TR entries. Then reload all TC entries.
243 // Purge percpu data TC entries.
244 begin_tlb_purge_and_reload:
// Set up b1 = ia64_reload_tr so ia64_do_tlb_purge can branch back here.
245 movl r18=ia64_reload_tr;;
246 LOAD_PHYSICAL(p0,r18,ia64_reload_tr);;
248 br.sptk.many ia64_do_tlb_purge;;
251 // Finally reload the TR registers.
252 // 1. Reload DTR/ITR registers for kernel.
253 mov r18=KERNEL_TR_PAGE_SHIFT<<2
254 movl r17=KERNEL_START
258 mov r16=IA64_TR_KERNEL
// Build the kernel identity PTE: clear page-offset bits of the address.
262 dep r17=0,r19,0, KERNEL_TR_PAGE_SHIFT
273 // 2. Reload DTR register for PERCPU data.
274 GET_THIS_PADDR(r2, ia64_mca_per_cpu_pte)
276 movl r16=PERCPU_ADDR // vaddr
277 movl r18=PERCPU_PAGE_SHIFT<<2
282 ld8 r18=[r2] // load per-CPU PTE
283 mov r16=IA64_TR_PERCPU_DATA;
289 // 3. Reload ITR for PAL code.
290 GET_THIS_PADDR(r2, ia64_mca_pal_pte)
292 ld8 r18=[r2] // load PAL PTE
294 GET_THIS_PADDR(r2, ia64_mca_pal_base)
296 ld8 r16=[r2] // load PAL vaddr
297 mov r19=IA64_GRANULE_SHIFT<<2
301 mov r20=IA64_TR_PALCODE
307 // 4. Reload DTR for stack.
308 mov r16=IA64_KR(CURRENT_STACK)
310 shl r16=r16,IA64_GRANULE_SHIFT
317 mov r19=IA64_GRANULE_SHIFT<<2
321 mov r20=IA64_TR_CURRENT_STACK
327 br.sptk.many done_tlb_purge_and_reload
// Error path (elided label in this excerpt): give up and ask SAL for a
// cold boot, then take the normal return-to-SAL exit.
329 COLD_BOOT_HANDOFF_STATE(r20,r21,r22)
330 br.sptk.many ia64_os_mca_done_restore
332 done_tlb_purge_and_reload:
334 // Setup new stack frame for OS_MCA handling
335 GET_IA64_MCA_DATA(r2)
337 add r3 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
338 add r2 = IA64_MCA_CPU_RBSTORE_OFFSET, r2
340 rse_switch_context(r6,r3,r2);; // RSC management in this new context
342 GET_IA64_MCA_DATA(r2)
// Point sp at the top of the per-cpu MCA stack, minus the 16-byte
// scratch area required by the C calling convention.
344 add r2 = IA64_MCA_CPU_STACK_OFFSET+IA64_MCA_STACK_SIZE-16, r2
346 mov r12=r2 // establish new stack-pointer
348 // Enter virtual mode from physical mode
349 VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
350 ia64_os_mca_virtual_begin:
352 // Call virtual mode handler
353 movl r2=ia64_mca_ucmc_handler;;
355 br.call.sptk.many b0=b6;;
357 // Revert back to physical mode before going back to SAL
358 PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
359 ia64_os_mca_virtual_end:
361 // restore the original stack frame here
362 GET_IA64_MCA_DATA(r2)
364 add r2 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
368 rse_return_context(r4,r3,r2) // switch from interrupt context for RSE
370 // let us restore all the registers from our PSI structure
373 begin_os_mca_restore:
374 br ia64_os_mca_proc_state_restore;;
376 ia64_os_mca_done_restore:
377 OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(r2);;
378 // branch back to SALE_CHECK
380 mov b0=r3;; // SAL_CHECK return address
// Release the MCA serialization lock before returning to SAL
// (the clearing store is elided in this excerpt).
383 movl r3=ia64_mca_serialize;;
389 ia64_os_mca_dispatch_end:
390 //EndMain//////////////////////////////////////////////////////////////////////
395 // ia64_os_mca_proc_state_dump()
399 // This stub dumps the processor state during MCHK to a data area
// Layout of the dump area must match what the restore stub below reads
// back; r2 walks the area with post-increment stores throughout.
403 ia64_os_mca_proc_state_dump:
404 // Save bank 1 GRs 16-31 which will be used by c-language code when we switch
405 // to virtual addressing mode.
406 GET_IA64_MCA_DATA(r2)
408 add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
411 mov r5=ar.unat // save caller's ar.unat; restored after the spills below
413 // save banked GRs 16-31 along with NaT bits
415 st8.spill [r2]=r16,8;;
416 st8.spill [r2]=r17,8;;
417 st8.spill [r2]=r18,8;;
418 st8.spill [r2]=r19,8;;
419 st8.spill [r2]=r20,8;;
420 st8.spill [r2]=r21,8;;
421 st8.spill [r2]=r22,8;;
422 st8.spill [r2]=r23,8;;
423 st8.spill [r2]=r24,8;;
424 st8.spill [r2]=r25,8;;
425 st8.spill [r2]=r26,8;;
426 st8.spill [r2]=r27,8;;
427 st8.spill [r2]=r28,8;;
428 st8.spill [r2]=r29,8;;
429 st8.spill [r2]=r30,8;;
430 st8.spill [r2]=r31,8;;
433 st8 [r2]=r4,8 // save User NaT bits for r16-r31
434 mov ar.unat=r5 // restore original unat
// From here on, state is stored three slots at a time through r2/r4/r6
// (base, base+8, base+16), each triple advancing by 24 bytes.
438 add r4=8,r2 // duplicate r2 in r4
439 add r6=2*8,r2 // duplicate r2 in r6
462 add r4=8,r2 // duplicate r2 in r4
463 add r6=2*8,r2 // duplicate r2 in r6
471 st8 [r6]=r7,3*8;; // 48 byte increments
474 st8 [r2]=r3,8*8;; // 64 byte increments
476 // if PSR.ic=0, reading interruption registers causes an illegal operation fault
478 tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test
479 (p6) st8 [r2]=r0,9*8+160 // increment by 232 byte inc.
480 begin_skip_intr_regs:
481 (p6) br SkipIntrRegs;;
483 add r4=8,r2 // duplicate r2 in r4
484 add r6=2*8,r2 // duplicate r2 in r6
507 mov r3=cr25;; // cr.iha
508 st8 [r2]=r3,160;; // 160 byte increment
// Skip over the reserved cr slots (not readable/meaningful here).
511 st8 [r2]=r0,152;; // another 152 byte increment
513 add r4=8,r2 // duplicate r2 in r4
514 add r6=2*8,r2 // duplicate r2 in r6
517 // mov r5=cr.ivr // cr.ivr, don't read it
// The following cr slots are stored as zero rather than read: reading
// them here is either destructive (eoi/ivr) or not meaningful.
523 mov r3=r0 // cr.eoi => cr67
524 mov r5=r0 // cr.irr0 => cr68
525 mov r7=r0;; // cr.irr1 => cr69
530 mov r3=r0 // cr.irr2 => cr70
531 mov r5=r0 // cr.irr3 => cr71
542 mov r3=r0 // cr.lrr0 => cr80
543 mov r5=r0;; // cr.lrr1 => cr81
551 add r4=8,r2 // duplicate r2 in r4
552 add r6=2*8,r2 // duplicate r2 in r6
570 mov r7=r0;; // ar.kr8
573 st8 [r6]=r7,10*8;; // increment by 10*8 bytes
576 mov ar.rsc=r0 // put RSE in enforced lazy mode
585 st8 [r2]=r3,8*13 // increment by 13x8 bytes
597 st8 [r2]=r3,160 // 160 byte increment
607 add r2=8*62,r2 //padding
// Region-register save loop (setup elided in this excerpt).
618 br.cloop.sptk.few cStRR
621 br ia64_os_mca_done_dump;;
623 //EndStub//////////////////////////////////////////////////////////////////////
628 // ia64_os_mca_proc_state_restore()
632 // This is a stub to restore the saved processor state during MCHK
// Mirror image of ia64_os_mca_proc_state_dump: r2 walks the same dump
// area with post-increment loads, in the same order and strides.
636 ia64_os_mca_proc_state_restore:
638 // Restore bank1 GR16-31
639 GET_IA64_MCA_DATA(r2)
641 add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
643 restore_GRs: // restore bank-1 GRs 16-31
645 add r3=16*8,r2;; // to get to NaT of GR 16-31
// NOTE(review): the load of the saved NaT word into r3 appears to be
// elided in this excerpt; ar.unat must hold the saved NaT bits before
// the ld8.fill sequence below.
647 mov ar.unat=r3;; // first restore NaT
649 ld8.fill r16=[r2],8;;
650 ld8.fill r17=[r2],8;;
651 ld8.fill r18=[r2],8;;
652 ld8.fill r19=[r2],8;;
653 ld8.fill r20=[r2],8;;
654 ld8.fill r21=[r2],8;;
655 ld8.fill r22=[r2],8;;
656 ld8.fill r23=[r2],8;;
657 ld8.fill r24=[r2],8;;
658 ld8.fill r25=[r2],8;;
659 ld8.fill r26=[r2],8;;
660 ld8.fill r27=[r2],8;;
661 ld8.fill r28=[r2],8;;
662 ld8.fill r29=[r2],8;;
663 ld8.fill r30=[r2],8;;
664 ld8.fill r31=[r2],8;;
666 ld8 r3=[r2],8;; // increment to skip NaT
// As in the dump stub, state is read three slots at a time via r2/r4/r6.
670 add r4=8,r2 // duplicate r2 in r4
671 add r6=2*8,r2;; // duplicate r2 in r6
693 add r4=8,r2 // duplicate r2 in r4
694 add r6=2*8,r2;; // duplicate r2 in r6
698 ld8 r7=[r6],3*8;; // 48 byte increments
703 ld8 r3=[r2],8*8;; // 64 byte increments
707 // if PSR.ic=1, reading interruption registers causes an illegal operation fault
709 tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test
// NOTE(review): a store of r0 in the restore path mirrors the dump path's
// slot-skip; presumably it only advances r2 past the intr-reg block —
// verify against the full source.
710 (p6) st8 [r2]=r0,9*8+160 // increment by 232 byte inc.
712 begin_rskip_intr_regs:
713 (p6) br rSkipIntrRegs;;
715 add r4=8,r2 // duplicate r2 in r4
716 add r6=2*8,r2;; // duplicate r2 in r6
722 // mov cr.isr=r5 // cr.isr is read only
738 ld8 r3=[r2],160;; // 160 byte increment
742 ld8 r3=[r2],152;; // another 152 byte inc.
744 add r4=8,r2 // duplicate r2 in r4
745 add r6=2*8,r2;; // duplicate r2 in r6
// Read-only control registers are loaded past but not written back.
751 // mov cr.ivr=r5 // cr.ivr is read only
758 // mov cr.irr0=r5 // cr.irr0 is read only
759 // mov cr.irr1=r7;; // cr.irr1 is read only
764 // mov cr.irr2=r3 // cr.irr2 is read only
765 // mov cr.irr3=r5 // cr.irr3 is read only
783 add r4=8,r2 // duplicate r2 in r4
784 add r6=2*8,r2;; // duplicate r2 in r6
811 // mov ar.bsp=r5 // ar.bsp is read only
812 mov ar.rsc=r0 // make sure that RSE is in enforced lazy mode
829 ld8 r3=[r2],160;; // 160 byte increment
840 add r2=8*62,r2;; // padding
// Region-register restore loop (setup elided in this excerpt).
849 mov rr[r7]=r3 // what are its access privileges?
851 br.cloop.sptk.few cStRRr
856 br ia64_os_mca_done_restore;;
858 //EndStub//////////////////////////////////////////////////////////////////////
861 // ok, the issue here is that we need to save state information so
862 // it can be usable by the kernel debugger and show regs routines.
863 // In order to do this, our best bet is to save the current state (plus
864 // the state information obtained from the MIN_STATE_AREA) into a pt_regs
865 // format. This way we can pass it on in a usable format.
869 // SAL to OS entry point for INIT on the monarch processor
870 // This has been defined for registration purposes with SAL
871 // as a part of ia64_mca_init.
873 // When we get here, the following registers have been
874 // set by the SAL for our use
876 // 1. GR1 = OS INIT GP
877 // 2. GR8 = PAL_PROC physical address
878 // 3. GR9 = SAL_PROC physical address
879 // 4. GR10 = SAL GP (physical)
880 // 5. GR11 = Init Reason
881 // 0 = Received INIT for event other than crash dump switch
882 // 1 = Received wakeup at the end of an OS_MCA corrected machine check
883 // 2 = Received INIT due to CrashDump switch assertion
885 // 6. GR12 = Return address to location within SAL_INIT procedure
// SAL entry point for INIT on the monarch CPU (registered via
// ia64_mca_init; see the register documentation in the comment block
// above). Saves the SAL handoff state, builds a pt_regs/switch_stack
// snapshot on the stack, switches to virtual mode via rfi, then calls
// the C handler ia64_init_handler(pt_regs *, switch_stack *).
888 GLOBAL_ENTRY(ia64_monarch_init_handler)
890 // stash the information the SAL passed to os
891 SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
// NOTE(review): the pt_regs-format state save is largely elided in this
// excerpt; r2/r3 serve as the two store base pointers for it.
897 adds r3=8,r2 // set up second base pointer
901 // ok, enough should be saved at this point to be dangerous, and supply
902 // information for a dump
903 // We need to switch to Virtual mode before hitting the C functions.
// r2 = PSR bits to enable (translation, interruption collection, bank 1);
// the elided instructions OR these into the current PSR for the rfi.
905 movl r2=IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN
906 mov r3=psr // get the current psr, minimum enabled at this point
910 movl r3=IVirtual_Switch
912 mov cr.iip=r3 // short return to set the appropriate bits
913 mov cr.ipsr=r2 // need to do an rfi to set appropriate bits
919 // We should now be running virtual
921 // Let's call the C handler to get the rest of the state info
923 alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
925 adds out0=16,sp // out0 = pointer to pt_regs
929 adds out1=16,sp // out1 = pointer to switch_stack
931 br.call.sptk.many rp=ia64_init_handler
935 br.sptk return_from_init
936 END(ia64_monarch_init_handler)
939 // SAL to OS entry point for INIT on the slave processor
940 // This has been defined for registration purposes with SAL
941 // as a part of ia64_mca_init.
944 GLOBAL_ENTRY(ia64_slave_init_handler)
// NOTE(review): the handler body is elided in this excerpt.
946 END(ia64_slave_init_handler)