/*
 * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Entry to the kernel is "interesting":
 *  (1) There are no stack pointers, not even for the kernel
 *  (2) General Registers should not be clobbered
 *  (3) There are no kernel-only data registers
 *  (4) Since all addressing modes are relative to a General Register, no
 *      global variables can be reached
 *
 * We deal with this by declaring that we shall kill GR28 on entering the
 * kernel from userspace.
 *
 * However, since break interrupts can interrupt the CPU even when PSR.ET==0,
 * they can't rely on GR28 being anything useful, and so they need to clobber
 * a separate register (GR31).  Break interrupts are managed in break.S.
 *
 * GR29 _is_ saved, and holds the current task pointer globally.
 */
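# Note on the virtual interrupt convention used by the handlers below (as the
# flag manipulations in this file suggest): ICC2.Z set with ICC2.C clear means
# interrupts are virtually disabled, and ICC2.Z clear with ICC2.C set means
# they are fully enabled; the "virtually disabled" / "virtual reenable" stubs
# further down use the C flag to record an interrupt that arrived whilst
# virtually disabled so it can be delivered on re-enablement.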
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/ptrace.h>
#include <asm/errno.h>
#include <asm/cache.h>
#include <asm/spr-regs.h>
#define nr_syscalls ((syscall_table_size)/4)
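# (syscall_table_size is computed at the end of this file as the size of
#  sys_call_table; since each entry there is a 4-byte .long, dividing by 4
#  gives the number of syscall slots)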
# sethi.p %hi(0xe1200004),gr30
# setlo %lo(0xe1200004),gr30

# sethi.p %hi(0xffc00100),gr30
# setlo %lo(0xffc00100),gr30

# sethi.p %hi(0xe1200004),gr30
# setlo %lo(0xe1200004),gr30
# st.p gr31,@(gr30,gr0)

# sethi.p %hi(0xffc00100),gr30
# setlo %lo(0xffc00100),gr30
# sth gr31,@(gr30,gr0)

###############################################################################
#
# entry point for External interrupts received whilst executing userspace code
#
###############################################################################
.globl __entry_uspace_external_interrupt
.type __entry_uspace_external_interrupt,@function
__entry_uspace_external_interrupt:
sethi.p %hi(__kernel_frame0_ptr),gr28
setlo %lo(__kernel_frame0_ptr),gr28

# handle h/w single-step through exceptions
sti gr0,@(gr28,#REG__STATUS)
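# (storing zero here clears the frame's single-step flag; the return path
#  later in this file tests REG__STATUS_STEP in this word to decide whether
#  to take the single-step return via break.S)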

.globl __entry_uspace_external_interrupt_reentry
__entry_uspace_external_interrupt_reentry:

# finish building the exception frame
sti sp, @(gr28,#REG_SP)
stdi gr2, @(gr28,#REG_GR(2))
stdi gr4, @(gr28,#REG_GR(4))
stdi gr6, @(gr28,#REG_GR(6))
stdi gr8, @(gr28,#REG_GR(8))
stdi gr10,@(gr28,#REG_GR(10))
stdi gr12,@(gr28,#REG_GR(12))
stdi gr14,@(gr28,#REG_GR(14))
stdi gr16,@(gr28,#REG_GR(16))
stdi gr18,@(gr28,#REG_GR(18))
stdi gr20,@(gr28,#REG_GR(20))
stdi gr22,@(gr28,#REG_GR(22))
stdi gr24,@(gr28,#REG_GR(24))
stdi gr26,@(gr28,#REG_GR(26))
sti gr0, @(gr28,#REG_GR(28))
sti gr29,@(gr28,#REG_GR(29))
stdi.p gr30,@(gr28,#REG_GR(30))
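# (each stdi above stores an even/odd register pair in one go; the GR28 slot
#  is written from gr0 - which always reads as zero - because GR28 itself was
#  clobbered on entry to the kernel, as noted at the top of this file)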

# set up the kernel stack pointer

andi gr22,#PSR_PS,gr5 /* try to rebuild original PSR value */
andi.p gr22,#~(PSR_PS|PSR_S),gr6
andi gr5,#~PSR_ET,gr5

sti gr20,@(gr28,#REG_TBR)
sti gr21,@(gr28,#REG_PC)
sti gr5 ,@(gr28,#REG_PSR)
sti gr23,@(gr28,#REG_ISR)
stdi gr24,@(gr28,#REG_CCR)
stdi gr26,@(gr28,#REG_LR)
sti gr4 ,@(gr28,#REG_SYSCALLNO)
stdi gr4,@(gr28,#REG_IACC0)
stdi.p gr4,@(gr28,#REG_GNER0)

# interrupts start off fully disabled in the interrupt handler
subcc gr0,gr0,gr0,icc2 /* set Z and clear C */

# set up kernel global registers
sethi.p %hi(__kernel_current_task),gr5
setlo %lo(__kernel_current_task),gr5
sethi.p %hi(_gp),gr16
ldi.p @(gr29,#4),gr15 ; __current_thread_info = current->thread_info

# make sure we (the kernel) get div-zero and misalignment exceptions
setlos #ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5

# switch to the kernel trap table
sethi.p %hi(__entry_kerneltrap_table),gr6
setlo %lo(__entry_kerneltrap_table),gr6

# set the return address
sethi.p %hi(__entry_return_from_user_interrupt),gr4
setlo %lo(__entry_return_from_user_interrupt),gr4

# raise the minimum interrupt priority to 15 (NMI only) and enable exceptions
ori gr4,#PSR_PIL_14,gr4
ori gr4,#PSR_PIL_14|PSR_ET,gr4

.size __entry_uspace_external_interrupt,.-__entry_uspace_external_interrupt

###############################################################################
#
# entry point for External interrupts received whilst executing kernel code
# - on arriving here, the following registers should already be set up:
#     GR15 - current thread_info struct pointer
#     GR16 - kernel GP-REL pointer
#     GR29 - current task struct pointer
#     TBR  - kernel trap vector table
#     ISR  - kernel's preferred integer controls
#
###############################################################################
.globl __entry_kernel_external_interrupt
.type __entry_kernel_external_interrupt,@function
__entry_kernel_external_interrupt:
# set up the stack pointer
sti gr30,@(sp,#REG_SP)

# handle h/w single-step through exceptions
sti gr0,@(sp,#REG__STATUS)

.globl __entry_kernel_external_interrupt_reentry
__entry_kernel_external_interrupt_reentry:

# set up the exception frame
setlos #REG__END,gr30
sti.p gr28,@(sp,#REG_GR(28))

# finish building the exception frame
stdi gr2,@(gr28,#REG_GR(2))
stdi gr4,@(gr28,#REG_GR(4))
stdi gr6,@(gr28,#REG_GR(6))
stdi gr8,@(gr28,#REG_GR(8))
stdi gr10,@(gr28,#REG_GR(10))
stdi gr12,@(gr28,#REG_GR(12))
stdi gr14,@(gr28,#REG_GR(14))
stdi gr16,@(gr28,#REG_GR(16))
stdi gr18,@(gr28,#REG_GR(18))
stdi gr20,@(gr28,#REG_GR(20))
stdi gr22,@(gr28,#REG_GR(22))
stdi gr24,@(gr28,#REG_GR(24))
stdi gr26,@(gr28,#REG_GR(26))
sti gr29,@(gr28,#REG_GR(29))
stdi.p gr30,@(gr28,#REG_GR(30))

# note virtual interrupts will be fully enabled upon return
subicc gr0,#1,gr0,icc2 /* clear Z, set C */

andi gr22,#PSR_PS,gr5 /* try to rebuild original PSR value */
andi.p gr22,#~(PSR_PS|PSR_S),gr6
andi.p gr5,#~PSR_ET,gr5

# set CCCR.CC3 to Undefined to abort atomic-modify completion inside the kernel
# - for an explanation of how it works, see: Documentation/frv/atomic-ops.txt
andi gr25,#~0xc0,gr25
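# (roughly: the atomic-op sequences end with stores that are conditional on
#  CC3, so forcing CC3 to the "undefined" state here stops an interrupted
#  sequence's conditional completion from happening, and the sequence restarts
#  from the beginning - see the document referenced above)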

sti gr20,@(gr28,#REG_TBR)
sti gr21,@(gr28,#REG_PC)
sti gr5 ,@(gr28,#REG_PSR)
sti gr23,@(gr28,#REG_ISR)
stdi gr24,@(gr28,#REG_CCR)
stdi gr26,@(gr28,#REG_LR)
sti gr4 ,@(gr28,#REG_SYSCALLNO)
stdi gr4,@(gr28,#REG_IACC0)
stdi.p gr4,@(gr28,#REG_GNER0)

# interrupts start off fully disabled in the interrupt handler
subcc gr0,gr0,gr0,icc2 /* set Z and clear C */

# set the return address
sethi.p %hi(__entry_return_from_kernel_interrupt),gr4
setlo %lo(__entry_return_from_kernel_interrupt),gr4

# clear power-saving mode flags
andi gr4,#~HSR0_PDM,gr4
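# (HSR0.PDM is the hardware status register's power-down mode field;
#  presumably it is cleared so that writing the register back does not put
#  the CPU back into whatever sleep state it was in when the interrupt hit)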

# raise the minimum interrupt priority to 15 (NMI only) and enable exceptions
ori gr4,#PSR_PIL_14,gr4

.size __entry_kernel_external_interrupt,.-__entry_kernel_external_interrupt

###############################################################################
#
# deal with interrupts that were actually virtually disabled
# - we need to really disable them, flag the fact and return immediately
# - if you change this, you must alter break.S also
#
###############################################################################
.balign L1_CACHE_BYTES
.globl __entry_kernel_external_interrupt_virtually_disabled
.type __entry_kernel_external_interrupt_virtually_disabled,@function
__entry_kernel_external_interrupt_virtually_disabled:
andi gr30,#~PSR_PIL,gr30
ori gr30,#PSR_PIL_14,gr30 ; debugging interrupts only
subcc gr0,gr0,gr0,icc2 ; leave Z set, clear C
.size __entry_kernel_external_interrupt_virtually_disabled,.-__entry_kernel_external_interrupt_virtually_disabled
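# (the stub above masks the interrupt source by raising PSR.PIL and clears
#  ICC2.C to note that a real interrupt is now pending; the stub below is
#  entered when interrupts are virtually re-enabled and undoes that, letting
#  the deferred interrupt be taken for real)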

###############################################################################
#
# deal with re-enablement of interrupts that were pending when virtually re-enabled
# - set ICC2.C, re-enable the real interrupts and return
# - we can clear ICC2.Z because we shouldn't be here if it's not 0 [due to TIHI]
# - if you change this, you must alter break.S also
#
###############################################################################
.balign L1_CACHE_BYTES
.globl __entry_kernel_external_interrupt_virtual_reenable
.type __entry_kernel_external_interrupt_virtual_reenable,@function
__entry_kernel_external_interrupt_virtual_reenable:
andi gr30,#~PSR_PIL,gr30 ; re-enable interrupts
subicc gr0,#1,gr0,icc2 ; clear Z, set C
.size __entry_kernel_external_interrupt_virtual_reenable,.-__entry_kernel_external_interrupt_virtual_reenable

###############################################################################
#
# entry point for Software and Program interrupts generated whilst executing userspace code
#
###############################################################################
.globl __entry_uspace_softprog_interrupt
.type __entry_uspace_softprog_interrupt,@function
.globl __entry_uspace_handle_mmu_fault
__entry_uspace_softprog_interrupt:

__entry_uspace_handle_mmu_fault:
sethi.p %hi(__kernel_frame0_ptr),gr28
setlo %lo(__kernel_frame0_ptr),gr28

# handle h/w single-step through exceptions
sti gr0,@(gr28,#REG__STATUS)

.globl __entry_uspace_softprog_interrupt_reentry
__entry_uspace_softprog_interrupt_reentry:
setlos #REG__END,gr30

# set up the kernel stack pointer
sti.p sp,@(gr28,#REG_SP)
sti gr0,@(gr28,#REG_GR(28))
stdi gr20,@(gr28,#REG_GR(20))
stdi gr22,@(gr28,#REG_GR(22))

sethi.p %hi(__entry_return_from_user_exception),gr23
setlo %lo(__entry_return_from_user_exception),gr23

.size __entry_uspace_softprog_interrupt,.-__entry_uspace_softprog_interrupt

# single-stepping was disabled on entry to a TLB handler that then faulted
.globl __entry_uspace_handle_mmu_fault_sstep
__entry_uspace_handle_mmu_fault_sstep:
sethi.p %hi(__kernel_frame0_ptr),gr28
setlo %lo(__kernel_frame0_ptr),gr28

# flag single-step re-enablement
sti gr0,@(gr28,#REG__STATUS)
bra __entry_uspace_softprog_interrupt_reentry

###############################################################################
#
# entry point for Software and Program interrupts generated whilst executing kernel code
#
###############################################################################
.globl __entry_kernel_softprog_interrupt
.type __entry_kernel_softprog_interrupt,@function
__entry_kernel_softprog_interrupt:

.globl __entry_kernel_handle_mmu_fault
__entry_kernel_handle_mmu_fault:
# set up the stack pointer
sti sp,@(sp,#REG_SP-4)

# handle h/w single-step through exceptions
sti gr0,@(sp,#REG__STATUS)

.globl __entry_kernel_softprog_interrupt_reentry
__entry_kernel_softprog_interrupt_reentry:
setlos #REG__END,gr30

# set up the exception frame
sti.p gr28,@(sp,#REG_GR(28))
stdi gr20,@(gr28,#REG_GR(20))
stdi gr22,@(gr28,#REG_GR(22))

ldi @(sp,#REG_SP),gr22 /* reconstruct the old SP */
addi gr22,#REG__END,gr22
sti gr22,@(sp,#REG_SP)
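# (the REG_SP slot holds the already-decremented stack pointer at this point,
#  so adding the frame size REG__END back recovers the stack pointer that the
#  interrupted kernel code was using, and that is what gets saved in the frame)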

# set CCCR.CC3 to Undefined to abort atomic-modify completion inside the kernel
# - for an explanation of how it works, see: Documentation/frv/atomic-ops.txt
andi gr20,#~0xc0,gr20

sethi.p %hi(__entry_return_from_kernel_exception),gr23
setlo %lo(__entry_return_from_kernel_exception),gr23

.size __entry_kernel_softprog_interrupt,.-__entry_kernel_softprog_interrupt

# single-stepping was disabled on entry to a TLB handler that then faulted
.globl __entry_kernel_handle_mmu_fault_sstep
__entry_kernel_handle_mmu_fault_sstep:
# set up the stack pointer
sti sp,@(sp,#REG_SP-4)

# flag single-step re-enablement
sethi #REG__STATUS_STEP,gr30
sti gr30,@(sp,#REG__STATUS)
bra __entry_kernel_softprog_interrupt_reentry

###############################################################################
#
# the rest of the kernel entry point code
# - on arriving here, the following registers should be set up:
#     GR1  - kernel stack pointer
#     GR7  - syscall number (trap 0 only)
#     GR8-13 - syscall args (trap 0 only)
#     GR23 - return handler address
#     GR28 - exception frame on stack
#     SCR2 - saved EAR0 where applicable (clobbered by ICI & ICEF insns on FR451)
#     PSR  - PSR.S 1, PSR.ET 0
#
###############################################################################
.globl __entry_common
.type __entry_common,@function
__entry_common:

# finish building the exception frame
stdi gr2,@(gr28,#REG_GR(2))
stdi gr4,@(gr28,#REG_GR(4))
stdi gr6,@(gr28,#REG_GR(6))
stdi gr8,@(gr28,#REG_GR(8))
stdi gr10,@(gr28,#REG_GR(10))
stdi gr12,@(gr28,#REG_GR(12))
stdi gr14,@(gr28,#REG_GR(14))
stdi gr16,@(gr28,#REG_GR(16))
stdi gr18,@(gr28,#REG_GR(18))
stdi gr24,@(gr28,#REG_GR(24))
stdi gr26,@(gr28,#REG_GR(26))
sti gr29,@(gr28,#REG_GR(29))
stdi gr30,@(gr28,#REG_GR(30))

andi gr22,#PSR_PS,gr5 /* try to rebuild original PSR value */
andi.p gr22,#~(PSR_PS|PSR_S),gr6
andi gr5,#~PSR_ET,gr5

sti gr20,@(gr28,#REG_TBR)
sti gr21,@(gr28,#REG_PC)
sti gr5 ,@(gr28,#REG_PSR)
sti gr23,@(gr28,#REG_ISR)
stdi gr24,@(gr28,#REG_CCR)
stdi gr26,@(gr28,#REG_LR)
sti gr4 ,@(gr28,#REG_SYSCALLNO)
stdi gr4,@(gr28,#REG_IACC0)
stdi.p gr4,@(gr28,#REG_GNER0)

# set up virtual interrupt disablement
subicc gr0,#1,gr0,icc2 /* clear Z flag, set C flag */

# set up kernel global registers
sethi.p %hi(__kernel_current_task),gr5
setlo %lo(__kernel_current_task),gr5
sethi.p %hi(_gp),gr16
ldi @(gr29,#4),gr15 ; __current_thread_info = current->thread_info

# switch to the kernel trap table
sethi.p %hi(__entry_kerneltrap_table),gr6
setlo %lo(__entry_kerneltrap_table),gr6

# make sure we (the kernel) get div-zero and misalignment exceptions
setlos #ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5

# clear power-saving mode flags
andi gr4,#~HSR0_PDM,gr4

# multiplex again using old TBR as a guide
sethi %hi(__entry_vector_table),gr6
setlo %lo(__entry_vector_table),gr6

.size __entry_common,.-__entry_common

###############################################################################
#
# handle instruction MMU fault
#
###############################################################################
.globl __entry_insn_mmu_fault
__entry_insn_mmu_fault:

# now that we've accessed the exception regs, we can enable exceptions
sethi.p %hi(do_page_fault),gr5
setlo %lo(do_page_fault),gr5
jmpl @(gr5,gr0) ; call do_page_fault(0,esr0,ear0)

###############################################################################
#
# handle instruction access error
#
###############################################################################
.globl __entry_insn_access_error
__entry_insn_access_error:
sethi.p %hi(insn_access_error),gr5
setlo %lo(insn_access_error),gr5

# now that we've accessed the exception regs, we can enable exceptions
jmpl @(gr5,gr0) ; call insn_access_error(esfr1,epcr0,esr0)

###############################################################################
#
# handle various instructions of dubious legality
#
###############################################################################
.globl __entry_unsupported_trap
.globl __entry_illegal_instruction
.globl __entry_privileged_instruction
.globl __entry_debug_exception
__entry_unsupported_trap:
sti gr21,@(gr28,#REG_PC)
__entry_illegal_instruction:
__entry_privileged_instruction:
__entry_debug_exception:
sethi.p %hi(illegal_instruction),gr5
setlo %lo(illegal_instruction),gr5

# now that we've accessed the exception regs, we can enable exceptions
jmpl @(gr5,gr0) ; call ill_insn(esfr1,epcr0,esr0)

###############################################################################
#
# handle atomic operation emulation for userspace
#
###############################################################################
.globl __entry_atomic_op
__entry_atomic_op:
sethi.p %hi(atomic_operation),gr5
setlo %lo(atomic_operation),gr5

# now that we've accessed the exception regs, we can enable exceptions
jmpl @(gr5,gr0) ; call atomic_operation(esfr1,epcr0,esr0)

###############################################################################
#
# handle media exception
#
###############################################################################
.globl __entry_media_exception
__entry_media_exception:
sethi.p %hi(media_exception),gr5
setlo %lo(media_exception),gr5

# now that we've accessed the exception regs, we can enable exceptions
jmpl @(gr5,gr0) ; call media_excep(msr0,msr1)

###############################################################################
#
# handle data MMU fault
# handle data DAT fault (write-protect exception)
#
###############################################################################
.globl __entry_data_mmu_fault
__entry_data_mmu_fault:
.globl __entry_data_dat_fault
__entry_data_dat_fault:
movsg scr2,gr10 ; saved EAR0

# now that we've accessed the exception regs, we can enable exceptions
sethi.p %hi(do_page_fault),gr5
setlo %lo(do_page_fault),gr5
jmpl @(gr5,gr0) ; call do_page_fault(1,esr0,ear0)

###############################################################################
#
# handle data and instruction access exceptions
#
###############################################################################
.globl __entry_insn_access_exception
.globl __entry_data_access_exception
__entry_insn_access_exception:
__entry_data_access_exception:
sethi.p %hi(memory_access_exception),gr5
setlo %lo(memory_access_exception),gr5
movsg scr2,gr9 ; saved EAR0

# now that we've accessed the exception regs, we can enable exceptions
jmpl @(gr5,gr0) ; call memory_access_error(esr0,ear0,epcr0)

###############################################################################
#
# handle data access error
#
###############################################################################
.globl __entry_data_access_error
__entry_data_access_error:
sethi.p %hi(data_access_error),gr5
setlo %lo(data_access_error),gr5

# now that we've accessed the exception regs, we can enable exceptions
jmpl @(gr5,gr0) ; call data_access_error(esfr1,esr15,ear15)

###############################################################################
#
# handle data store error
#
###############################################################################
.globl __entry_data_store_error
__entry_data_store_error:
sethi.p %hi(data_store_error),gr5
setlo %lo(data_store_error),gr5

# now that we've accessed the exception regs, we can enable exceptions
jmpl @(gr5,gr0) ; call data_store_error(esfr1,esr14)

###############################################################################
#
# handle division exception
#
###############################################################################
.globl __entry_division_exception
__entry_division_exception:
sethi.p %hi(division_exception),gr5
setlo %lo(division_exception),gr5

# now that we've accessed the exception regs, we can enable exceptions
jmpl @(gr5,gr0) ; call div_excep(esfr1,esr0,isr)

###############################################################################
#
# handle compound exception
#
###############################################################################
.globl __entry_compound_exception
__entry_compound_exception:
sethi.p %hi(compound_exception),gr5
setlo %lo(compound_exception),gr5

# now that we've accessed the exception regs, we can enable exceptions
jmpl @(gr5,gr0) ; call comp_excep(esfr1,esr0,esr14,esr15,msr0,msr1)

###############################################################################
#
# handle interrupts and NMIs
#
###############################################################################
.globl __entry_do_IRQ

# we can enable exceptions

.globl __entry_do_NMI

# we can enable exceptions

###############################################################################
#
# the return path for a newly forked child process
# - __switch_to() saved the old current pointer in GR8 for us
#
###############################################################################

# fork & co. return 0 to child
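# (GR8 is the register the syscall return value is passed back in - see the
#  "save return value" stores into REG_GR(8) further down - so the child's
#  saved GR8 ends up as zero here, which is what makes fork() and friends
#  return 0 in the child)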

###################################################################################################
#
# Return to user mode is not as complex as all this looks, but we want the
# default path for a system call return to go as quickly as possible, which is
# why some of this is less clear than it otherwise should be.
#
###################################################################################################
.balign L1_CACHE_BYTES

movsg psr,gr4 ; enable exceptions
sti gr7,@(gr28,#REG_SYSCALLNO)
sti.p gr8,@(gr28,#REG_ORIG_GR8)
subicc gr7,#nr_syscalls,gr0,icc0
bnc icc0,#0,__syscall_badsys

ldi @(gr15,#TI_FLAGS),gr4
ori gr4,#_TIF_SYSCALL_TRACE,gr4
andicc gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
bne icc0,#0,__syscall_trace_entry

sethi %hi(sys_call_table),gr5
setlo %lo(sys_call_table),gr5
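# (the entries in sys_call_table below are 4-byte pointers, so the syscall
#  number in GR7 - already range-checked against nr_syscalls above - indexes
#  this table to select the handler to call; numbers that fail the check go
#  to __syscall_badsys instead)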

###############################################################################
#
# return to interrupted process
#
###############################################################################
sti gr8,@(gr28,#REG_GR(8)) ; save return value

# rebuild saved psr - execve will change it for init/main.c
ldi @(gr28,#REG_PSR),gr22
andi.p gr22,#~PSR_PS,gr22

# keep current PSR in GR23

# make sure we don't miss an interrupt setting need_resched or sigpending
# between sampling and the RETT
ori gr23,#PSR_PIL_14,gr23

ldi @(gr15,#TI_FLAGS),gr4
sethi.p %hi(_TIF_ALLWORK_MASK),gr5
setlo %lo(_TIF_ALLWORK_MASK),gr5
andcc gr4,gr5,gr0,icc0
bne icc0,#0,__syscall_exit_work

# restore all registers and return
__entry_return_direct:
andi gr22,#~PSR_ET,gr22

ldi @(gr28,#REG_ISR),gr23
lddi @(gr28,#REG_CCR),gr24
lddi @(gr28,#REG_LR) ,gr26
ldi @(gr28,#REG_PC) ,gr21
ldi @(gr28,#REG_TBR),gr20
lddi @(gr28,#REG_GNER0),gr4
lddi @(gr28,#REG_IACC0),gr4

lddi @(gr28,#REG_GR(4)) ,gr4
lddi @(gr28,#REG_GR(6)) ,gr6
lddi @(gr28,#REG_GR(8)) ,gr8
lddi @(gr28,#REG_GR(10)),gr10
lddi @(gr28,#REG_GR(12)),gr12
lddi @(gr28,#REG_GR(14)),gr14
lddi @(gr28,#REG_GR(16)),gr16
lddi @(gr28,#REG_GR(18)),gr18
lddi @(gr28,#REG_GR(20)),gr20
lddi @(gr28,#REG_GR(22)),gr22
lddi @(gr28,#REG_GR(24)),gr24
lddi @(gr28,#REG_GR(26)),gr26
ldi @(gr28,#REG_GR(29)),gr29
lddi @(gr28,#REG_GR(30)),gr30

# check to see if a debugging return is required
ldi @(gr28,#REG__STATUS),gr3
andicc gr3,#REG__STATUS_STEP,gr0,icc0
bne icc0,#0,__entry_return_singlestep

ldi @(gr28,#REG_SP) ,sp
lddi @(gr28,#REG_GR(2)) ,gr2
ldi @(gr28,#REG_GR(28)),gr28
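# (GR28 itself has to be reloaded last, since it is still being used as the
#  base register for all of the frame loads above; once it is restored the
#  frame can no longer be addressed and all that remains is to return)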

# store the current frame in the workram on the FR451
sethi.p %hi(0xfe800000),gr28
setlo %lo(0xfe800000),gr28

stdi gr2,@(gr28,#REG_GR(2))
stdi gr4,@(gr28,#REG_GR(4))
stdi gr6,@(gr28,#REG_GR(6))
stdi gr8,@(gr28,#REG_GR(8))
stdi gr10,@(gr28,#REG_GR(10))
stdi gr12,@(gr28,#REG_GR(12))
stdi gr14,@(gr28,#REG_GR(14))
stdi gr16,@(gr28,#REG_GR(16))
stdi gr18,@(gr28,#REG_GR(18))
stdi gr24,@(gr28,#REG_GR(24))
stdi gr26,@(gr28,#REG_GR(26))
sti gr29,@(gr28,#REG_GR(29))
stdi gr30,@(gr28,#REG_GR(30))

sti gr30,@(gr28,#REG_TBR)
sti gr30,@(gr28,#REG_PC)
sti gr30,@(gr28,#REG_PSR)
sti gr30,@(gr28,#REG_ISR)
stdi gr30,@(gr28,#REG_CCR)
stdi gr30,@(gr28,#REG_LR)
sti gr0 ,@(gr28,#REG_SYSCALLNO)

# return via break.S
__entry_return_singlestep:
lddi @(gr28,#REG_GR(2)) ,gr2
ldi @(gr28,#REG_SP) ,sp
ldi @(gr28,#REG_GR(28)),gr28

.globl __entry_return_singlestep_breaks_here
__entry_return_singlestep_breaks_here:

###############################################################################
#
# return to a process interrupted in kernel space
# - we need to consider preemption if that is enabled
#
###############################################################################
.balign L1_CACHE_BYTES
__entry_return_from_kernel_exception:
ori gr23,#PSR_PIL_14,gr23
bra __entry_return_direct

.balign L1_CACHE_BYTES
__entry_return_from_kernel_interrupt:
ori gr23,#PSR_PIL_14,gr23

#ifdef CONFIG_PREEMPT
ldi @(gr15,#TI_PRE_COUNT),gr5
subicc gr5,#0,gr0,icc0
beq icc0,#0,__entry_return_direct

__entry_preempt_need_resched:
ldi @(gr15,#TI_FLAGS),gr4
andicc gr4,#_TIF_NEED_RESCHED,gr0,icc0
beq icc0,#1,__entry_return_direct

setlos #PREEMPT_ACTIVE,gr5
sti gr5,@(gr15,#TI_PRE_COUNT)

andi gr23,#~PSR_PIL,gr23

sti gr0,@(gr15,#TI_PRE_COUNT)

ori gr23,#PSR_PIL_14,gr23
bra __entry_preempt_need_resched
#else
bra __entry_return_direct
#endif

###############################################################################
#
# perform work that needs to be done immediately before resumption
#
###############################################################################
.globl __entry_return_from_user_exception
.balign L1_CACHE_BYTES
__entry_return_from_user_exception:

__entry_resume_userspace:
# make sure we don't miss an interrupt setting need_resched or sigpending
# between sampling and the RETT
ori gr23,#PSR_PIL_14,gr23

__entry_return_from_user_interrupt:
ldi @(gr15,#TI_FLAGS),gr4
sethi.p %hi(_TIF_WORK_MASK),gr5
setlo %lo(_TIF_WORK_MASK),gr5
andcc gr4,gr5,gr0,icc0
beq icc0,#1,__entry_return_direct

__entry_work_pending:
andicc gr4,#_TIF_NEED_RESCHED,gr0,icc0
beq icc0,#1,__entry_work_notifysig

__entry_work_resched:
andi gr23,#~PSR_PIL,gr23
ori gr23,#PSR_PIL_14,gr23

ldi @(gr15,#TI_FLAGS),gr4
sethi.p %hi(_TIF_WORK_MASK),gr5
setlo %lo(_TIF_WORK_MASK),gr5
andcc gr4,gr5,gr0,icc0
beq icc0,#1,__entry_return_direct
andicc gr4,#_TIF_NEED_RESCHED,gr0,icc0
bne icc0,#1,__entry_work_resched

__entry_work_notifysig:
call do_notify_resume
bra __entry_resume_userspace

# perform syscall entry tracing
__syscall_trace_entry:
call do_syscall_trace

ldi @(gr28,#REG_SYSCALLNO),gr7
lddi @(gr28,#REG_GR(8)) ,gr8
lddi @(gr28,#REG_GR(10)),gr10
lddi.p @(gr28,#REG_GR(12)),gr12

subicc gr7,#nr_syscalls,gr0,icc0
bnc icc0,#0,__syscall_badsys

# perform syscall exit tracing
__syscall_exit_work:
andicc gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
beq icc0,#1,__entry_work_pending

andi gr23,#~PSR_PIL,gr23 ; could let do_syscall_trace() call schedule()

call do_syscall_trace
bra __entry_resume_userspace

sti gr8,@(gr28,#REG_GR(8)) ; save return value
bra __entry_resume_userspace

###############################################################################
#
# syscall vector table
#
###############################################################################
.globl sys_call_table
sys_call_table:
.long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
.long sys_open /* 5 */
.long sys_unlink /* 10 */
.long sys_chmod /* 15 */
.long sys_ni_syscall /* old break syscall holder */
.long sys_getpid /* 20 */
.long sys_ni_syscall // sys_stime /* 25 */
.long sys_utime /* 30 */
.long sys_ni_syscall /* old stty syscall holder */
.long sys_ni_syscall /* old gtty syscall holder */
.long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
.long sys_rmdir /* 40 */
.long sys_ni_syscall /* old prof syscall holder */
.long sys_brk /* 45 */
.long sys_ni_syscall // sys_signal
.long sys_getegid16 /* 50 */
.long sys_umount /* recycled never used phys() */
.long sys_ni_syscall /* old lock syscall holder */
.long sys_fcntl /* 55 */
.long sys_ni_syscall /* old mpx syscall holder */
.long sys_ni_syscall /* old ulimit syscall holder */
.long sys_ni_syscall /* old old uname syscall */
.long sys_umask /* 60 */
.long sys_getpgrp /* 65 */
.long sys_ni_syscall // sys_sgetmask
.long sys_ni_syscall // sys_ssetmask
.long sys_setreuid16 /* 70 */
.long sys_setregid16
.long sys_sigsuspend
.long sys_ni_syscall // sys_sigpending
.long sys_sethostname
.long sys_setrlimit /* 75 */
.long sys_ni_syscall // sys_old_getrlimit
.long sys_gettimeofday
.long sys_settimeofday
.long sys_getgroups16 /* 80 */
.long sys_setgroups16
.long sys_ni_syscall /* old_select slot */
.long sys_readlink /* 85 */
.long sys_ni_syscall // old_readdir
.long sys_ni_syscall /* 90 */ /* old_mmap slot */
.long sys_fchown16 /* 95 */
.long sys_getpriority
.long sys_setpriority
.long sys_ni_syscall /* old profil syscall holder */
.long sys_fstatfs /* 100 */
.long sys_ni_syscall /* ioperm for i386 */
.long sys_socketcall
.long sys_getitimer /* 105 */
.long sys_ni_syscall /* obsolete olduname() syscall */
.long sys_ni_syscall /* iopl for i386 */ /* 110 */
.long sys_ni_syscall /* obsolete idle() syscall */
.long sys_ni_syscall /* vm86old for i386 */
.long sys_swapoff /* 115 */
.long sys_clone /* 120 */
.long sys_setdomainname
.long sys_ni_syscall /* old "cacheflush" */
.long sys_mprotect /* 125 */
.long sys_sigprocmask
.long sys_ni_syscall /* old "create_module" */
.long sys_init_module
.long sys_delete_module
.long sys_ni_syscall /* old "get_kernel_syms" */
.long sys_sysfs /* 135 */
.long sys_personality
.long sys_ni_syscall /* for afs_syscall */
.long sys_setfsuid16
.long sys_setfsgid16
.long sys_llseek /* 140 */
.long sys_readv /* 145 */
.long sys_mlock /* 150 */
.long sys_munlockall
.long sys_sched_setparam
.long sys_sched_getparam /* 155 */
.long sys_sched_setscheduler
.long sys_sched_getscheduler
.long sys_sched_yield
.long sys_sched_get_priority_max
.long sys_sched_get_priority_min /* 160 */
.long sys_sched_rr_get_interval
.long sys_setresuid16
.long sys_getresuid16 /* 165 */
.long sys_ni_syscall /* for vm86 */
.long sys_ni_syscall /* Old sys_query_module */
.long sys_nfsservctl
.long sys_setresgid16 /* 170 */
.long sys_getresgid16
.long sys_rt_sigreturn
.long sys_rt_sigaction
.long sys_rt_sigprocmask /* 175 */
.long sys_rt_sigpending
.long sys_rt_sigtimedwait
.long sys_rt_sigqueueinfo
.long sys_rt_sigsuspend
.long sys_pread64 /* 180 */
.long sys_capset /* 185 */
.long sys_sigaltstack
.long sys_ni_syscall /* streams1 */
.long sys_ni_syscall /* streams2 */
.long sys_vfork /* 190 */
.long sys_truncate64
.long sys_ftruncate64
.long sys_stat64 /* 195 */
.long sys_getgid /* 200 */
.long sys_getgroups /* 205 */
.long sys_setresgid /* 210 */
.long sys_setfsuid /* 215 */
.long sys_pivot_root
.long sys_getdents64 /* 220 */
.long sys_ni_syscall /* reserved for TUX */
.long sys_ni_syscall /* Reserved for Security */
.long sys_readahead /* 225 */
.long sys_lgetxattr /* 230 */
.long sys_llistxattr
.long sys_flistxattr
.long sys_removexattr /* 235 */
.long sys_lremovexattr
.long sys_fremovexattr
.long sys_sendfile64
.long sys_futex /* 240 */
.long sys_sched_setaffinity
.long sys_sched_getaffinity
.long sys_ni_syscall //sys_set_thread_area
.long sys_ni_syscall //sys_get_thread_area
.long sys_io_setup /* 245 */
.long sys_io_destroy
.long sys_io_getevents
.long sys_fadvise64 /* 250 */
.long sys_ni_syscall
.long sys_exit_group
.long sys_lookup_dcookie
.long sys_epoll_create
.long sys_epoll_ctl /* 255 */
.long sys_epoll_wait
.long sys_remap_file_pages
.long sys_set_tid_address
.long sys_timer_create
.long sys_timer_settime /* 260 */
.long sys_timer_gettime
.long sys_timer_getoverrun
.long sys_timer_delete
.long sys_clock_settime
.long sys_clock_gettime /* 265 */
.long sys_clock_getres
.long sys_clock_nanosleep
.long sys_tgkill /* 270 */
.long sys_fadvise64_64
.long sys_ni_syscall /* sys_vserver */
.long sys_get_mempolicy
.long sys_set_mempolicy
.long sys_mq_timedsend
.long sys_mq_timedreceive /* 280 */
.long sys_mq_getsetattr
.long sys_ni_syscall /* reserved for kexec */
.long sys_ni_syscall /* 285 */ /* available */
.long sys_request_key
.long sys_ioprio_set
.long sys_ioprio_get /* 290 */
.long sys_inotify_init
.long sys_inotify_add_watch
.long sys_inotify_rm_watch
.long sys_migrate_pages
.long sys_openat /* 295 */
.long sys_fstatat64 /* 300 */
.long sys_readlinkat /* 305 */
.long sys_unshare /* 310 */
.long sys_set_robust_list
.long sys_get_robust_list
.long sys_sync_file_range
.long sys_tee /* 315 */
.long sys_move_pages
.long sys_epoll_pwait
.long sys_utimensat /* 320 */
.long sys_timerfd_create
.long sys_timerfd_settime /* 325 */
.long sys_timerfd_gettime

syscall_table_size = (. - sys_call_table)