2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
4 * kernel entry points (interruptions, system call wrappers)
5 * Copyright (C) 1999,2000 Philipp Rumpf
6 * Copyright (C) 1999 SuSE GmbH Nuernberg
7 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
8 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 #include <linux/config.h>
26 #include <asm/asm-offsets.h>
28 /* we have the following possibilities to act on an interruption:
29 * - handle in assembly and use shadowed registers only
30 * - save registers to kernel stack and handle in assembly or C */
34 #include <asm/assembly.h> /* for LDREG/STREG defines */
35 #include <asm/pgtable.h>
36 #include <asm/signal.h>
37 #include <asm/unistd.h>
38 #include <asm/thread_info.h>
54 .import pa_dbit_lock,data
56 /* space_to_prot macro creates a prot id from a space id */
58 #if (SPACEID_SHIFT) == 0
59 .macro space_to_prot spc prot
60 depd,z \spc,62,31,\prot
63 .macro space_to_prot spc prot
64 extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
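/* (Illustrative note, an assumption rather than original text: with
 * SPACEID_SHIFT == 0 the depd,z above amounts to "prot = spc << 1",
 * i.e. the protection id is just the space id shifted up one bit.) */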
68 /* Switch to virtual mapping, trashing only %r1 */
71 rsm PSW_SM_I, %r0 /* barrier for "Relied upon Translation" */
75 or,= %r0,%r1,%r0 /* Only save sr7 in sr3 if sr7 != 0 */
78 load32 KERNEL_PSW, %r1
80 rsm PSW_SM_QUIET,%r0 /* second "heavy weight" ctl op */
83 mtctl %r0, %cr17 /* Clear IIASQ tail */
84 mtctl %r0, %cr17 /* Clear IIASQ head */
87 mtctl %r1, %cr18 /* Set IIAOQ tail */
89 mtctl %r1, %cr18 /* Set IIAOQ head */
96 * The "get_stack" macros are responsible for determining the
101 * Already using a kernel stack, so call the
102 * get_stack_use_r30 macro to push a pt_regs structure
103 * on the stack, and store registers there.
105 * Need to set up a kernel stack, so call the
106 * get_stack_use_cr30 macro to set up a pointer
107 * to the pt_regs structure contained within the
108 * task pointer pointed to by cr30. Set the stack
109 * pointer to point to the end of the task structure.
113 * Already using a kernel stack, check to see if r30
114 * is already pointing to the per processor interrupt
115 * stack. If it is, call the get_stack_use_r30 macro
116 * to push a pt_regs structure on the stack, and store
117 * registers there. Otherwise, call get_stack_use_cr31
118 * to get a pointer to the base of the interrupt stack
119 * and push a pt_regs structure on that stack.
121 * Need to set up a kernel stack, so call the
122 * get_stack_use_cr30 macro to set up a pointer
123 * to the pt_regs structure contained within the
124 * task pointer pointed to by cr30. Set the stack
125 * pointer to point to the end of the task structure.
126 * N.B.: We don't use the interrupt stack for the
127 * first interrupt from userland, because signals and
128 * reschedules are processed when returning to userland,
129 * and we can sleep in those cases.
131 * Note that we use shadowed registers for temps until
132 * we can save %r26 and %r29. %r26 is used to preserve
133 * %r8 (a shadowed register) which temporarily contained
134 * either the fault type ("code") or the eirr. We need
135 * to use a non-shadowed register to carry the value over
136 * the rfir in virt_map. We use %r26 since this value winds
137 * up being passed as the argument to either do_cpu_irq_mask
138 * or handle_interruption. %r29 is used to hold a pointer
139 * to the register save area, and once again, it needs to
140 * be a non-shadowed register so that it survives the rfir.
142 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
145 .macro get_stack_use_cr30
147 /* we save the registers in the task struct */
151 LDREG TI_TASK(%r9), %r1 /* thread_info -> task_struct */
153 ldo TASK_REGS(%r9),%r9
154 STREG %r30, PT_GR30(%r9)
155 STREG %r29,PT_GR29(%r9)
156 STREG %r26,PT_GR26(%r9)
159 ldo THREAD_SZ_ALGN(%r1), %r30
162 .macro get_stack_use_r30
164 /* we put a struct pt_regs on the stack and save the registers there */
167 STREG %r30,PT_GR30(%r9)
168 ldo PT_SZ_ALGN(%r30),%r30
169 STREG %r29,PT_GR29(%r9)
170 STREG %r26,PT_GR26(%r9)
175 LDREG PT_GR1(%r29), %r1
176 LDREG PT_GR30(%r29),%r30
177 LDREG PT_GR29(%r29),%r29
180 /* default interruption handler
181 * (calls traps.c:handle_interruption) */
188 /* Interrupt interruption handler
189 * (calls irq.c:do_cpu_irq_mask) */
196 .import os_hpmc, code
200 nop /* must be a NOP, will be patched later */
201 load32 PA(os_hpmc), %r3
204 .word 0 /* checksum (will be patched) */
205 .word PA(os_hpmc) /* address of handler */
206 .word 0 /* length of handler */
210 * Performance Note: Instructions will be moved up into
211 * this part of the code later on, once we are sure
212 * that the tlb miss handlers are close to final form.
215 /* Register definitions for tlb miss handler macros */
217 va = r8 /* virtual address for which the trap occurred */
218 spc = r24 /* space for which the trap occurred */
223 * itlb miss interruption handler (parisc 1.1 - 32 bit)
237 * itlb miss interruption handler (parisc 2.0)
254 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
256 * Note: naitlb misses will be treated
257 * as an ordinary itlb miss for now.
258 * However, note that naitlb misses
259 * have the faulting address in the
263 .macro naitlb_11 code
268 /* FIXME: If user causes a naitlb miss, the priv level may not be in
269 * lower bits of va, where the itlb miss handler is expecting them
277 * naitlb miss interruption handler (parisc 2.0)
279 * Note: naitlb misses will be treated
280 * as an ordinary itlb miss for now.
281 * However, note that naitlb misses
282 * have the faulting address in the
286 .macro naitlb_20 code
295 /* FIXME: If user causes a naitlb miss, the priv level may not be in
296 * lower bits of va, where the itlb miss handler is expecting them
304 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
318 * dtlb miss interruption handler (parisc 2.0)
335 /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
337 .macro nadtlb_11 code
347 /* nadtlb miss interruption handler (parisc 2.0) */
349 .macro nadtlb_20 code
364 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
378 * dirty bit trap interruption handler (parisc 2.0)
394 /* The following are simple 32 vs 64 bit instruction
395 * abstractions for the macros */
396 .macro EXTR reg1,start,length,reg2
398 extrd,u \reg1,32+\start,\length,\reg2
400 extrw,u \reg1,\start,\length,\reg2
404 .macro DEP reg1,start,length,reg2
406 depd \reg1,32+\start,\length,\reg2
408 depw \reg1,\start,\length,\reg2
412 .macro DEPI val,start,length,reg
414 depdi \val,32+\start,\length,\reg
416 depwi \val,\start,\length,\reg
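/* (Illustrative note, assumption: these wrappers let the page-table
 * walkers below write bit positions once in 32-bit style; on 64-bit
 * kernels the start position is simply offset by 32, so e.g.
 * "EXTR \va,31-ASM_PMD_SHIFT,..." becomes an extrd,u with start
 * 63-ASM_PMD_SHIFT, addressing the same field in the low word.) */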
420 /* In LP64, the space contains part of the upper 32 bits of the
421 * faulting address. We have to extract this and place it in the va,
422 * zeroing the corresponding bits in the space register */
423 .macro space_adjust spc,va,tmp
425 extrd,u \spc,63,SPACEID_SHIFT,\tmp
426 depd %r0,63,SPACEID_SHIFT,\spc
427 depd \tmp,31,SPACEID_SHIFT,\va
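/* (Note, an assumption from reading the macro: the low SPACEID_SHIFT
 * bits of the space id carry the upper bits of the wide fault address;
 * they are extracted into \tmp, cleared from \spc, and deposited just
 * above the low 32 bits of \va, leaving a clean space/offset pair.) */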
431 .import swapper_pg_dir,code
433 /* Get the pgd. For faults on space zero (kernel space), this
434 * is simply swapper_pg_dir. For user space faults, the
435 * pgd is stored in %cr25 */
436 .macro get_pgd spc,reg
437 ldil L%PA(swapper_pg_dir),\reg
438 ldo R%PA(swapper_pg_dir)(\reg),\reg
439 or,COND(=) %r0,\spc,%r0
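/* (Note, assumption about the elided line that follows: when \spc is
 * zero the or,COND(=) above nullifies the next instruction, so the
 * kernel keeps swapper_pg_dir; otherwise the user pgd is loaded from
 * %cr25 as described in the comment above.) */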
444 space_check(spc,tmp,fault)
446 spc - The space we saw the fault with.
447 tmp - The place to store the current space.
448 fault - Function to call on failure.
450 Only allow faults on different spaces from the
451 currently active one if we're the kernel
454 .macro space_check spc,tmp,fault
456 or,COND(<>) %r0,\spc,%r0 /* user may execute gateway page
457 * as kernel, so defeat the space
460 or,COND(=) %r0,\tmp,%r0 /* nullify if executing as kernel */
461 cmpb,COND(<>),n \tmp,\spc,\fault
464 /* Look up a PTE in a 2-Level scheme (faulting at each
465 * level if the entry isn't present)
467 * NOTE: we use ldw even for LP64, since the short pointers
468 * can address up to 1TB
470 .macro L2_ptep pmd,pte,index,va,fault
472 EXTR \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
474 EXTR \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
476 DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
478 ldw,s \index(\pmd),\pmd
479 bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
480 DEP %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
483 shld %r9,PxD_VALUE_SHIFT,\pmd
485 shlw %r9,PxD_VALUE_SHIFT,\pmd
487 EXTR \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
488 DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
489 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
490 LDREG %r0(\pmd),\pte /* pmd is now pte */
491 bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
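/* (Illustrative walk-through, assumption: \index is the pmd/pgd index
 * taken from \va, the pmd entry is fetched with a short-pointer ldw,
 * bb,>= branches to \fault while the present bit is still clear, the
 * flag bits are masked off, and shladd forms the pte slot address
 * before the final present-bit check.) */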
494 /* Look up PTE in a 3-Level scheme.
496 * Here we implement a Hybrid L2/L3 scheme: we allocate the
497 * first pmd adjacent to the pgd. This means that we can
498 * subtract a constant offset to get to it. The pmd and pgd
499 * sizes are arranged so that a single pmd covers 4GB (giving
500 * a full LP64 process access to 8TB) so our lookups are
501 * effectively L2 for the first 4GB of the kernel (i.e. for
502 * all ILP32 processes and all the kernel for machines with
503 * under 4GB of memory) */
504 .macro L3_ptep pgd,pte,index,va,fault
505 extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
507 extrd,u,*= \va,31,32,%r0
508 ldw,s \index(\pgd),\pgd
509 extrd,u,*= \va,31,32,%r0
510 bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault
511 extrd,u,*= \va,31,32,%r0
512 shld \pgd,PxD_VALUE_SHIFT,\index
513 extrd,u,*= \va,31,32,%r0
515 extrd,u,*<> \va,31,32,%r0
516 ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd
517 L2_ptep \pgd,\pte,\index,\va,\fault
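/* (Note, assumption: each "extrd,u,*= \va,31,32,%r0" nullifies the
 * following instruction when the upper 32 bits of \va are zero, so the
 * extra level is skipped for the first 4GB; the *<> form enables the
 * ASM_PGD_PMD_OFFSET adjustment only in that low-address case, which
 * gives the hybrid L2/L3 behaviour described above.) */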
520 /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
521 * don't needlessly dirty the cache line if it was already set */
522 .macro update_ptep ptep,pte,tmp,tmp1
523 ldi _PAGE_ACCESSED,\tmp1
525 and,COND(<>) \tmp1,\pte,%r0
529 /* Set the dirty bit (and accessed bit). No need to be
530 * clever, this is only used from the dirty fault */
531 .macro update_dirty ptep,pte,tmp
532 ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
537 /* Convert the pte and prot to tlb insertion values. How
538 * this happens is quite subtle, read below */
539 .macro make_insert_tlb spc,pte,prot
540 space_to_prot \spc \prot /* create prot id from space */
541 /* The following is the real subtlety. This is depositing
542 * T <-> _PAGE_REFTRAP
544 * B <-> _PAGE_DMB (memory break)
546 * Then incredible subtlety: The access rights are
547 * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
548 * See 3-14 of the parisc 2.0 manual
550 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
551 * trigger an access rights trap in user space if the user
552 * tries to read an unreadable page) */
555 /* PAGE_USER indicates the page can be read with user privileges,
556 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
557 * contains _PAGE_READ) */
558 extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
560 /* If we're a gateway page, drop PL2 back to zero for promotion
561 * to kernel privilege (so we can execute the page as kernel).
562 * Any privilege promotion page always denies read and write */
563 extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0
564 depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */
566 /* Get rid of prot bits and convert to page addr for iitlbt */
568 depd %r0,63,PAGE_SHIFT,\pte
569 extrd,u \pte,56,32,\pte
572 /* Identical macro to make_insert_tlb above, except it
573 * makes the tlb entry for the differently formatted pa11
574 * insertion instructions */
575 .macro make_insert_tlb_11 spc,pte,prot
576 zdep \spc,30,15,\prot
578 extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0
580 extru,= \pte,_PAGE_USER_BIT,1,%r0
581 depi 7,11,3,\prot /* Set for user space (1 rsvd for read) */
582 extru,= \pte,_PAGE_GATEWAY_BIT,1,%r0
583 depi 0,11,2,\prot /* If Gateway, Set PL2 to 0 */
585 /* Get rid of prot bits and convert to page addr for iitlba */
588 extru \pte,24,25,\pte
592 /* This is for ILP32 PA2.0 only. The TLB insertion needs
593 * to extend into I/O space if the address is 0xfXXXXXXX
594 * so we extend the f's into the top word of the pte in
596 .macro f_extend pte,tmp
597 extrd,s \pte,42,4,\tmp
599 extrd,s \pte,63,25,\pte
602 /* The alias region is an 8MB aligned 16MB to do clear and
603 * copy user pages at addresses congruent with the user
606 * To use the alias page, you set %r26 up with the "to" TLB
607 * entry (identifying the physical page) and %r23 up with
608 * the "from" TLB entry (or nothing if only a "to" entry---for
609 * clear_user_page_asm) */
610 .macro do_alias spc,tmp,tmp1,va,pte,prot,fault
611 cmpib,COND(<>),n 0,\spc,\fault
612 ldil L%(TMPALIAS_MAP_START),\tmp
613 #if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
614 /* on LP64, ldi will sign extend into the upper 32 bits,
615 * which is behaviour we don't want */
620 cmpb,COND(<>),n \tmp,\tmp1,\fault
621 ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
622 depd,z \prot,8,7,\prot
624 * OK, it is in the temp alias region, check whether "from" or "to".
625 * Check "subtle" note in pacache.S re: r23/r26.
628 extrd,u,*= \va,41,1,%r0
630 extrw,u,= \va,9,1,%r0
632 or,COND(tr) %r23,%r0,\pte
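/* (Note, an assumption based on the dtlb_check_alias_11 path below: the
 * bit test on \va picks which half of the alias region was touched;
 * or,COND(tr) copies %r23, the "from" page, and nullifies the elided
 * copy of %r26, the "to" page, that follows.) */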
638 * Align fault_vector_20 on 4K boundary so that both
639 * fault_vector_11 and fault_vector_20 are on the
640 * same page. This is only necessary as long as we
641 * write protect the kernel text, which we may stop
642 * doing once we use large page translations to cover
643 * the static part of the kernel address space.
646 .export fault_vector_20
653 /* First vector is invalid (0) */
654 .ascii "cows can fly"
696 .export fault_vector_11
701 /* First vector is invalid (0) */
702 .ascii "cows can fly"
744 .import handle_interruption,code
745 .import do_cpu_irq_mask,code
748 * r26 = function to be called
749 * r25 = argument to pass in
750 * r24 = flags for do_fork()
752 * Kernel threads don't ever return, so they don't need
753 * a true register context. We just save away the arguments
754 * for copy_thread/ret_ to properly set up the child.
757 #define CLONE_VM 0x100 /* Must agree with <linux/sched.h> */
758 #define CLONE_UNTRACED 0x00800000
760 .export __kernel_thread, code
763 STREG %r2, -RP_OFFSET(%r30)
766 ldo PT_SZ_ALGN(%r30),%r30
768 /* Yo, function pointers in wide mode are little structs... -PB */
770 STREG %r2, PT_GR27(%r1) /* Store child's %dp */
773 STREG %r22, PT_GR22(%r1) /* save r22 (arg5) */
774 copy %r0, %r22 /* user_tid */
776 STREG %r26, PT_GR26(%r1) /* Store function & argument for child */
777 STREG %r25, PT_GR25(%r1)
778 ldil L%CLONE_UNTRACED, %r26
779 ldo CLONE_VM(%r26), %r26 /* Force CLONE_VM since only init_mm */
780 or %r26, %r24, %r26 /* will have kernel mappings. */
781 ldi 1, %r25 /* stack_start, signals kernel thread */
782 stw %r0, -52(%r30) /* user_tid */
784 ldo -16(%r30),%r29 /* Reference param save area */
787 copy %r1, %r24 /* pt_regs */
789 /* Parent Returns here */
791 LDREG -PT_SZ_ALGN-RP_OFFSET(%r30), %r2
792 ldo -PT_SZ_ALGN(%r30), %r30
799 * copy_thread moved args from temp save area set up above
800 * into task save area.
803 .export ret_from_kernel_thread
804 ret_from_kernel_thread:
806 /* Call schedule_tail first though */
807 BL schedule_tail, %r2
810 LDREG TI_TASK-THREAD_SZ_ALGN(%r30), %r1
811 LDREG TASK_PT_GR25(%r1), %r26
813 LDREG TASK_PT_GR27(%r1), %r27
814 LDREG TASK_PT_GR22(%r1), %r22
816 LDREG TASK_PT_GR26(%r1), %r1
821 ldo -16(%r30),%r29 /* Reference param save area */
822 loadgp /* Thread could have been in a module */
832 .import sys_execve, code
833 .export __execve, code
837 ldo PT_SZ_ALGN(%r30), %r30
838 STREG %r26, PT_GR26(%r16)
839 STREG %r25, PT_GR25(%r16)
840 STREG %r24, PT_GR24(%r16)
842 ldo -16(%r30),%r29 /* Reference param save area */
847 cmpib,=,n 0,%r28,intr_return /* forward */
849 /* yes, this will trap and die. */
858 * struct task_struct *_switch_to(struct task_struct *prev,
859 * struct task_struct *next)
861 * switch kernel stacks and return prev */
862 .export _switch_to, code
864 STREG %r2, -RP_OFFSET(%r30)
869 load32 _switch_to_ret, %r2
871 STREG %r2, TASK_PT_KPC(%r26)
872 LDREG TASK_PT_KPC(%r25), %r2
874 STREG %r30, TASK_PT_KSP(%r26)
875 LDREG TASK_PT_KSP(%r25), %r30
876 LDREG TASK_THREAD_INFO(%r25), %r25
881 mtctl %r0, %cr0 /* Needed for single stepping */
885 LDREG -RP_OFFSET(%r30), %r2
890 * Common rfi return path for interruptions, kernel execve, and
891 * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will
892 * return via this path if the signal was received when the process
893 * was running; if the process was blocked on a syscall then the
894 * normal syscall_exit path is used. All syscalls for traced
895 * processes exit via intr_restore.
897 * XXX If any syscalls that change a process's space id ever exit
898 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
905 .export syscall_exit_rfi
908 LDREG TI_TASK(%r16), %r16 /* thread_info -> task_struct */
909 ldo TASK_REGS(%r16),%r16
910 /* Force iaoq to userspace, as the user has had access to our current
911 * context via sigcontext. Also filter the PSW for the same reason.
913 LDREG PT_IAOQ0(%r16),%r19
915 STREG %r19,PT_IAOQ0(%r16)
916 LDREG PT_IAOQ1(%r16),%r19
918 STREG %r19,PT_IAOQ1(%r16)
919 LDREG PT_PSW(%r16),%r19
920 load32 USER_PSW_MASK,%r1
922 load32 USER_PSW_HI_MASK,%r20
925 and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
927 or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
928 STREG %r19,PT_PSW(%r16)
931 * If we aren't being traced, we never saved space registers
932 * (we don't store them in the sigcontext), so set them
933 * to "proper" values now (otherwise we'll wind up restoring
934 * whatever was last stored in the task structure, which might
935 * be inconsistent if an interrupt occurred while on the gateway
936 * page). Note that we may be "trashing" values the user put in
937 * them, but we don't support the user changing them.
940 STREG %r0,PT_SR2(%r16)
942 STREG %r19,PT_SR0(%r16)
943 STREG %r19,PT_SR1(%r16)
944 STREG %r19,PT_SR3(%r16)
945 STREG %r19,PT_SR4(%r16)
946 STREG %r19,PT_SR5(%r16)
947 STREG %r19,PT_SR6(%r16)
948 STREG %r19,PT_SR7(%r16)
951 /* NOTE: Need to enable interrupts in case we schedule. */
954 /* Check for software interrupts */
956 .import irq_stat,data
961 ldw TI_CPU(%r1),%r1 /* get cpu # - int */
962 /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
963 ** irq_stat[] is defined using ____cacheline_aligned.
970 add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */
971 #endif /* CONFIG_SMP */
975 /* check for reschedule */
977 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
978 bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
983 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_SIGPENDING */
984 bb,<,n %r19, 31-TIF_SIGPENDING, intr_do_signal /* forward */
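/* (Illustrative note, assumption: PA bit-branch numbering counts bit 0
 * as the most significant bit, so a TIF_* flag defined from the LSB is
 * tested here as word bit 31-TIF_*; bb,< branches when the flag is set
 * and the ,n completer nullifies the delay slot on the taken branch.) */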
988 ldo PT_FR31(%r29),%r1
992 /* inverse of virt_map */
994 rsm PSW_SM_QUIET,%r0 /* prepare for rfi */
997 /* Restore space id's and special cr's from PT_REGS
998 * structure pointed to by r29
1002 /* IMPORTANT: rest_stack restores r29 last (we are using it)!
1003 * It also restores r1 and r30.
1017 .import schedule,code
1019 /* Only do reschedule if we are returning to user space */
1020 LDREG PT_IASQ0(%r16), %r20
1021 CMPIB= 0,%r20,intr_restore /* backward */
1023 LDREG PT_IASQ1(%r16), %r20
1024 CMPIB= 0,%r20,intr_restore /* backward */
1028 ldo -16(%r30),%r29 /* Reference param save area */
1031 ldil L%intr_check_sig, %r2
1032 #ifndef CONFIG_64BIT
1035 load32 schedule, %r20
1038 ldo R%intr_check_sig(%r2), %r2
1041 .import do_signal,code
1044 This check is critical to having LWS
1045 working. The IASQ is zero on the gateway
1046 page and we cannot deliver any signals until
1047 we get off the gateway page.
1049 Only do signals if we are returning to user space
1051 LDREG PT_IASQ0(%r16), %r20
1052 CMPIB= 0,%r20,intr_restore /* backward */
1054 LDREG PT_IASQ1(%r16), %r20
1055 CMPIB= 0,%r20,intr_restore /* backward */
1058 copy %r0, %r24 /* unsigned long in_syscall */
1059 copy %r16, %r25 /* struct pt_regs *regs */
1061 ldo -16(%r30),%r29 /* Reference param save area */
1065 copy %r0, %r26 /* sigset_t *oldset = NULL */
1071 * External interrupts.
1080 #if 0 /* Interrupt Stack support not working yet! */
1083 /* FIXME! depi below has hardcoded idea of interrupt stack size (32k)*/
1101 ldo PT_FR0(%r29), %r24
1106 copy %r29, %r26 /* arg0 is pt_regs */
1107 copy %r29, %r16 /* save pt_regs */
1109 ldil L%intr_return, %r2
1112 ldo -16(%r30),%r29 /* Reference param save area */
1116 ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
1119 /* Generic interruptions (illegal insn, unaligned, page fault, etc) */
1121 .export intr_save, code /* for os_hpmc */
1137 /* If this trap is an itlb miss, skip saving/adjusting isr/ior */
1140 * FIXME: 1) Use a #define for the hardwired "6" below (and in
1142 * 2) Once we start executing code above 4 Gb, we need
1143 * to adjust iasq/iaoq here in the same way we
1144 * adjust isr/ior below.
1147 CMPIB=,n 6,%r26,skip_save_ior
1150 mfctl %cr20, %r16 /* isr */
1151 nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
1152 mfctl %cr21, %r17 /* ior */
1157 * If the interrupted code was running with W bit off (32 bit),
1158 * clear the b bits (bits 0 & 1) in the ior.
1159 * save_specials left ipsw value in r8 for us to test.
1161 extrd,u,*<> %r8,PSW_W_BIT,1,%r0
1165 * FIXME: This code has hardwired assumptions about the split
1166 * between space bits and offset bits. This will change
1167 * when we allow alternate page sizes.
1170 /* adjust isr/ior. */
1172 extrd,u %r16,63,7,%r1 /* get high bits from isr for ior */
1173 depd %r1,31,7,%r17 /* deposit them into ior */
1174 depdi 0,63,7,%r16 /* clear them from isr */
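/* (Note, assumption: as with space_adjust above, the low 7 bits of the
 * space id are really the upper bits of the offset; they are copied
 * into bits 25..31 of the ior and cleared from the isr so PT_ISR and
 * PT_IOR hold a clean space/offset pair.) */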
1176 STREG %r16, PT_ISR(%r29)
1177 STREG %r17, PT_IOR(%r29)
1184 ldo PT_FR0(%r29), %r25
1189 copy %r29, %r25 /* arg1 is pt_regs */
1191 ldo -16(%r30),%r29 /* Reference param save area */
1194 ldil L%intr_check_sig, %r2
1195 copy %r25, %r16 /* save pt_regs */
1197 b handle_interruption
1198 ldo R%intr_check_sig(%r2), %r2
1202 * Note for all tlb miss handlers:
1204 * cr24 contains a pointer to the kernel address space
1207 * cr25 contains a pointer to the current user address
1208 * space page directory.
1210 * sr3 will contain the space id of the user address space
1211 * of the current running thread while that thread is
1212 * running in the kernel.
1216 * register number allocations. Note that these are all
1217 * in the shadowed registers
1220 t0 = r1 /* temporary register 0 */
1221 va = r8 /* virtual address for which the trap occurred */
1222 t1 = r9 /* temporary register 1 */
1223 pte = r16 /* pte/phys page # */
1224 prot = r17 /* prot bits */
1225 spc = r24 /* space for which the trap occurred */
1226 ptp = r25 /* page directory/page table pointer */
1231 space_adjust spc,va,t0
1233 space_check spc,t0,dtlb_fault
1235 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
1237 update_ptep ptp,pte,t0,t1
1239 make_insert_tlb spc,pte,prot
1246 dtlb_check_alias_20w:
1247 do_alias spc,t0,t1,va,pte,prot,dtlb_fault
1255 space_adjust spc,va,t0
1257 space_check spc,t0,nadtlb_fault
1259 L3_ptep ptp,pte,t0,va,nadtlb_check_flush_20w
1261 update_ptep ptp,pte,t0,t1
1263 make_insert_tlb spc,pte,prot
1270 nadtlb_check_flush_20w:
1271 bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
1273 /* Insert a "flush only" translation */
1278 /* Get rid of prot bits and convert to page addr for idtlbt */
1281 extrd,u pte,56,52,pte
1292 space_check spc,t0,dtlb_fault
1294 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
1296 update_ptep ptp,pte,t0,t1
1298 make_insert_tlb_11 spc,pte,prot
1300 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1303 idtlba pte,(%sr1,va)
1304 idtlbp prot,(%sr1,va)
1306 mtsp t0, %sr1 /* Restore sr1 */
1311 dtlb_check_alias_11:
1313 /* Check to see if fault is in the temporary alias region */
1315 cmpib,<>,n 0,spc,dtlb_fault /* forward */
1316 ldil L%(TMPALIAS_MAP_START),t0
1319 cmpb,<>,n t0,t1,dtlb_fault /* forward */
1320 ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
1321 depw,z prot,8,7,prot
1324 * OK, it is in the temp alias region, check whether "from" or "to".
1325 * Check "subtle" note in pacache.S re: r23/r26.
1329 or,tr %r23,%r0,pte /* If "from" use "from" page */
1330 or %r26,%r0,pte /* else "to", use "to" page */
1341 space_check spc,t0,nadtlb_fault
1343 L2_ptep ptp,pte,t0,va,nadtlb_check_flush_11
1345 update_ptep ptp,pte,t0,t1
1347 make_insert_tlb_11 spc,pte,prot
1350 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1353 idtlba pte,(%sr1,va)
1354 idtlbp prot,(%sr1,va)
1356 mtsp t0, %sr1 /* Restore sr1 */
1361 nadtlb_check_flush_11:
1362 bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
1364 /* Insert a "flush only" translation */
1369 /* Get rid of prot bits and convert to page addr for idtlba */
1374 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1377 idtlba pte,(%sr1,va)
1378 idtlbp prot,(%sr1,va)
1380 mtsp t0, %sr1 /* Restore sr1 */
1386 space_adjust spc,va,t0
1388 space_check spc,t0,dtlb_fault
1390 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
1392 update_ptep ptp,pte,t0,t1
1394 make_insert_tlb spc,pte,prot
1403 dtlb_check_alias_20:
1404 do_alias spc,t0,t1,va,pte,prot,dtlb_fault
1414 space_check spc,t0,nadtlb_fault
1416 L2_ptep ptp,pte,t0,va,nadtlb_check_flush_20
1418 update_ptep ptp,pte,t0,t1
1420 make_insert_tlb spc,pte,prot
1429 nadtlb_check_flush_20:
1430 bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
1432 /* Insert a "flush only" translation */
1437 /* Get rid of prot bits and convert to page addr for idtlbt */
1440 extrd,u pte,56,32,pte
1450 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
1451 * probei instructions. We don't want to fault for these
1452 * instructions (not only does it not make sense, it can cause
1453 * deadlocks, since some flushes are done with the mmap
1454 * semaphore held). If the translation doesn't exist, we can't
1455 * insert a translation, so we have to emulate the side effects
1456 * of the instruction. Since we don't insert a translation
1457 * we can get a lot of faults during a flush loop, so it makes
1458 * sense to try to do it here with minimum overhead. We only
1459 * emulate fdc,fic,pdc,probew,prober instructions whose base
1460 * and index registers are not shadowed. We defer everything
1461 * else to the "slow" path.
1464 mfctl %cr19,%r9 /* Get iir */
1466 /* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
1467 Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */
1469 /* Checks for fdc,fdce,pdc,"fic,4f" only */
1472 cmpb,<>,n %r16,%r17,nadtlb_probe_check
1473 bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */
1474 BL get_register,%r25
1475 extrw,u %r9,15,5,%r8 /* Get index register # */
1476 CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
1478 BL get_register,%r25
1479 extrw,u %r9,10,5,%r8 /* Get base register # */
1480 CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
1481 BL set_register,%r25
1482 add,l %r1,%r24,%r1 /* doesn't affect c/b bits */
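/* (Illustrative note, assumption: the emulation first checks the m bit
 * and just nullifies when it is clear, fetches the index and base
 * register values via get_register, bails to the slow path if either
 * is shadowed (-1), and writes base+index back through set_register to
 * mimic the base-modification side effect of the flush insn.) */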
1487 or %r8,%r9,%r8 /* Set PSW_N */
1494 When there is no translation for the probe address then we
1495 must nullify the insn and return zero in the target register.
1496 This will indicate to the calling code that it does not have
1497 write/read privileges to this address.
1499 This should technically work for prober and probew in PA 1.1,
1500 and also probe,r and probe,w in PA 2.0
1502 WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
1503 THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
1509 cmpb,<>,n %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
1510 BL get_register,%r25 /* Find the target register */
1511 extrw,u %r9,31,5,%r8 /* Get target register */
1512 CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
1513 BL set_register,%r25
1514 copy %r0,%r1 /* Write zero to target register */
1515 b nadtlb_nullify /* Nullify return insn */
1523 * An itlb miss is a little different, since we allow users to fault
1524 * on the gateway page which is in the kernel address space.
1527 space_adjust spc,va,t0
1529 space_check spc,t0,itlb_fault
1531 L3_ptep ptp,pte,t0,va,itlb_fault
1533 update_ptep ptp,pte,t0,t1
1535 make_insert_tlb spc,pte,prot
1547 space_check spc,t0,itlb_fault
1549 L2_ptep ptp,pte,t0,va,itlb_fault
1551 update_ptep ptp,pte,t0,t1
1553 make_insert_tlb_11 spc,pte,prot
1555 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1558 iitlba pte,(%sr1,va)
1559 iitlbp prot,(%sr1,va)
1561 mtsp t0, %sr1 /* Restore sr1 */
1569 space_check spc,t0,itlb_fault
1571 L2_ptep ptp,pte,t0,va,itlb_fault
1573 update_ptep ptp,pte,t0,t1
1575 make_insert_tlb spc,pte,prot
1589 space_adjust spc,va,t0
1591 space_check spc,t0,dbit_fault
1593 L3_ptep ptp,pte,t0,va,dbit_fault
1596 CMPIB=,n 0,spc,dbit_nolock_20w
1597 load32 PA(pa_dbit_lock),t0
1601 cmpib,= 0,t1,dbit_spin_20w
1606 update_dirty ptp,pte,t1
1608 make_insert_tlb spc,pte,prot
1612 CMPIB=,n 0,spc,dbit_nounlock_20w
1627 space_check spc,t0,dbit_fault
1629 L2_ptep ptp,pte,t0,va,dbit_fault
1632 CMPIB=,n 0,spc,dbit_nolock_11
1633 load32 PA(pa_dbit_lock),t0
1637 cmpib,= 0,t1,dbit_spin_11
1642 update_dirty ptp,pte,t1
1644 make_insert_tlb_11 spc,pte,prot
1646 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1649 idtlba pte,(%sr1,va)
1650 idtlbp prot,(%sr1,va)
1652 mtsp t1, %sr1 /* Restore sr1 */
1654 CMPIB=,n 0,spc,dbit_nounlock_11
1667 space_check spc,t0,dbit_fault
1669 L2_ptep ptp,pte,t0,va,dbit_fault
1672 CMPIB=,n 0,spc,dbit_nolock_20
1673 load32 PA(pa_dbit_lock),t0
1677 cmpib,= 0,t1,dbit_spin_20
1682 update_dirty ptp,pte,t1
1684 make_insert_tlb spc,pte,prot
1691 CMPIB=,n 0,spc,dbit_nounlock_20
1702 .import handle_interruption,code
1706 ldi 31,%r8 /* Use an unused code */
1724 /* Register saving semantics for system calls:
1726 %r1 clobbered by system call macro in userspace
1727 %r2 saved in PT_REGS by gateway page
1728 %r3 - %r18 preserved by C code (saved by signal code)
1729 %r19 - %r20 saved in PT_REGS by gateway page
1730 %r21 - %r22 non-standard syscall args
1731 stored in kernel stack by gateway page
1732 %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page
1733 %r27 - %r30 saved in PT_REGS by gateway page
1734 %r31 syscall return pointer
1737 /* Floating point registers (FIXME: what do we do with these?)
1739 %fr0 - %fr3 status/exception, not preserved
1740 %fr4 - %fr7 arguments
1741 %fr8 - %fr11 not preserved by C code
1742 %fr12 - %fr21 preserved by C code
1743 %fr22 - %fr31 not preserved by C code
1746 .macro reg_save regs
1747 STREG %r3, PT_GR3(\regs)
1748 STREG %r4, PT_GR4(\regs)
1749 STREG %r5, PT_GR5(\regs)
1750 STREG %r6, PT_GR6(\regs)
1751 STREG %r7, PT_GR7(\regs)
1752 STREG %r8, PT_GR8(\regs)
1753 STREG %r9, PT_GR9(\regs)
1754 STREG %r10,PT_GR10(\regs)
1755 STREG %r11,PT_GR11(\regs)
1756 STREG %r12,PT_GR12(\regs)
1757 STREG %r13,PT_GR13(\regs)
1758 STREG %r14,PT_GR14(\regs)
1759 STREG %r15,PT_GR15(\regs)
1760 STREG %r16,PT_GR16(\regs)
1761 STREG %r17,PT_GR17(\regs)
1762 STREG %r18,PT_GR18(\regs)
1765 .macro reg_restore regs
1766 LDREG PT_GR3(\regs), %r3
1767 LDREG PT_GR4(\regs), %r4
1768 LDREG PT_GR5(\regs), %r5
1769 LDREG PT_GR6(\regs), %r6
1770 LDREG PT_GR7(\regs), %r7
1771 LDREG PT_GR8(\regs), %r8
1772 LDREG PT_GR9(\regs), %r9
1773 LDREG PT_GR10(\regs),%r10
1774 LDREG PT_GR11(\regs),%r11
1775 LDREG PT_GR12(\regs),%r12
1776 LDREG PT_GR13(\regs),%r13
1777 LDREG PT_GR14(\regs),%r14
1778 LDREG PT_GR15(\regs),%r15
1779 LDREG PT_GR16(\regs),%r16
1780 LDREG PT_GR17(\regs),%r17
1781 LDREG PT_GR18(\regs),%r18
1784 .export sys_fork_wrapper
1785 .export child_return
1787 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
1788 ldo TASK_REGS(%r1),%r1
1791 STREG %r3, PT_CR27(%r1)
1793 STREG %r2,-RP_OFFSET(%r30)
1794 ldo FRAME_SIZE(%r30),%r30
1796 ldo -16(%r30),%r29 /* Reference param save area */
1799 /* These are call-clobbered registers and therefore
1800 also syscall-clobbered (we hope). */
1801 STREG %r2,PT_GR19(%r1) /* save for child */
1802 STREG %r30,PT_GR21(%r1)
1804 LDREG PT_GR30(%r1),%r25
1809 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
1811 ldo -FRAME_SIZE(%r30),%r30 /* get the stackframe */
1812 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1813 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1815 LDREG PT_CR27(%r1), %r3
1819 /* strace expects syscall # to be preserved in r20 */
1822 STREG %r20,PT_GR20(%r1)
1824 /* Set the return value for the child */
1826 BL schedule_tail, %r2
1829 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
1830 LDREG TASK_PT_GR19(%r1),%r2
1835 .export sys_clone_wrapper
1837 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1838 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1841 STREG %r3, PT_CR27(%r1)
1843 STREG %r2,-RP_OFFSET(%r30)
1844 ldo FRAME_SIZE(%r30),%r30
1846 ldo -16(%r30),%r29 /* Reference param save area */
1849 STREG %r2,PT_GR19(%r1) /* save for child */
1850 STREG %r30,PT_GR21(%r1)
1855 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
1857 .export sys_vfork_wrapper
1859 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1860 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1863 STREG %r3, PT_CR27(%r1)
1865 STREG %r2,-RP_OFFSET(%r30)
1866 ldo FRAME_SIZE(%r30),%r30
1868 ldo -16(%r30),%r29 /* Reference param save area */
1871 STREG %r2,PT_GR19(%r1) /* save for child */
1872 STREG %r30,PT_GR21(%r1)
1878 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
1881 .macro execve_wrapper execve
1882 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1883 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1886 * Do we need to save/restore r3-r18 here?
1887 * I don't think so. Why would the new thread need the old
1888 * thread's registers?
1891 /* %arg0 - %arg3 are already saved for us. */
1893 STREG %r2,-RP_OFFSET(%r30)
1894 ldo FRAME_SIZE(%r30),%r30
1896 ldo -16(%r30),%r29 /* Reference param save area */
1901 ldo -FRAME_SIZE(%r30),%r30
1902 LDREG -RP_OFFSET(%r30),%r2
1904 /* If exec succeeded we need to load the args */
1907 cmpb,>>= %r28,%r1,error_\execve
1915 .export sys_execve_wrapper
1919 execve_wrapper sys_execve
1922 .export sys32_execve_wrapper
1923 .import sys32_execve
1925 sys32_execve_wrapper:
1926 execve_wrapper sys32_execve
1929 .export sys_rt_sigreturn_wrapper
1930 sys_rt_sigreturn_wrapper:
1931 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
1932 ldo TASK_REGS(%r26),%r26 /* get pt regs */
1933 /* Don't save regs, we are going to restore them from sigcontext. */
1934 STREG %r2, -RP_OFFSET(%r30)
1936 ldo FRAME_SIZE(%r30), %r30
1937 BL sys_rt_sigreturn,%r2
1938 ldo -16(%r30),%r29 /* Reference param save area */
1940 BL sys_rt_sigreturn,%r2
1941 ldo FRAME_SIZE(%r30), %r30
1944 ldo -FRAME_SIZE(%r30), %r30
1945 LDREG -RP_OFFSET(%r30), %r2
1947 /* FIXME: I think we need to restore a few more things here. */
1948 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1949 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1952 /* If the signal was received while the process was blocked on a
1953 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
1954 * take us to syscall_exit_rfi and on to intr_return.
1957 LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
1959 .export sys_sigaltstack_wrapper
1960 sys_sigaltstack_wrapper:
1961 /* Get the user stack pointer */
1962 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1963 ldo TASK_REGS(%r1),%r24 /* get pt regs */
1964 LDREG TASK_PT_GR30(%r24),%r24
1965 STREG %r2, -RP_OFFSET(%r30)
1967 ldo FRAME_SIZE(%r30), %r30
1968 b,l do_sigaltstack,%r2
1969 ldo -16(%r30),%r29 /* Reference param save area */
1971 bl do_sigaltstack,%r2
1972 ldo FRAME_SIZE(%r30), %r30
1975 ldo -FRAME_SIZE(%r30), %r30
1976 LDREG -RP_OFFSET(%r30), %r2
1981 .export sys32_sigaltstack_wrapper
1982 sys32_sigaltstack_wrapper:
1983 /* Get the user stack pointer */
1984 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
1985 LDREG TASK_PT_GR30(%r24),%r24
1986 STREG %r2, -RP_OFFSET(%r30)
1987 ldo FRAME_SIZE(%r30), %r30
1988 b,l do_sigaltstack32,%r2
1989 ldo -16(%r30),%r29 /* Reference param save area */
1991 ldo -FRAME_SIZE(%r30), %r30
1992 LDREG -RP_OFFSET(%r30), %r2
1997 .export sys_rt_sigsuspend_wrapper
1998 sys_rt_sigsuspend_wrapper:
1999 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
2000 ldo TASK_REGS(%r1),%r24
2003 STREG %r2, -RP_OFFSET(%r30)
2005 ldo FRAME_SIZE(%r30), %r30
2006 b,l sys_rt_sigsuspend,%r2
2007 ldo -16(%r30),%r29 /* Reference param save area */
2009 bl sys_rt_sigsuspend,%r2
2010 ldo FRAME_SIZE(%r30), %r30
2013 ldo -FRAME_SIZE(%r30), %r30
2014 LDREG -RP_OFFSET(%r30), %r2
2016 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
2017 ldo TASK_REGS(%r1),%r1
2023 .export syscall_exit
2026 /* NOTE: HP-UX syscalls also come through here
2027 * after hpux_syscall_exit fixes up return
2030 /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
2031 * via syscall_exit_rfi if the signal was received while the process
2035 /* save return value now */
2038 LDREG TI_TASK(%r1),%r1
2039 STREG %r28,TASK_PT_GR28(%r1)
2043 /* <linux/personality.h> cannot be easily included */
2044 #define PER_HPUX 0x10
2045 LDREG TASK_PERSONALITY(%r1),%r19
2047 /* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
2048 ldo -PER_HPUX(%r19), %r19
2051 /* Save other hpux returns if personality is PER_HPUX */
2052 STREG %r22,TASK_PT_GR22(%r1)
2053 STREG %r29,TASK_PT_GR29(%r1)
2056 #endif /* CONFIG_HPUX */
2058 /* Seems to me that dp could be wrong here, if the syscall involved
2059 * calling a module, and nothing got round to restoring dp on return.
2065 /* Check for software interrupts */
2067 .import irq_stat,data
2069 load32 irq_stat,%r19
2072 /* sched.h: int processor */
2073 /* %r26 is used as scratch register to index into irq_stat[] */
2074 ldw TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */
2076 /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
2082 add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */
2083 #endif /* CONFIG_SMP */
2085 syscall_check_resched:
2087 /* check for reschedule */
2089 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */
2090 bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
2093 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* get ti flags */
2094 bb,<,n %r19, 31-TIF_SIGPENDING, syscall_do_signal /* forward */
2097 /* Are we being ptraced? */
2098 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
2100 LDREG TASK_PTRACE(%r1), %r19
2101 bb,< %r19,31,syscall_restore_rfi
2104 ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
2107 LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */
2110 LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */
2111 LDREG TASK_PT_GR19(%r1),%r19
2112 LDREG TASK_PT_GR20(%r1),%r20
2113 LDREG TASK_PT_GR21(%r1),%r21
2114 LDREG TASK_PT_GR22(%r1),%r22
2115 LDREG TASK_PT_GR23(%r1),%r23
2116 LDREG TASK_PT_GR24(%r1),%r24
2117 LDREG TASK_PT_GR25(%r1),%r25
2118 LDREG TASK_PT_GR26(%r1),%r26
2119 LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */
2120 LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */
2121 LDREG TASK_PT_GR29(%r1),%r29
2122 LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */
2124 /* NOTE: We use rsm/ssm pair to make this operation atomic */
2126 LDREG TASK_PT_GR30(%r1),%r30 /* restore user sp */
2127 mfsp %sr3,%r1 /* Get users space id */
2128 mtsp %r1,%sr7 /* Restore sr7 */
2131 /* Set sr2 to zero for userspace syscalls to work. */
2133 mtsp %r1,%sr4 /* Restore sr4 */
2134 mtsp %r1,%sr5 /* Restore sr5 */
2135 mtsp %r1,%sr6 /* Restore sr6 */
2137 depi 3,31,2,%r31 /* ensure return to user mode. */
2140 /* decide whether to reset the wide mode bit
2142 * For a syscall, the W bit is stored in the lowest bit
2143 * of sp. Extract it and reset W if it is zero */
2144 extrd,u,*<> %r30,63,1,%r1
2146 /* now reset the lowest bit of sp if it was set */
2149 be,n 0(%sr3,%r31) /* return to user space */
2151 /* We have to return via an RFI, so that PSW T and R bits can be set
2153 * This sets up pt_regs so we can return via intr_restore, which is not
2154 * the most efficient way of doing things, but it works.
2156 syscall_restore_rfi:
2157 ldo -1(%r0),%r2 /* Set recovery cntr to -1 */
2158 mtctl %r2,%cr0 /* for immediate trap */
2159 LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */
2160 ldi 0x0b,%r20 /* Create new PSW */
2161 depi -1,13,1,%r20 /* C, Q, D, and I bits */
2163 /* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are
2164 * set in include/linux/ptrace.h and converted to PA bitmap
2165 * numbers in asm-offsets.c */
2167 /* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */
2168 extru,= %r19,PA_SINGLESTEP_BIT,1,%r0
2169 depi -1,27,1,%r20 /* R bit */
2171 /* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */
2172 extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0
2173 depi -1,7,1,%r20 /* T bit */
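/* (Note, assumption: the base value 0x0b plus the deposit at bit 13
 * yields a PSW with C, Q, D and I set, per the comment above; the two
 * extru,= tests then set the R (recovery counter) and T (taken-branch
 * trap) bits only when the corresponding ptrace step flag is on, so
 * single-step and block-step take effect after the rfi.) */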
2175 STREG %r20,TASK_PT_PSW(%r1)
2177 /* Always store space registers, since sr3 can be changed (e.g. fork) */
2180 STREG %r25,TASK_PT_SR3(%r1)
2181 STREG %r25,TASK_PT_SR4(%r1)
2182 STREG %r25,TASK_PT_SR5(%r1)
2183 STREG %r25,TASK_PT_SR6(%r1)
2184 STREG %r25,TASK_PT_SR7(%r1)
2185 STREG %r25,TASK_PT_IASQ0(%r1)
2186 STREG %r25,TASK_PT_IASQ1(%r1)
2189 /* Now if old D bit is clear, it means we didn't save all registers
2190 * on syscall entry, so do that now. This only happens on TRACEME
2191 * calls, or if someone attached to us while we were on a syscall.
2192 * We could make this more efficient by not saving r3-r18, but
2193 * then we wouldn't be able to use the common intr_restore path.
2194 * It is only for traced processes anyway, so performance is not
2197 bb,< %r2,30,pt_regs_ok /* Branch if D set */
2198 ldo TASK_REGS(%r1),%r25
2199 reg_save %r25 /* Save r3 to r18 */
2201 /* Save the current sr */
2203 STREG %r2,TASK_PT_SR0(%r1)
2205 /* Save the scratch sr */
2207 STREG %r2,TASK_PT_SR1(%r1)
2209 /* sr2 should be set to zero for userspace syscalls */
2210 STREG %r0,TASK_PT_SR2(%r1)
2213 LDREG TASK_PT_GR31(%r1),%r2
2214 depi 3,31,2,%r2 /* ensure return to user mode. */
2215 STREG %r2,TASK_PT_IAOQ0(%r1)
2217 STREG %r2,TASK_PT_IAOQ1(%r1)
2222 .import schedule,code
2226 ldo -16(%r30),%r29 /* Reference param save area */
2230 b syscall_check_bh /* if resched, we start over again */
2233 .import do_signal,code
2235 /* Save callee-save registers (for sigcontext).
2236 FIXME: After this point the process structure should be
2237 consistent with all the relevant state of the process
2238 before the syscall. We need to verify this. */
2239 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
2240 ldo TASK_REGS(%r1), %r25 /* struct pt_regs *regs */
2243 ldi 1, %r24 /* unsigned long in_syscall */
2246 ldo -16(%r30),%r29 /* Reference param save area */
2249 copy %r0, %r26 /* sigset_t *oldset = NULL */
2251 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
2252 ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
2255 b,n syscall_check_sig
2258 * get_register is used by the non access tlb miss handlers to
2259 * copy the value of the general register specified in r8 into
2260 * r1. This routine can't be used for shadowed registers, since
2261 * the rfir will restore the original value. So, for the shadowed
2262 * registers we put a -1 into r1 to indicate that the register
2263 * should not be used (the register being copied could also have
2264 * a -1 in it, but that is OK, it just means that we will have
2265 * to use the slow path instead).
2271 bv %r0(%r25) /* r0 */
2273 bv %r0(%r25) /* r1 - shadowed */
2275 bv %r0(%r25) /* r2 */
2277 bv %r0(%r25) /* r3 */
2279 bv %r0(%r25) /* r4 */
2281 bv %r0(%r25) /* r5 */
2283 bv %r0(%r25) /* r6 */
2285 bv %r0(%r25) /* r7 */
2287 bv %r0(%r25) /* r8 - shadowed */
2289 bv %r0(%r25) /* r9 - shadowed */
2291 bv %r0(%r25) /* r10 */
2293 bv %r0(%r25) /* r11 */
2295 bv %r0(%r25) /* r12 */
2297 bv %r0(%r25) /* r13 */
2299 bv %r0(%r25) /* r14 */
2301 bv %r0(%r25) /* r15 */
2303 bv %r0(%r25) /* r16 - shadowed */
2305 bv %r0(%r25) /* r17 - shadowed */
2307 bv %r0(%r25) /* r18 */
2309 bv %r0(%r25) /* r19 */
2311 bv %r0(%r25) /* r20 */
2313 bv %r0(%r25) /* r21 */
2315 bv %r0(%r25) /* r22 */
2317 bv %r0(%r25) /* r23 */
2319 bv %r0(%r25) /* r24 - shadowed */
2321 bv %r0(%r25) /* r25 - shadowed */
2323 bv %r0(%r25) /* r26 */
2325 bv %r0(%r25) /* r27 */
2327 bv %r0(%r25) /* r28 */
2329 bv %r0(%r25) /* r29 */
2331 bv %r0(%r25) /* r30 */
2333 bv %r0(%r25) /* r31 */
2337 * set_register is used by the non access tlb miss handlers to
2338 * copy the value of r1 into the general register specified in
2345 bv %r0(%r25) /* r0 (silly, but it is a place holder) */
2347 bv %r0(%r25) /* r1 */
2349 bv %r0(%r25) /* r2 */
2351 bv %r0(%r25) /* r3 */
2353 bv %r0(%r25) /* r4 */
2355 bv %r0(%r25) /* r5 */
2357 bv %r0(%r25) /* r6 */
2359 bv %r0(%r25) /* r7 */
2361 bv %r0(%r25) /* r8 */
2363 bv %r0(%r25) /* r9 */
2365 bv %r0(%r25) /* r10 */
2367 bv %r0(%r25) /* r11 */
2369 bv %r0(%r25) /* r12 */
2371 bv %r0(%r25) /* r13 */
2373 bv %r0(%r25) /* r14 */
2375 bv %r0(%r25) /* r15 */
2377 bv %r0(%r25) /* r16 */
2379 bv %r0(%r25) /* r17 */
2381 bv %r0(%r25) /* r18 */
2383 bv %r0(%r25) /* r19 */
2385 bv %r0(%r25) /* r20 */
2387 bv %r0(%r25) /* r21 */
2389 bv %r0(%r25) /* r22 */
2391 bv %r0(%r25) /* r23 */
2393 bv %r0(%r25) /* r24 */
2395 bv %r0(%r25) /* r25 */
2397 bv %r0(%r25) /* r26 */
2399 bv %r0(%r25) /* r27 */
2401 bv %r0(%r25) /* r28 */
2403 bv %r0(%r25) /* r29 */
2405 bv %r0(%r25) /* r30 */
2407 bv %r0(%r25) /* r31 */