#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/* read ar.itc in advance, and use it before leaving bank 0 */
#define ACCOUNT_GET_STAMP \
(pUStk) mov.m r20=ar.itc;
#define ACCOUNT_SYS_ENTER \
(pUStk) br.call.spnt rp=account_sys_enter \
        ;;
#else
#define ACCOUNT_GET_STAMP
#define ACCOUNT_SYS_ENTER
#endif

/*
 * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
 * the minimum state necessary that allows us to turn psr.ic back
 * on.
 *
 * Assumed state upon entry:
 *      psr.ic: off
 *      r31:    contains saved predicates (pr)
 *
 * Upon exit, the state is as follows:
 *      psr.ic: off
 *       r2 = points to &pt_regs.r16
 *       r8 = contents of ar.ccv
 *       r9 = contents of ar.csd
 *      r10 = contents of ar.ssd
 *      r11 = FPSR_DEFAULT
 *      r12 = kernel sp (kernel virtual address)
 *      r13 = points to current task_struct (kernel virtual address)
 *      p15 = TRUE if psr.i is set in cr.ipsr
 *      predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
 *              preserved
 *
 * Note that psr.ic is NOT turned on by this macro.  This is so that
 * we can pass interruption state as arguments to a handler.
 */
#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
        mov r16=IA64_KR(CURRENT);       /* M */ \
        mov r27=ar.rsc;                 /* M */ \
        mov r20=r1;                     /* A */ \
        mov r25=ar.unat;                /* M */ \
        mov r29=cr.ipsr;                /* M */ \
        mov r26=ar.pfs;                 /* I */ \
        mov r28=cr.iip;                 /* M */ \
        mov r21=ar.fpsr;                /* M */ \
        COVER;                          /* B;; (or nothing) */ \
        ;; \
        adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
        ;; \
        ld1 r17=[r16];                  /* load current->thread.on_ustack flag */ \
        st1 [r16]=r0;                   /* clear current->thread.on_ustack flag */ \
        adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \
        /* switch from user to kernel RBS: */ \
        ;; \
        invala;                         /* M */ \
        SAVE_IFS; \
        cmp.eq pKStk,pUStk=r0,r17;      /* are we in kernel mode already? */ \
        ;; \
(pUStk) mov ar.rsc=0;           /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
        ;; \
(pUStk) mov.m r24=ar.rnat; \
(pUStk) addl r22=IA64_RBS_OFFSET,r1;            /* compute base of RBS */ \
(pKStk) mov r1=sp;                              /* get sp */ \
        ;; \
(pUStk) lfetch.fault.excl.nt1 [r22]; \
(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1;   /* compute base of memory stack */ \
(pUStk) mov r23=ar.bspstore;                    /* save ar.bspstore */ \
        ;; \
(pUStk) mov ar.bspstore=r22;                    /* switch to kernel RBS */ \
(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1;          /* if in kernel mode, use sp (r12) */ \
        ;; \
(pUStk) mov r18=ar.bsp; \
(pUStk) mov ar.rsc=0x3;         /* set eager mode, pl 0, little-endian, loadrs=0 */ \
        adds r17=2*L1_CACHE_BYTES,r1;           /* really: biggest cache-line size */ \
        adds r16=PT(CR_IPSR),r1; \
        ;; \
        lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
        st8 [r16]=r29;          /* save cr.ipsr */ \
        ;; \
        lfetch.fault.excl.nt1 [r17]; \
        tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \
        mov r29=b0 \
        ;; \
        adds r16=PT(R8),r1;     /* initialize first base pointer */ \
        adds r17=PT(R9),r1;     /* initialize second base pointer */ \
(pKStk) mov r18=r0;             /* make sure r18 isn't NaT */ \
        ;; \
.mem.offset 0,0; st8.spill [r16]=r8,16; \
.mem.offset 8,0; st8.spill [r17]=r9,16; \
        ;; \
.mem.offset 0,0; st8.spill [r16]=r10,24; \
.mem.offset 8,0; st8.spill [r17]=r11,24; \
        ;; \
        st8 [r16]=r28,16;       /* save cr.iip */ \
        st8 [r17]=r30,16;       /* save cr.ifs */ \
(pUStk) sub r18=r18,r22;        /* r18=RSE.ndirty*8 */ \
        mov r8=ar.ccv; \
        mov r9=ar.csd; \
        mov r10=ar.ssd; \
        movl r11=FPSR_DEFAULT;  /* L-unit */ \
        ;; \
        st8 [r16]=r25,16;       /* save ar.unat */ \
        st8 [r17]=r26,16;       /* save ar.pfs */ \
        shl r18=r18,16;         /* compute ar.rsc to be used for "loadrs" */ \
        ;; \
        st8 [r16]=r27,16;       /* save ar.rsc */ \
(pUStk) st8 [r17]=r24,16;       /* save ar.rnat */ \
(pKStk) adds r17=16,r17;        /* skip over ar_rnat field */ \
        ;;                      /* avoid RAW on r16 & r17 */ \
(pUStk) st8 [r16]=r23,16;       /* save ar.bspstore */ \
        st8 [r17]=r31,16;       /* save predicates */ \
(pKStk) adds r16=16,r16;        /* skip over ar_bspstore field */ \
        ;; \
        st8 [r16]=r29,16;       /* save b0 */ \
        st8 [r17]=r18,16;       /* save ar.rsc value for "loadrs" */ \
        cmp.eq pNonSys,pSys=r0,r0       /* initialize pSys=0, pNonSys=1 */ \
        ;; \
.mem.offset 0,0; st8.spill [r16]=r20,16;        /* save original r1 */ \
.mem.offset 8,0; st8.spill [r17]=r12,16; \
        adds r12=-16,r1;        /* switch to kernel memory stack (with 16 bytes of scratch) */ \
        ;; \
.mem.offset 0,0; st8.spill [r16]=r13,16; \
.mem.offset 8,0; st8.spill [r17]=r21,16;        /* save ar.fpsr */ \
        mov r13=IA64_KR(CURRENT);       /* establish `current' */ \
        ;; \
.mem.offset 0,0; st8.spill [r16]=r15,16; \
.mem.offset 8,0; st8.spill [r17]=r14,16; \
        ;; \
.mem.offset 0,0; st8.spill [r16]=r2,16; \
.mem.offset 8,0; st8.spill [r17]=r3,16; \
        ACCOUNT_GET_STAMP \
        adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
        ;; \
        EXTRA; \
        movl r1=__gp;           /* establish kernel global pointer */ \
        ;; \
        ACCOUNT_SYS_ENTER \
        bsw.1;                  /* switch back to bank 1 (must be last in insn group) */ \
        ;;

/*
 * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
 *
 * Assumed state upon entry:
 *      psr.ic: on
 *      r2:     points to &pt_regs.r16
 *      r3:     points to &pt_regs.r17
 *      r8:     contents of ar.ccv
 *      r9:     contents of ar.csd
 *      r10:    contents of ar.ssd
 *      r11:    FPSR_DEFAULT
 *
 * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
 */
#define SAVE_REST \
.mem.offset 0,0; st8.spill [r2]=r16,16; \
.mem.offset 8,0; st8.spill [r3]=r17,16; \
        ;; \
.mem.offset 0,0; st8.spill [r2]=r18,16; \
.mem.offset 8,0; st8.spill [r3]=r19,16; \
        ;; \
.mem.offset 0,0; st8.spill [r2]=r20,16; \
.mem.offset 8,0; st8.spill [r3]=r21,16; \
        mov r18=b6; \
        ;; \
.mem.offset 0,0; st8.spill [r2]=r22,16; \
.mem.offset 8,0; st8.spill [r3]=r23,16; \
        mov r19=b7; \
        ;; \
.mem.offset 0,0; st8.spill [r2]=r24,16; \
.mem.offset 8,0; st8.spill [r3]=r25,16; \
        ;; \
.mem.offset 0,0; st8.spill [r2]=r26,16; \
.mem.offset 8,0; st8.spill [r3]=r27,16; \
        ;; \
.mem.offset 0,0; st8.spill [r2]=r28,16; \
.mem.offset 8,0; st8.spill [r3]=r29,16; \
        ;; \
.mem.offset 0,0; st8.spill [r2]=r30,16; \
.mem.offset 8,0; st8.spill [r3]=r31,32; \
        ;; \
        mov ar.fpsr=r11;        /* M-unit */ \
        st8 [r2]=r8,8;          /* ar.ccv */ \
        adds r24=PT(B6)-PT(F7),r3; \
        ;; \
        stf.spill [r2]=f6,32; \
        stf.spill [r3]=f7,32; \
        ;; \
        stf.spill [r2]=f8,32; \
        stf.spill [r3]=f9,32; \
        ;; \
        stf.spill [r2]=f10; \
        stf.spill [r3]=f11; \
        adds r25=PT(B7)-PT(F11),r3; \
        ;; \
        st8 [r24]=r18,16;       /* b6 */ \
        st8 [r25]=r19,16;       /* b7 */ \
        ;; \
        st8 [r24]=r9;           /* ar.csd */ \
        st8 [r25]=r10;          /* ar.ssd */ \
        ;;

#define SAVE_MIN_WITH_COVER     DO_SAVE_MIN(cover, mov r30=cr.ifs,)
#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
#define SAVE_MIN                DO_SAVE_MIN( , mov r30=r0, )
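
/*
 * Usage sketch (illustrative only, not part of this header): an interruption
 * handler, e.g. in ivt.S, typically pairs these macros roughly as follows.
 * The outgoing-argument setup and the final branch target are placeholders
 * for this sketch; only the r2/r3 setup and the psr.ic/psr.i sequencing
 * follow the register contracts documented above.
 *
 *      SAVE_MIN_WITH_COVER                     // save minimal state, psr.ic still off
 *      alloc r15=ar.pfs,0,0,3,0                // allocate outgoing argument frame
 *      mov out0=cr.ifa                         // pass interruption state while ic is off
 *      mov out1=cr.isr
 *      adds r3=8,r2                            // r3 = &pt_regs.r17, as SAVE_REST expects
 *      ;;
 *      ssm psr.ic | PSR_DEFAULT_BITS           // turn interruption collection back on
 *      ;;
 *      srlz.i                                  // make the psr.ic change visible
 *      ;;
 * (p15)ssm psr.i                               // restore psr.i if it was set in cr.ipsr
 *      movl r14=ia64_leave_kernel              // return path after the C handler
 *      ;;
 *      SAVE_REST                               // finish saving pt_regs with psr.ic on
 *      mov rp=r14
 *      ;;
 *      adds out2=16,r12                        // third argument: pointer to pt_regs
 *      br.call.sptk.many b6=b6                 // b6 = C fault handler (set up by the caller)
 */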