/*
 * arch/ia64/kernel/relocate_kernel.S
 *
 * Relocate kexec'able kernel and start it
 *
 * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
 * Copyright (C) 2005 Khalid Aziz  <khalid.aziz@hp.com>
 * Copyright (C) 2005 Intel Corp,  Zou Nan hai <nanhai.zou@intel.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
#include <asm/asmmacro.h>
#include <asm/kregs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mca_asm.h>

        /* Must be relocatable PIC code callable as a C function
         */
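        /* Incoming arguments, as used below:
         *   in0 - physical address of the kexec indirection page list
         *   in1 - entry point of the new kernel image (branched to via b6)
         *   in2 - converted to a physical address and left in r28 for the
         *         new kernel (the boot parameter block in the ia64 kexec path)
         *   in3 - base address of the PAL code area whose TR entry is purged
         */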
GLOBAL_ENTRY(relocate_new_kernel)
        .prologue
        alloc r31=ar.pfs,4,0,0,0
        .body
.reloc_entry:
{
        rsm psr.i | psr.ic
        mov r2=ip
}
        ;;
{
        flushrs                         // must be first insn in group
        srlz.i
}
        ;;
        dep r2=0,r2,61,3                // to physical address
        ;;
        // first switch to physical mode
        add r3=1f-.reloc_entry, r2
        movl r16 = IA64_PSR_AC|IA64_PSR_BN|IA64_PSR_IC
        mov ar.rsc=0                    // put RSE in enforced lazy mode
        ;;
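        // Switch sp and the RSE backing store to the scratch areas embedded
        // in this relocatable blob (memory_stack/register_stack below), so
        // nothing references the old kernel's stacks while its pages are
        // overwritten.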
        add sp=(memory_stack_end - 16 - .reloc_entry),r2
        add r8=(register_stack - .reloc_entry),r2
        ;;
        mov r18=ar.rnat
        mov ar.bspstore=r8
        ;;
        mov cr.ipsr=r16
        mov cr.iip=r3
        mov cr.ifs=r0
        srlz.i
        ;;
        mov ar.rnat=r18
        rfi
        ;;
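        // The rfi consumes the cr.ipsr/cr.iip/cr.ifs values set above: the
        // new psr has it/dt/rt clear, so execution resumes at label 1 with
        // instruction and data translation disabled (physical mode) and
        // interrupts still off.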
1:
        // physical mode code begin
        mov b6=in1
        dep r28=0,in2,61,3              // to physical address

        // purge all TC entries
#define O(member)       IA64_CPUINFO_##member##_OFFSET
        GET_THIS_PADDR(r2, cpu_info)    // load phys addr of cpu_info into r2
        ;;
        addl r17=O(PTCE_STRIDE),r2
        addl r2=O(PTCE_BASE),r2
        ;;
        ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));;     // r18=ptce_base
        ld4 r19=[r2],4                                  // r19=ptce_count[0]
        ld4 r21=[r17],4                                 // r21=ptce_stride[0]
        ;;
        ld4 r20=[r2]                                    // r20=ptce_count[1]
        ld4 r22=[r17]                                   // r22=ptce_stride[1]
        mov r24=r0
        ;;
        adds r20=-1,r20
        ;;
#undef O
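        // Flush the entire translation cache using the purge loop described
        // by PAL_PTCE_INFO: the outer loop (r24) runs ptce_count[0] times
        // stepping the address by ptce_stride[0], the inner loop (ar.lc)
        // runs ptce_count[1] times stepping by ptce_stride[1], issuing a
        // ptc.e at every step.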
2:
        cmp.ltu p6,p7=r24,r19
(p7)    br.cond.dpnt.few 4f
        mov ar.lc=r20
3:
        ptc.e r18
        ;;
        add r18=r22,r18
        br.cloop.sptk.few 3b
        ;;
        add r18=r21,r18
        add r24=1,r24
        ;;
        br.sptk.few 2b
4:
        srlz.i
        ;;
        // purge TR entry for kernel text and data
        movl r16=KERNEL_START
        mov r18=KERNEL_TR_PAGE_SHIFT<<2
        ;;
        ptr.i r16, r18
        ptr.d r16, r18
        ;;
        srlz.i
        ;;

        // purge TR entry for pal code
        mov r16=in3
        mov r18=IA64_GRANULE_SHIFT<<2
        ;;
        ptr.i r16,r18
        ;;
        srlz.i
        ;;

        // purge TR entry for stack
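        // IA64_KR(CURRENT_STACK) holds the granule number of the current
        // task's kernel stack; rebuild its mapped virtual address to drop
        // the pinned DTR covering it.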
        mov r16=IA64_KR(CURRENT_STACK)
        ;;
        shl r16=r16,IA64_GRANULE_SHIFT
        movl r19=PAGE_OFFSET
        ;;
        add r16=r19,r16
        mov r18=IA64_GRANULE_SHIFT<<2
        ;;
        ptr.d r16,r18
        ;;
        srlz.i
        ;;

        // copy segments
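        // Walk the kexec indirection list: each 8-byte entry is a
        // page-aligned address with flag bits in the low nibble -
        // 0x1 marks a new destination page, 0x2 an indirection page to
        // continue reading from, 0x4 the end of the list, 0x8 a source
        // page to copy to the current destination.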
        movl r16=PAGE_MASK
        mov  r30=in0                    // in0 is page_list
        br.sptk.few .dest_page
        ;;
.loop:
        ld8  r30=[in0], 8;;
.dest_page:
        tbit.z p0, p6=r30, 0;;          // 0x1 dest page
(p6)    and r17=r30, r16
(p6)    br.cond.sptk.few .loop;;

        tbit.z p0, p6=r30, 1;;          // 0x2 indirect page
(p6)    and in0=r30, r16
(p6)    br.cond.sptk.few .loop;;

        tbit.z p0, p6=r30, 2;;          // 0x4 end flag
(p6)    br.cond.sptk.few .end_loop;;

        tbit.z p6, p0=r30, 3;;          // 0x8 source page
(p6)    br.cond.sptk.few .loop

        and r18=r30, r16

        // simple copy page, may optimize later
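        // fc.i below flushes each destination line so the relocated kernel
        // text is coherent for instruction fetch before we branch to it
        // (completed by the sync.i/srlz.i at .end_loop).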
        movl r14=PAGE_SIZE/8 - 1;;
        mov ar.lc=r14;;
1:
        ld8 r14=[r18], 8;;
        st8 [r17]=r14;;
        fc.i r17
        add r17=8, r17
        br.ctop.sptk.few 1b
        br.sptk.few .loop
        ;;

.end_loop:
        sync.i                  // for fc.i
        ;;
        srlz.i
        ;;
        srlz.d
        ;;
        br.call.sptk.many b0=b6;;
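        // b6 holds the new kernel's entry point (from in1); this call does
        // not return.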

.align  32
memory_stack:
        .fill           8192, 1, 0
memory_stack_end:
register_stack:
        .fill           8192, 1, 0
register_stack_end:
relocate_new_kernel_end:
END(relocate_new_kernel)

.global relocate_new_kernel_size
relocate_new_kernel_size:
        data8   relocate_new_kernel_end - relocate_new_kernel

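        /* Save this CPU's register state into the 8-byte-slot buffer at in0,
         * presumably for the crash dump path: r4/r5 are stored into slots
         * 4-5 first, the remaining r0-r31 fill slots 0-31, and ar.rnat, pr,
         * b0-b7, ip (approximated by b0), cfm (from ar.pfs), the psr user
         * mask and the other application registers fill slots 32 onward.
         */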
GLOBAL_ENTRY(ia64_dump_cpu_regs)
        .prologue
        alloc loc0=ar.pfs,1,2,0,0
        .body
        mov     ar.rsc=0                // put RSE in enforced lazy mode
        add     loc1=4*8, in0           // save r4 and r5 first
        ;;
{
        flushrs                         // flush dirty regs to backing store
        srlz.i
}
        st8 [loc1]=r4, 8
        ;;
        st8 [loc1]=r5, 8
        ;;
        add loc1=32*8, in0
        mov r4=ar.rnat
        ;;
        st8 [in0]=r0, 8                 // r0
        st8 [loc1]=r4, 8                // rnat
        mov r5=pr
        ;;
        st8 [in0]=r1, 8                 // r1
        st8 [loc1]=r5, 8                // pr
        mov r4=b0
        ;;
        st8 [in0]=r2, 8                 // r2
        st8 [loc1]=r4, 8                // b0
        mov r5=b1
        ;;
        st8 [in0]=r3, 24                // r3
        st8 [loc1]=r5, 8                // b1
        mov r4=b2
        ;;
        st8 [in0]=r6, 8                 // r6
        st8 [loc1]=r4, 8                // b2
        mov r5=b3
        ;;
        st8 [in0]=r7, 8                 // r7
        st8 [loc1]=r5, 8                // b3
        mov r4=b4
        ;;
        st8 [in0]=r8, 8                 // r8
        st8 [loc1]=r4, 8                // b4
        mov r5=b5
        ;;
        st8 [in0]=r9, 8                 // r9
        st8 [loc1]=r5, 8                // b5
        mov r4=b6
        ;;
        st8 [in0]=r10, 8                // r10
        st8 [loc1]=r4, 8                // b6
        mov r5=b7
        ;;
        st8 [in0]=r11, 8                // r11
        st8 [loc1]=r5, 8                // b7
        mov r4=b0
        ;;
        st8 [in0]=r12, 8                // r12
        st8 [loc1]=r4, 8                // ip
        mov r5=loc0
        ;;
        st8 [in0]=r13, 8                // r13
        extr.u r5=r5, 0, 38             // ar.pfs.pfm
        mov r4=r0                       // user mask
        ;;
        st8 [in0]=r14, 8                // r14
        st8 [loc1]=r5, 8                // cfm
        ;;
        st8 [in0]=r15, 8                // r15
        st8 [loc1]=r4, 8                // user mask
        mov r5=ar.rsc
        ;;
        st8 [in0]=r16, 8                // r16
        st8 [loc1]=r5, 8                // ar.rsc
        mov r4=ar.bsp
        ;;
        st8 [in0]=r17, 8                // r17
        st8 [loc1]=r4, 8                // ar.bsp
        mov r5=ar.bspstore
        ;;
        st8 [in0]=r18, 8                // r18
        st8 [loc1]=r5, 8                // ar.bspstore
        mov r4=ar.rnat
        ;;
        st8 [in0]=r19, 8                // r19
        st8 [loc1]=r4, 8                // ar.rnat
        mov r5=ar.ccv
        ;;
        st8 [in0]=r20, 8                // r20
        st8 [loc1]=r5, 8                // ar.ccv
        mov r4=ar.unat
        ;;
        st8 [in0]=r21, 8                // r21
        st8 [loc1]=r4, 8                // ar.unat
        mov r5=ar.fpsr
        ;;
        st8 [in0]=r22, 8                // r22
        st8 [loc1]=r5, 8                // ar.fpsr
        mov r4=ar.unat
        ;;
        st8 [in0]=r23, 8                // r23
        st8 [loc1]=r4, 8                // unat
        mov r5=ar.fpsr
        ;;
        st8 [in0]=r24, 8                // r24
        st8 [loc1]=r5, 8                // fpsr
        mov r4=ar.pfs
        ;;
        st8 [in0]=r25, 8                // r25
        st8 [loc1]=r4, 8                // ar.pfs
        mov r5=ar.lc
        ;;
        st8 [in0]=r26, 8                // r26
        st8 [loc1]=r5, 8                // ar.lc
        mov r4=ar.ec
        ;;
        st8 [in0]=r27, 8                // r27
        st8 [loc1]=r4, 8                // ar.ec
        mov r5=ar.csd
        ;;
        st8 [in0]=r28, 8                // r28
        st8 [loc1]=r5, 8                // ar.csd
        mov r4=ar.ssd
        ;;
        st8 [in0]=r29, 8                // r29
        st8 [loc1]=r4, 8                // ar.ssd
        ;;
        st8 [in0]=r30, 8                // r30
        ;;
        st8 [in0]=r31, 8                // r31
        mov ar.pfs=loc0
        ;;
        br.ret.sptk.many rp
END(ia64_dump_cpu_regs)