#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
/*
 * Structure for storing CPU registers in the save area.
 */
#define SL_SPRG0	0x10	/* 4 SPRGs */
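/*
 * The remaining offsets below are a reconstruction (assumed, not
 * verbatim): they are inferred from the 16 bytes of SPRGs at 0x10 and
 * from the SL_* names the code below actually uses.
 */
#define SL_SP		0
#define SL_PC		4
#define SL_MSR		8
#define SL_SDR1		0xc
#define SL_DBAT0	0x20
#define SL_IBAT0	0x28
#define SL_DBAT1	0x30
#define SL_IBAT1	0x38
#define SL_DBAT2	0x40
#define SL_IBAT2	0x48
#define SL_DBAT3	0x50
#define SL_IBAT3	0x58
#define SL_TB		0x60
#define SL_R2		0x68
#define SL_CR		0x6c
#define SL_LR		0x70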
#define SL_R12		0x74	/* r12 to r31 */
#define SL_SIZE		(SL_R12 + 80)
_GLOBAL(swsusp_save_area)
	.space	SL_SIZE
_GLOBAL(swsusp_arch_suspend)
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
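	/* Sketch (assumed, reconstructed from the SL_* slots above):
	 * save the callee-visible context before anything clobbers it.
	 */
	mflr	r0
	stw	r0,SL_LR(r11)
	mfcr	r0
	stw	r0,SL_CR(r11)
	stw	r1,SL_SP(r11)
	stw	r2,SL_R2(r11)
	stmw	r12,SL_R12(r11)

	/* Save MSR & SDR1 */
	mfmsr	r4
	stw	r4,SL_MSR(r11)
	mfsdr1	r4
	stw	r4,SL_SDR1(r11)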
	/* Get a stable timebase and save it */
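	/* A sketch of the classic stable-TB read: loop until the upper
	 * half is unchanged across the read of the lower half.
	 */
1:	mftbu	r4
	stw	r4,SL_TB(r11)
	mftb	r5
	stw	r5,SL_TB+4(r11)
	mftbu	r3
	cmpw	r3,r4
	bne	1b

	/* Save SPRGs (the SPRG0 pair is assumed; SPRG1-3 follow) */
	mfsprg	r4,0
	stw	r4,SL_SPRG0(r11)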
	mfsprg	r4,1
	stw	r4,SL_SPRG0+4(r11)
	mfsprg	r4,2
	stw	r4,SL_SPRG0+8(r11)
	mfsprg	r4,3
	stw	r4,SL_SPRG0+12(r11)
	/* Save BATs (lower halves) */
	mfdbatl	r4,0
	stw	r4,SL_DBAT0+4(r11)
	mfdbatl	r4,1
	stw	r4,SL_DBAT1+4(r11)
	mfdbatl	r4,2
	stw	r4,SL_DBAT2+4(r11)
	mfdbatl	r4,3
	stw	r4,SL_DBAT3+4(r11)
	mfibatl	r4,0
	stw	r4,SL_IBAT0+4(r11)
	mfibatl	r4,1
	stw	r4,SL_IBAT1+4(r11)
	mfibatl	r4,2
	stw	r4,SL_IBAT2+4(r11)
	mfibatl	r4,3
	stw	r4,SL_IBAT3+4(r11)
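	/* The matching upper halves would be saved the same way; a
	 * sketch of the assumed pattern for DBAT0 (ditto for the other
	 * seven BATs):
	 *	mfdbatu	r4,0
	 *	stw	r4,SL_DBAT0(r11)
	 */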
	/* Backup various CPU config stuff */
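	/* Sketch, assuming the usual cputable helper: */
	bl	__save_cpu_setup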
	/* Call the low level suspend stuff (we should probably have made
	 * it some function of the general "save to disk" stuff)
	 */
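	/* Sketch, assuming the generic C entry point swsusp_save: */
	bl	swsusp_save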
	/* Restore LR from the save area */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
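	/* Sketch: reload LR from the slot saved above and return */
	lwz	r0,SL_LR(r11)
	mtlr	r0
	blr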
_GLOBAL(swsusp_arch_resume)
	/* Stop pending AltiVec streams and memory accesses */
BEGIN_FTR_SECTION
	DSSALL
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	/* Disable MSR:DR to make sure we don't take a TLB or
	 * hash miss during the copy, as our hash table will
	 * for a while be unusable. For .text, we assume we are
	 * covered by a BAT. This works only for non-G5 at this
	 * point. G5 will need a better approach, possibly using
	 * a small temporary hash table filled with large mappings,
	 * as disabling the MMU completely isn't a good option for
	 * performance reasons.
	 * (Note that 750s may have the same performance issue as
	 * the G5 in this case; we should investigate using moving
	 * BATs for these CPUs)
	 */
	mfmsr	r0
	sync
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	sync
	isync
	/* Load the pointer to the list of pages to copy into r10 */
	lis	r11,(pagedir_nosave - KERNELBASE)@h
	ori	r11,r11,pagedir_nosave@l	/* @l is the same with or without -KERNELBASE */
	lwz	r10,0(r11)

	/* Copy the pages. This is a very basic implementation, to
	 * be replaced by something more cache efficient */
1:	tophys(r3,r10)
	li	r0,256			/* 256 * 16 bytes = one 4k page */
	mtctr	r0
	lwz	r11,pbe_address(r3)	/* source */
	tophys(r11,r11)
	lwz	r10,pbe_orig_address(r3)	/* destination */
	tophys(r10,r10)
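	/* Sketch of the copy loop body (assumed): move one page in
	 * 16-byte chunks, then follow pbe_next to the next entry.
	 */
2:
	lwz	r8,0(r11)
	lwz	r9,4(r11)
	lwz	r7,8(r11)
	lwz	r6,12(r11)
	addi	r11,r11,16
	stw	r8,0(r10)
	stw	r9,4(r10)
	stw	r7,8(r10)
	stw	r6,12(r10)
	addi	r10,r10,16
	bdnz	2b
	lwz	r10,pbe_next(r3)
	cmpwi	0,r10,0
	bne	1b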
	/* Do a very simple cache flush/inval of the L1 to ensure
	 * coherency of the icache
	 */
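	/* Sketch (the walk size is assumed): load-touch enough 32-byte
	 * lines from physical 0 to displace the L1 data cache.
	 */
	lis	r3,0x0002
	mtctr	r3
	li	r3,0
1:
	lwz	r0,0(r3)
	addi	r3,r3,0x0020
	bdnz	1b
	isync
	sync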
	/* Now flush those cache lines */
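	/* Same walk again, this time flushing with dcbf (sketch): */
	lis	r3,0x0002
	mtctr	r3
	li	r3,0
1:
	dcbf	0,r3
	addi	r3,r3,0x0020
	bdnz	1b
	sync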
	/* Ok, we are now running with the kernel data of the old
	 * kernel fully restored. We can get to the save area
	 * easily now. As for the rest of the code, it assumes the
	 * loader kernel and the booted one are exactly identical
	 */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	tophys(r11,r11)		/* MSR:DR is still off, so use the physical address */
	/* Restore various CPU config stuff */
	bl	__restore_cpu_setup
	/* Restore the BATs, and SDR1. Then we can turn on the MMU.
	 * This is a bit hairy as we are running out of those BATs,
	 * but first, our code is probably in the icache, and we are
	 * writing the same value to the BAT, so that should be fine,
	 * though a better solution will have to be found long-term
	 */
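	/* Sketch (assumed): put back SDR1 first, then SPRG0; the
	 * SPRG1-3 reloads follow below.
	 */
	lwz	r4,SL_SDR1(r11)
	mtsdr1	r4
	lwz	r4,SL_SPRG0(r11)
	mtsprg	0,r4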
	lwz	r4,SL_SPRG0+4(r11)
	mtsprg	1,r4
	lwz	r4,SL_SPRG0+8(r11)
	mtsprg	2,r4
	lwz	r4,SL_SPRG0+12(r11)
	mtsprg	3,r4
	lwz	r4,SL_DBAT0+4(r11)
	mtdbatl	0,r4
	lwz	r4,SL_DBAT1+4(r11)
	mtdbatl	1,r4
	lwz	r4,SL_DBAT2+4(r11)
	mtdbatl	2,r4
	lwz	r4,SL_DBAT3+4(r11)
	mtdbatl	3,r4
	lwz	r4,SL_IBAT0+4(r11)
	mtibatl	0,r4
	lwz	r4,SL_IBAT1+4(r11)
	mtibatl	1,r4
	lwz	r4,SL_IBAT2+4(r11)
	mtibatl	2,r4
	lwz	r4,SL_IBAT3+4(r11)
	mtibatl	3,r4
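/* Sketch (assumed body for the feature section closed below): CPUs
 * with the high-BAT feature get BAT4-7 cleared so no stale
 * translations survive the resume.
 */
BEGIN_FTR_SECTION
	li	r4,0
	mtspr	SPRN_DBAT4U,r4
	mtspr	SPRN_DBAT4L,r4
	mtspr	SPRN_DBAT5U,r4
	mtspr	SPRN_DBAT5L,r4
	mtspr	SPRN_DBAT6U,r4
	mtspr	SPRN_DBAT6L,r4
	mtspr	SPRN_DBAT7U,r4
	mtspr	SPRN_DBAT7L,r4
	mtspr	SPRN_IBAT4U,r4
	mtspr	SPRN_IBAT4L,r4
	mtspr	SPRN_IBAT5U,r4
	mtspr	SPRN_IBAT5L,r4
	mtspr	SPRN_IBAT6U,r4
	mtspr	SPRN_IBAT6L,r4
	mtspr	SPRN_IBAT7U,r4
	mtspr	SPRN_IBAT7L,r4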
END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
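	/* Flush all TLBs: sketch, with an assumed starting count; walk
	 * back down one 4k page at a time and tlbie each EA.
	 */
	lis	r4,0x1000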
1:	addic.	r4,r4,-0x1000
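	tlbie	r4		/* assumed loop tail: invalidate, loop while r4 > 0 */
	bgt	1b
	sync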
	/* restore the MSR and turn on the MMU */
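	/* Sketch (assumed): reload the saved MSR and go through a
	 * helper that rfi's with translation back on, then restore the
	 * TB (TBL is zeroed first so no carry hits TBU mid-restore).
	 */
	lwz	r3,SL_MSR(r11)
	bl	turn_on_mmu
	tovirt(r11,r11)

	/* Restore TB */
	li	r3,0
	mttbl	r3
	lwz	r3,SL_TB(r11)
	lwz	r4,SL_TB+4(r11)
	mttbu	r3
	mttbl	r4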
	/* Kick decrementer */
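	/* A minimal sketch: a decrementer value of 1 underflows almost
	 * immediately, forcing a fresh decrementer exception.
	 */
	li	r0,1
	mtdec	r0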
	/* Restore the callee-saved registers and return */
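	/* Sketch (assumed, mirrors the save path in swsusp_arch_suspend): */
	lwz	r0,SL_CR(r11)
	mtcr	r0
	lwz	r2,SL_R2(r11)
	lmw	r12,SL_R12(r11)
	lwz	r1,SL_SP(r11)
	lwz	r0,SL_LR(r11)
	mtlr	r0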
	// XXX Note: we don't really need to call swsusp_resume
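	/* Sketch: report success to the caller */
	li	r3,0
	blr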
/* FIXME: This construct is actually not useful since we don't shut
 * down the instruction MMU; we could just flip MSR:DR back on.
 */
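/* A sketch of the turn_on_mmu helper the FIXME above refers to
 * (assumed): r3 holds the target MSR, LR the return address; the rfi
 * makes the MSR write and the return jump atomic.
 */
turn_on_mmu:
	mflr	r4
	mtsrr0	r4
	mtsrr1	r3
	sync
	isync
	rfi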