/*
 * spu_switch.c
 *
 * (C) Copyright IBM Corp. 2005
 *
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * Host-side part of SPU context switch sequence outlined in
 * Synergistic Processor Element, Book IV.
 *
 * A fully preemptive switch of an SPE is very expensive in terms
 * of time and system resources.  SPE Book IV indicates that SPE
 * allocation should follow a "serially reusable device" model,
 * in which the SPE is assigned a task until it completes.  When
 * this is not possible, this sequence may be used to preemptively
 * save, and then later (optionally) restore the context of a
 * program executing on an SPE.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>

#include "spu_save_dump.h"
#include "spu_restore_dump.h"
#define RELAX_SPIN_COUNT				1000
#define POLL_WHILE_TRUE(_c) {				\
    do {						\
	int _i;						\
	for (_i=0; _i<RELAX_SPIN_COUNT && (_c); _i++) { \
		cpu_relax();				\
	}						\
	if (unlikely(_c)) yield();			\
	else {						\
		break;					\
	}						\
    } while (_c);					\
  }

#define POLL_WHILE_FALSE(_c)	POLL_WHILE_TRUE(!(_c))
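/*
 * Usage sketch (illustrative, not part of the original file): both
 * macros spin with cpu_relax() for up to RELAX_SPIN_COUNT iterations,
 * then yield() while the condition persists.  For example,
 *
 *	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
 *
 * waits until SPU_Status[R] reads back as 0, while
 *
 *	POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);
 *
 * waits until at least one tag group selected by 'mask' completes.
 * Both forms appear throughout the save/restore steps below; 'prob'
 * and 'mask' stand for the local variables used at each call site.
 */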
static inline void acquire_spu_lock(struct spu *spu)
{
	/* Save, Step 1:
	 * Restore, Step 1:
	 *    Acquire SPU-specific mutual exclusion lock.
	 *    TBD.
	 */
}

static inline void release_spu_lock(struct spu *spu)
{
	/* Restore, Step 76:
	 *    Release SPU-specific mutual exclusion lock.
	 *    TBD.
	 */
}
static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 isolate_state;

	/* Save, Step 2:
	 * Save, Step 6:
	 *     If SPU_Status[E,L,IS] any field is '1', this
	 *     SPU is in isolate state and cannot be context
	 *     saved at this time.
	 */
	isolate_state = SPU_STATUS_ISOLATED_STATE |
	    SPU_STATUS_ISOLATED_LOAD_STATUS | SPU_STATUS_ISOLATED_EXIT_STATUS;
	return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
}
static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 3:
	 * Restore, Step 2:
	 *     Save INT_Mask_class0 in CSA.
	 *     Write INT_MASK_class0 with value of 0.
	 *     Save INT_Mask_class1 in CSA.
	 *     Write INT_MASK_class1 with value of 0.
	 *     Save INT_Mask_class2 in CSA.
	 *     Write INT_MASK_class2 with value of 0.
	 */
	spin_lock_irq(&spu->register_lock);
	if (csa) {
		csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0);
		csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1);
		csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2);
	}
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, 0ul);
	spu_int_mask_set(spu, 2, 0ul);
	eieio();
	spin_unlock_irq(&spu->register_lock);
}
static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 4:
	 * Restore, Step 26:
	 *     Set a software watchdog timer, which specifies the
	 *     maximum allowable time for a context save sequence.
	 *
	 *     For present, this implementation will not set a global
	 *     watchdog timer, as virtualization & variable system load
	 *     may cause unpredictable execution times.
	 */
}
static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 5:
	 * Restore, Step 3:
	 *     Inhibit user-space access (if provided) to this
	 *     SPU by unmapping the virtual pages assigned to
	 *     the SPU memory-mapped I/O (MMIO) for problem
	 *     state. TBD.
	 */
}
static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 7:
	 * Restore, Step 5:
	 *     Set a software context switch pending flag.
	 */
	set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
	mb();
}
static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 8:
	 *     Suspend DMA and save MFC_CNTL.
	 */
	switch (in_be64(&priv2->mfc_control_RW) &
	       MFC_CNTL_SUSPEND_DMA_STATUS_MASK) {
	case MFC_CNTL_SUSPEND_IN_PROGRESS:
		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
				 MFC_CNTL_SUSPEND_COMPLETE);
		/* fall through */
	case MFC_CNTL_SUSPEND_COMPLETE:
		if (csa) {
			csa->priv2.mfc_control_RW =
			    MFC_CNTL_SUSPEND_MASK |
			    MFC_CNTL_SUSPEND_DMA_QUEUE;
		}
		break;
	case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION:
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
				 MFC_CNTL_SUSPEND_COMPLETE);
		if (csa) {
			csa->priv2.mfc_control_RW = 0;
		}
		break;
	}
}
static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 9:
	 *     Save SPU_Runcntl in the CSA.  This value contains
	 *     the "Application Desired State".
	 */
	csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW);
}

static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 10:
	 *     Save MFC_SR1 in the CSA.
	 */
	csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu);
}
static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 11:
	 *     Read SPU_Status[R], and save to CSA.
	 */
	if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) {
		csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
	} else {
		u32 stopped;

		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
		stopped =
		    SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
		    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
		if ((in_be32(&prob->spu_status_R) & stopped) == 0)
			csa->prob.spu_status_R = SPU_STATUS_RUNNING;
		else
			csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
	}
}
static inline void save_mfc_decr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 12:
	 *     Read MFC_CNTL[Ds].  Update saved copy of
	 *     CSA.MFC_CNTL[Ds].
	 */
	csa->priv2.mfc_control_RW |=
	    in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DECREMENTER_RUNNING;
}

static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 13:
	 *     Write MFC_CNTL[Dh] set to a '1' to halt
	 *     the decrementer.
	 */
	out_be64(&priv2->mfc_control_RW,
		 MFC_CNTL_DECREMENTER_HALTED | MFC_CNTL_SUSPEND_MASK);
	eieio();
}
static inline void save_timebase(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 14:
	 *     Read PPE Timebase High and Timebase low registers
	 *     and save in CSA.  TBD.
	 */
	csa->suspend_time = get_cycles();
}

static inline void remove_other_spu_access(struct spu_state *csa,
					   struct spu *spu)
{
	/* Save, Step 15:
	 *     Remove other SPU access to this SPU by unmapping
	 *     this SPU's pages from their address space.  TBD.
	 */
}
static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 16:
	 * Restore, Step 11.
	 *     Write SPU_MSSync register.  Poll SPU_MSSync[P]
	 *     for a value of 0.
	 */
	out_be64(&prob->spc_mssync_RW, 1UL);
	POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING);
}

static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 17:
	 * Restore, Step 12.
	 * Restore, Step 48.
	 *     Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register.
	 *     Then issue a PPE sync instruction.
	 */
	spu_tlb_invalidate(spu);
	mb();
}
static inline void handle_pending_interrupts(struct spu_state *csa,
					     struct spu *spu)
{
	/* Save, Step 18:
	 * Restore, Step 13.
	 *     Handle any pending interrupts from this SPU
	 *     here.  This is OS or hypervisor specific.  One
	 *     option is to re-enable interrupts to handle any
	 *     pending interrupts, with the interrupt handlers
	 *     recognizing the software Context Switch Pending
	 *     flag, to ensure the SPU execution or MFC command
	 *     queue is not restarted.  TBD.
	 */
}
static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save, Step 19:
	 *     If MFC_Cntl[Se]=0 then save
	 *     MFC command queues.
	 */
	if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) {
		for (i = 0; i < 8; i++) {
			csa->priv2.puq[i].mfc_cq_data0_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data0_RW);
			csa->priv2.puq[i].mfc_cq_data1_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data1_RW);
			csa->priv2.puq[i].mfc_cq_data2_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data2_RW);
			csa->priv2.puq[i].mfc_cq_data3_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data3_RW);
		}
		for (i = 0; i < 16; i++) {
			csa->priv2.spuq[i].mfc_cq_data0_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data0_RW);
			csa->priv2.spuq[i].mfc_cq_data1_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data1_RW);
			csa->priv2.spuq[i].mfc_cq_data2_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data2_RW);
			csa->priv2.spuq[i].mfc_cq_data3_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data3_RW);
		}
	}
}
static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 20:
	 *     Save the PPU_QueryMask register
	 *     in the CSA.
	 */
	csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW);
}

static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 21:
	 *     Save the PPU_QueryType register
	 *     in the CSA.
	 */
	csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW);
}

static inline void save_ppu_tagstatus(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save the Prxy_TagStatus register in the CSA.
	 *
	 * It is unnecessary to restore dma_tagstatus_R, however,
	 * dma_tagstatus_R in the CSA is accessed via backing_ops, so
	 * we must save it.
	 */
	csa->prob.dma_tagstatus_R = in_be32(&prob->dma_tagstatus_R);
}
static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 22:
	 *     Save the MFC_CSR_TSQ register
	 *     in the CSA.
	 */
	csa->priv2.spu_tag_status_query_RW =
	    in_be64(&priv2->spu_tag_status_query_RW);
}

static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 23:
	 *     Save the MFC_CSR_CMD1 and MFC_CSR_CMD2
	 *     registers in the CSA.
	 */
	csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW);
	csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW);
}

static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 24:
	 *     Save the MFC_CSR_ATO register in
	 *     the CSA.
	 */
	csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW);
}

static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 25:
	 *     Save the MFC_TCLASS_ID register in
	 *     the CSA.
	 */
	csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu);
}
static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 26:
	 * Restore, Step 24.
	 *     Write the MFC_TCLASS_ID register with
	 *     the value 0x10000000.
	 */
	spu_mfc_tclass_id_set(spu, 0x10000000);
	eieio();
}

static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 27:
	 * Restore, Step 14.
	 *     Write MFC_CNTL[Pc]=1 (purge queue).
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_PURGE_DMA_REQUEST);
	eieio();
}

static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 28:
	 * Restore, Step 15.
	 *     Poll MFC_CNTL[Ps] until value '11' is read
	 *     (purge complete).
	 */
	POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
			  MFC_CNTL_PURGE_DMA_STATUS_MASK) ==
			 MFC_CNTL_PURGE_DMA_COMPLETE);
}
static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 30:
	 * Restore, Step 18:
	 *     Write MFC_SR1 with MFC_SR1[D=0,S=1] and
	 *     MFC_SR1[TL,R,Pr,T] set correctly for the
	 *     OS specific environment.
	 *
	 *     Implementation note: The SPU-side code
	 *     for save/restore is privileged, so the
	 *     MFC_SR1[Pr] bit is not set.
	 */
	spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
			      MFC_STATE1_RELOCATE_MASK |
			      MFC_STATE1_BUS_TLBIE_MASK));
}

static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 31:
	 *     Save SPU_NPC in the CSA.
	 */
	csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW);
}
static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 32:
	 *     Save SPU_PrivCntl in the CSA.
	 */
	csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW);
}

static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 33:
	 * Restore, Step 16:
	 *     Write SPU_PrivCntl[S,Le,A] fields reset to 0.
	 */
	out_be64(&priv2->spu_privcntl_RW, 0UL);
	eieio();
}

static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 34:
	 *     Save SPU_LSLR in the CSA.
	 */
	csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW);
}

static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 35:
	 * Restore, Step 17.
	 *     Reset the SPU_LSLR register to its
	 *     default value (LS_ADDR_MASK).
	 */
	out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK);
	eieio();
}

static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 36:
	 *     Save SPU_Cfg in the CSA.
	 */
	csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW);
}
static inline void save_pm_trace(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 37:
	 *     Save PM_Trace_Tag_Wait_Mask in the CSA.
	 *     Not performed by this implementation.
	 */
}

static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 38:
	 *     Save RA_GROUP_ID register and the
	 *     RA_ENABLE register in the CSA.
	 */
	csa->priv1.resource_allocation_groupID_RW =
	    spu_resource_allocation_groupID_get(spu);
	csa->priv1.resource_allocation_enable_RW =
	    spu_resource_allocation_enable_get(spu);
}

static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 39:
	 *     Save MB_Stat register in the CSA.
	 */
	csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R);
}

static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 40:
	 *     Save the PPU_MB register in the CSA.
	 */
	csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R);
}

static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 41:
	 *     Save the PPUINT_MB register in the CSA.
	 */
	csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R);
}
static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/* Save, Step 42:
	 */

	/* Save CH 1, without channel count */
	out_be64(&priv2->spu_chnlcntptr_RW, 1);
	csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW);

	/* Save the following CH: [0,3,4,24,25,27] */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW);
		csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW);
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}
static inline void save_spu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save, Step 43:
	 *     Save SPU Read Mailbox Channel.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
	eieio();
	csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW);
	for (i = 0; i < 4; i++) {
		csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW);
	}
	out_be64(&priv2->spu_chnlcnt_RW, 0UL);
	eieio();
}

static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 44:
	 *     Save MFC_CMD Channel.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 21UL);
	eieio();
	csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW);
	eieio();
}
static inline void reset_ch(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL };
	u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL };
	u64 idx;
	int i;

	/* Save, Step 45:
	 *     Reset the following CH: [21, 23, 28, 30]
	 */
	for (i = 0; i < 4; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 46:
	 * Restore, Step 25.
	 *     Write MFC_CNTL[Sc]=0 (resume queue processing).
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
}
static inline void get_kernel_slb(u64 ea, u64 slb[2])
{
	u64 llp;

	if (REGION_ID(ea) == KERNEL_REGION_ID)
		llp = mmu_psize_defs[mmu_linear_psize].sllp;
	else
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;
	slb[0] = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
	    SLB_VSID_KERNEL | llp;
	slb[1] = (ea & ESID_MASK) | SLB_ESID_V;
}

static inline void load_mfc_slb(struct spu *spu, u64 slb[2], int slbe)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	out_be64(&priv2->slb_index_W, slbe);
	eieio();
	out_be64(&priv2->slb_vsid_RW, slb[0]);
	out_be64(&priv2->slb_esid_RW, slb[1]);
	eieio();
}
static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu)
{
	u64 code_slb[2];
	u64 lscsa_slb[2];

	/* Save, Step 47:
	 * Restore, Step 30.
	 *     If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All
	 *     register, then initialize SLB_VSID and SLB_ESID
	 *     to provide access to SPU context save code and
	 *     LSCSA.
	 *
	 *     This implementation places both the context
	 *     switch code and LSCSA in kernel address space.
	 *
	 *     Further, this implementation assumes that the
	 *     MFC_SR1[R]=1 (in other words, assume that
	 *     translation is desired by OS environment).
	 */
	spu_invalidate_slbs(spu);
	get_kernel_slb((unsigned long)&spu_save_code[0], code_slb);
	get_kernel_slb((unsigned long)csa->lscsa, lscsa_slb);
	load_mfc_slb(spu, code_slb, 0);
	if ((lscsa_slb[0] != code_slb[0]) || (lscsa_slb[1] != code_slb[1]))
		load_mfc_slb(spu, lscsa_slb, 1);
}
static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 48:
	 * Restore, Step 23.
	 *     Change the software context switch pending flag
	 *     to context switch active.
	 */
	set_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags);
	clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
	mb();
}

static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
{
	unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
	    CLASS1_ENABLE_STORAGE_FAULT_INTR;

	/* Save, Step 49:
	 * Restore, Step 22:
	 *     Reset and then enable interrupts, as
	 *     needed by this implementation.
	 *
	 *     This implementation enables only class1
	 *     (translation) interrupts.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_stat_clear(spu, 0, ~0ul);
	spu_int_stat_clear(spu, 1, ~0ul);
	spu_int_stat_clear(spu, 2, ~0ul);
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, class1_mask);
	spu_int_mask_set(spu, 2, 0ul);
	spin_unlock_irq(&spu->register_lock);
}
static inline int send_mfc_dma(struct spu *spu, unsigned long ea,
			       unsigned int ls_offset, unsigned int size,
			       unsigned int tag, unsigned int rclass,
			       unsigned int cmd)
{
	struct spu_problem __iomem *prob = spu->problem;
	union mfc_tag_size_class_cmd command;
	unsigned int transfer_size;
	volatile unsigned int status = 0x0;

	while (size > 0) {
		transfer_size =
		    (size > MFC_MAX_DMA_SIZE) ? MFC_MAX_DMA_SIZE : size;
		command.u.mfc_size = transfer_size;
		command.u.mfc_tag = tag;
		command.u.mfc_rclassid = rclass;
		command.u.mfc_cmd = cmd;
		do {
			out_be32(&prob->mfc_lsa_W, ls_offset);
			out_be64(&prob->mfc_ea_W, ea);
			out_be64(&prob->mfc_union_W.all64, command.all64);
			status =
			    in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
			if (unlikely(status & 0x2)) {
				cpu_relax();
			}
		} while (status & 0x3);
		size -= transfer_size;
		ea += transfer_size;
		ls_offset += transfer_size;
	}
	return 0;
}
static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = 16384;
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_PUT_CMD;

	/* Save, Step 50:
	 *     Issue a DMA command to copy the first 16K bytes
	 *     of local storage to the CSA.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 51:
	 * Restore, Step 31.
	 *     Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to entry
	 *     point address of context save code in local
	 *     storage.
	 *
	 *     This implementation uses SPU-side save/restore
	 *     programs with entry points at LSA of 0.
	 */
	out_be32(&prob->spu_npc_RW, 0);
	eieio();
}
static inline void set_signot1(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union {
		u64 ull;
		u32 ui[2];
	} addr64;

	/* Save, Step 52:
	 * Restore, Step 32:
	 *    Write SPU_Sig_Notify_1 register with upper 32-bits
	 *    of the CSA.LSCSA effective address.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify1, addr64.ui[0]);
}

static inline void set_signot2(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union {
		u64 ull;
		u32 ui[2];
	} addr64;

	/* Save, Step 53:
	 * Restore, Step 33:
	 *    Write SPU_Sig_Notify_2 register with lower 32-bits
	 *    of the CSA.LSCSA effective address.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify2, addr64.ui[1]);
}
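/*
 * Illustrative note (not part of the original source): the addr64
 * union used by set_signot1()/set_signot2() above splits the 64-bit
 * LSCSA effective address into two 32-bit halves; on big-endian
 * PowerPC, ui[0] is the most-significant word.  For a hypothetical
 * csa->lscsa of 0xc000000001a40000:
 *
 *	addr64.ull   = 0xc000000001a40000UL;
 *	addr64.ui[0] == 0xc0000000;	(-> SPU_Sig_Notify_1)
 *	addr64.ui[1] == 0x01a40000;	(-> SPU_Sig_Notify_2)
 *
 * The SPU-side save/restore program reads the two notification
 * registers and reassembles the words into its DMA target address.
 */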
static inline void send_save_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_save_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_save_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	/* Save, Step 54:
	 *     Issue a DMA command to copy context save code
	 *     to local storage and start SPU.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 55:
	 * Restore, Step 38.
	 *     Write PPU_QueryMask=1 (enable Tag Group 0)
	 *     and issue eieio instruction.
	 */
	out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0));
	eieio();
}
static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask = MFC_TAGID_TO_TAGMASK(0);
	unsigned long flags;

	/* Save, Step 56:
	 * Restore, Step 39.
	 * Restore, Step 45.
	 *     Poll PPU_TagStatus[gn] until 01 (Tag group 0 complete)
	 *     or write PPU_QueryType[TS]=01 and wait for Tag Group
	 *     Complete Interrupt.  Write INT_Stat_Class0 or
	 *     INT_Stat_Class2 with value of 'handled'.
	 */
	POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);

	local_irq_save(flags);
	spu_int_stat_clear(spu, 0, ~(0ul));
	spu_int_stat_clear(spu, 2, ~(0ul));
	local_irq_restore(flags);
}

static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	unsigned long flags;

	/* Save, Step 57:
	 * Restore, Step 40.
	 *     Poll until SPU_Status[R]=0 or wait for SPU Class 0
	 *     or SPU Class 2 interrupt.  Write INT_Stat_class0
	 *     or INT_Stat_class2 with value of handled.
	 */
	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);

	local_irq_save(flags);
	spu_int_stat_clear(spu, 0, ~(0ul));
	spu_int_stat_clear(spu, 2, ~(0ul));
	local_irq_restore(flags);
}
static inline int check_save_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 complete;

	/*
	 *     If SPU_Status[P]=1 and SPU_Status[SC] = "success",
	 *     context save succeeded, otherwise context save
	 *     failed.
	 */
	complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
		    SPU_STATUS_STOPPED_BY_STOP);
	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 4:
	 *    If required, notify the "using application" that
	 *    the SPU task has been terminated.  TBD.
	 */
}
static inline void suspend_mfc_and_halt_decr(struct spu_state *csa,
					     struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 7:
	 *     Write MFC_Cntl[Dh,Sc,Sm]='1','1','0' to suspend
	 *     the queue and halt the decrementer.
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE |
		 MFC_CNTL_DECREMENTER_HALTED);
	eieio();
}

static inline void wait_suspend_mfc_complete(struct spu_state *csa,
					     struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 8:
	 * Restore, Step 47.
	 *     Poll MFC_CNTL[Ss] until 11 is returned.
	 */
	POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
			  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
			 MFC_CNTL_SUSPEND_COMPLETE);
}
static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 9:
	 *    If SPU_Status[R]=1, stop SPU execution
	 *    and wait for stop to complete.
	 *
	 *    Returns       1 if SPU_Status[R]=1 on entry.
	 *                  0 otherwise.
	 */
	if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STATUS) {
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STATUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_WAITING_FOR_CHANNEL) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		return 1;
	}
	return 0;
}
static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 10:
	 *    If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1,
	 *    release SPU from isolate state.
	 */
	if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STATUS) {
			spu_mfc_sr1_set(spu,
					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STATUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			spu_mfc_sr1_set(spu,
					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
	}
}
static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	u64 idx;
	int i;

	/* Restore, Step 20:
	 */

	/* Reset CH 1 */
	out_be64(&priv2->spu_chnlcntptr_RW, 1);
	out_be64(&priv2->spu_chnldata_RW, 0UL);

	/* Reset the following CH: [0,3,4,24,25,27] */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}

static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL };
	u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL };
	u64 idx;
	int i;

	/* Restore, Step 21:
	 *     Reset the following CH: [21, 23, 28, 29, 30]
	 */
	for (i = 0; i < 5; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}
static inline void setup_spu_status_part1(struct spu_state *csa,
					  struct spu *spu)
{
	u32 status_P = SPU_STATUS_STOPPED_BY_STOP;
	u32 status_I = SPU_STATUS_INVALID_INSTR;
	u32 status_H = SPU_STATUS_STOPPED_BY_HALT;
	u32 status_S = SPU_STATUS_SINGLE_STEP;
	u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR;
	u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_INVALID_INSTR;
	u32 status_code;

	/* Restore, Step 27:
	 *     If the CSA.SPU_Status[I,S,H,P]=1 then add the correct
	 *     instruction sequence to the end of the SPU based restore
	 *     code (after the "context restored" stop and signal) to
	 *     restore the correct SPU status.
	 *
	 *     NOTE: Rather than modifying the SPU executable, we
	 *     instead add a new 'stopped_status' field to the
	 *     LSCSA.  The SPU-side restore reads this field and
	 *     takes the appropriate action when exiting.
	 */

	status_code =
	    (csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF;
	if ((csa->prob.spu_status_R & status_P_I) == status_P_I) {

		/* SPU_Status[P,I]=1 - Illegal Instruction followed
		 * by Stop and Signal instruction, followed by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) {

		/* SPU_Status[P,H]=1 - Halt Conditional, followed
		 * by Stop and Signal instruction, followed by
		 * 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) {

		/* SPU_Status[S,P]=1 - Stop and Signal instruction
		 * followed by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) {

		/* SPU_Status[S,I]=1 - Illegal instruction followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_P) == status_P) {

		/* SPU_Status[P]=1 - Stop and Signal instruction
		 * followed by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_H) == status_H) {

		/* SPU_Status[H]=1 - Halt Conditional, followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H;

	} else if ((csa->prob.spu_status_R & status_S) == status_S) {

		/* SPU_Status[S]=1 - Two nop instructions.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S;

	} else if ((csa->prob.spu_status_R & status_I) == status_I) {

		/* SPU_Status[I]=1 - Illegal instruction followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I;

	}
}
static inline void setup_spu_status_part2(struct spu_state *csa,
					  struct spu *spu)
{
	u32 mask;

	/* Restore, Step 28:
	 *     If the CSA.SPU_Status[I,S,H,P,R]=0 then
	 *     add a 'br *' instruction to the end of
	 *     the SPU based restore code.
	 *
	 *     NOTE: Rather than modifying the SPU executable, we
	 *     instead add a new 'stopped_status' field to the
	 *     LSCSA.  The SPU-side restore reads this field and
	 *     takes the appropriate action when exiting.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT |
	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
	if (!(csa->prob.spu_status_R & mask)) {
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R;
	}
}
static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 29:
	 *     Restore RA_GROUP_ID register and the
	 *     RA_ENABLE register from the CSA.
	 */
	spu_resource_allocation_groupID_set(spu,
			csa->priv1.resource_allocation_groupID_RW);
	spu_resource_allocation_enable_set(spu,
			csa->priv1.resource_allocation_enable_RW);
}

static inline void send_restore_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_restore_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_restore_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	/* Restore, Step 37:
	 *     Issue MFC DMA command to copy context
	 *     restore code to local storage.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}
static inline void setup_decr(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 34:
	 *     If CSA.MFC_CNTL[Ds]=1 (decrementer was
	 *     running) then adjust decrementer, set
	 *     decrementer running status in LSCSA,
	 *     and set decrementer "wrapped" status
	 *     in LSCSA.
	 */
	if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) {
		cycles_t resume_time = get_cycles();
		cycles_t delta_time = resume_time - csa->suspend_time;

		csa->lscsa->decr_status.slot[0] = SPU_DECR_STATUS_RUNNING;
		if (csa->lscsa->decr.slot[0] < delta_time) {
			csa->lscsa->decr_status.slot[0] |=
				 SPU_DECR_STATUS_WRAPPED;
		}

		csa->lscsa->decr.slot[0] -= delta_time;
	} else {
		csa->lscsa->decr_status.slot[0] = 0;
	}
}
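/*
 * Worked example (illustrative, not from the original source): if the
 * SPU decrementer had 1000 timebase ticks remaining at save time
 * (decr.slot[0] == 1000) and get_cycles() shows the context was
 * switched out for delta_time == 1500 ticks, then decr.slot[0] <
 * delta_time, so SPU_DECR_STATUS_WRAPPED is OR'ed into
 * decr_status.slot[0] and decr.slot[0] becomes 1000 - 1500, i.e. a
 * wrapped (underflowed) count: the same value the hardware
 * decrementer would show had it kept running across the switch.
 */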
static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 35:
	 *     Copy the CSA.PU_MB data into the LSCSA.
	 */
	csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R;
}

static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 36:
	 *     Copy the CSA.PUINT_MB data into the LSCSA.
	 */
	csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R;
}

static inline int check_restore_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 complete;

	/* Restore, Step 40:
	 *     If SPU_Status[P]=1 and SPU_Status[SC] = "success",
	 *     context restore succeeded, otherwise context restore
	 *     failed.
	 */
	complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
		    SPU_STATUS_STOPPED_BY_STOP);
	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}
static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 41:
	 *     Restore SPU_PrivCntl from the CSA.
	 */
	out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW);
	eieio();
}

static inline void restore_status_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask;

	/* Restore, Step 42:
	 *     If any CSA.SPU_Status[I,S,H,P]=1, then
	 *     restore the error or single step state.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	if (csa->prob.spu_status_R & mask) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
	}
}
static inline void restore_status_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask;

	/* Restore, Step 43:
	 *     If all CSA.SPU_Status[I,S,H,P,R]=0 then write
	 *     SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1,
	 *     then write '00' to SPU_RunCntl[R0R1] and wait
	 *     for SPU_Status[R]=0.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT |
	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
	if (!(csa->prob.spu_status_R & mask)) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
		POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) &
				 SPU_STATUS_RUNNING);
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
	}
}
static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = 16384;
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GET_CMD;

	/* Restore, Step 44:
	 *     Issue a DMA command to restore the first
	 *     16kb of local storage from CSA.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 46:
	 *     Write MFC_Cntl[Sc,Sm]='1','0' to suspend
	 *     the queue.
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
	eieio();
}
static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 49:
	 *     Write INT_MASK_class0 with value of 0.
	 *     Write INT_MASK_class1 with value of 0.
	 *     Write INT_MASK_class2 with value of 0.
	 *     Write INT_STAT_class0 with value of -1.
	 *     Write INT_STAT_class1 with value of -1.
	 *     Write INT_STAT_class2 with value of -1.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, 0ul);
	spu_int_mask_set(spu, 2, 0ul);
	spu_int_stat_clear(spu, 0, ~0ul);
	spu_int_stat_clear(spu, 1, ~0ul);
	spu_int_stat_clear(spu, 2, ~0ul);
	spin_unlock_irq(&spu->register_lock);
}
static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Restore, Step 50:
	 *     If MFC_Cntl[Se]!=0 then restore
	 *     MFC command queues.
	 */
	if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) {
		for (i = 0; i < 8; i++) {
			out_be64(&priv2->puq[i].mfc_cq_data0_RW,
				 csa->priv2.puq[i].mfc_cq_data0_RW);
			out_be64(&priv2->puq[i].mfc_cq_data1_RW,
				 csa->priv2.puq[i].mfc_cq_data1_RW);
			out_be64(&priv2->puq[i].mfc_cq_data2_RW,
				 csa->priv2.puq[i].mfc_cq_data2_RW);
			out_be64(&priv2->puq[i].mfc_cq_data3_RW,
				 csa->priv2.puq[i].mfc_cq_data3_RW);
		}
		for (i = 0; i < 16; i++) {
			out_be64(&priv2->spuq[i].mfc_cq_data0_RW,
				 csa->priv2.spuq[i].mfc_cq_data0_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data1_RW,
				 csa->priv2.spuq[i].mfc_cq_data1_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data2_RW,
				 csa->priv2.spuq[i].mfc_cq_data2_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data3_RW,
				 csa->priv2.spuq[i].mfc_cq_data3_RW);
		}
	}
	eieio();
}
static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 51:
	 *     Restore the PPU_QueryMask register from CSA.
	 */
	out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW);
	eieio();
}

static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 52:
	 *     Restore the PPU_QueryType register from CSA.
	 */
	out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW);
	eieio();
}

static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 53:
	 *     Restore the MFC_CSR_TSQ register from CSA.
	 */
	out_be64(&priv2->spu_tag_status_query_RW,
		 csa->priv2.spu_tag_status_query_RW);
	eieio();
}

static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 54:
	 *     Restore the MFC_CSR_CMD1 and MFC_CSR_CMD2
	 *     registers from CSA.
	 */
	out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW);
	out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW);
	eieio();
}

static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 55:
	 *     Restore the MFC_CSR_ATO register from CSA.
	 */
	out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW);
}

static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 56:
	 *     Restore the MFC_TCLASS_ID register from CSA.
	 */
	spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW);
	eieio();
}
static inline void set_llr_event(struct spu_state *csa, struct spu *spu)
{
	u64 ch0_cnt, ch0_data;
	u64 ch1_data;

	/* Restore, Step 57:
	 *    Set the Lock Line Reservation Lost Event by:
	 *      1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1.
	 *      2. If CSA.SPU_Channel_0_Count=0 and
	 *         CSA.SPU_Wr_Event_Mask[Lr]=1 and
	 *         CSA.SPU_Event_Status[Lr]=0 then set
	 *         CSA.SPU_Event_Status_Count=1.
	 */
	ch0_cnt = csa->spu_chnlcnt_RW[0];
	ch0_data = csa->spu_chnldata_RW[0];
	ch1_data = csa->spu_chnldata_RW[1];
	csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT;
	if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) &&
	    (ch1_data & MFC_LLR_LOST_EVENT)) {
		csa->spu_chnlcnt_RW[0] = 1;
	}
}

static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 58:
	 *    If the status of the CSA software decrementer
	 *    "wrapped" flag is set, OR in a '1' to
	 *    CSA.SPU_Event_Status[Tm].
	 */
	if (csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED) {
		csa->spu_chnldata_RW[0] |= 0x20;
	}
	if ((csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED) &&
	    (csa->spu_chnlcnt_RW[0] == 0 &&
	     ((csa->spu_chnldata_RW[2] & 0x20) == 0x0) &&
	     ((csa->spu_chnldata_RW[0] & 0x20) != 0x1))) {
		csa->spu_chnlcnt_RW[0] = 1;
	}
}
static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/* Restore, Step 59:
	 *     Restore the following CH: [0,3,4,24,25,27]
	 */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]);
		out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]);
		eieio();
	}
}

static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[3] = { 9UL, 21UL, 23UL };
	u64 ch_counts[3] = { 1UL, 16UL, 1UL };
	u64 idx;
	int i;

	/* Restore, Step 60:
	 *     Restore the following CH: [9,21,23].
	 */
	ch_counts[1] = csa->spu_chnlcnt_RW[21];
	for (i = 0; i < 3; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}
static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 61:
	 *     Restore the SPU_LSLR register from CSA.
	 */
	out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW);
	eieio();
}

static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 62:
	 *     Restore the SPU_Cfg register from CSA.
	 */
	out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW);
	eieio();
}

static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 63:
	 *     Restore PM_Trace_Tag_Wait_Mask from CSA.
	 *     Not performed by this implementation.
	 */
}

static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 64:
	 *     Restore SPU_NPC from CSA.
	 */
	out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW);
	eieio();
}

static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Restore, Step 65:
	 *     Restore MFC_RdSPU_MB from CSA.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
	eieio();
	out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]);
	for (i = 0; i < 4; i++) {
		out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]);
	}
	eieio();
}
static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 dummy = 0;

	/* Restore, Step 66:
	 *     If CSA.MB_Stat[P]=0 (mailbox empty) then
	 *     read from the PPU_MB register.
	 */
	if ((csa->prob.mb_stat_R & 0xFF) == 0) {
		dummy = in_be32(&prob->pu_mb_R);
		eieio();
	}
}

static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 dummy = 0UL;

	/* Restore, Step 67:
	 *     If CSA.MB_Stat[I]=0 (mailbox empty) then
	 *     read from the PPUINT_MB register.
	 */
	if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
		dummy = in_be64(&priv2->puint_mb_R);
		eieio();
		spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
	}
}
static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 69:
	 *     Restore the MFC_SR1 register from CSA.
	 */
	spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW);
	eieio();
}

static inline void restore_other_spu_access(struct spu_state *csa,
					    struct spu *spu)
{
	/* Restore, Step 70:
	 *     Restore other SPU mappings to this SPU.  TBD.
	 */
}

static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 71:
	 *     If CSA.SPU_Status[R]=1 then write
	 *     SPU_RunCntl[R0R1]='01'.
	 */
	if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
	}
}
static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 72:
	 *    Restore the MFC_CNTL register for the CSA.
	 */
	out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
	eieio();

	/*
	 * FIXME: this is to restart a DMA that we were processing
	 *        before the save.  Better to remember the fault
	 *        information in the csa instead.
	 */
	if ((csa->priv2.mfc_control_RW & MFC_CNTL_SUSPEND_DMA_QUEUE_MASK)) {
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
		eieio();
	}
}

static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 73:
	 *     Enable user-space access (if provided) to this
	 *     SPU by mapping the virtual pages assigned to
	 *     the SPU memory-mapped I/O (MMIO) for problem
	 *     state.  TBD.
	 */
}

static inline void reset_switch_active(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 74:
	 *     Reset the "context switch active" flag.
	 */
	clear_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags);
	mb();
}

static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 75:
	 *     Re-enable SPU interrupts.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW);
	spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW);
	spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW);
	spin_unlock_irq(&spu->register_lock);
}
static int quiesce_spu(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Combined steps 2-18 of SPU context save sequence, which
	 * quiesce the SPU state (disable SPU execution, MFC command
	 * queues, decrementer, SPU interrupts, etc.).
	 *
	 * Returns      0 on success.
	 *              2 if failed step 2.
	 *              6 if failed step 6.
	 */

	if (check_spu_isolate(prev, spu)) {	/* Step 2. */
		return 2;
	}
	disable_interrupts(prev, spu);	        /* Step 3. */
	set_watchdog_timer(prev, spu);	        /* Step 4. */
	inhibit_user_access(prev, spu);	        /* Step 5. */
	if (check_spu_isolate(prev, spu)) {	/* Step 6. */
		return 6;
	}
	set_switch_pending(prev, spu);	        /* Step 7. */
	save_mfc_cntl(prev, spu);		/* Step 8. */
	save_spu_runcntl(prev, spu);	        /* Step 9. */
	save_mfc_sr1(prev, spu);	        /* Step 10. */
	save_spu_status(prev, spu);	        /* Step 11. */
	save_mfc_decr(prev, spu);	        /* Step 12. */
	halt_mfc_decr(prev, spu);	        /* Step 13. */
	save_timebase(prev, spu);		/* Step 14. */
	remove_other_spu_access(prev, spu);	/* Step 15. */
	do_mfc_mssync(prev, spu);	        /* Step 16. */
	issue_mfc_tlbie(prev, spu);	        /* Step 17. */
	handle_pending_interrupts(prev, spu);	/* Step 18. */

	return 0;
}
static void save_csa(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Combine steps 19-44 of SPU context save sequence, which
	 * save regions of the privileged & problem state areas.
	 */

	save_mfc_queues(prev, spu);	/* Step 19. */
	save_ppu_querymask(prev, spu);	/* Step 20. */
	save_ppu_querytype(prev, spu);	/* Step 21. */
	save_ppu_tagstatus(prev, spu);	/* NEW.     */
	save_mfc_csr_tsq(prev, spu);	/* Step 22. */
	save_mfc_csr_cmd(prev, spu);	/* Step 23. */
	save_mfc_csr_ato(prev, spu);	/* Step 24. */
	save_mfc_tclass_id(prev, spu);	/* Step 25. */
	set_mfc_tclass_id(prev, spu);	/* Step 26. */
	purge_mfc_queue(prev, spu);	/* Step 27. */
	wait_purge_complete(prev, spu);	/* Step 28. */
	setup_mfc_sr1(prev, spu);	/* Step 30. */
	save_spu_npc(prev, spu);	/* Step 31. */
	save_spu_privcntl(prev, spu);	/* Step 32. */
	reset_spu_privcntl(prev, spu);	/* Step 33. */
	save_spu_lslr(prev, spu);	/* Step 34. */
	reset_spu_lslr(prev, spu);	/* Step 35. */
	save_spu_cfg(prev, spu);	/* Step 36. */
	save_pm_trace(prev, spu);	/* Step 37. */
	save_mfc_rag(prev, spu);	/* Step 38. */
	save_ppu_mb_stat(prev, spu);	/* Step 39. */
	save_ppu_mb(prev, spu);	        /* Step 40. */
	save_ppuint_mb(prev, spu);	/* Step 41. */
	save_ch_part1(prev, spu);	/* Step 42. */
	save_spu_mb(prev, spu);	        /* Step 43. */
	save_mfc_cmd(prev, spu);	/* Step 44. */
	reset_ch(prev, spu);	        /* Step 45. */
}
static void save_lscsa(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Perform steps 46-57 of SPU context save sequence,
	 * which save regions of the local store and register
	 * file.
	 */

	resume_mfc_queue(prev, spu);	/* Step 46. */
	setup_mfc_slbs(prev, spu);	/* Step 47. */
	set_switch_active(prev, spu);	/* Step 48. */
	enable_interrupts(prev, spu);	/* Step 49. */
	save_ls_16kb(prev, spu);	/* Step 50. */
	set_spu_npc(prev, spu);	        /* Step 51. */
	set_signot1(prev, spu);		/* Step 52. */
	set_signot2(prev, spu);		/* Step 53. */
	send_save_code(prev, spu);	/* Step 54. */
	set_ppu_querymask(prev, spu);	/* Step 55. */
	wait_tag_complete(prev, spu);	/* Step 56. */
	wait_spu_stopped(prev, spu);	/* Step 57. */
}
static void force_spu_isolate_exit(struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Stop SPE execution and wait for completion. */
	out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
	iobarrier_rw();
	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);

	/* Restart SPE master runcntl. */
	spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK);
	iobarrier_w();

	/* Initiate isolate exit request and wait for completion. */
	out_be64(&priv2->spu_privcntl_RW, 4LL);
	iobarrier_w();
	out_be32(&prob->spu_runcntl_RW, 2);
	iobarrier_rw();
	POLL_WHILE_FALSE((in_be32(&prob->spu_status_R)
			  & SPU_STATUS_STOPPED_BY_STOP));

	/* Reset load request to normal. */
	out_be64(&priv2->spu_privcntl_RW, SPU_PRIVCNT_LOAD_REQUEST_NORMAL);
	iobarrier_w();
}

/**
 * stop_spu_isolate
 *	Check SPU run-control state and force isolated
 *	exit function as necessary.
 */
static void stop_spu_isolate(struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE) {
		/* The SPU is in isolated state; the only way
		 * to get it out is to perform an isolated
		 * exit (clean) operation.
		 */
		force_spu_isolate_exit(spu);
	}
}
static void harvest(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Perform steps 2-25 of SPU context restore sequence,
	 * which resets an SPU either after a failed save, or
	 * when using SPU for first time.
	 */

	disable_interrupts(prev, spu);	        /* Step 2.  */
	inhibit_user_access(prev, spu);	        /* Step 3.  */
	terminate_spu_app(prev, spu);	        /* Step 4.  */
	set_switch_pending(prev, spu);	        /* Step 5.  */
	stop_spu_isolate(spu);			/* NEW.     */
	remove_other_spu_access(prev, spu);	/* Step 6.  */
	suspend_mfc_and_halt_decr(prev, spu);	/* Step 7.  */
	wait_suspend_mfc_complete(prev, spu);	/* Step 8.  */
	if (!suspend_spe(prev, spu))	        /* Step 9.  */
		clear_spu_status(prev, spu);	/* Step 10. */
	do_mfc_mssync(prev, spu);	        /* Step 11. */
	issue_mfc_tlbie(prev, spu);	        /* Step 12. */
	handle_pending_interrupts(prev, spu);	/* Step 13. */
	purge_mfc_queue(prev, spu);	        /* Step 14. */
	wait_purge_complete(prev, spu);	        /* Step 15. */
	reset_spu_privcntl(prev, spu);	        /* Step 16. */
	reset_spu_lslr(prev, spu);	        /* Step 17. */
	setup_mfc_sr1(prev, spu);	        /* Step 18. */
	spu_invalidate_slbs(spu);		/* Step 19. */
	reset_ch_part1(prev, spu);	        /* Step 20. */
	reset_ch_part2(prev, spu);	        /* Step 21. */
	enable_interrupts(prev, spu);	        /* Step 22. */
	set_switch_active(prev, spu);	        /* Step 23. */
	set_mfc_tclass_id(prev, spu);	        /* Step 24. */
	resume_mfc_queue(prev, spu);	        /* Step 25. */
}
static void restore_lscsa(struct spu_state *next, struct spu *spu)
{
	/*
	 * Perform steps 26-40 of SPU context restore sequence,
	 * which restores regions of the local store and register
	 * file.
	 */

	set_watchdog_timer(next, spu);	        /* Step 26. */
	setup_spu_status_part1(next, spu);	/* Step 27. */
	setup_spu_status_part2(next, spu);	/* Step 28. */
	restore_mfc_rag(next, spu);	        /* Step 29. */
	setup_mfc_slbs(next, spu);	        /* Step 30. */
	set_spu_npc(next, spu);	                /* Step 31. */
	set_signot1(next, spu);		        /* Step 32. */
	set_signot2(next, spu);		        /* Step 33. */
	setup_decr(next, spu);	                /* Step 34. */
	setup_ppu_mb(next, spu);	        /* Step 35. */
	setup_ppuint_mb(next, spu);	        /* Step 36. */
	send_restore_code(next, spu);	        /* Step 37. */
	set_ppu_querymask(next, spu);	        /* Step 38. */
	wait_tag_complete(next, spu);	        /* Step 39. */
	wait_spu_stopped(next, spu);	        /* Step 40. */
}
static void restore_csa(struct spu_state *next, struct spu *spu)
{
	/*
	 * Combine steps 41-76 of SPU context restore sequence, which
	 * restore regions of the privileged & problem state areas.
	 */

	restore_spu_privcntl(next, spu);	/* Step 41. */
	restore_status_part1(next, spu);	/* Step 42. */
	restore_status_part2(next, spu);	/* Step 43. */
	restore_ls_16kb(next, spu);	        /* Step 44. */
	wait_tag_complete(next, spu);	        /* Step 45. */
	suspend_mfc(next, spu);	                /* Step 46. */
	wait_suspend_mfc_complete(next, spu);	/* Step 47. */
	issue_mfc_tlbie(next, spu);	        /* Step 48. */
	clear_interrupts(next, spu);	        /* Step 49. */
	restore_mfc_queues(next, spu);	        /* Step 50. */
	restore_ppu_querymask(next, spu);	/* Step 51. */
	restore_ppu_querytype(next, spu);	/* Step 52. */
	restore_mfc_csr_tsq(next, spu);	        /* Step 53. */
	restore_mfc_csr_cmd(next, spu);	        /* Step 54. */
	restore_mfc_csr_ato(next, spu);	        /* Step 55. */
	restore_mfc_tclass_id(next, spu);	/* Step 56. */
	set_llr_event(next, spu);	        /* Step 57. */
	restore_decr_wrapped(next, spu);	/* Step 58. */
	restore_ch_part1(next, spu);	        /* Step 59. */
	restore_ch_part2(next, spu);	        /* Step 60. */
	restore_spu_lslr(next, spu);	        /* Step 61. */
	restore_spu_cfg(next, spu);	        /* Step 62. */
	restore_pm_trace(next, spu);	        /* Step 63. */
	restore_spu_npc(next, spu);	        /* Step 64. */
	restore_spu_mb(next, spu);	        /* Step 65. */
	check_ppu_mb_stat(next, spu);	        /* Step 66. */
	check_ppuint_mb_stat(next, spu);	/* Step 67. */
	spu_invalidate_slbs(spu);		/* Modified Step 68. */
	restore_mfc_sr1(next, spu);	        /* Step 69. */
	restore_other_spu_access(next, spu);	/* Step 70. */
	restore_spu_runcntl(next, spu);	        /* Step 71. */
	restore_mfc_cntl(next, spu);	        /* Step 72. */
	enable_user_access(next, spu);	        /* Step 73. */
	reset_switch_active(next, spu);	        /* Step 74. */
	reenable_interrupts(next, spu);	        /* Step 75. */
}
static int __do_spu_save(struct spu_state *prev, struct spu *spu)
{
	int rc;

	/*
	 * SPU context save can be broken into three phases:
	 *
	 *     (a) quiesce [steps 2-16].
	 *     (b) save of CSA, performed by PPE [steps 17-42].
	 *     (c) save of LSCSA, mostly performed by SPU [steps 43-52].
	 *
	 * Returns      0 on success.
	 *              2,6 if failed to quiesce SPU.
	 *              53 if SPU-side of save failed.
	 */

	rc = quiesce_spu(prev, spu);	        /* Steps 2-16.  */
	switch (rc) {
	default:
	case 2:
	case 6:
		harvest(prev, spu);
		return rc;
	case 0:
		break;
	}
	save_csa(prev, spu);	                /* Steps 17-43. */
	save_lscsa(prev, spu);	                /* Steps 44-53. */
	return check_save_status(prev, spu);	/* Step 54.     */
}
static int __do_spu_restore(struct spu_state *next, struct spu *spu)
{
	int rc;

	/*
	 * SPU context restore can be broken into three phases:
	 *
	 *    (a) harvest (or reset) SPU [steps 2-24].
	 *    (b) restore LSCSA [steps 25-40], mostly performed by SPU.
	 *    (c) restore CSA [steps 41-76], performed by PPE.
	 *
	 * The 'harvest' step is not performed here, but rather
	 * as needed below.
	 */

	restore_lscsa(next, spu);	        /* Steps 24-39. */
	rc = check_restore_status(next, spu);	/* Step 40.     */
	switch (rc) {
	default:
		/* Failed.  Return now. */
		return rc;
	case 0:
		/* Fall through to next step. */
		break;
	}
	restore_csa(next, spu);

	return 0;
}
/**
 * spu_save - SPU context save, with locking.
 * @prev: pointer to SPU context save area, to be saved.
 * @spu: pointer to SPU iomem structure.
 *
 * Acquire locks, perform the save operation then return.
 */
int spu_save(struct spu_state *prev, struct spu *spu)
{
	int rc;

	acquire_spu_lock(spu);	        /* Step 1.     */
	prev->dar = spu->dar;
	prev->dsisr = spu->dsisr;
	spu->dar = 0;
	spu->dsisr = 0;
	rc = __do_spu_save(prev, spu);	/* Steps 2-53. */
	release_spu_lock(spu);
	if (rc != 0 && rc != 2 && rc != 6) {
		panic("%s failed on SPU[%d], rc=%d.\n",
		      __func__, spu->number, rc);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(spu_save);
/**
 * spu_restore - SPU context restore, with harvest and locking.
 * @new: pointer to SPU context save area, to be restored.
 * @spu: pointer to SPU iomem structure.
 *
 * Perform harvest + restore, as we may not be coming
 * from a previous successful save operation, and the
 * hardware state is unknown.
 */
int spu_restore(struct spu_state *new, struct spu *spu)
{
	int rc;

	acquire_spu_lock(spu);
	harvest(NULL, spu);
	spu->slb_replace = 0;
	spu->dar = 0;
	spu->dsisr = 0;
	spu->class_0_pending = 0;
	rc = __do_spu_restore(new, spu);
	release_spu_lock(spu);
	if (rc) {
		panic("%s failed on SPU[%d] rc=%d.\n",
		      __func__, spu->number, rc);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(spu_restore);
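/*
 * Usage sketch (illustrative only; the spufs scheduler is the real
 * caller): switching a physical SPU from context 'prev' to context
 * 'next' pairs the two exported entry points, e.g.:
 *
 *	spu_save(&prev->csa, spu);	// quiesce, then save CSA+LSCSA
 *	spu_restore(&next->csa, spu);	// harvest, then restore
 *
 * Here 'prev' and 'next' stand for spufs context structures holding
 * a struct spu_state 'csa' member; that naming is an assumption.
 * Only spu_save(), spu_restore() and struct spu_state are defined
 * by this file.
 */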
/**
 * spu_harvest - SPU harvest (reset) operation
 * @spu: pointer to SPU iomem structure.
 *
 * Perform SPU harvest (reset) operation.
 */
void spu_harvest(struct spu *spu)
{
	acquire_spu_lock(spu);
	harvest(NULL, spu);
	release_spu_lock(spu);
}
static void init_prob(struct spu_state *csa)
{
	csa->spu_chnlcnt_RW[9] = 1;
	csa->spu_chnlcnt_RW[21] = 16;
	csa->spu_chnlcnt_RW[23] = 1;
	csa->spu_chnlcnt_RW[28] = 1;
	csa->spu_chnlcnt_RW[30] = 1;
	csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP;
	csa->prob.mb_stat_R = 0x000400;
}
static void init_priv1(struct spu_state *csa)
{
	/* Enable decode, relocate, tlbie response, master runcntl. */
	csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK |
	    MFC_STATE1_MASTER_RUN_CONTROL_MASK |
	    MFC_STATE1_PROBLEM_STATE_MASK |
	    MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;

	/* Enable OS-specific set of interrupts. */
	csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |
	    CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |
	    CLASS0_ENABLE_SPU_ERROR_INTR;
	csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
	    CLASS1_ENABLE_STORAGE_FAULT_INTR;
	csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR |
	    CLASS2_ENABLE_SPU_HALT_INTR |
	    CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR;
}

static void init_priv2(struct spu_state *csa)
{
	csa->priv2.spu_lslr_RW = LS_ADDR_MASK;
	csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE |
	    MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION |
	    MFC_CNTL_DMA_QUEUES_EMPTY_MASK;
}
/**
 * spu_init_csa - allocate and initialize an SPU context save area.
 *
 * Allocate and initialize the contents of an SPU context save area.
 * This includes enabling address translation, interrupt masks, etc.,
 * as appropriate for the given OS environment.
 *
 * Note that storage for the 'lscsa' is allocated separately,
 * as it is by far the largest of the context save regions,
 * and may need to be pinned or otherwise specially aligned.
 */
int spu_init_csa(struct spu_state *csa)
{
	int rc;

	if (!csa)
		return -EINVAL;
	memset(csa, 0, sizeof(struct spu_state));

	rc = spu_alloc_lscsa(csa);
	if (rc)
		return rc;

	spin_lock_init(&csa->register_lock);

	init_prob(csa);
	init_priv1(csa);
	init_priv2(csa);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_init_csa);
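/*
 * Lifecycle sketch (illustrative only): each context owns one save
 * area, initialized once and released with spu_fini_csa() below:
 *
 *	struct spu_state csa;
 *	int rc;
 *
 *	rc = spu_init_csa(&csa);	// zeroes csa, allocates LSCSA
 *	if (rc)
 *		return rc;		// -EINVAL or spu_alloc_lscsa() rc
 *	// ... use csa with spu_save()/spu_restore() ...
 *	spu_fini_csa(&csa);
 *
 * The error-handling shape is a sketch; the return values match
 * spu_init_csa() as defined above.
 */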
void spu_fini_csa(struct spu_state *csa)
{
	spu_free_lscsa(csa);
}
EXPORT_SYMBOL_GPL(spu_fini_csa);