/*
 * (C) Copyright IBM Corp. 2005
 *
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * Host-side part of the SPU context switch sequence outlined in
 * Synergistic Processor Element, Book IV.
 *
 * A fully preemptive switch of an SPE is very expensive in terms
 * of time and system resources.  SPE Book IV indicates that SPE
 * allocation should follow a "serially reusable device" model,
 * in which the SPE is assigned a task until it completes.  When
 * this is not possible, this sequence may be used to preemptively
 * save, and then later (optionally) restore the context of a
 * program executing on an SPE.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>

#include "spu_save_dump.h"
#include "spu_restore_dump.h"

#define RELAX_SPIN_COUNT	1000
#define POLL_WHILE_TRUE(_c) {				\
	do {						\
		int _i;					\
		for (_i = 0; _i < RELAX_SPIN_COUNT && (_c); _i++) { \
			cpu_relax();			\
		}					\
		if (unlikely(_c))			\
			yield();			\
	} while (_c);					\
}

#define POLL_WHILE_FALSE(_c)	POLL_WHILE_TRUE(!(_c))

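/*
 * Illustrative usage sketch for the polling helpers above (it mirrors
 * the call sites later in this file): to spin until an SPU leaves the
 * running state one would write
 *
 *	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
 *
 * which busy-waits with cpu_relax() for up to RELAX_SPIN_COUNT
 * iterations, then falls back to yield() while the condition holds.
 */
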
static inline void acquire_spu_lock(struct spu *spu)
{
	/* Save, Step 1:
	 * Restore, Step 1:
	 *    Acquire SPU-specific mutual exclusion lock.
	 *    TBD.
	 */
}

static inline void release_spu_lock(struct spu *spu)
{
	/* Restore, Step 76:
	 *    Release SPU-specific mutual exclusion lock.
	 *    TBD.
	 */
}

static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 isolate_state;

	/* Save, Step 2:
	 * Save, Step 6:
	 *    If any field of SPU_Status[E,L,IS] is '1', this
	 *    SPU is in isolate state and cannot be context
	 *    saved at this time.
	 */
	isolate_state = SPU_STATUS_ISOLATED_STATE |
	    SPU_STATUS_ISOLATED_LOAD_STATUS | SPU_STATUS_ISOLATED_EXIT_STATUS;
	return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
}

static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 3:
	 * Restore, Step 2:
	 *    Save INT_Mask_class0 in CSA.
	 *    Write INT_MASK_class0 with value of 0.
	 *    Save INT_Mask_class1 in CSA.
	 *    Write INT_MASK_class1 with value of 0.
	 *    Save INT_Mask_class2 in CSA.
	 *    Write INT_MASK_class2 with value of 0.
	 */
	spin_lock_irq(&spu->register_lock);
	if (csa) {
		csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0);
		csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1);
		csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2);
	}
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, 0ul);
	spu_int_mask_set(spu, 2, 0ul);
	eieio();
	spin_unlock_irq(&spu->register_lock);
}

static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 4:
	 * Restore, Step 26.
	 *    Set a software watchdog timer, which specifies the
	 *    maximum allowable time for a context save sequence.
	 *
	 *    For the present, this implementation will not set a global
	 *    watchdog timer, as virtualization & variable system load
	 *    may cause unpredictable execution times.
	 */
}

static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 5:
	 * Restore, Step 3:
	 *    Inhibit user-space access (if provided) to this
	 *    SPU by unmapping the virtual pages assigned to
	 *    the SPU memory-mapped I/O (MMIO) for problem
	 *    state.  TBD.
	 */
}

static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 7:
	 * Restore, Step 5:
	 *    Set a software context switch pending flag.
	 */
	set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
	mb();
}

static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 8:
	 *    Suspend DMA and save MFC_CNTL.
	 */
	switch (in_be64(&priv2->mfc_control_RW) &
	       MFC_CNTL_SUSPEND_DMA_STATUS_MASK) {
	case MFC_CNTL_SUSPEND_IN_PROGRESS:
		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
				 MFC_CNTL_SUSPEND_COMPLETE);
		/* fall through */
	case MFC_CNTL_SUSPEND_COMPLETE:
		if (csa)
			csa->priv2.mfc_control_RW =
				MFC_CNTL_SUSPEND_MASK |
				MFC_CNTL_SUSPEND_DMA_QUEUE;
		break;
	case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION:
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
				 MFC_CNTL_SUSPEND_COMPLETE);
		if (csa)
			csa->priv2.mfc_control_RW = 0;
		break;
	}
}

static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 9:
	 *    Save SPU_Runcntl in the CSA.  This value contains
	 *    the "Application Desired State".
	 */
	csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW);
}

static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 10:
	 *    Save MFC_SR1 in the CSA.
	 */
	csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu);
}

static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 11:
	 *    Read SPU_Status[R], and save to CSA.
	 */
	if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) {
		csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
	} else {
		u32 stopped;

		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
		stopped =
		    SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
		    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
		if ((in_be32(&prob->spu_status_R) & stopped) == 0)
			csa->prob.spu_status_R = SPU_STATUS_RUNNING;
		else
			csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
	}
}

static inline void save_mfc_decr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 12:
	 *    Read MFC_CNTL[Ds].  Update saved copy of
	 *    CSA.MFC_CNTL[Ds].
	 */
	csa->priv2.mfc_control_RW |=
		in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DECREMENTER_RUNNING;
}

static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 13:
	 *    Write MFC_CNTL[Dh] set to a '1' to halt
	 *    the decrementer.
	 */
	out_be64(&priv2->mfc_control_RW,
		 MFC_CNTL_DECREMENTER_HALTED | MFC_CNTL_SUSPEND_MASK);
	eieio();
}

static inline void save_timebase(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 14:
	 *    Read PPE Timebase High and Timebase low registers
	 *    and save in CSA.  TBD.
	 */
	csa->suspend_time = get_cycles();
}

static inline void remove_other_spu_access(struct spu_state *csa,
					   struct spu *spu)
{
	/* Save, Step 15:
	 * Restore, Step 6:
	 *    Remove other SPU access to this SPU by unmapping
	 *    this SPU's pages from their address space.  TBD.
	 */
}

static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 16:
	 * Restore, Step 11.
	 *    Write SPU_MSSync register.  Poll SPU_MSSync[P]
	 *    for a value of 0 (sync complete).
	 */
	out_be64(&prob->spc_mssync_RW, 1UL);
	POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING);
}

static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 17:
	 * Restore, Step 12.
	 * Restore, Step 48.
	 *    Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register.
	 *    Then issue a PPE sync instruction.
	 */
	spu_tlb_invalidate(spu);
	mb();
}

static inline void handle_pending_interrupts(struct spu_state *csa,
					     struct spu *spu)
{
	/* Save, Step 18:
	 * Restore, Step 13.
	 *    Handle any pending interrupts from this SPU
	 *    here.  This is OS or hypervisor specific.  One
	 *    option is to re-enable interrupts to handle any
	 *    pending interrupts, with the interrupt handlers
	 *    recognizing the software Context Switch Pending
	 *    flag, to ensure the SPU execution or MFC command
	 *    queue is not restarted.  TBD.
	 */
}

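/*
 * Illustrative sketch only; no such handler is implemented here.  An
 * interrupt handler cooperating with the Context Switch Pending flag
 * set in Step 7 might bail out early rather than restart the SPU or
 * the MFC command queue, e.g.:
 *
 *	if (test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
 *		return IRQ_HANDLED;
 */
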
static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save, Step 19:
	 *    If MFC_Cntl[Se]=0 then save
	 *    MFC command queues.
	 */
	if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) {
		for (i = 0; i < 8; i++) {
			csa->priv2.puq[i].mfc_cq_data0_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data0_RW);
			csa->priv2.puq[i].mfc_cq_data1_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data1_RW);
			csa->priv2.puq[i].mfc_cq_data2_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data2_RW);
			csa->priv2.puq[i].mfc_cq_data3_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data3_RW);
		}
		for (i = 0; i < 16; i++) {
			csa->priv2.spuq[i].mfc_cq_data0_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data0_RW);
			csa->priv2.spuq[i].mfc_cq_data1_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data1_RW);
			csa->priv2.spuq[i].mfc_cq_data2_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data2_RW);
			csa->priv2.spuq[i].mfc_cq_data3_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data3_RW);
		}
	}
}

static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 20:
	 *    Save the PPU_QueryMask register
	 *    in the CSA.
	 */
	csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW);
}

static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 21:
	 *    Save the PPU_QueryType register
	 *    in the CSA.
	 */
	csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW);
}

static inline void save_ppu_tagstatus(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save the Prxy_TagStatus register in the CSA.
	 *
	 * It is unnecessary to restore dma_tagstatus_R, however,
	 * dma_tagstatus_R in the CSA is accessed via backing_ops, so
	 * we must save it here.
	 */
	csa->prob.dma_tagstatus_R = in_be32(&prob->dma_tagstatus_R);
}

static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 22:
	 *    Save the MFC_CSR_TSQ register
	 *    in the CSA.
	 */
	csa->priv2.spu_tag_status_query_RW =
	    in_be64(&priv2->spu_tag_status_query_RW);
}

static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 23:
	 *    Save the MFC_CSR_CMD1 and MFC_CSR_CMD2
	 *    registers in the CSA.
	 */
	csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW);
	csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW);
}

static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 24:
	 *    Save the MFC_CSR_ATO register in
	 *    the CSA.
	 */
	csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW);
}

static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 25:
	 *    Save the MFC_TCLASS_ID register in
	 *    the CSA.
	 */
	csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu);
}

static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 26:
	 * Restore, Step 24.
	 *    Write the MFC_TCLASS_ID register with
	 *    the value 0x10000000.
	 */
	spu_mfc_tclass_id_set(spu, 0x10000000);
	eieio();
}

static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 27:
	 * Restore, Step 14.
	 *    Write MFC_CNTL[Pc]=1 (purge queue).
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_PURGE_DMA_REQUEST);
	eieio();
}

static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 28:
	 * Restore, Step 15.
	 *    Poll MFC_CNTL[Ps] until value '11' is read
	 *    (purge complete).
	 */
	POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
			  MFC_CNTL_PURGE_DMA_STATUS_MASK) ==
			 MFC_CNTL_PURGE_DMA_COMPLETE);
}

static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 30:
	 * Restore, Step 18:
	 *    Write MFC_SR1 with MFC_SR1[D=0,S=1] and
	 *    MFC_SR1[TL,R,Pr,T] set correctly for the
	 *    OS specific environment.
	 *
	 *    Implementation note: The SPU-side code
	 *    for save/restore is privileged, so the
	 *    MFC_SR1[Pr] bit is not set.
	 */
	spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
			      MFC_STATE1_RELOCATE_MASK |
			      MFC_STATE1_BUS_TLBIE_MASK));
}

static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 31:
	 *    Save SPU_NPC in the CSA.
	 */
	csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW);
}

static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 32:
	 *    Save SPU_PrivCntl in the CSA.
	 */
	csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW);
}

static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 33:
	 * Restore, Step 16:
	 *    Write SPU_PrivCntl[S,Le,A] fields reset to 0.
	 */
	out_be64(&priv2->spu_privcntl_RW, 0UL);
	eieio();
}

static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 34:
	 *    Save SPU_LSLR in the CSA.
	 */
	csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW);
}

static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 35:
	 * Restore, Step 17.
	 *    Reset the SPU_LSLR register to its default
	 *    value, making all of local store addressable.
	 */
	out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK);
	eieio();
}

static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 36:
	 *    Save SPU_Cfg in the CSA.
	 */
	csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW);
}

static inline void save_pm_trace(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 37:
	 *    Save PM_Trace_Tag_Wait_Mask in the CSA.
	 *    Not performed by this implementation.
	 */
}

static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 38:
	 *    Save RA_GROUP_ID register and the
	 *    RA_ENABLE register in the CSA.
	 */
	csa->priv1.resource_allocation_groupID_RW =
	    spu_resource_allocation_groupID_get(spu);
	csa->priv1.resource_allocation_enable_RW =
	    spu_resource_allocation_enable_get(spu);
}

static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 39:
	 *    Save MB_Stat register in the CSA.
	 */
	csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R);
}

static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 40:
	 *    Save the PPU_MB register in the CSA.
	 */
	csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R);
}

static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 41:
	 *    Save the PPUINT_MB register in the CSA.
	 */
	csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R);
}

static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/* Save, Step 42:
	 */

	/* Save CH 1, without channel count */
	out_be64(&priv2->spu_chnlcntptr_RW, 1);
	csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW);

	/* Save the following CH: [0,3,4,24,25,27] */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW);
		csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW);
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}

static inline void save_spu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save, Step 43:
	 *    Save SPU Read Mailbox Channel.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
	eieio();
	csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW);
	for (i = 0; i < 4; i++) {
		csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW);
	}
	out_be64(&priv2->spu_chnlcnt_RW, 0UL);
	eieio();
}

static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 44:
	 *    Save MFC_CMD Channel.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 21UL);
	eieio();
	csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW);
	eieio();
}

static inline void reset_ch(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL };
	u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL };
	u64 idx;
	int i;

	/* Save, Step 45:
	 *    Reset the following CH: [21, 23, 28, 30]
	 */
	for (i = 0; i < 4; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 46:
	 * Restore, Step 25.
	 *    Write MFC_CNTL[Sc]=0 (resume queue processing).
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
}

static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu,
		unsigned int *code, int code_size)
{
	/* Save, Step 47:
	 * Restore, Step 30.
	 *    If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All
	 *    register, then initialize SLB_VSID and SLB_ESID
	 *    to provide access to SPU context save code and
	 *    LSCSA.
	 *
	 *    This implementation places both the context
	 *    switch code and LSCSA in kernel address space.
	 *
	 *    Further, this implementation assumes that the
	 *    MFC_SR1[R]=1 (in other words, assume that
	 *    translation is desired by OS environment).
	 */
	spu_invalidate_slbs(spu);
	spu_setup_kernel_slbs(spu, csa->lscsa, code, code_size);
}

static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 48:
	 * Restore, Step 23.
	 *    Change the software context switch pending flag
	 *    to context switch active.
	 *
	 *    This implementation does not use a switch active flag.
	 */
	clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
	mb();
}

static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
{
	unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
	    CLASS1_ENABLE_STORAGE_FAULT_INTR;

	/* Save, Step 49:
	 * Restore, Step 22:
	 *    Reset and then enable interrupts, as
	 *    needed by this implementation.
	 *
	 *    This implementation enables only class1
	 *    (translation) interrupts.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, class1_mask);
	spu_int_mask_set(spu, 2, 0ul);
	spin_unlock_irq(&spu->register_lock);
}

static inline int send_mfc_dma(struct spu *spu, unsigned long ea,
			       unsigned int ls_offset, unsigned int size,
			       unsigned int tag, unsigned int rclass,
			       unsigned int cmd)
{
	struct spu_problem __iomem *prob = spu->problem;
	union mfc_tag_size_class_cmd command;
	unsigned int transfer_size;
	volatile unsigned int status = 0x0;

	while (size > 0) {
		transfer_size =
		    (size > MFC_MAX_DMA_SIZE) ? MFC_MAX_DMA_SIZE : size;
		command.u.mfc_size = transfer_size;
		command.u.mfc_tag = tag;
		command.u.mfc_rclassid = rclass;
		command.u.mfc_cmd = cmd;
		do {
			out_be32(&prob->mfc_lsa_W, ls_offset);
			out_be64(&prob->mfc_ea_W, ea);
			out_be64(&prob->mfc_union_W.all64, command.all64);
			status =
			    in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
			if (unlikely(status & 0x2))
				cpu_relax();
		} while (status & 0x3);
		size -= transfer_size;
		ea += transfer_size;
		ls_offset += transfer_size;
	}
	return 0;
}

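/*
 * Worked example for send_mfc_dma() above, with hypothetical sizes:
 * assuming MFC_MAX_DMA_SIZE is 16 KB, a 32 KB request is issued as
 * two back-to-back 16 KB DMA commands; 'ea' and 'ls_offset' each
 * advance by 16384 after the first command and 'size' reaches zero
 * after the second, ending the loop.
 */
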
static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = 16384;
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_PUT_CMD;

	/* Save, Step 50:
	 *    Issue a DMA command to copy the first 16K bytes
	 *    of local storage to the CSA.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 51:
	 * Restore, Step 31.
	 *    Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to entry
	 *    point address of context save code in local
	 *    storage.
	 *
	 *    This implementation uses SPU-side save/restore
	 *    programs with entry points at LSA of 0.
	 */
	out_be32(&prob->spu_npc_RW, 0);
	eieio();
}

static inline void set_signot1(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union {
		u64 ull;
		u32 ui[2];
	} addr64;

	/* Save, Step 52:
	 * Restore, Step 32:
	 *    Write SPU_Sig_Notify_1 register with upper 32-bits
	 *    of the CSA.LSCSA effective address.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify1, addr64.ui[0]);
}

static inline void set_signot2(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union {
		u64 ull;
		u32 ui[2];
	} addr64;

	/* Save, Step 53:
	 * Restore, Step 33:
	 *    Write SPU_Sig_Notify_2 register with lower 32-bits
	 *    of the CSA.LSCSA effective address.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify2, addr64.ui[1]);
}

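/*
 * Worked example for set_signot1()/set_signot2(), using a
 * hypothetical LSCSA effective address of 0x0000000012345678:
 * on this big-endian platform addr64.ui[0] overlays the most
 * significant word of addr64.ull, so SPU_Sig_Notify_1 receives
 * 0x00000000 and SPU_Sig_Notify_2 receives 0x12345678.
 */
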
static inline void send_save_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_save_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_save_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	/* Save, Step 54:
	 *    Issue a DMA command to copy context save code
	 *    to local storage and start SPU.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 55:
	 * Restore, Step 38.
	 *    Write PPU_QueryMask=1 (enable Tag Group 0)
	 *    and issue eieio instruction.
	 */
	out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0));
	eieio();
}

static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask = MFC_TAGID_TO_TAGMASK(0);
	unsigned long flags;

	/* Save, Step 56:
	 * Restore, Step 39.
	 * Restore, Step 45.
	 *    Poll PPU_TagStatus[gn] until 01 (Tag group 0 complete)
	 *    or write PPU_QueryType[TS]=01 and wait for Tag Group
	 *    Complete Interrupt.  Write INT_Stat_Class0 or
	 *    INT_Stat_Class2 with value of 'handled'.
	 */
	POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);

	local_irq_save(flags);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	local_irq_restore(flags);
}

static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	unsigned long flags;

	/* Save, Step 57:
	 * Restore, Step 40.
	 *    Poll until SPU_Status[R]=0 or wait for SPU Class 0
	 *    or SPU Class 2 interrupt.  Write INT_Stat_class0
	 *    or INT_Stat_class2 with value of handled.
	 */
	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);

	local_irq_save(flags);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	local_irq_restore(flags);
}

static inline int check_save_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 complete;

	/* Save, Step 54:
	 *    If SPU_Status[P]=1 and SPU_Status[SC] = "success",
	 *    context save succeeded, otherwise context save
	 *    failed.
	 */
	complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
		    SPU_STATUS_STOPPED_BY_STOP);
	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 4:
	 *    If required, notify the "using application" that
	 *    the SPU task has been terminated.  TBD.
	 */
}

static inline void suspend_mfc_and_halt_decr(struct spu_state *csa,
					     struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 7:
	 *    Write MFC_Cntl[Dh,Sc,Sm]='1','1','0' to suspend
	 *    the queue and halt the decrementer.
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE |
		 MFC_CNTL_DECREMENTER_HALTED);
	eieio();
}

static inline void wait_suspend_mfc_complete(struct spu_state *csa,
					     struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 8:
	 * Restore, Step 47.
	 *    Poll MFC_CNTL[Ss] until 11 is returned.
	 */
	POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
			  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
			 MFC_CNTL_SUSPEND_COMPLETE);
}

static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 9:
	 *    If SPU_Status[R]=1, stop SPU execution
	 *    and wait for stop to complete.
	 *
	 *    Returns       1 if SPU_Status[R]=1 on entry.
	 *                  0 otherwise.
	 */
	if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STATUS) {
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STATUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_WAITING_FOR_CHANNEL) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		return 1;
	}
	return 0;
}

static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 10:
	 *    If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1,
	 *    release SPU from isolate state.
	 */
	if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STATUS) {
			spu_mfc_sr1_set(spu,
					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STATUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			spu_mfc_sr1_set(spu,
					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
	}
}

static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/* Restore, Step 20:
	 *    Reset the following CH: [0,3,4,24,25,27]
	 */

	/* Reset CH 1 */
	out_be64(&priv2->spu_chnlcntptr_RW, 1);
	out_be64(&priv2->spu_chnldata_RW, 0UL);

	/* Reset the following CH: [0,3,4,24,25,27] */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}

static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL };
	u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL };
	u64 idx;
	int i;

	/* Restore, Step 21:
	 *    Reset the following CH: [21, 23, 28, 29, 30]
	 */
	for (i = 0; i < 5; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void setup_spu_status_part1(struct spu_state *csa,
					  struct spu *spu)
{
	u32 status_P = SPU_STATUS_STOPPED_BY_STOP;
	u32 status_I = SPU_STATUS_INVALID_INSTR;
	u32 status_H = SPU_STATUS_STOPPED_BY_HALT;
	u32 status_S = SPU_STATUS_SINGLE_STEP;
	u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR;
	u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_INVALID_INSTR;
	u32 status_code;

	/* Restore, Step 27:
	 *    If the CSA.SPU_Status[I,S,H,P]=1 then add the correct
	 *    instruction sequence to the end of the SPU based restore
	 *    code (after the "context restored" stop and signal) to
	 *    restore the correct SPU status.
	 *
	 *    NOTE: Rather than modifying the SPU executable, we
	 *    instead add a new 'stopped_status' field to the
	 *    LSCSA.  The SPU-side restore reads this field and
	 *    takes the appropriate action when exiting.
	 */

	status_code =
	    (csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF;
	if ((csa->prob.spu_status_R & status_P_I) == status_P_I) {

		/* SPU_Status[P,I]=1 - Illegal Instruction followed
		 * by Stop and Signal instruction, followed by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) {

		/* SPU_Status[P,H]=1 - Halt Conditional, followed
		 * by Stop and Signal instruction, followed by
		 * 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) {

		/* SPU_Status[S,P]=1 - Stop and Signal instruction
		 * followed by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) {

		/* SPU_Status[S,I]=1 - Illegal instruction followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_P) == status_P) {

		/* SPU_Status[P]=1 - Stop and Signal instruction
		 * followed by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_H) == status_H) {

		/* SPU_Status[H]=1 - Halt Conditional, followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H;

	} else if ((csa->prob.spu_status_R & status_S) == status_S) {

		/* SPU_Status[S]=1 - Two nop instructions.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S;

	} else if ((csa->prob.spu_status_R & status_I) == status_I) {

		/* SPU_Status[I]=1 - Illegal instruction followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I;

	}
}

static inline void setup_spu_status_part2(struct spu_state *csa,
					  struct spu *spu)
{
	u32 mask;

	/* Restore, Step 28:
	 *    If the CSA.SPU_Status[I,S,H,P,R]=0 then
	 *    add a 'br *' instruction to the end of
	 *    the SPU based restore code.
	 *
	 *    NOTE: Rather than modifying the SPU executable, we
	 *    instead add a new 'stopped_status' field to the
	 *    LSCSA.  The SPU-side restore reads this field and
	 *    takes the appropriate action when exiting.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT |
	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
	if (!(csa->prob.spu_status_R & mask)) {
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R;
	}
}

static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 29:
	 *    Restore RA_GROUP_ID register and the
	 *    RA_ENABLE register from the CSA.
	 */
	spu_resource_allocation_groupID_set(spu,
			csa->priv1.resource_allocation_groupID_RW);
	spu_resource_allocation_enable_set(spu,
			csa->priv1.resource_allocation_enable_RW);
}

static inline void send_restore_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_restore_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_restore_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	/* Restore, Step 37:
	 *    Issue MFC DMA command to copy context
	 *    restore code to local storage.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void setup_decr(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 34:
	 *    If CSA.MFC_CNTL[Ds]=1 (decrementer was
	 *    running) then adjust decrementer, set
	 *    decrementer running status in LSCSA,
	 *    and set decrementer "wrapped" status
	 *    in LSCSA.
	 */
	if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) {
		cycles_t resume_time = get_cycles();
		cycles_t delta_time = resume_time - csa->suspend_time;

		csa->lscsa->decr_status.slot[0] = SPU_DECR_STATUS_RUNNING;
		if (csa->lscsa->decr.slot[0] < delta_time) {
			csa->lscsa->decr_status.slot[0] |=
				 SPU_DECR_STATUS_WRAPPED;
		}

		csa->lscsa->decr.slot[0] -= delta_time;
	} else {
		csa->lscsa->decr_status.slot[0] = 0;
	}
}

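/*
 * Worked example for setup_decr(), with hypothetical values and
 * assuming (as the code above does) that the SPU decrementer and
 * get_cycles() tick at the same rate: if the saved decrementer held
 * 1000 and 1500 cycles elapsed between save and restore, the
 * subtraction underflows and SPU_DECR_STATUS_WRAPPED is set, so the
 * restored context can observe that the decrementer event occurred
 * while it was switched out.
 */
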
static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 35:
	 *    Copy the CSA.PU_MB data into the LSCSA.
	 */
	csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R;
}

static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 36:
	 *    Copy the CSA.PUINT_MB data into the LSCSA.
	 */
	csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R;
}

static inline int check_restore_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 complete;

	/* Restore, Step 40:
	 *    If SPU_Status[P]=1 and SPU_Status[SC] = "success",
	 *    context restore succeeded, otherwise context restore
	 *    failed.
	 */
	complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
		    SPU_STATUS_STOPPED_BY_STOP);
	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 41:
	 *    Restore SPU_PrivCntl from the CSA.
	 */
	out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW);
	eieio();
}

static inline void restore_status_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask;

	/* Restore, Step 42:
	 *    If any CSA.SPU_Status[I,S,H,P]=1, then
	 *    restore the error or single step state.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	if (csa->prob.spu_status_R & mask) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
	}
}

static inline void restore_status_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask;

	/* Restore, Step 43:
	 *    If all CSA.SPU_Status[I,S,H,P,R]=0 then write
	 *    SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1,
	 *    then write '00' to SPU_RunCntl[R0R1] and wait
	 *    for SPU_Status[R]=0.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT |
	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
	if (!(csa->prob.spu_status_R & mask)) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
		POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) &
				 SPU_STATUS_RUNNING);
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
	}
}

static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = 16384;
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GET_CMD;

	/* Restore, Step 44:
	 *    Issue a DMA command to restore the first
	 *    16kb of local storage from CSA.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 46:
	 *    Write MFC_Cntl[Sc,Sm]='1','0' to suspend
	 *    the MFC command queue.
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
	eieio();
}

static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 49:
	 *    Write INT_MASK_class0 with value of 0.
	 *    Write INT_MASK_class1 with value of 0.
	 *    Write INT_MASK_class2 with value of 0.
	 *    Write INT_STAT_class0 with value of -1.
	 *    Write INT_STAT_class1 with value of -1.
	 *    Write INT_STAT_class2 with value of -1.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, 0ul);
	spu_int_mask_set(spu, 2, 0ul);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	spin_unlock_irq(&spu->register_lock);
}

static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Restore, Step 50:
	 *    If MFC_Cntl[Se]!=0 then restore
	 *    MFC command queues.
	 */
	if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) {
		for (i = 0; i < 8; i++) {
			out_be64(&priv2->puq[i].mfc_cq_data0_RW,
				 csa->priv2.puq[i].mfc_cq_data0_RW);
			out_be64(&priv2->puq[i].mfc_cq_data1_RW,
				 csa->priv2.puq[i].mfc_cq_data1_RW);
			out_be64(&priv2->puq[i].mfc_cq_data2_RW,
				 csa->priv2.puq[i].mfc_cq_data2_RW);
			out_be64(&priv2->puq[i].mfc_cq_data3_RW,
				 csa->priv2.puq[i].mfc_cq_data3_RW);
		}
		for (i = 0; i < 16; i++) {
			out_be64(&priv2->spuq[i].mfc_cq_data0_RW,
				 csa->priv2.spuq[i].mfc_cq_data0_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data1_RW,
				 csa->priv2.spuq[i].mfc_cq_data1_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data2_RW,
				 csa->priv2.spuq[i].mfc_cq_data2_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data3_RW,
				 csa->priv2.spuq[i].mfc_cq_data3_RW);
		}
	}
	eieio();
}

static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 51:
	 *    Restore the PPU_QueryMask register from CSA.
	 */
	out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW);
	eieio();
}

static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 52:
	 *    Restore the PPU_QueryType register from CSA.
	 */
	out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW);
	eieio();
}

static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 53:
	 *    Restore the MFC_CSR_TSQ register from CSA.
	 */
	out_be64(&priv2->spu_tag_status_query_RW,
		 csa->priv2.spu_tag_status_query_RW);
}

static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 54:
	 *    Restore the MFC_CSR_CMD1 and MFC_CSR_CMD2
	 *    registers from CSA.
	 */
	out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW);
	out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW);
}

static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 55:
	 *    Restore the MFC_CSR_ATO register from CSA.
	 */
	out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW);
}

static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 56:
	 *    Restore the MFC_TCLASS_ID register from CSA.
	 */
	spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW);
}

static inline void set_llr_event(struct spu_state *csa, struct spu *spu)
{
	u64 ch0_cnt, ch0_data;
	u64 ch1_data;

	/* Restore, Step 57:
	 *    Set the Lock Line Reservation Lost Event by:
	 *    1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1.
	 *    2. If CSA.SPU_Channel_0_Count=0 and
	 *       CSA.SPU_Wr_Event_Mask[Lr]=1 and
	 *       CSA.SPU_Event_Status[Lr]=0 then set
	 *       CSA.SPU_Event_Status_Count=1.
	 */
	ch0_cnt = csa->spu_chnlcnt_RW[0];
	ch0_data = csa->spu_chnldata_RW[0];
	ch1_data = csa->spu_chnldata_RW[1];
	csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT;
	if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) &&
	    (ch1_data & MFC_LLR_LOST_EVENT)) {
		csa->spu_chnlcnt_RW[0] = 1;
	}
}

static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 58:
	 *    If the status of the CSA software decrementer
	 *    "wrapped" flag is set, OR in a '1' to
	 *    CSA.SPU_Event_Status[Tm].
	 */
	if (!(csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED))
		return;

	if ((csa->spu_chnlcnt_RW[0] == 0) &&
	    (csa->spu_chnldata_RW[1] & 0x20) &&
	    !(csa->spu_chnldata_RW[0] & 0x20))
		csa->spu_chnlcnt_RW[0] = 1;

	csa->spu_chnldata_RW[0] |= 0x20;
}

static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/* Restore, Step 59:
	 *    Restore the following CH: [0,3,4,24,25,27]
	 */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]);
		out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]);
		eieio();
	}
}

static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[3] = { 9UL, 21UL, 23UL };
	u64 ch_counts[3] = { 1UL, 16UL, 1UL };
	u64 idx;
	int i;

	/* Restore, Step 60:
	 *    Restore the following CH: [9,21,23].
	 */
	ch_counts[0] = 1UL;
	ch_counts[1] = csa->spu_chnlcnt_RW[21];
	ch_counts[2] = 1UL;
	for (i = 0; i < 3; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 61:
	 *    Restore the SPU_LSLR register from CSA.
	 */
	out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW);
	eieio();
}

static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 62:
	 *    Restore the SPU_Cfg register from CSA.
	 */
	out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW);
	eieio();
}

static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 63:
	 *    Restore PM_Trace_Tag_Wait_Mask from CSA.
	 *    Not performed by this implementation.
	 */
}

static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 64:
	 *    Restore SPU_NPC from CSA.
	 */
	out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW);
	eieio();
}

static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Restore, Step 65:
	 *    Restore MFC_RdSPU_MB from CSA.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
	eieio();
	out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]);
	for (i = 0; i < 4; i++) {
		out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]);
	}
	eieio();
}

static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 dummy = 0;

	/* Restore, Step 66:
	 *    If CSA.MB_Stat[P]=0 (mailbox empty) then
	 *    read from the PPU_MB register.
	 */
	if ((csa->prob.mb_stat_R & 0xFF) == 0) {
		dummy = in_be32(&prob->pu_mb_R);
		eieio();
	}
}

static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 dummy = 0UL;

	/* Restore, Step 67:
	 *    If CSA.MB_Stat[I]=0 (mailbox empty) then
	 *    read from the PPUINT_MB register.
	 */
	if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
		dummy = in_be64(&priv2->puint_mb_R);
		eieio();
		spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
	}
}

static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 69:
	 *    Restore the MFC_SR1 register from CSA.
	 */
	spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW);
	eieio();
}

static inline void restore_other_spu_access(struct spu_state *csa,
					    struct spu *spu)
{
	/* Restore, Step 70:
	 *    Restore other SPU mappings to this SPU.  TBD.
	 */
}

static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 71:
	 *    If CSA.SPU_Status[R]=1 then write
	 *    SPU_RunCntl[R0R1]='01'.
	 */
	if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
	}
}

static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 72:
	 *    Restore the MFC_CNTL register for the CSA.
	 */
	out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
	eieio();

	/*
	 * FIXME: this is to restart a DMA that we were processing
	 *        before the save.  Better to remember the fault
	 *        information in the csa instead.
	 */
	if ((csa->priv2.mfc_control_RW & MFC_CNTL_SUSPEND_DMA_QUEUE_MASK)) {
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
		eieio();
	}
}

static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 73:
	 *    Enable user-space access (if provided) to this
	 *    SPU by mapping the virtual pages assigned to
	 *    the SPU memory-mapped I/O (MMIO) for problem
	 *    state.  TBD.
	 */
}

static inline void reset_switch_active(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 74:
	 *    Reset the "context switch active" flag.
	 *    Not performed by this implementation.
	 */
}

static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 75:
	 *    Re-enable SPU interrupts.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW);
	spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW);
	spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW);
	spin_unlock_irq(&spu->register_lock);
}

static int quiesce_spu(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Combined steps 2-18 of SPU context save sequence, which
	 * quiesce the SPU state (disable SPU execution, MFC command
	 * queues, decrementer, SPU interrupts, etc.).
	 *
	 * Returns      0 on success.
	 *              2 if failed step 2.
	 *              6 if failed step 6.
	 */

	if (check_spu_isolate(prev, spu)) {	/* Step 2. */
		return 2;
	}
	disable_interrupts(prev, spu);	        /* Step 3. */
	set_watchdog_timer(prev, spu);	        /* Step 4. */
	inhibit_user_access(prev, spu);	        /* Step 5. */
	if (check_spu_isolate(prev, spu)) {	/* Step 6. */
		return 6;
	}
	set_switch_pending(prev, spu);	        /* Step 7. */
	save_mfc_cntl(prev, spu);		/* Step 8. */
	save_spu_runcntl(prev, spu);	        /* Step 9. */
	save_mfc_sr1(prev, spu);	        /* Step 10. */
	save_spu_status(prev, spu);	        /* Step 11. */
	save_mfc_decr(prev, spu);	        /* Step 12. */
	halt_mfc_decr(prev, spu);	        /* Step 13. */
	save_timebase(prev, spu);		/* Step 14. */
	remove_other_spu_access(prev, spu);	/* Step 15. */
	do_mfc_mssync(prev, spu);	        /* Step 16. */
	issue_mfc_tlbie(prev, spu);	        /* Step 17. */
	handle_pending_interrupts(prev, spu);	/* Step 18. */

	return 0;
}

static void save_csa(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Combine steps 19-44 of SPU context save sequence, which
	 * save regions of the privileged & problem state areas.
	 */

	save_mfc_queues(prev, spu);	/* Step 19. */
	save_ppu_querymask(prev, spu);	/* Step 20. */
	save_ppu_querytype(prev, spu);	/* Step 21. */
	save_ppu_tagstatus(prev, spu);	/* NEW.     */
	save_mfc_csr_tsq(prev, spu);	/* Step 22. */
	save_mfc_csr_cmd(prev, spu);	/* Step 23. */
	save_mfc_csr_ato(prev, spu);	/* Step 24. */
	save_mfc_tclass_id(prev, spu);	/* Step 25. */
	set_mfc_tclass_id(prev, spu);	/* Step 26. */
	purge_mfc_queue(prev, spu);	/* Step 27. */
	wait_purge_complete(prev, spu);	/* Step 28. */
	setup_mfc_sr1(prev, spu);	/* Step 30. */
	save_spu_npc(prev, spu);	/* Step 31. */
	save_spu_privcntl(prev, spu);	/* Step 32. */
	reset_spu_privcntl(prev, spu);	/* Step 33. */
	save_spu_lslr(prev, spu);	/* Step 34. */
	reset_spu_lslr(prev, spu);	/* Step 35. */
	save_spu_cfg(prev, spu);	/* Step 36. */
	save_pm_trace(prev, spu);	/* Step 37. */
	save_mfc_rag(prev, spu);	/* Step 38. */
	save_ppu_mb_stat(prev, spu);	/* Step 39. */
	save_ppu_mb(prev, spu);	        /* Step 40. */
	save_ppuint_mb(prev, spu);	/* Step 41. */
	save_ch_part1(prev, spu);	/* Step 42. */
	save_spu_mb(prev, spu);	        /* Step 43. */
	save_mfc_cmd(prev, spu);	/* Step 44. */
	reset_ch(prev, spu);	        /* Step 45. */
}

static void save_lscsa(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Perform steps 46-57 of SPU context save sequence,
	 * which save regions of the local store and register
	 * file.
	 */

	resume_mfc_queue(prev, spu);	/* Step 46. */
	/* Step 47. */
	setup_mfc_slbs(prev, spu, spu_save_code, sizeof(spu_save_code));
	set_switch_active(prev, spu);	/* Step 48. */
	enable_interrupts(prev, spu);	/* Step 49. */
	save_ls_16kb(prev, spu);	/* Step 50. */
	set_spu_npc(prev, spu);	        /* Step 51. */
	set_signot1(prev, spu);		/* Step 52. */
	set_signot2(prev, spu);		/* Step 53. */
	send_save_code(prev, spu);	/* Step 54. */
	set_ppu_querymask(prev, spu);	/* Step 55. */
	wait_tag_complete(prev, spu);	/* Step 56. */
	wait_spu_stopped(prev, spu);	/* Step 57. */
}

static void force_spu_isolate_exit(struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Stop SPE execution and wait for completion. */
	out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
	eieio();
	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);

	/* Restart SPE master runcntl. */
	spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK);
	eieio();

	/* Initiate isolate exit request and wait for completion. */
	out_be64(&priv2->spu_privcntl_RW, 4LL);
	eieio();
	out_be32(&prob->spu_runcntl_RW, 2);
	eieio();
	POLL_WHILE_FALSE((in_be32(&prob->spu_status_R)
			  & SPU_STATUS_STOPPED_BY_STOP));

	/* Reset load request to normal. */
	out_be64(&priv2->spu_privcntl_RW, SPU_PRIVCNT_LOAD_REQUEST_NORMAL);
	eieio();
}

/**
 * stop_spu_isolate
 *    Check SPU run-control state and force the isolate
 *    exit function as necessary.
 */
static void stop_spu_isolate(struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE) {
		/* The SPU is in isolated state; the only way
		 * to get it out is to perform an isolated
		 * exit (clean) operation.
		 */
		force_spu_isolate_exit(spu);
	}
}

static void harvest(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Perform steps 2-25 of SPU context restore sequence,
	 * which resets an SPU either after a failed save, or
	 * when using an SPU for the first time.
	 */

	disable_interrupts(prev, spu);	        /* Step 2.  */
	inhibit_user_access(prev, spu);	        /* Step 3.  */
	terminate_spu_app(prev, spu);	        /* Step 4.  */
	set_switch_pending(prev, spu);	        /* Step 5.  */
	stop_spu_isolate(spu);			/* NEW.     */
	remove_other_spu_access(prev, spu);	/* Step 6.  */
	suspend_mfc_and_halt_decr(prev, spu);	/* Step 7.  */
	wait_suspend_mfc_complete(prev, spu);	/* Step 8.  */
	if (!suspend_spe(prev, spu))	        /* Step 9.  */
		clear_spu_status(prev, spu);	/* Step 10. */
	do_mfc_mssync(prev, spu);	        /* Step 11. */
	issue_mfc_tlbie(prev, spu);	        /* Step 12. */
	handle_pending_interrupts(prev, spu);	/* Step 13. */
	purge_mfc_queue(prev, spu);	        /* Step 14. */
	wait_purge_complete(prev, spu);	        /* Step 15. */
	reset_spu_privcntl(prev, spu);	        /* Step 16. */
	reset_spu_lslr(prev, spu);	        /* Step 17. */
	setup_mfc_sr1(prev, spu);	        /* Step 18. */
	spu_invalidate_slbs(spu);		/* Step 19. */
	reset_ch_part1(prev, spu);	        /* Step 20. */
	reset_ch_part2(prev, spu);	        /* Step 21. */
	enable_interrupts(prev, spu);	        /* Step 22. */
	set_switch_active(prev, spu);	        /* Step 23. */
	set_mfc_tclass_id(prev, spu);	        /* Step 24. */
	resume_mfc_queue(prev, spu);	        /* Step 25. */
}

static void restore_lscsa(struct spu_state *next, struct spu *spu)
{
	/*
	 * Perform steps 26-40 of SPU context restore sequence,
	 * which restores regions of the local store and register
	 * file.
	 */

	set_watchdog_timer(next, spu);	        /* Step 26. */
	setup_spu_status_part1(next, spu);	/* Step 27. */
	setup_spu_status_part2(next, spu);	/* Step 28. */
	restore_mfc_rag(next, spu);	        /* Step 29. */
	/* Step 30. */
	setup_mfc_slbs(next, spu, spu_restore_code, sizeof(spu_restore_code));
	set_spu_npc(next, spu);	                /* Step 31. */
	set_signot1(next, spu);	                /* Step 32. */
	set_signot2(next, spu);	                /* Step 33. */
	setup_decr(next, spu);	                /* Step 34. */
	setup_ppu_mb(next, spu);	        /* Step 35. */
	setup_ppuint_mb(next, spu);	        /* Step 36. */
	send_restore_code(next, spu);	        /* Step 37. */
	set_ppu_querymask(next, spu);	        /* Step 38. */
	wait_tag_complete(next, spu);	        /* Step 39. */
	wait_spu_stopped(next, spu);	        /* Step 40. */
}

static void restore_csa(struct spu_state *next, struct spu *spu)
{
	/*
	 * Combine steps 41-76 of SPU context restore sequence, which
	 * restore regions of the privileged & problem state areas.
	 */

	restore_spu_privcntl(next, spu);	/* Step 41. */
	restore_status_part1(next, spu);	/* Step 42. */
	restore_status_part2(next, spu);	/* Step 43. */
	restore_ls_16kb(next, spu);	        /* Step 44. */
	wait_tag_complete(next, spu);	        /* Step 45. */
	suspend_mfc(next, spu);	                /* Step 46. */
	wait_suspend_mfc_complete(next, spu);	/* Step 47. */
	issue_mfc_tlbie(next, spu);	        /* Step 48. */
	clear_interrupts(next, spu);	        /* Step 49. */
	restore_mfc_queues(next, spu);	        /* Step 50. */
	restore_ppu_querymask(next, spu);	/* Step 51. */
	restore_ppu_querytype(next, spu);	/* Step 52. */
	restore_mfc_csr_tsq(next, spu);	        /* Step 53. */
	restore_mfc_csr_cmd(next, spu);	        /* Step 54. */
	restore_mfc_csr_ato(next, spu);	        /* Step 55. */
	restore_mfc_tclass_id(next, spu);	/* Step 56. */
	set_llr_event(next, spu);	        /* Step 57. */
	restore_decr_wrapped(next, spu);	/* Step 58. */
	restore_ch_part1(next, spu);	        /* Step 59. */
	restore_ch_part2(next, spu);	        /* Step 60. */
	restore_spu_lslr(next, spu);	        /* Step 61. */
	restore_spu_cfg(next, spu);	        /* Step 62. */
	restore_pm_trace(next, spu);	        /* Step 63. */
	restore_spu_npc(next, spu);	        /* Step 64. */
	restore_spu_mb(next, spu);	        /* Step 65. */
	check_ppu_mb_stat(next, spu);	        /* Step 66. */
	check_ppuint_mb_stat(next, spu);	/* Step 67. */
	spu_invalidate_slbs(spu);		/* Modified Step 68. */
	restore_mfc_sr1(next, spu);	        /* Step 69. */
	restore_other_spu_access(next, spu);	/* Step 70. */
	restore_spu_runcntl(next, spu);	        /* Step 71. */
	restore_mfc_cntl(next, spu);	        /* Step 72. */
	enable_user_access(next, spu);	        /* Step 73. */
	reset_switch_active(next, spu);	        /* Step 74. */
	reenable_interrupts(next, spu);	        /* Step 75. */
}

static int __do_spu_save(struct spu_state *prev, struct spu *spu)
{
	int rc;

	/*
	 * SPU context save can be broken into three phases:
	 *
	 *     (a) quiesce [steps 2-16].
	 *     (b) save of CSA, performed by PPE [steps 17-42].
	 *     (c) save of LSCSA, mostly performed by SPU [steps 43-52].
	 *
	 * Returns      0 on success.
	 *              2,6 if failed to quiesce SPU.
	 *              53 if SPU-side of save failed.
	 */

	rc = quiesce_spu(prev, spu);	        /* Steps 2-16. */
	switch (rc) {
	default:
	case 2:
	case 6:
		harvest(prev, spu);
		return rc;
	case 0:
		break;
	}
	save_csa(prev, spu);	                /* Steps 17-43. */
	save_lscsa(prev, spu);	                /* Steps 44-53. */
	return check_save_status(prev, spu);	/* Step 54.     */
}

static int __do_spu_restore(struct spu_state *next, struct spu *spu)
{
	int rc;

	/*
	 * SPU context restore can be broken into three phases:
	 *
	 *    (a) harvest (or reset) SPU [steps 2-24].
	 *    (b) restore LSCSA [steps 25-40], mostly performed by SPU.
	 *    (c) restore CSA [steps 41-76], performed by PPE.
	 *
	 * The 'harvest' step is not performed here, but rather
	 * as needed below.
	 */

	restore_lscsa(next, spu);	        /* Steps 24-39. */
	rc = check_restore_status(next, spu);	/* Step 40.     */
	switch (rc) {
	default:
		/* Failed. Return now. */
		return rc;
	case 0:
		/* Fall through to next step. */
		break;
	}
	restore_csa(next, spu);

	return 0;
}

/**
 * spu_save - SPU context save, with locking.
 * @prev: pointer to SPU context save area, to be saved.
 * @spu: pointer to SPU iomem structure.
 *
 * Acquire locks, perform the save operation then return.
 */
int spu_save(struct spu_state *prev, struct spu *spu)
{
	int rc;

	acquire_spu_lock(spu);	        /* Step 1.     */
	rc = __do_spu_save(prev, spu);	/* Steps 2-53. */
	release_spu_lock(spu);
	if (rc != 0 && rc != 2 && rc != 6) {
		panic("%s failed on SPU[%d], rc=%d.\n",
		      __func__, spu->number, rc);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(spu_save);

/**
 * spu_restore - SPU context restore, with harvest and locking.
 * @new: pointer to SPU context save area, to be restored.
 * @spu: pointer to SPU iomem structure.
 *
 * Perform harvest + restore, as we may not be coming
 * from a previous successful save operation, and the
 * hardware state is unknown.
 */
int spu_restore(struct spu_state *new, struct spu *spu)
{
	int rc;

	acquire_spu_lock(spu);
	harvest(NULL, spu);
	spu->slb_replace = 0;
	rc = __do_spu_restore(new, spu);
	release_spu_lock(spu);
	if (rc) {
		panic("%s failed on SPU[%d] rc=%d.\n",
		      __func__, spu->number, rc);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(spu_restore);

static void init_prob(struct spu_state *csa)
{
	csa->spu_chnlcnt_RW[9] = 1;
	csa->spu_chnlcnt_RW[21] = 16;
	csa->spu_chnlcnt_RW[23] = 1;
	csa->spu_chnlcnt_RW[28] = 1;
	csa->spu_chnlcnt_RW[30] = 1;
	csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP;
	csa->prob.mb_stat_R = 0x000400;
}

static void init_priv1(struct spu_state *csa)
{
	/* Enable decode, relocate, tlbie response, master runcntl. */
	csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK |
	    MFC_STATE1_MASTER_RUN_CONTROL_MASK |
	    MFC_STATE1_PROBLEM_STATE_MASK |
	    MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;

	/* Enable OS-specific set of interrupts. */
	csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |
	    CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |
	    CLASS0_ENABLE_SPU_ERROR_INTR;
	csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
	    CLASS1_ENABLE_STORAGE_FAULT_INTR;
	csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR |
	    CLASS2_ENABLE_SPU_HALT_INTR |
	    CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR;
}

static void init_priv2(struct spu_state *csa)
{
	csa->priv2.spu_lslr_RW = LS_ADDR_MASK;
	csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE |
	    MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION |
	    MFC_CNTL_DMA_QUEUES_EMPTY_MASK;
}

/**
 * spu_init_csa - allocate and initialize an SPU context save area.
 *
 * Allocate and initialize the contents of an SPU context save area.
 * This includes enabling address translation, interrupt masks, etc.,
 * as appropriate for the given OS environment.
 *
 * Note that storage for the 'lscsa' is allocated separately,
 * as it is by far the largest of the context save regions,
 * and may need to be pinned or otherwise specially aligned.
 */
int spu_init_csa(struct spu_state *csa)
{
	int rc;

	if (!csa)
		return -EINVAL;
	memset(csa, 0, sizeof(struct spu_state));

	rc = spu_alloc_lscsa(csa);
	if (rc)
		return rc;

	spin_lock_init(&csa->register_lock);

	init_prob(csa);
	init_priv1(csa);
	init_priv2(csa);

	return 0;
}

void spu_fini_csa(struct spu_state *csa)
{
	spu_free_lscsa(csa);
}
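
/*
 * Usage sketch (illustrative only; 'ctx' and 'spu' stand for a
 * caller-provided context save area and physical SPU): the exported
 * entry points above pair up roughly as follows.
 *
 *	struct spu_state ctx;
 *
 *	if (spu_init_csa(&ctx))		- allocate CSA + LSCSA
 *		goto fail;
 *	...
 *	spu_save(&ctx, spu);		- preemptively save context
 *	...
 *	spu_restore(&ctx, spu);		- harvest SPU, then restore
 *	...
 *	spu_fini_csa(&ctx);		- free LSCSA storage
 */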