arch/powerpc/platforms/cell/spufs/hw_ops.c
/* hw_ops.c - query/set operations on active SPU context.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>
#include "spufs.h"

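/*
 * mb_stat_R packs three byte-wide counters: the low byte is the number
 * of entries pending in the SPU outbound mailbox, bits 8-15 give the
 * free space in the inbound (wbox) mailbox, and bits 16-23 count the
 * entries in the outbound interrupt (ibox) mailbox.
 */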
static int spu_hw_mbox_read(struct spu_context *ctx, u32 *data)
{
        struct spu *spu = ctx->spu;
        struct spu_problem __iomem *prob = spu->problem;
        u32 mbox_stat;
        int ret = 0;

        spin_lock_irq(&spu->register_lock);
        mbox_stat = in_be32(&prob->mb_stat_R);
        if (mbox_stat & 0x0000ff) {
                *data = in_be32(&prob->pu_mb_R);
                ret = 4;
        }
        spin_unlock_irq(&spu->register_lock);
        return ret;
}

static u32 spu_hw_mbox_stat_read(struct spu_context *ctx)
{
        return in_be32(&ctx->spu->problem->mb_stat_R);
}

static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
                                          unsigned int events)
{
        struct spu *spu = ctx->spu;
        int ret = 0;
        u32 stat;

        spin_lock_irq(&spu->register_lock);
        stat = in_be32(&spu->problem->mb_stat_R);

        /* if the requested event is there, return the poll
           mask, otherwise enable the interrupt to get notified,
           but first mark any pending interrupts as done so
           we don't get woken up unnecessarily */

        if (events & (POLLIN | POLLRDNORM)) {
                if (stat & 0xff0000)
                        ret |= POLLIN | POLLRDNORM;
                else {
                        spu_int_stat_clear(spu, 2, CLASS2_MAILBOX_INTR);
                        spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
                }
        }
        if (events & (POLLOUT | POLLWRNORM)) {
                if (stat & 0x00ff00)
                        ret |= POLLOUT | POLLWRNORM;
                else {
                        spu_int_stat_clear(spu, 2,
                                        CLASS2_MAILBOX_THRESHOLD_INTR);
                        spu_int_mask_or(spu, 2,
                                        CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR);
                }
        }
        spin_unlock_irq(&spu->register_lock);
        return ret;
}

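/*
 * The ibox is the SPU outbound interrupt mailbox; it is drained through
 * the privileged puint_mb_R register.  When it is empty, unmask the
 * class 2 mailbox interrupt so the reader gets woken once data arrives.
 */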
static int spu_hw_ibox_read(struct spu_context *ctx, u32 *data)
{
        struct spu *spu = ctx->spu;
        struct spu_problem __iomem *prob = spu->problem;
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        int ret;

        spin_lock_irq(&spu->register_lock);
        if (in_be32(&prob->mb_stat_R) & 0xff0000) {
                /* read the first available word */
                *data = in_be64(&priv2->puint_mb_R);
                ret = 4;
        } else {
                /* make sure we get woken up by the interrupt */
                spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
                ret = 0;
        }
        spin_unlock_irq(&spu->register_lock);
        return ret;
}

static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
{
        struct spu *spu = ctx->spu;
        struct spu_problem __iomem *prob = spu->problem;
        int ret;

        spin_lock_irq(&spu->register_lock);
        if (in_be32(&prob->mb_stat_R) & 0x00ff00) {
                /* we have space to write wbox_data to */
                out_be32(&prob->spu_mb_W, data);
                ret = 4;
        } else {
                /* make sure we get woken up by the interrupt when space
                   becomes available */
                spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR);
                ret = 0;
        }
        spin_unlock_irq(&spu->register_lock);
        return ret;
}

static void spu_hw_signal1_write(struct spu_context *ctx, u32 data)
{
        out_be32(&ctx->spu->problem->signal_notify1, data);
}

static void spu_hw_signal2_write(struct spu_context *ctx, u32 data)
{
        out_be32(&ctx->spu->problem->signal_notify2, data);
}

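/*
 * The two low-order bits of spu_cfg_RW select the type of the signal
 * notification registers 1 and 2: when a bit is set, writes to the
 * corresponding register are OR'ed into it instead of overwriting it.
 */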
static void spu_hw_signal1_type_set(struct spu_context *ctx, u64 val)
{
        struct spu *spu = ctx->spu;
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 tmp;

        spin_lock_irq(&spu->register_lock);
        tmp = in_be64(&priv2->spu_cfg_RW);
        if (val)
                tmp |= 1;
        else
                tmp &= ~1;
        out_be64(&priv2->spu_cfg_RW, tmp);
        spin_unlock_irq(&spu->register_lock);
}

static u64 spu_hw_signal1_type_get(struct spu_context *ctx)
{
        return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 1) != 0);
}

static void spu_hw_signal2_type_set(struct spu_context *ctx, u64 val)
{
        struct spu *spu = ctx->spu;
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 tmp;

        spin_lock_irq(&spu->register_lock);
        tmp = in_be64(&priv2->spu_cfg_RW);
        if (val)
                tmp |= 2;
        else
                tmp &= ~2;
        out_be64(&priv2->spu_cfg_RW, tmp);
        spin_unlock_irq(&spu->register_lock);
}

static u64 spu_hw_signal2_type_get(struct spu_context *ctx)
{
        return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 2) != 0);
}

static u32 spu_hw_npc_read(struct spu_context *ctx)
{
        return in_be32(&ctx->spu->problem->spu_npc_RW);
}

static void spu_hw_npc_write(struct spu_context *ctx, u32 val)
{
        out_be32(&ctx->spu->problem->spu_npc_RW, val);
}

static u32 spu_hw_status_read(struct spu_context *ctx)
{
        return in_be32(&ctx->spu->problem->spu_status_R);
}

static char *spu_hw_get_ls(struct spu_context *ctx)
{
        return ctx->spu->local_store;
}

static void spu_hw_privcntl_write(struct spu_context *ctx, u64 val)
{
        out_be64(&ctx->spu->priv2->spu_privcntl_RW, val);
}

static u32 spu_hw_runcntl_read(struct spu_context *ctx)
{
        return in_be32(&ctx->spu->problem->spu_runcntl_RW);
}

static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
{
        spin_lock_irq(&ctx->spu->register_lock);
        if (val & SPU_RUNCNTL_ISOLATE)
                spu_hw_privcntl_write(ctx,
                        SPU_PRIVCNT_LOAD_REQUEST_ENABLE_MASK);
        out_be32(&ctx->spu->problem->spu_runcntl_RW, val);
        spin_unlock_irq(&ctx->spu->register_lock);
}

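/*
 * Request a stop through the run control register and busy-wait (with
 * the register lock held and interrupts off) until the status register
 * no longer reports the SPU as running.
 */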
static void spu_hw_runcntl_stop(struct spu_context *ctx)
{
        spin_lock_irq(&ctx->spu->register_lock);
        out_be32(&ctx->spu->problem->spu_runcntl_RW, SPU_RUNCNTL_STOP);
        while (in_be32(&ctx->spu->problem->spu_status_R) & SPU_STATUS_RUNNING)
                cpu_relax();
        spin_unlock_irq(&ctx->spu->register_lock);
}

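/*
 * The master run control bit in MFC state register one gates SPU
 * execution independently of spu_runcntl_RW: with the bit cleared the
 * SPU stays stopped no matter what was written to the run control
 * register.
 */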
static void spu_hw_master_start(struct spu_context *ctx)
{
        struct spu *spu = ctx->spu;
        u64 sr1;

        spin_lock_irq(&spu->register_lock);
        sr1 = spu_mfc_sr1_get(spu) | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
        spu_mfc_sr1_set(spu, sr1);
        spin_unlock_irq(&spu->register_lock);
}

static void spu_hw_master_stop(struct spu_context *ctx)
{
        struct spu *spu = ctx->spu;
        u64 sr1;

        spin_lock_irq(&spu->register_lock);
        sr1 = spu_mfc_sr1_get(spu) & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
        spu_mfc_sr1_set(spu, sr1);
        spin_unlock_irq(&spu->register_lock);
}

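/*
 * Start a DMA tag-group status query.  Only one query can be in flight
 * at a time, so bail out with -EAGAIN while dma_querytype_RW still
 * holds a previous request.
 */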
static int spu_hw_set_mfc_query(struct spu_context *ctx, u32 mask, u32 mode)
{
        struct spu_problem __iomem *prob = ctx->spu->problem;
        int ret;

        spin_lock_irq(&ctx->spu->register_lock);
        ret = -EAGAIN;
        if (in_be32(&prob->dma_querytype_RW))
                goto out;
        ret = 0;
        out_be32(&prob->dma_querymask_RW, mask);
        out_be32(&prob->dma_querytype_RW, mode);
out:
        spin_unlock_irq(&ctx->spu->register_lock);
        return ret;
}

static u32 spu_hw_read_mfc_tagstatus(struct spu_context *ctx)
{
        return in_be32(&ctx->spu->problem->dma_tagstatus_R);
}

static u32 spu_hw_get_mfc_free_elements(struct spu_context *ctx)
{
        return in_be32(&ctx->spu->problem->dma_qstatus_R);
}

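/*
 * Queue a DMA command through the problem-state MFC registers.  Writing
 * the class/command word enqueues the command, and reading the same
 * offset back returns the command issue status, which the switch below
 * maps to 0 (accepted), -EAGAIN (retry later) or -EINVAL.
 */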
static int spu_hw_send_mfc_command(struct spu_context *ctx,
                                        struct mfc_dma_command *cmd)
{
        u32 status;
        struct spu_problem __iomem *prob = ctx->spu->problem;

        spin_lock_irq(&ctx->spu->register_lock);
        out_be32(&prob->mfc_lsa_W, cmd->lsa);
        out_be64(&prob->mfc_ea_W, cmd->ea);
        out_be32(&prob->mfc_union_W.by32.mfc_size_tag32,
                                cmd->size << 16 | cmd->tag);
        out_be32(&prob->mfc_union_W.by32.mfc_class_cmd32,
                                cmd->class << 16 | cmd->cmd);
        status = in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
        spin_unlock_irq(&ctx->spu->register_lock);

        switch (status & 0xffff) {
        case 0:
                return 0;
        case 2:
                return -EAGAIN;
        default:
                return -EINVAL;
        }
}

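/*
 * Restart a suspended MFC DMA command, e.g. after a fault has been
 * handled.  Skip the restart while a context switch is pending; the
 * switch code deals with the MFC control register itself.
 */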
static void spu_hw_restart_dma(struct spu_context *ctx)
{
        struct spu_priv2 __iomem *priv2 = ctx->spu->priv2;

        if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &ctx->spu->flags))
                out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

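/*
 * Operations used while a context is loaded on a physical SPU and its
 * registers can be touched directly; spufs switches to the backing_ops
 * variants once the context has been saved to memory.
 */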
struct spu_context_ops spu_hw_ops = {
        .mbox_read = spu_hw_mbox_read,
        .mbox_stat_read = spu_hw_mbox_stat_read,
        .mbox_stat_poll = spu_hw_mbox_stat_poll,
        .ibox_read = spu_hw_ibox_read,
        .wbox_write = spu_hw_wbox_write,
        .signal1_write = spu_hw_signal1_write,
        .signal2_write = spu_hw_signal2_write,
        .signal1_type_set = spu_hw_signal1_type_set,
        .signal1_type_get = spu_hw_signal1_type_get,
        .signal2_type_set = spu_hw_signal2_type_set,
        .signal2_type_get = spu_hw_signal2_type_get,
        .npc_read = spu_hw_npc_read,
        .npc_write = spu_hw_npc_write,
        .status_read = spu_hw_status_read,
        .get_ls = spu_hw_get_ls,
        .privcntl_write = spu_hw_privcntl_write,
        .runcntl_read = spu_hw_runcntl_read,
        .runcntl_write = spu_hw_runcntl_write,
        .runcntl_stop = spu_hw_runcntl_stop,
        .master_start = spu_hw_master_start,
        .master_stop = spu_hw_master_stop,
        .set_mfc_query = spu_hw_set_mfc_query,
        .read_mfc_tagstatus = spu_hw_read_mfc_tagstatus,
        .get_mfc_free_elements = spu_hw_get_mfc_free_elements,
        .send_mfc_command = spu_hw_send_mfc_command,
        .restart_dma = spu_hw_restart_dma,
};