/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"
#define SPU_MIN_TIMESLICE	(100 * HZ / 1000)

#define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1)
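/*
 * The priority bitmap holds MAX_PRIO + 1 bits: one per priority level plus
 * a sentinel bit at MAX_PRIO that stays set, so sched_find_first_bit()
 * returns MAX_PRIO when no context is waiting (see spu_sched_init()).
 */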
struct spu_prio_array {
	unsigned long bitmap[SPU_BITMAP_SIZE];
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	struct list_head active_list[MAX_NUMNODES];
	struct mutex active_mutex[MAX_NUMNODES];
};

static struct spu_prio_array *spu_prio;
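/*
 * node_allowed() is a NUMA affinity check: a node is usable only if it has
 * CPUs and the calling task's cpus_allowed mask intersects them.
 */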
static inline int node_allowed(int node)
{
	cpumask_t mask;

	if (!nr_cpus_node(node))
		return 0;
	mask = node_to_cpumask(node);
	if (!cpus_intersects(mask, current->cpus_allowed))
		return 0;
	return 1;
}
/**
 * spu_add_to_active_list - add spu to active list
 * @spu:	spu to add to the active list
 */
static void spu_add_to_active_list(struct spu *spu)
{
	mutex_lock(&spu_prio->active_mutex[spu->node]);
	list_add_tail(&spu->list, &spu_prio->active_list[spu->node]);
	mutex_unlock(&spu_prio->active_mutex[spu->node]);
}
/**
 * spu_remove_from_active_list - remove spu from active list
 * @spu:	spu to remove from the active list
 *
 * This function removes an spu from the active list.  If the spu was
 * found on the active list the function returns 1, else it doesn't do
 * anything and returns 0.
 */
static int spu_remove_from_active_list(struct spu *spu)
{
	int node = spu->node;
	struct spu *tmp;
	int rc = 0;

	mutex_lock(&spu_prio->active_mutex[node]);
	list_for_each_entry(tmp, &spu_prio->active_list[node], list) {
		if (tmp == spu) {
			list_del_init(&spu->list);
			rc = 1;
			break;
		}
	}
	mutex_unlock(&spu_prio->active_mutex[node]);
	return rc;
}
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs: marking every possible
	 * CPU in cpu_vm_mask keeps the mm from ever looking CPU-local, so
	 * TLB invalidations are always broadcast. */
	__cpus_setall(&mm->cpu_vm_mask, nr);
}
static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
	blocking_notifier_call_chain(&spu_switch_notifier,
			    ctx ? ctx->object_id : 0, spu);
}

int spu_switch_event_register(struct notifier_block * n)
{
	return blocking_notifier_chain_register(&spu_switch_notifier, n);
}

int spu_switch_event_unregister(struct notifier_block * n)
{
	return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}
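/*
 * Illustrative sketch (not part of the original file): an external user such
 * as a profiler could subscribe to SPU context-switch events like this.  The
 * callback and notifier_block names below are hypothetical.
 *
 *	static int my_switch_cb(struct notifier_block *nb,
 *				unsigned long object_id, void *data)
 *	{
 *		struct spu *spu = data;
 *		// object_id identifies the incoming context, 0 on unbind
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_nb = { .notifier_call = my_switch_cb };
 *
 *	spu_switch_event_register(&my_nb);
 *	...
 *	spu_switch_event_unregister(&my_nb);
 */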
/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
		 spu->number, spu->node);
	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->mm = ctx->owner;
	mm_needs_global_tlbie(spu->mm);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spu->dma_callback = spufs_dma_callback;
	mb();
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	spu_switch_notify(spu, ctx);
	spu_add_to_active_list(spu);
	ctx->state = SPU_STATE_RUNNABLE;
}
/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 *
 * If the spu was on the active list the function returns 1, else 0.
 */
static int spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	int was_active = spu_remove_from_active_list(spu);

	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
		 spu->pid, spu->number, spu->node);

	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->dma_callback = NULL;
	spu->mm = NULL;
	spu->pid = 0;
	ctx->ops = &spu_backing_ops;
	ctx->spu = NULL;
	spu->flags = 0;
	spu->ctx = NULL;

	return was_active;
}
/**
 * spu_add_to_rq - add a context to the runqueue
 * @ctx:	context to add
 */
static void spu_add_to_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
	set_bit(ctx->prio, spu_prio->bitmap);
	spin_unlock(&spu_prio->runq_lock);
}
/**
 * spu_del_from_rq - remove a context from the runqueue
 * @ctx:	context to remove
 */
static void spu_del_from_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	list_del_init(&ctx->rq);
	if (list_empty(&spu_prio->runq[ctx->prio]))
		clear_bit(ctx->prio, spu_prio->bitmap);
	spin_unlock(&spu_prio->runq_lock);
}
/**
 * spu_grab_context - remove one context from the runqueue
 * @prio:	priority of the context to be removed
 *
 * This function removes one context from the runqueue for priority @prio.
 * If there is more than one context with the given priority the first
 * task on the runqueue will be taken.
 *
 * Returns the spu_context it just removed.
 *
 * Must be called with spu_prio->runq_lock held.
 */
static struct spu_context *spu_grab_context(int prio)
{
	struct list_head *rq = &spu_prio->runq[prio];

	if (list_empty(rq))
		return NULL;
	return list_entry(rq->next, struct spu_context, rq);
}
static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		/* Drop the context state mutex while sleeping;
		 * spu_reschedule() wakes us via ctx->stop_wq once an
		 * spu becomes free. */
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}
/**
 * spu_reschedule - try to find a runnable context for a spu
 * @spu:	spu available
 *
 * This function is called whenever a spu becomes idle.  It looks for the
 * most suitable runnable spu context and schedules it for execution.
 */
static void spu_reschedule(struct spu *spu)
{
	int best;

	spu_free(spu);

	spin_lock(&spu_prio->runq_lock);
	best = sched_find_first_bit(spu_prio->bitmap);
	if (best < MAX_PRIO) {
		struct spu_context *ctx = spu_grab_context(best);
		if (ctx)
			wake_up(&ctx->stop_wq);
	}
	spin_unlock(&spu_prio->runq_lock);
}
static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu = NULL;
	int node = cpu_to_node(raw_smp_processor_id());
	int n;

	/* walk the nodes round-robin, starting at the local node */
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(node))
			continue;
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}
	return spu;
}
/* The three externally callable interfaces
 * for the scheduler begin here.
 *
 *	spu_activate	- bind a context to SPU, waiting as needed.
 *	spu_deactivate	- unbind a context from its SPU.
 *	spu_yield	- yield an SPU if others are waiting.
 */
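/*
 * Rough calling sketch (an assumption about the spufs callers, not code in
 * this file):
 *
 *	if (spu_activate(ctx, 0))	// sleeps on the runqueue if needed
 *		return -ERESTARTSYS;	// interrupted by a signal
 *	... let the context run ...
 *	spu_yield(ctx);			// give the SPU up if others wait
 *	...
 *	spu_deactivate(ctx);		// unbind, e.g. when saving the context
 */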
/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, u64 flags)
{
	if (ctx->spu)
		return 0;

	do {
		struct spu *spu;

		spu = spu_get_idle(ctx);
		if (spu) {
			spu_bind_context(spu, ctx);
			return 0;
		}

		/* no spu free: queue ourselves and sleep until one is */
		spu_add_to_rq(ctx);
		spu_prio_wait(ctx);
		spu_del_from_rq(ctx);
	} while (!signal_pending(current));

	return -ERESTARTSYS;
}
void spu_deactivate(struct spu_context *ctx)
{
	struct spu *spu = ctx->spu;
	int was_active;

	if (!spu)
		return;

	was_active = spu_unbind_context(spu, ctx);
	if (was_active)
		spu_reschedule(spu);
}
void spu_yield(struct spu_context *ctx)
{
	struct spu *spu;
	int need_yield = 0;

	if (mutex_trylock(&ctx->state_mutex)) {
		if ((spu = ctx->spu) != NULL) {
			int best = sched_find_first_bit(spu_prio->bitmap);
			if (best < MAX_PRIO) {
				pr_debug("%s: yielding SPU %d NODE %d\n",
					 __FUNCTION__, spu->number, spu->node);
				spu_deactivate(ctx);
				need_yield = 1;
			}
		}
		mutex_unlock(&ctx->state_mutex);
	}
	if (unlikely(need_yield))
		yield();
}
int __init spu_sched_init(void)
{
	int i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio) {
		printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
		       __FUNCTION__);
		return 1;
	}
	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	/* sentinel bit for sched_find_first_bit() on an empty runqueue */
	__set_bit(MAX_PRIO, spu_prio->bitmap);
	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&spu_prio->active_mutex[i]);
		INIT_LIST_HEAD(&spu_prio->active_list[i]);
	}
	spin_lock_init(&spu_prio->runq_lock);
	return 0;
}
void __exit spu_sched_exit(void)
{
	struct spu *spu, *tmp;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
					 list) {
			list_del_init(&spu->list);
			spu_free(spu);
		}
		mutex_unlock(&spu_prio->active_mutex[node]);
	}
	kfree(spu_prio);
}