/*
 * sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * SPU scheduler, based on Linux thread priority.  For now use
 * a simple "cooperative" yield model with no preemption.  SPU
 * scheduling will eventually be preemptive: When a thread with
 * a higher static priority gets ready to run, then an active SPU
 * context will be preempted and returned to the waitq.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include "spufs.h"

#define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1)
struct spu_prio_array {
	atomic_t nr_blocked;
	unsigned long bitmap[SPU_BITMAP_SIZE];
	wait_queue_head_t waitq[MAX_PRIO];
};

/* spu_runqueue - This is the main runqueue data structure for SPUs. */
struct spu_runqueue {
	struct semaphore sem;
	unsigned long nr_active;
	unsigned long nr_idle;
	unsigned long nr_switches;
	struct list_head active_list;
	struct list_head idle_list;
	struct spu_prio_array prio;
};

static struct spu_runqueue *spu_runqueues = NULL;

static inline struct spu_runqueue *spu_rq(void)
{
	/* Future: make this a per-NODE array,
	 * and use cpu_to_node(smp_processor_id())
	 */
	return spu_runqueues;
}

static inline struct spu *del_idle(struct spu_runqueue *rq)
{
	struct spu *spu;

	BUG_ON(rq->nr_idle <= 0);
	BUG_ON(list_empty(&rq->idle_list));
	/* Future: Move SPU out of low-power SRI state. */
	spu = list_entry(rq->idle_list.next, struct spu, sched_list);
	list_del_init(&spu->sched_list);
	rq->nr_idle--;
	return spu;
}

static inline void del_active(struct spu_runqueue *rq, struct spu *spu)
{
	BUG_ON(rq->nr_active <= 0);
	BUG_ON(list_empty(&rq->active_list));
	list_del_init(&spu->sched_list);
	rq->nr_active--;
}

static inline void add_idle(struct spu_runqueue *rq, struct spu *spu)
{
	/* Future: Put SPU into low-power SRI state. */
	list_add_tail(&spu->sched_list, &rq->idle_list);
	rq->nr_idle++;
}

static inline void add_active(struct spu_runqueue *rq, struct spu *spu)
{
	rq->nr_active++;
	rq->nr_switches++;
	list_add_tail(&spu->sched_list, &rq->active_list);
}

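/*
 * prio_wakeup - if any thread is blocked waiting for an SPU and an idle
 * SPU is available, wake exactly one waiter at the best (lowest numbered)
 * waiting priority.
 */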
static void prio_wakeup(struct spu_runqueue *rq)
{
	if (atomic_read(&rq->prio.nr_blocked) && rq->nr_idle) {
		int best = sched_find_first_bit(rq->prio.bitmap);
		if (best < MAX_PRIO) {
			wait_queue_head_t *wq = &rq->prio.waitq[best];
			wake_up_interruptible_nr(wq, 1);
		}
	}
}

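/*
 * prio_wait - block the current thread on the wait queue for its priority
 * until an SPU frees up.  Both the context lock and the runqueue lock are
 * dropped around the schedule() and re-taken afterwards.
 */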
static void prio_wait(struct spu_runqueue *rq, struct spu_context *ctx,
		      u64 flags)
{
	int prio = current->prio;
	wait_queue_head_t *wq = &rq->prio.waitq[prio];
	DEFINE_WAIT(wait);

	__set_bit(prio, rq->prio.bitmap);
	atomic_inc(&rq->prio.nr_blocked);
	prepare_to_wait_exclusive(wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		up(&rq->sem);
		up_write(&ctx->state_sema);
		pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
			 current->pid, current->prio);
		schedule();
		down_write(&ctx->state_sema);
		down(&rq->sem);
	}
	finish_wait(wq, &wait);
	atomic_dec(&rq->prio.nr_blocked);
	if (!waitqueue_active(wq))
		__clear_bit(prio, rq->prio.bitmap);
}

static inline int is_best_prio(struct spu_runqueue *rq)
{
	int best_prio;

	best_prio = sched_find_first_bit(rq->prio.bitmap);
	return (current->prio < best_prio) ? 1 : 0;
}

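/*
 * mm_needs_global_tlbie - mark the mm as active on all CPUs so that TLB
 * invalidations are always broadcast (tlbie) rather than handled locally,
 * since the SPEs' translations are not tracked by cpu_vm_mask.
 */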
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	/* Global TLBIE broadcast required with SPEs. */
#if (NR_CPUS > 1)
	__cpus_setall(&mm->cpu_vm_mask, NR_CPUS);
#else
	__cpus_setall(&mm->cpu_vm_mask, NR_CPUS+1); /* is this ok? */
#endif
}

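/*
 * bind_context - load a saved context onto a physical SPU: wire up the
 * spufs callbacks, switch the context over to the hardware ops and restore
 * the SPU state from the context save area.
 */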
static inline void bind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: pid=%d SPU=%d\n", __FUNCTION__, current->pid,
		 spu->number);
	spu->ctx = ctx;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->prio = current->prio;
	spu->mm = ctx->owner;
	mm_needs_global_tlbie(spu->mm);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
}

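/*
 * unbind_context - save the SPU state back into the context save area and
 * detach the context from the physical SPU, switching it to backing ops.
 */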
static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: unbind pid=%d SPU=%d\n", __FUNCTION__,
		 spu->pid, spu->number);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mm = NULL;
	spu->pid = 0;
	spu->prio = MAX_PRIO;
	ctx->ops = &spu_backing_ops;
	ctx->spu = NULL;
	spu->ctx = NULL;
}

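/*
 * preempt_active - scan the active list for the worst (highest prio value)
 * context; if the current thread has better priority, stop and unbind that
 * context and hand its SPU to the caller.  Returns NULL if nothing could
 * be preempted.
 */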
static struct spu *preempt_active(struct spu_runqueue *rq)
{
	struct list_head *p;
	struct spu *worst, *spu;

	worst = list_entry(rq->active_list.next, struct spu, sched_list);
	list_for_each(p, &rq->active_list) {
		spu = list_entry(p, struct spu, sched_list);
		if (spu->prio > worst->prio) {
			worst = spu;
		}
	}
	if (current->prio < worst->prio) {
		struct spu_context *ctx = worst->ctx;

		spu = worst;
		if (down_write_trylock(&ctx->state_sema)) {
			pr_debug("%s: booting pid=%d from SPU %d\n",
				 __FUNCTION__, spu->pid, spu->number);
			del_active(rq, spu);
			up(&rq->sem);
			wake_up_all(&ctx->stop_wq);
			ctx->ops->runcntl_stop(ctx);
			unbind_context(spu, ctx);
			up_write(&ctx->state_sema);
			return spu;
		}
	}
	return NULL;
}

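/*
 * get_idle_spu - find an SPU for the current thread: grab an idle SPU if
 * we have the best waiting priority, preempt a lower-priority active
 * context otherwise, or sleep on our priority wait queue until one of the
 * two becomes possible.  Returns NULL if a signal is pending.
 */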
static struct spu *get_idle_spu(struct spu_context *ctx, u64 flags)
{
	struct spu_runqueue *rq;
	struct spu *spu = NULL;

	rq = spu_rq();
	down(&rq->sem);
	for (;;) {
		if (rq->nr_idle > 0) {
			if (is_best_prio(rq)) {
				spu = del_idle(rq);
				break;
			} else {
				prio_wakeup(rq);
				up(&rq->sem);
				yield();
				if (signal_pending(current)) {
					return NULL;
				}
				rq = spu_rq();
				down(&rq->sem);
				continue;
			}
		} else {
			if (is_best_prio(rq)) {
				if ((spu = preempt_active(rq)) != NULL)
					return spu;
			}
			prio_wait(rq, ctx, flags);
			if (signal_pending(current)) {
				prio_wakeup(rq);
				spu = NULL;
				break;
			}
			continue;
		}
	}
	up(&rq->sem);
	return spu;
}

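/*
 * put_idle_spu - return an SPU to the idle list and kick any thread that
 * has been waiting for one.
 */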
static void put_idle_spu(struct spu *spu)
{
	struct spu_runqueue *rq = spu->rq;

	down(&rq->sem);
	add_idle(rq, spu);
	prio_wakeup(rq);
	up(&rq->sem);
}

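/*
 * get_active_spu - remove the SPU from the active list if it is still
 * there; returns 1 if it was found and therefore needs to go back to the
 * idle list.
 */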
static int get_active_spu(struct spu *spu)
{
	struct spu_runqueue *rq = spu->rq;
	struct list_head *p;
	struct spu *tmp;
	int rc = 0;

	down(&rq->sem);
	list_for_each(p, &rq->active_list) {
		tmp = list_entry(p, struct spu, sched_list);
		if (tmp == spu) {
			del_active(rq, spu);
			rc = 1;
			break;
		}
	}
	up(&rq->sem);
	return rc;
}

static void put_active_spu(struct spu *spu)
{
	struct spu_runqueue *rq = spu->rq;

	down(&rq->sem);
	add_active(rq, spu);
	up(&rq->sem);
}

/*
 * spu_activate() & spu_deactivate() require the
 * caller to have down_write(&ctx->state_sema).
 *
 * The rq->sem is briefly held (inside or outside a
 * given ctx lock) for list management, but is never
 * held during save/restore.
 */

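/*
 * Illustrative caller pattern (a sketch only, not a verbatim copy of the
 * spufs run path): the context lock is taken before activation and is
 * still held while the context is bound.
 *
 *	down_write(&ctx->state_sema);
 *	if (ctx->state == SPU_STATE_SAVED)
 *		ret = spu_activate(ctx, 0);
 *	up_write(&ctx->state_sema);
 */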
int spu_activate(struct spu_context *ctx, u64 flags)
{
	struct spu *spu;

	if (ctx->spu)
		return 0;
	spu = get_idle_spu(ctx, flags);
	if (!spu)
		return (signal_pending(current)) ? -ERESTARTSYS : -EAGAIN;
	bind_context(spu, ctx);
	put_active_spu(spu);
	return 0;
}

void spu_deactivate(struct spu_context *ctx)
{
	struct spu *spu;
	int needs_idle;

	spu = ctx->spu;
	if (!spu)
		return;
	needs_idle = get_active_spu(spu);
	unbind_context(spu, ctx);
	if (needs_idle)
		put_idle_spu(spu);
}

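/*
 * spu_yield - cooperative yield: if any thread is waiting for an SPU at
 * any priority, give up this context's SPU so the scheduler can hand it
 * to a waiter, then yield the CPU as well.
 */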
void spu_yield(struct spu_context *ctx)
{
	struct spu *spu;
	int need_yield = 0;

	down_write(&ctx->state_sema);
	spu = ctx->spu;
	if (spu && (sched_find_first_bit(spu->rq->prio.bitmap) < MAX_PRIO)) {
		pr_debug("%s: yielding SPU %d\n", __FUNCTION__, spu->number);
		spu_deactivate(ctx);
		ctx->state = SPU_STATE_SAVED;
		need_yield = 1;
	}
	up_write(&ctx->state_sema);
	if (unlikely(need_yield))
		yield();
}

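/*
 * spu_sched_init - allocate and initialize the single global runqueue and
 * put every physical SPU obtained from spu_alloc() on its idle list.
 */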
int __init spu_sched_init(void)
{
	struct spu_runqueue *rq;
	struct spu *spu;
	int i;

	rq = spu_runqueues = kmalloc(sizeof(struct spu_runqueue), GFP_KERNEL);
	if (!rq) {
		printk(KERN_WARNING "%s: Unable to allocate runqueues.\n",
		       __FUNCTION__);
		return 1;
	}
	memset(rq, 0, sizeof(struct spu_runqueue));
	init_MUTEX(&rq->sem);
	INIT_LIST_HEAD(&rq->active_list);
	INIT_LIST_HEAD(&rq->idle_list);
	rq->nr_active = 0;
	rq->nr_idle = 0;
	rq->nr_switches = 0;
	atomic_set(&rq->prio.nr_blocked, 0);
	for (i = 0; i < MAX_PRIO; i++) {
		init_waitqueue_head(&rq->prio.waitq[i]);
		__clear_bit(i, rq->prio.bitmap);
	}
	__set_bit(MAX_PRIO, rq->prio.bitmap);
	for (;;) {
		spu = spu_alloc();
		if (!spu)
			break;
		pr_debug("%s: adding SPU[%d]\n", __FUNCTION__, spu->number);
		add_idle(rq, spu);
		spu->rq = rq;
	}
	if (!rq->nr_idle) {
		printk(KERN_WARNING "%s: No available SPUs.\n", __FUNCTION__);
		kfree(rq);
		return 1;
	}
	return 0;
}

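/*
 * spu_sched_exit - return all idle SPUs to the low-level allocator and
 * free the runqueue; any still-active contexts are not torn down here.
 */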
void __exit spu_sched_exit(void)
{
	struct spu_runqueue *rq = spu_rq();
	struct spu *spu;

	if (!rq) {
		printk(KERN_WARNING "%s: no runqueues!\n", __FUNCTION__);
		return;
	}
	while (rq->nr_idle > 0) {
		spu = del_idle(rq);
		spu_free(spu);
	}
	kfree(rq);
}