/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"
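
/* Minimum timeslice for an SPU context, expressed in jiffies (100 ms). */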
#define SPU_MIN_TIMESLICE (100 * HZ / 1000)
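
/*
 * One waitqueue per priority level, plus a bitmap in which a set bit
 * marks a level with at least one waiter; sched_find_first_bit() on
 * the bitmap then yields the best waiting priority in O(1).  The bit
 * one past MAX_PRIO (set once at init time) acts as a sentinel so the
 * search always terminates.  active_list/active_mutex track the SPUs
 * currently running a context, one list per NUMA node.
 */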
#define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1)
struct spu_prio_array {
	unsigned long bitmap[SPU_BITMAP_SIZE];
	wait_queue_head_t waitq[MAX_PRIO];
	struct list_head active_list[MAX_NUMNODES];
	struct mutex active_mutex[MAX_NUMNODES];
};

static struct spu_prio_array *spu_prio;
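
/*
 * node_allowed - check whether the calling task may use SPUs on a
 * given node.  A node qualifies only if it has online CPUs and the
 * task's CPU affinity mask intersects with that node's CPUs.
 */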
static inline int node_allowed(int node)
{
	cpumask_t mask;

	if (!nr_cpus_node(node))
		return 0;
	mask = node_to_cpumask(node);
	if (!cpus_intersects(mask, current->cpus_allowed))
		return 0;
	return 1;
}
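
/*
 * An SPE's translations are not covered by CPU-local TLB invalidations,
 * so mark the mm as live on "more CPUs than exist"; this forces the
 * TLB flush paths to use the broadcast tlbie instruction rather than
 * the CPU-local tlbiel.
 */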
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs. */
	__cpus_setall(&mm->cpu_vm_mask, nr);
}
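
/*
 * bind_context - load a saved context onto a physical SPU and hook up
 * the interrupt callbacks so that mailbox, stop, MFC and DMA events
 * are routed back to spufs.
 */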
static inline void bind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
		 spu->number, spu->node);
	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->prio = current->prio;
	spu->mm = ctx->owner;
	mm_needs_global_tlbie(spu->mm);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spu->dma_callback = spufs_dma_callback;
	mb();
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
}
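
/*
 * unbind_context - save the context state off the SPU and detach all
 * callbacks, leaving the SPU free to be handed to another context.
 */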
static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
		 spu->pid, spu->number, spu->node);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->dma_callback = NULL;
	spu->mm = NULL;
	spu->pid = 0;
	spu->prio = MAX_PRIO;
	ctx->ops = &spu_backing_ops;
	ctx->spu = NULL;
	spu->flags = 0;
	spu->ctx = NULL;
}
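
/*
 * spu_add_wq/spu_del_wq keep the priority bitmap consistent with the
 * per-priority waitqueues: the bit is set when a waiter enqueues and
 * cleared only when the last waiter at that priority leaves.
 */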
static inline void spu_add_wq(wait_queue_head_t * wq, wait_queue_t * wait,
			      int prio)
{
	prepare_to_wait_exclusive(wq, wait, TASK_INTERRUPTIBLE);
	set_bit(prio, spu_prio->bitmap);
}

static inline void spu_del_wq(wait_queue_head_t * wq, wait_queue_t * wait,
			      int prio)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);

	spin_lock_irqsave(&wq->lock, flags);

	remove_wait_queue_locked(wq, wait);
	if (list_empty(&wq->task_list))
		clear_bit(prio, spu_prio->bitmap);

	spin_unlock_irqrestore(&wq->lock, flags);
}
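
/*
 * spu_prio_wait - sleep until an SPU may be available.  ctx->state_sema
 * is dropped across the schedule() so that whoever frees an SPU can
 * make progress, and is re-taken before returning to the caller.
 */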
static void spu_prio_wait(struct spu_context *ctx, u64 flags)
{
	int prio = current->prio;
	wait_queue_head_t *wq = &spu_prio->waitq[prio];
	DEFINE_WAIT(wait);

	if (ctx->spu)
		return;

	spu_add_wq(wq, &wait, prio);

	if (!signal_pending(current)) {
		up_write(&ctx->state_sema);
		pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
			 current->pid, current->prio);
		schedule();
		down_write(&ctx->state_sema);
	}

	spu_del_wq(wq, &wait, prio);
}
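
/*
 * spu_prio_wakeup - wake exactly one waiter at the best (lowest
 * numbered) waiting priority, if any.
 */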
static void spu_prio_wakeup(void)
{
	int best = sched_find_first_bit(spu_prio->bitmap);
	if (best < MAX_PRIO) {
		wait_queue_head_t *wq = &spu_prio->waitq[best];
		wake_up_interruptible_nr(wq, 1);
	}
}
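
/*
 * get_active_spu - remove an SPU from its node's active list; returns
 * nonzero if the SPU was actually on the list, so the caller knows it
 * must be returned to the allocator.
 */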
static int get_active_spu(struct spu *spu)
{
	int node = spu->node;
	struct spu *tmp;
	int rc = 0;

	mutex_lock(&spu_prio->active_mutex[node]);
	list_for_each_entry(tmp, &spu_prio->active_list[node], list) {
		if (tmp == spu) {
			list_del_init(&spu->list);
			rc = 1;
			break;
		}
	}
	mutex_unlock(&spu_prio->active_mutex[node]);
	return rc;
}

static void put_active_spu(struct spu *spu)
{
	int node = spu->node;

	mutex_lock(&spu_prio->active_mutex[node]);
	list_add_tail(&spu->list, &spu_prio->active_list[node]);
	mutex_unlock(&spu_prio->active_mutex[node]);
}
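
/*
 * spu_get_idle - try to allocate an idle SPU, starting with the local
 * NUMA node and rotating through the others, skipping nodes the
 * current task is not allowed to run on.
 */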
static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags)
{
	struct spu *spu = NULL;
	int node = cpu_to_node(raw_smp_processor_id());
	int n;

	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(node))
			continue;
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}
	return spu;
}

static inline struct spu *spu_get(struct spu_context *ctx, u64 flags)
{
	/* Future: spu_get_idle() if possible,
	 * otherwise try to preempt an active
	 * context.
	 */
	return spu_get_idle(ctx, flags);
}

/* The three externally callable interfaces
 * for the scheduler begin here.
 *
 *	spu_activate	- bind a context to SPU, waiting as needed.
 *	spu_deactivate	- unbind a context from its SPU.
 *	spu_yield	- yield an SPU if others are waiting.
 */
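
/*
 * A minimal sketch of the expected calling pattern (the real callers
 * live elsewhere in spufs; this sequence is illustrative only):
 *
 *	down_write(&ctx->state_sema);
 *	ret = spu_activate(ctx, 0);	(may sleep; -ERESTARTSYS on signal)
 *	up_write(&ctx->state_sema);
 *	... run the context ...
 *	spu_yield(ctx);			(takes state_sema itself, by trylock)
 *	down_write(&ctx->state_sema);
 *	spu_deactivate(ctx);
 *	up_write(&ctx->state_sema);
 */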

int spu_activate(struct spu_context *ctx, u64 flags)
{
	struct spu *spu;
	int ret = 0;

	for (;;) {
		if (ctx->spu)
			return 0;
		spu = spu_get(ctx, flags);
		if (spu != NULL) {
			if (ctx->spu != NULL) {
				spu_free(spu);
				spu_prio_wakeup();
				break;
			}
			bind_context(spu, ctx);
			put_active_spu(spu);
			break;
		}
		spu_prio_wait(ctx, flags);
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			spu_prio_wakeup();
			break;
		}
	}
	return ret;
}
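
/*
 * spu_deactivate - unbind the context from its SPU and, if the SPU was
 * on the active list, hand it back to the allocator and wake the next
 * best-priority waiter.
 */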
void spu_deactivate(struct spu_context *ctx)
{
	struct spu *spu;
	int needs_idle;

	spu = ctx->spu;
	if (!spu)
		return;
	needs_idle = get_active_spu(spu);
	unbind_context(spu, ctx);
	if (needs_idle) {
		spu_free(spu);
		spu_prio_wakeup();
	}
}
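
/*
 * spu_yield - if another context is waiting at any priority, save this
 * context off the SPU and call yield(); otherwise just reset the SPU's
 * recorded priority.
 */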
void spu_yield(struct spu_context *ctx)
{
	struct spu *spu;
	int need_yield = 0;

	if (down_write_trylock(&ctx->state_sema)) {
		if ((spu = ctx->spu) != NULL) {
			int best = sched_find_first_bit(spu_prio->bitmap);
			if (best < MAX_PRIO) {
				pr_debug("%s: yielding SPU %d NODE %d\n",
					 __FUNCTION__, spu->number, spu->node);
				spu_deactivate(ctx);
				ctx->state = SPU_STATE_SAVED;
				need_yield = 1;
			} else {
				spu->prio = MAX_PRIO;
			}
		}
		up_write(&ctx->state_sema);
	}
	if (unlikely(need_yield))
		yield();
}
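
/*
 * spu_sched_init - allocate the priority array and set the sentinel
 * bit at MAX_PRIO so that sched_find_first_bit() always finds a set
 * bit even when no context is waiting.
 */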
int __init spu_sched_init(void)
{
	int i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio) {
		printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
		       __FUNCTION__);
		return 1;
	}
	for (i = 0; i < MAX_PRIO; i++) {
		init_waitqueue_head(&spu_prio->waitq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	__set_bit(MAX_PRIO, spu_prio->bitmap);
	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&spu_prio->active_mutex[i]);
		INIT_LIST_HEAD(&spu_prio->active_list[i]);
	}
	return 0;
}
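
/*
 * spu_sched_exit - drain any SPUs still on the active lists back to
 * the allocator and free the priority array.
 */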
void __exit spu_sched_exit(void)
{
	struct spu *spu, *tmp;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&spu_prio->active_mutex[node]);
		list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
					 list) {
			list_del_init(&spu->list);
			spu_free(spu);
		}
		mutex_unlock(&spu_prio->active_mutex[node]);
	}
	kfree(spu_prio);
}