sched: add new set_cpus_allowed_ptr function
[linux-2.6] / kernel / sched_rt.c
1 /*
2  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3  * policies)
4  */
5
6 #ifdef CONFIG_SMP
7
8 static inline int rt_overloaded(struct rq *rq)
9 {
10         return atomic_read(&rq->rd->rto_count);
11 }
12
13 static inline void rt_set_overload(struct rq *rq)
14 {
15         cpu_set(rq->cpu, rq->rd->rto_mask);
16         /*
17          * Make sure the mask is visible before we set
18          * the overload count. That is checked to determine
19          * if we should look at the mask. It would be a shame
20          * if we looked at the mask, but the mask was not
21          * updated yet.
22          */
23         wmb();
24         atomic_inc(&rq->rd->rto_count);
25 }
26
27 static inline void rt_clear_overload(struct rq *rq)
28 {
29         /* the order here really doesn't matter */
30         atomic_dec(&rq->rd->rto_count);
31         cpu_clear(rq->cpu, rq->rd->rto_mask);
32 }
33
34 static void update_rt_migration(struct rq *rq)
35 {
36         if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
37                 if (!rq->rt.overloaded) {
38                         rt_set_overload(rq);
39                         rq->rt.overloaded = 1;
40                 }
41         } else if (rq->rt.overloaded) {
42                 rt_clear_overload(rq);
43                 rq->rt.overloaded = 0;
44         }
45 }
46 #endif /* CONFIG_SMP */
47
48 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
49 {
50         return container_of(rt_se, struct task_struct, rt);
51 }
52
53 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
54 {
55         return !list_empty(&rt_se->run_list);
56 }
57
58 #ifdef CONFIG_RT_GROUP_SCHED
59
60 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
61 {
62         if (!rt_rq->tg)
63                 return RUNTIME_INF;
64
65         return rt_rq->rt_runtime;
66 }
67
68 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
69 {
70         return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
71 }
72
73 #define for_each_leaf_rt_rq(rt_rq, rq) \
74         list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
75
76 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
77 {
78         return rt_rq->rq;
79 }
80
81 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
82 {
83         return rt_se->rt_rq;
84 }
85
86 #define for_each_sched_rt_entity(rt_se) \
87         for (; rt_se; rt_se = rt_se->parent)
88
89 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
90 {
91         return rt_se->my_q;
92 }
93
94 static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
95 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
96
97 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
98 {
99         struct sched_rt_entity *rt_se = rt_rq->rt_se;
100
101         if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
102                 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
103
104                 enqueue_rt_entity(rt_se);
105                 if (rt_rq->highest_prio < curr->prio)
106                         resched_task(curr);
107         }
108 }
109
110 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
111 {
112         struct sched_rt_entity *rt_se = rt_rq->rt_se;
113
114         if (rt_se && on_rt_rq(rt_se))
115                 dequeue_rt_entity(rt_se);
116 }
117
118 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
119 {
120         return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
121 }
122
123 static int rt_se_boosted(struct sched_rt_entity *rt_se)
124 {
125         struct rt_rq *rt_rq = group_rt_rq(rt_se);
126         struct task_struct *p;
127
128         if (rt_rq)
129                 return !!rt_rq->rt_nr_boosted;
130
131         p = rt_task_of(rt_se);
132         return p->prio != p->normal_prio;
133 }
134
135 #ifdef CONFIG_SMP
136 static inline cpumask_t sched_rt_period_mask(void)
137 {
138         return cpu_rq(smp_processor_id())->rd->span;
139 }
140 #else
141 static inline cpumask_t sched_rt_period_mask(void)
142 {
143         return cpu_online_map;
144 }
145 #endif
146
147 static inline
148 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
149 {
150         return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
151 }
152
153 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
154 {
155         return &rt_rq->tg->rt_bandwidth;
156 }
157
158 #else
159
160 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
161 {
162         return rt_rq->rt_runtime;
163 }
164
165 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
166 {
167         return ktime_to_ns(def_rt_bandwidth.rt_period);
168 }
169
170 #define for_each_leaf_rt_rq(rt_rq, rq) \
171         for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
172
173 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
174 {
175         return container_of(rt_rq, struct rq, rt);
176 }
177
178 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
179 {
180         struct task_struct *p = rt_task_of(rt_se);
181         struct rq *rq = task_rq(p);
182
183         return &rq->rt;
184 }
185
186 #define for_each_sched_rt_entity(rt_se) \
187         for (; rt_se; rt_se = NULL)
188
189 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
190 {
191         return NULL;
192 }
193
194 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
195 {
196 }
197
198 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
199 {
200 }
201
202 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
203 {
204         return rt_rq->rt_throttled;
205 }
206
207 static inline cpumask_t sched_rt_period_mask(void)
208 {
209         return cpu_online_map;
210 }
211
212 static inline
213 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
214 {
215         return &cpu_rq(cpu)->rt;
216 }
217
218 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
219 {
220         return &def_rt_bandwidth;
221 }
222
223 #endif
224
225 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
226 {
227         int i, idle = 1;
228         cpumask_t span;
229
230         if (rt_b->rt_runtime == RUNTIME_INF)
231                 return 1;
232
233         span = sched_rt_period_mask();
234         for_each_cpu_mask(i, span) {
235                 int enqueue = 0;
236                 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
237                 struct rq *rq = rq_of_rt_rq(rt_rq);
238
239                 spin_lock(&rq->lock);
240                 if (rt_rq->rt_time) {
241                         u64 runtime;
242
243                         spin_lock(&rt_rq->rt_runtime_lock);
244                         runtime = rt_rq->rt_runtime;
245                         rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
246                         if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
247                                 rt_rq->rt_throttled = 0;
248                                 enqueue = 1;
249                         }
250                         if (rt_rq->rt_time || rt_rq->rt_nr_running)
251                                 idle = 0;
252                         spin_unlock(&rt_rq->rt_runtime_lock);
253                 }
254
255                 if (enqueue)
256                         sched_rt_rq_enqueue(rt_rq);
257                 spin_unlock(&rq->lock);
258         }
259
260         return idle;
261 }
262
263 #ifdef CONFIG_SMP
264 static int balance_runtime(struct rt_rq *rt_rq)
265 {
266         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
267         struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
268         int i, weight, more = 0;
269         u64 rt_period;
270
271         weight = cpus_weight(rd->span);
272
273         spin_lock(&rt_b->rt_runtime_lock);
274         rt_period = ktime_to_ns(rt_b->rt_period);
275         for_each_cpu_mask(i, rd->span) {
276                 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
277                 s64 diff;
278
279                 if (iter == rt_rq)
280                         continue;
281
282                 spin_lock(&iter->rt_runtime_lock);
283                 diff = iter->rt_runtime - iter->rt_time;
284                 if (diff > 0) {
285                         do_div(diff, weight);
286                         if (rt_rq->rt_runtime + diff > rt_period)
287                                 diff = rt_period - rt_rq->rt_runtime;
288                         iter->rt_runtime -= diff;
289                         rt_rq->rt_runtime += diff;
290                         more = 1;
291                         if (rt_rq->rt_runtime == rt_period) {
292                                 spin_unlock(&iter->rt_runtime_lock);
293                                 break;
294                         }
295                 }
296                 spin_unlock(&iter->rt_runtime_lock);
297         }
298         spin_unlock(&rt_b->rt_runtime_lock);
299
300         return more;
301 }
302 #endif
303
304 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
305 {
306 #ifdef CONFIG_RT_GROUP_SCHED
307         struct rt_rq *rt_rq = group_rt_rq(rt_se);
308
309         if (rt_rq)
310                 return rt_rq->highest_prio;
311 #endif
312
313         return rt_task_of(rt_se)->prio;
314 }
315
316 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
317 {
318         u64 runtime = sched_rt_runtime(rt_rq);
319
320         if (runtime == RUNTIME_INF)
321                 return 0;
322
323         if (rt_rq->rt_throttled)
324                 return rt_rq_throttled(rt_rq);
325
326         if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
327                 return 0;
328
329 #ifdef CONFIG_SMP
330         if (rt_rq->rt_time > runtime) {
331                 int more;
332
333                 spin_unlock(&rt_rq->rt_runtime_lock);
334                 more = balance_runtime(rt_rq);
335                 spin_lock(&rt_rq->rt_runtime_lock);
336
337                 if (more)
338                         runtime = sched_rt_runtime(rt_rq);
339         }
340 #endif
341
342         if (rt_rq->rt_time > runtime) {
343                 rt_rq->rt_throttled = 1;
344                 if (rt_rq_throttled(rt_rq)) {
345                         sched_rt_rq_dequeue(rt_rq);
346                         return 1;
347                 }
348         }
349
350         return 0;
351 }
352
353 /*
354  * Update the current task's runtime statistics. Skip current tasks that
355  * are not in our scheduling class.
356  */
357 static void update_curr_rt(struct rq *rq)
358 {
359         struct task_struct *curr = rq->curr;
360         struct sched_rt_entity *rt_se = &curr->rt;
361         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
362         u64 delta_exec;
363
364         if (!task_has_rt_policy(curr))
365                 return;
366
367         delta_exec = rq->clock - curr->se.exec_start;
368         if (unlikely((s64)delta_exec < 0))
369                 delta_exec = 0;
370
371         schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
372
373         curr->se.sum_exec_runtime += delta_exec;
374         curr->se.exec_start = rq->clock;
375         cpuacct_charge(curr, delta_exec);
376
377         spin_lock(&rt_rq->rt_runtime_lock);
378         rt_rq->rt_time += delta_exec;
379         if (sched_rt_runtime_exceeded(rt_rq))
380                 resched_task(curr);
381         spin_unlock(&rt_rq->rt_runtime_lock);
382 }
383
384 static inline
385 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
386 {
387         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
388         rt_rq->rt_nr_running++;
389 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
390         if (rt_se_prio(rt_se) < rt_rq->highest_prio)
391                 rt_rq->highest_prio = rt_se_prio(rt_se);
392 #endif
393 #ifdef CONFIG_SMP
394         if (rt_se->nr_cpus_allowed > 1) {
395                 struct rq *rq = rq_of_rt_rq(rt_rq);
396                 rq->rt.rt_nr_migratory++;
397         }
398
399         update_rt_migration(rq_of_rt_rq(rt_rq));
400 #endif
401 #ifdef CONFIG_RT_GROUP_SCHED
402         if (rt_se_boosted(rt_se))
403                 rt_rq->rt_nr_boosted++;
404
405         if (rt_rq->tg)
406                 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
407 #else
408         start_rt_bandwidth(&def_rt_bandwidth);
409 #endif
410 }
411
412 static inline
413 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
414 {
415         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
416         WARN_ON(!rt_rq->rt_nr_running);
417         rt_rq->rt_nr_running--;
418 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
419         if (rt_rq->rt_nr_running) {
420                 struct rt_prio_array *array;
421
422                 WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
423                 if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
424                         /* recalculate */
425                         array = &rt_rq->active;
426                         rt_rq->highest_prio =
427                                 sched_find_first_bit(array->bitmap);
428                 } /* otherwise leave rt_rq->highest_prio alone */
429         } else
430                 rt_rq->highest_prio = MAX_RT_PRIO;
431 #endif
432 #ifdef CONFIG_SMP
433         if (rt_se->nr_cpus_allowed > 1) {
434                 struct rq *rq = rq_of_rt_rq(rt_rq);
435                 rq->rt.rt_nr_migratory--;
436         }
437
438         update_rt_migration(rq_of_rt_rq(rt_rq));
439 #endif /* CONFIG_SMP */
440 #ifdef CONFIG_RT_GROUP_SCHED
441         if (rt_se_boosted(rt_se))
442                 rt_rq->rt_nr_boosted--;
443
444         WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
445 #endif
446 }
447
448 static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
449 {
450         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
451         struct rt_prio_array *array = &rt_rq->active;
452         struct rt_rq *group_rq = group_rt_rq(rt_se);
453
454         if (group_rq && rt_rq_throttled(group_rq))
455                 return;
456
457         list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
458         __set_bit(rt_se_prio(rt_se), array->bitmap);
459
460         inc_rt_tasks(rt_se, rt_rq);
461 }
462
463 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
464 {
465         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
466         struct rt_prio_array *array = &rt_rq->active;
467
468         list_del_init(&rt_se->run_list);
469         if (list_empty(array->queue + rt_se_prio(rt_se)))
470                 __clear_bit(rt_se_prio(rt_se), array->bitmap);
471
472         dec_rt_tasks(rt_se, rt_rq);
473 }
474
475 /*
476  * Because the prio of an upper entry depends on the lower
477  * entries, we must remove entries top - down.
478  *
479  * XXX: O(1/2 h^2) because we can only walk up, not down the chain.
480  *      doesn't matter much for now, as h=2 for GROUP_SCHED.
481  */
482 static void dequeue_rt_stack(struct task_struct *p)
483 {
484         struct sched_rt_entity *rt_se, *top_se;
485
486         /*
487          * dequeue all, top - down.
488          */
489         do {
490                 rt_se = &p->rt;
491                 top_se = NULL;
492                 for_each_sched_rt_entity(rt_se) {
493                         if (on_rt_rq(rt_se))
494                                 top_se = rt_se;
495                 }
496                 if (top_se)
497                         dequeue_rt_entity(top_se);
498         } while (top_se);
499 }
500
501 /*
502  * Adding/removing a task to/from a priority array:
503  */
504 static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
505 {
506         struct sched_rt_entity *rt_se = &p->rt;
507
508         if (wakeup)
509                 rt_se->timeout = 0;
510
511         dequeue_rt_stack(p);
512
513         /*
514          * enqueue everybody, bottom - up.
515          */
516         for_each_sched_rt_entity(rt_se)
517                 enqueue_rt_entity(rt_se);
518 }
519
520 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
521 {
522         struct sched_rt_entity *rt_se = &p->rt;
523         struct rt_rq *rt_rq;
524
525         update_curr_rt(rq);
526
527         dequeue_rt_stack(p);
528
529         /*
530          * re-enqueue all non-empty rt_rq entities.
531          */
532         for_each_sched_rt_entity(rt_se) {
533                 rt_rq = group_rt_rq(rt_se);
534                 if (rt_rq && rt_rq->rt_nr_running)
535                         enqueue_rt_entity(rt_se);
536         }
537 }
538
539 /*
540  * Put the task at the end of the run list without the overhead of a dequeue
541  * followed by an enqueue.
542  */
543 static
544 void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
545 {
546         struct rt_prio_array *array = &rt_rq->active;
547
548         list_move_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
549 }
550
551 static void requeue_task_rt(struct rq *rq, struct task_struct *p)
552 {
553         struct sched_rt_entity *rt_se = &p->rt;
554         struct rt_rq *rt_rq;
555
556         for_each_sched_rt_entity(rt_se) {
557                 rt_rq = rt_rq_of_se(rt_se);
558                 requeue_rt_entity(rt_rq, rt_se);
559         }
560 }
561
562 static void yield_task_rt(struct rq *rq)
563 {
564         requeue_task_rt(rq, rq->curr);
565 }
566
567 #ifdef CONFIG_SMP
568 static int find_lowest_rq(struct task_struct *task);
569
570 static int select_task_rq_rt(struct task_struct *p, int sync)
571 {
572         struct rq *rq = task_rq(p);
573
574         /*
575          * If the current task is an RT task, then
576          * try to see if we can wake this RT task up on another
577          * runqueue. Otherwise simply start this RT task
578          * on its current runqueue.
579          *
580          * We want to avoid overloading runqueues, even if
581          * the RT task is of higher priority than the current RT task:
582          * RT tasks behave differently from other tasks. If
583          * one gets preempted, we try to push it off to another queue.
584          * So trying to keep a preempting RT task on the same
585          * cache hot CPU will force the running RT task to
586          * a cold CPU. So we waste all the cache for the lower
587          * RT task in hopes of saving some of an RT task
588          * that is just being woken and probably will have
589          * cold cache anyway.
590          */
591         if (unlikely(rt_task(rq->curr)) &&
592             (p->rt.nr_cpus_allowed > 1)) {
593                 int cpu = find_lowest_rq(p);
594
595                 return (cpu == -1) ? task_cpu(p) : cpu;
596         }
597
598         /*
599          * Otherwise, just let it ride on the affined RQ and the
600          * post-schedule router will push the preempted task away
601          */
602         return task_cpu(p);
603 }
604 #endif /* CONFIG_SMP */
605
606 /*
607  * Preempt the current task with a newly woken task if needed:
608  */
609 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
610 {
611         if (p->prio < rq->curr->prio)
612                 resched_task(rq->curr);
613 }
614
615 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
616                                                    struct rt_rq *rt_rq)
617 {
618         struct rt_prio_array *array = &rt_rq->active;
619         struct sched_rt_entity *next = NULL;
620         struct list_head *queue;
621         int idx;
622
623         idx = sched_find_first_bit(array->bitmap);
624         BUG_ON(idx >= MAX_RT_PRIO);
625
626         queue = array->queue + idx;
627         next = list_entry(queue->next, struct sched_rt_entity, run_list);
628
629         return next;
630 }
631
632 static struct task_struct *pick_next_task_rt(struct rq *rq)
633 {
634         struct sched_rt_entity *rt_se;
635         struct task_struct *p;
636         struct rt_rq *rt_rq;
637
638         rt_rq = &rq->rt;
639
640         if (unlikely(!rt_rq->rt_nr_running))
641                 return NULL;
642
643         if (rt_rq_throttled(rt_rq))
644                 return NULL;
645
646         do {
647                 rt_se = pick_next_rt_entity(rq, rt_rq);
648                 BUG_ON(!rt_se);
649                 rt_rq = group_rt_rq(rt_se);
650         } while (rt_rq);
651
652         p = rt_task_of(rt_se);
653         p->se.exec_start = rq->clock;
654         return p;
655 }
656
657 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
658 {
659         update_curr_rt(rq);
660         p->se.exec_start = 0;
661 }
662
663 #ifdef CONFIG_SMP
664
665 /* Only try algorithms three times */
666 #define RT_MAX_TRIES 3
667
668 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
669 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
670
671 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
672 {
673         if (!task_running(rq, p) &&
674             (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
675             (p->rt.nr_cpus_allowed > 1))
676                 return 1;
677         return 0;
678 }
679
680 /* Return the second highest RT task, NULL otherwise */
681 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
682 {
683         struct task_struct *next = NULL;
684         struct sched_rt_entity *rt_se;
685         struct rt_prio_array *array;
686         struct rt_rq *rt_rq;
687         int idx;
688
689         for_each_leaf_rt_rq(rt_rq, rq) {
690                 array = &rt_rq->active;
691                 idx = sched_find_first_bit(array->bitmap);
692  next_idx:
693                 if (idx >= MAX_RT_PRIO)
694                         continue;
695                 if (next && next->prio < idx)
696                         continue;
697                 list_for_each_entry(rt_se, array->queue + idx, run_list) {
698                         struct task_struct *p = rt_task_of(rt_se);
699                         if (pick_rt_task(rq, p, cpu)) {
700                                 next = p;
701                                 break;
702                         }
703                 }
704                 if (!next) {
705                         idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
706                         goto next_idx;
707                 }
708         }
709
710         return next;
711 }
712
713 static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
714
715 static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
716 {
717         int       lowest_prio = -1;
718         int       lowest_cpu  = -1;
719         int       count       = 0;
720         int       cpu;
721
722         cpus_and(*lowest_mask, task_rq(task)->rd->online, task->cpus_allowed);
723
724         /*
725          * Scan each rq for the lowest prio.
726          */
727         for_each_cpu_mask(cpu, *lowest_mask) {
728                 struct rq *rq = cpu_rq(cpu);
729
730                 /* We look for lowest RT prio or non-rt CPU */
731                 if (rq->rt.highest_prio >= MAX_RT_PRIO) {
732                         /*
733                          * if we already found a low RT queue
734                          * and now we found this non-rt queue,
735                          * clear the mask and set our bit.
736                          * Otherwise just return the queue as is
737                          * and the count==1 will cause the algorithm
738                          * to use the first bit found.
739                          */
740                         if (lowest_cpu != -1) {
741                                 cpus_clear(*lowest_mask);
742                                 cpu_set(rq->cpu, *lowest_mask);
743                         }
744                         return 1;
745                 }
746
747                 /* no locking for now */
748                 if ((rq->rt.highest_prio > task->prio)
749                     && (rq->rt.highest_prio >= lowest_prio)) {
750                         if (rq->rt.highest_prio > lowest_prio) {
751                                 /* new low - clear old data */
752                                 lowest_prio = rq->rt.highest_prio;
753                                 lowest_cpu = cpu;
754                                 count = 0;
755                         }
756                         count++;
757                 } else
758                         cpu_clear(cpu, *lowest_mask);
759         }
760
761         /*
762          * Clear out all the set bits that represent
763          * runqueues that were of higher prio than
764          * the lowest_prio.
765          */
766         if (lowest_cpu > 0) {
767                 /*
768                  * Perhaps we could add another cpumask op to
769                  * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
770                  * Then that could be optimized to use memset and such.
771                  */
772                 for_each_cpu_mask(cpu, *lowest_mask) {
773                         if (cpu >= lowest_cpu)
774                                 break;
775                         cpu_clear(cpu, *lowest_mask);
776                 }
777         }
778
779         return count;
780 }
781
782 static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
783 {
784         int first;
785
786         /* "this_cpu" is cheaper to preempt than a remote processor */
787         if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
788                 return this_cpu;
789
790         first = first_cpu(*mask);
791         if (first != NR_CPUS)
792                 return first;
793
794         return -1;
795 }
796
797 static int find_lowest_rq(struct task_struct *task)
798 {
799         struct sched_domain *sd;
800         cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
801         int this_cpu = smp_processor_id();
802         int cpu      = task_cpu(task);
803         int count    = find_lowest_cpus(task, lowest_mask);
804
805         if (!count)
806                 return -1; /* No targets found */
807
808         /*
809          * There is no sense in performing an optimal search if only one
810          * target is found.
811          */
812         if (count == 1)
813                 return first_cpu(*lowest_mask);
814
815         /*
816          * At this point we have built a mask of cpus representing the
817          * lowest priority tasks in the system.  Now we want to elect
818          * the best one based on our affinity and topology.
819          *
820          * We prioritize the last cpu that the task executed on since
821          * it is most likely cache-hot in that location.
822          */
823         if (cpu_isset(cpu, *lowest_mask))
824                 return cpu;
825
826         /*
827          * Otherwise, we consult the sched_domains span maps to figure
828          * out which cpu is logically closest to our hot cache data.
829          */
830         if (this_cpu == cpu)
831                 this_cpu = -1; /* Skip this_cpu opt if the same */
832
833         for_each_domain(cpu, sd) {
834                 if (sd->flags & SD_WAKE_AFFINE) {
835                         cpumask_t domain_mask;
836                         int       best_cpu;
837
838                         cpus_and(domain_mask, sd->span, *lowest_mask);
839
840                         best_cpu = pick_optimal_cpu(this_cpu,
841                                                     &domain_mask);
842                         if (best_cpu != -1)
843                                 return best_cpu;
844                 }
845         }
846
847         /*
848          * And finally, if there were no matches within the domains
849          * just give the caller *something* to work with from the compatible
850          * locations.
851          */
852         return pick_optimal_cpu(this_cpu, lowest_mask);
853 }
854
855 /* Will lock the rq it finds */
856 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
857 {
858         struct rq *lowest_rq = NULL;
859         int tries;
860         int cpu;
861
862         for (tries = 0; tries < RT_MAX_TRIES; tries++) {
863                 cpu = find_lowest_rq(task);
864
865                 if ((cpu == -1) || (cpu == rq->cpu))
866                         break;
867
868                 lowest_rq = cpu_rq(cpu);
869
870                 /* if the prio of this runqueue changed, try again */
871                 if (double_lock_balance(rq, lowest_rq)) {
872                         /*
873                          * We had to unlock the run queue. In
874                          * the meantime, the task could have
875                          * migrated already or had its affinity changed.
876                          * Also make sure that it wasn't scheduled on its rq.
877                          */
878                         if (unlikely(task_rq(task) != rq ||
879                                      !cpu_isset(lowest_rq->cpu,
880                                                 task->cpus_allowed) ||
881                                      task_running(rq, task) ||
882                                      !task->se.on_rq)) {
883
884                                 spin_unlock(&lowest_rq->lock);
885                                 lowest_rq = NULL;
886                                 break;
887                         }
888                 }
889
890                 /* If this rq is still suitable use it. */
891                 if (lowest_rq->rt.highest_prio > task->prio)
892                         break;
893
894                 /* try again */
895                 spin_unlock(&lowest_rq->lock);
896                 lowest_rq = NULL;
897         }
898
899         return lowest_rq;
900 }
901
902 /*
903  * If the current CPU has more than one RT task, see if the
904  * non-running task can migrate over to a CPU that is running a task
905  * of lesser priority.
906  */
907 static int push_rt_task(struct rq *rq)
908 {
909         struct task_struct *next_task;
910         struct rq *lowest_rq;
911         int ret = 0;
912         int paranoid = RT_MAX_TRIES;
913
914         if (!rq->rt.overloaded)
915                 return 0;
916
917         next_task = pick_next_highest_task_rt(rq, -1);
918         if (!next_task)
919                 return 0;
920
921  retry:
922         if (unlikely(next_task == rq->curr)) {
923                 WARN_ON(1);
924                 return 0;
925         }
926
927         /*
928          * It's possible that the next_task slipped in with a
929          * higher priority than current. If that's the case,
930          * just reschedule current.
931          */
932         if (unlikely(next_task->prio < rq->curr->prio)) {
933                 resched_task(rq->curr);
934                 return 0;
935         }
936
937         /* We might release rq lock */
938         get_task_struct(next_task);
939
940         /* find_lock_lowest_rq locks the rq if found */
941         lowest_rq = find_lock_lowest_rq(next_task, rq);
942         if (!lowest_rq) {
943                 struct task_struct *task;
944                 /*
945                  * find_lock_lowest_rq releases rq->lock
946                  * so it is possible that next_task has changed.
947                  * If it has, then try again.
948                  */
949                 task = pick_next_highest_task_rt(rq, -1);
950                 if (unlikely(task != next_task) && task && paranoid--) {
951                         put_task_struct(next_task);
952                         next_task = task;
953                         goto retry;
954                 }
955                 goto out;
956         }
957
958         deactivate_task(rq, next_task, 0);
959         set_task_cpu(next_task, lowest_rq->cpu);
960         activate_task(lowest_rq, next_task, 0);
961
962         resched_task(lowest_rq->curr);
963
964         spin_unlock(&lowest_rq->lock);
965
966         ret = 1;
967 out:
968         put_task_struct(next_task);
969
970         return ret;
971 }
972
973 /*
974  * TODO: Currently we just use the second highest prio task on
975  *       the queue, and stop when it can't migrate (or there's
976  *       no more RT tasks).  There may be a case where a lower
977  *       priority RT task has a different affinity than the
978  *       higher RT task. In this case the lower RT task could
979  *       possibly be able to migrate, whereas the higher priority
980  *       RT task could not.  We currently ignore this issue.
981  *       Enhancements are welcome!
982  */
983 static void push_rt_tasks(struct rq *rq)
984 {
985         /* push_rt_task() returns true if it moved an RT task */
986         while (push_rt_task(rq))
987                 ;
988 }
989
990 static int pull_rt_task(struct rq *this_rq)
991 {
992         int this_cpu = this_rq->cpu, ret = 0, cpu;
993         struct task_struct *p, *next;
994         struct rq *src_rq;
995
996         if (likely(!rt_overloaded(this_rq)))
997                 return 0;
998
999         next = pick_next_task_rt(this_rq);
1000
1001         for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
1002                 if (this_cpu == cpu)
1003                         continue;
1004
1005                 src_rq = cpu_rq(cpu);
1006                 /*
1007                  * We can potentially drop this_rq's lock in
1008                  * double_lock_balance, and another CPU could
1009                  * steal our next task - hence we must cause
1010                  * the caller to recalculate the next task
1011                  * in that case:
1012                  */
1013                 if (double_lock_balance(this_rq, src_rq)) {
1014                         struct task_struct *old_next = next;
1015
1016                         next = pick_next_task_rt(this_rq);
1017                         if (next != old_next)
1018                                 ret = 1;
1019                 }
1020
1021                 /*
1022                  * Are there still pullable RT tasks?
1023                  */
1024                 if (src_rq->rt.rt_nr_running <= 1)
1025                         goto skip;
1026
1027                 p = pick_next_highest_task_rt(src_rq, this_cpu);
1028
1029                 /*
1030                  * Do we have an RT task that preempts
1031                  * the to-be-scheduled task?
1032                  */
1033                 if (p && (!next || (p->prio < next->prio))) {
1034                         WARN_ON(p == src_rq->curr);
1035                         WARN_ON(!p->se.on_rq);
1036
1037                         /*
1038                          * There's a chance that p is higher in priority
1039                          * than what's currently running on its cpu.
1040                          * This is usually because p is just waking up and
1041                          * hasn't had a chance to schedule. We only pull
1042                          * p if it is lower in priority than the
1043                          * current task on its runqueue, and if
1044                          * this_rq's next task is also lower in prio than
1045                          * the current task on that rq.
1046                          */
1047                         if (p->prio < src_rq->curr->prio ||
1048                             (next && next->prio < src_rq->curr->prio))
1049                                 goto skip;
1050
1051                         ret = 1;
1052
1053                         deactivate_task(src_rq, p, 0);
1054                         set_task_cpu(p, this_cpu);
1055                         activate_task(this_rq, p, 0);
1056                         /*
1057                          * We continue with the search, just in
1058                          * case there's an even higher prio task
1059                          * in another runqueue. (low likelihood
1060                          * but possible)
1061                          *
1062                          * Update next so that we won't pick a task
1063                          * on another cpu with a priority lower (or equal)
1064                          * than the one we just picked.
1065                          */
1066                         next = p;
1067
1068                 }
1069  skip:
1070                 spin_unlock(&src_rq->lock);
1071         }
1072
1073         return ret;
1074 }
1075
1076 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1077 {
1078         /* Try to pull RT tasks here if we lower this rq's prio */
1079         if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
1080                 pull_rt_task(rq);
1081 }
1082
1083 static void post_schedule_rt(struct rq *rq)
1084 {
1085         /*
1086          * If we have more than one rt_task queued, then
1087          * see if we can push the other rt_tasks off to other CPUs.
1088          * Note we may release the rq lock, and since
1089          * the lock was owned by prev, we need to release it
1090          * first via finish_lock_switch and then reacquire it here.
1091          */
1092         if (unlikely(rq->rt.overloaded)) {
1093                 spin_lock_irq(&rq->lock);
1094                 push_rt_tasks(rq);
1095                 spin_unlock_irq(&rq->lock);
1096         }
1097 }
1098
1099
1100 static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
1101 {
1102         if (!task_running(rq, p) &&
1103             (p->prio >= rq->rt.highest_prio) &&
1104             rq->rt.overloaded)
1105                 push_rt_tasks(rq);
1106 }
1107
1108 static unsigned long
1109 load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1110                 unsigned long max_load_move,
1111                 struct sched_domain *sd, enum cpu_idle_type idle,
1112                 int *all_pinned, int *this_best_prio)
1113 {
1114         /* don't touch RT tasks */
1115         return 0;
1116 }
1117
1118 static int
1119 move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1120                  struct sched_domain *sd, enum cpu_idle_type idle)
1121 {
1122         /* don't touch RT tasks */
1123         return 0;
1124 }
1125
1126 static void set_cpus_allowed_rt(struct task_struct *p,
1127                                 const cpumask_t *new_mask)
1128 {
1129         int weight = cpus_weight(*new_mask);
1130
1131         BUG_ON(!rt_task(p));
1132
1133         /*
1134          * Update the migration status of the RQ if we have an RT task
1135          * which is on the runqueue AND is changing its allowed-CPU count (weight).
1136          */
1137         if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
1138                 struct rq *rq = task_rq(p);
1139
1140                 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
1141                         rq->rt.rt_nr_migratory++;
1142                 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
1143                         BUG_ON(!rq->rt.rt_nr_migratory);
1144                         rq->rt.rt_nr_migratory--;
1145                 }
1146
1147                 update_rt_migration(rq);
1148         }
1149
1150         p->cpus_allowed    = *new_mask;
1151         p->rt.nr_cpus_allowed = weight;
1152 }
1153
1154 /* Assumes rq->lock is held */
1155 static void join_domain_rt(struct rq *rq)
1156 {
1157         if (rq->rt.overloaded)
1158                 rt_set_overload(rq);
1159 }
1160
1161 /* Assumes rq->lock is held */
1162 static void leave_domain_rt(struct rq *rq)
1163 {
1164         if (rq->rt.overloaded)
1165                 rt_clear_overload(rq);
1166 }
1167
1168 /*
1169  * When switching away from the RT class, we bring ourselves to a
1170  * position where we might want to pull RT tasks from other runqueues.
1171  */
1172 static void switched_from_rt(struct rq *rq, struct task_struct *p,
1173                            int running)
1174 {
1175         /*
1176          * If there are other RT tasks then we will reschedule
1177          * and the scheduling of the other RT tasks will handle
1178          * the balancing. But if we are the last RT task
1179          * we may need to handle the pulling of RT tasks
1180          * now.
1181          */
1182         if (!rq->rt.rt_nr_running)
1183                 pull_rt_task(rq);
1184 }
1185 #endif /* CONFIG_SMP */
1186
1187 /*
1188  * When switching a task to RT, we may overload the runqueue
1189  * with RT tasks. In this case we try to push them off to
1190  * other runqueues.
1191  */
1192 static void switched_to_rt(struct rq *rq, struct task_struct *p,
1193                            int running)
1194 {
1195         int check_resched = 1;
1196
1197         /*
1198          * If we are already running, then there's nothing
1199          * that needs to be done. But if we are not running
1200          * we may need to preempt the current running task.
1201          * If that current running task is also an RT task
1202          * then see if we can move to another run queue.
1203          */
1204         if (!running) {
1205 #ifdef CONFIG_SMP
1206                 if (rq->rt.overloaded && push_rt_task(rq) &&
1207                     /* Don't resched if we changed runqueues */
1208                     rq != task_rq(p))
1209                         check_resched = 0;
1210 #endif /* CONFIG_SMP */
1211                 if (check_resched && p->prio < rq->curr->prio)
1212                         resched_task(rq->curr);
1213         }
1214 }
1215
1216 /*
1217  * Priority of the task has changed. This may cause
1218  * us to initiate a push or pull.
1219  */
1220 static void prio_changed_rt(struct rq *rq, struct task_struct *p,
1221                             int oldprio, int running)
1222 {
1223         if (running) {
1224 #ifdef CONFIG_SMP
1225                 /*
1226                  * If our priority decreases while running, we
1227                  * may need to pull tasks to this runqueue.
1228                  */
1229                 if (oldprio < p->prio)
1230                         pull_rt_task(rq);
1231                 /*
1232                  * If there's a higher priority task waiting to run
1233                  * then reschedule. Note, the above pull_rt_task
1234                  * can release the rq lock and p could migrate.
1235                  * Only reschedule if p is still on the same runqueue.
1236                  */
1237                 if (p->prio > rq->rt.highest_prio && rq->curr == p)
1238                         resched_task(p);
1239 #else
1240                 /* For UP simply resched on drop of prio */
1241                 if (oldprio < p->prio)
1242                         resched_task(p);
1243 #endif /* CONFIG_SMP */
1244         } else {
1245                 /*
1246                  * This task is not running, but if its priority
1247                  * is higher than that of the currently running
1248                  * task, then reschedule.
1249                  */
1250                 if (p->prio < rq->curr->prio)
1251                         resched_task(rq->curr);
1252         }
1253 }
1254
1255 static void watchdog(struct rq *rq, struct task_struct *p)
1256 {
1257         unsigned long soft, hard;
1258
1259         if (!p->signal)
1260                 return;
1261
1262         soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
1263         hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
1264
1265         if (soft != RLIM_INFINITY) {
1266                 unsigned long next;
1267
1268                 p->rt.timeout++;
1269                 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1270                 if (p->rt.timeout > next)
1271                         p->it_sched_expires = p->se.sum_exec_runtime;
1272         }
1273 }
1274
1275 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1276 {
1277         update_curr_rt(rq);
1278
1279         watchdog(rq, p);
1280
1281         /*
1282          * RR tasks need a special form of timeslice management.
1283          * FIFO tasks have no timeslices.
1284          */
1285         if (p->policy != SCHED_RR)
1286                 return;
1287
1288         if (--p->rt.time_slice)
1289                 return;
1290
1291         p->rt.time_slice = DEF_TIMESLICE;
1292
1293         /*
1294          * Requeue to the end of the queue if we are not the only element
1295          * on the queue:
1296          */
1297         if (p->rt.run_list.prev != p->rt.run_list.next) {
1298                 requeue_task_rt(rq, p);
1299                 set_tsk_need_resched(p);
1300         }
1301 }
1302
1303 static void set_curr_task_rt(struct rq *rq)
1304 {
1305         struct task_struct *p = rq->curr;
1306
1307         p->se.exec_start = rq->clock;
1308 }
1309
1310 const struct sched_class rt_sched_class = {
1311         .next                   = &fair_sched_class,
1312         .enqueue_task           = enqueue_task_rt,
1313         .dequeue_task           = dequeue_task_rt,
1314         .yield_task             = yield_task_rt,
1315 #ifdef CONFIG_SMP
1316         .select_task_rq         = select_task_rq_rt,
1317 #endif /* CONFIG_SMP */
1318
1319         .check_preempt_curr     = check_preempt_curr_rt,
1320
1321         .pick_next_task         = pick_next_task_rt,
1322         .put_prev_task          = put_prev_task_rt,
1323
1324 #ifdef CONFIG_SMP
1325         .load_balance           = load_balance_rt,
1326         .move_one_task          = move_one_task_rt,
1327         .set_cpus_allowed       = set_cpus_allowed_rt,
1328         .join_domain            = join_domain_rt,
1329         .leave_domain           = leave_domain_rt,
1330         .pre_schedule           = pre_schedule_rt,
1331         .post_schedule          = post_schedule_rt,
1332         .task_wake_up           = task_wake_up_rt,
1333         .switched_from          = switched_from_rt,
1334 #endif
1335
1336         .set_curr_task          = set_curr_task_rt,
1337         .task_tick              = task_tick_rt,
1338
1339         .prio_changed           = prio_changed_rt,
1340         .switched_to            = switched_to_rt,
1341 };
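
For context, a caller-side sketch (not part of this file) of the pointer-based interface named in the commit title. It assumes set_cpus_allowed_ptr() takes (struct task_struct *, const cpumask_t *), matching the per-class hook above; the helper pin_task_to_cpu() is purely illustrative. For an RT task, the core code is expected to reach set_cpus_allowed_rt() through p->sched_class->set_cpus_allowed.

/* Illustrative helper only; assumes <linux/sched.h> and <linux/cpumask.h>. */
static int pin_task_to_cpu(struct task_struct *p, int cpu)
{
        cpumask_t mask = cpumask_of_cpu(cpu);

        /* Pass the mask by reference rather than copying a cpumask_t by value. */
        return set_cpus_allowed_ptr(p, &mask);
}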