[linux-2.6] / kernel / sched_rt.c
1 /*
2  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3  * policies)
4  */
5
6 #ifdef CONFIG_SMP
7
8 static inline int rt_overloaded(struct rq *rq)
9 {
10         return atomic_read(&rq->rd->rto_count);
11 }
12
13 static inline void rt_set_overload(struct rq *rq)
14 {
15         if (!rq->online)
16                 return;
17
18         cpu_set(rq->cpu, rq->rd->rto_mask);
19         /*
20          * Make sure the mask is visible before we set
21          * the overload count. The count is what gets checked to
22          * decide whether to look at the mask at all, so the mask
23          * must already be up to date by the time the count says
24          * it is valid.
25          */
26         wmb();
27         atomic_inc(&rq->rd->rto_count);
28 }
29
30 static inline void rt_clear_overload(struct rq *rq)
31 {
32         if (!rq->online)
33                 return;
34
35         /* the order here really doesn't matter */
36         atomic_dec(&rq->rd->rto_count);
37         cpu_clear(rq->cpu, rq->rd->rto_mask);
38 }
39
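/*
 * Keep the rq's "RT overloaded" state in sync: a runqueue is overloaded
 * when it has more than one runnable RT task and at least one of them
 * can migrate to another CPU, which is what the push logic cares about.
 */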
40 static void update_rt_migration(struct rq *rq)
41 {
42         if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
43                 if (!rq->rt.overloaded) {
44                         rt_set_overload(rq);
45                         rq->rt.overloaded = 1;
46                 }
47         } else if (rq->rt.overloaded) {
48                 rt_clear_overload(rq);
49                 rq->rt.overloaded = 0;
50         }
51 }
52 #endif /* CONFIG_SMP */
53
54 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
55 {
56         return container_of(rt_se, struct task_struct, rt);
57 }
58
59 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
60 {
61         return !list_empty(&rt_se->run_list);
62 }
63
64 #ifdef CONFIG_RT_GROUP_SCHED
65
66 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
67 {
68         if (!rt_rq->tg)
69                 return RUNTIME_INF;
70
71         return rt_rq->rt_runtime;
72 }
73
74 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
75 {
76         return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
77 }
78
79 #define for_each_leaf_rt_rq(rt_rq, rq) \
80         list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
81
82 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
83 {
84         return rt_rq->rq;
85 }
86
87 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
88 {
89         return rt_se->rt_rq;
90 }
91
92 #define for_each_sched_rt_entity(rt_se) \
93         for (; rt_se; rt_se = rt_se->parent)
94
95 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
96 {
97         return rt_se->my_q;
98 }
99
100 static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
101 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
102
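/*
 * Enqueue/dequeue the group's own sched_rt_entity in its parent rt_rq,
 * e.g. when the group gets unthrottled or throttled; on enqueue, preempt
 * the CPU's current task if the group now holds a higher-priority task.
 */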
103 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
104 {
105         struct sched_rt_entity *rt_se = rt_rq->rt_se;
106
107         if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
108                 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
109
110                 enqueue_rt_entity(rt_se);
111                 if (rt_rq->highest_prio < curr->prio)
112                         resched_task(curr);
113         }
114 }
115
116 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
117 {
118         struct sched_rt_entity *rt_se = rt_rq->rt_se;
119
120         if (rt_se && on_rt_rq(rt_se))
121                 dequeue_rt_entity(rt_se);
122 }
123
124 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
125 {
126         return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
127 }
128
129 static int rt_se_boosted(struct sched_rt_entity *rt_se)
130 {
131         struct rt_rq *rt_rq = group_rt_rq(rt_se);
132         struct task_struct *p;
133
134         if (rt_rq)
135                 return !!rt_rq->rt_nr_boosted;
136
137         p = rt_task_of(rt_se);
138         return p->prio != p->normal_prio;
139 }
140
141 #ifdef CONFIG_SMP
142 static inline cpumask_t sched_rt_period_mask(void)
143 {
144         return cpu_rq(smp_processor_id())->rd->span;
145 }
146 #else
147 static inline cpumask_t sched_rt_period_mask(void)
148 {
149         return cpu_online_map;
150 }
151 #endif
152
153 static inline
154 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
155 {
156         return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
157 }
158
159 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
160 {
161         return &rt_rq->tg->rt_bandwidth;
162 }
163
164 #else /* !CONFIG_RT_GROUP_SCHED */
165
166 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
167 {
168         return rt_rq->rt_runtime;
169 }
170
171 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
172 {
173         return ktime_to_ns(def_rt_bandwidth.rt_period);
174 }
175
176 #define for_each_leaf_rt_rq(rt_rq, rq) \
177         for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
178
179 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
180 {
181         return container_of(rt_rq, struct rq, rt);
182 }
183
184 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
185 {
186         struct task_struct *p = rt_task_of(rt_se);
187         struct rq *rq = task_rq(p);
188
189         return &rq->rt;
190 }
191
192 #define for_each_sched_rt_entity(rt_se) \
193         for (; rt_se; rt_se = NULL)
194
195 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
196 {
197         return NULL;
198 }
199
200 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
201 {
202         if (rt_rq->rt_nr_running)
203                 resched_task(rq_of_rt_rq(rt_rq)->curr);
204 }
205
206 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
207 {
208 }
209
210 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
211 {
212         return rt_rq->rt_throttled;
213 }
214
215 static inline cpumask_t sched_rt_period_mask(void)
216 {
217         return cpu_online_map;
218 }
219
220 static inline
221 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
222 {
223         return &cpu_rq(cpu)->rt;
224 }
225
226 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
227 {
228         return &def_rt_bandwidth;
229 }
230
231 #endif /* CONFIG_RT_GROUP_SCHED */
232
233 #ifdef CONFIG_SMP
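/*
 * Runtime borrowing: when this rt_rq is running out of budget, try to
 * take a share of the unused runtime of its sibling rt_rq's in the same
 * root domain. Returns 1 if any runtime was transferred.
 */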
234 static int do_balance_runtime(struct rt_rq *rt_rq)
235 {
236         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
237         struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
238         int i, weight, more = 0;
239         u64 rt_period;
240
241         weight = cpus_weight(rd->span);
242
243         spin_lock(&rt_b->rt_runtime_lock);
244         rt_period = ktime_to_ns(rt_b->rt_period);
245         for_each_cpu_mask_nr(i, rd->span) {
246                 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
247                 s64 diff;
248
249                 if (iter == rt_rq)
250                         continue;
251
252                 spin_lock(&iter->rt_runtime_lock);
253                 if (iter->rt_runtime == RUNTIME_INF)
254                         goto next;
255
256                 diff = iter->rt_runtime - iter->rt_time;
257                 if (diff > 0) {
258                         diff = div_u64((u64)diff, weight);
259                         if (rt_rq->rt_runtime + diff > rt_period)
260                                 diff = rt_period - rt_rq->rt_runtime;
261                         iter->rt_runtime -= diff;
262                         rt_rq->rt_runtime += diff;
263                         more = 1;
264                         if (rt_rq->rt_runtime == rt_period) {
265                                 spin_unlock(&iter->rt_runtime_lock);
266                                 break;
267                         }
268                 }
269 next:
270                 spin_unlock(&iter->rt_runtime_lock);
271         }
272         spin_unlock(&rt_b->rt_runtime_lock);
273
274         return more;
275 }
276
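/*
 * The rq is going offline: settle the runtime books with the siblings
 * (take back what was lent out, hand back what was borrowed) and then
 * let each rt_rq run unthrottled (RUNTIME_INF) while it is offline.
 */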
277 static void __disable_runtime(struct rq *rq)
278 {
279         struct root_domain *rd = rq->rd;
280         struct rt_rq *rt_rq;
281
282         if (unlikely(!scheduler_running))
283                 return;
284
285         for_each_leaf_rt_rq(rt_rq, rq) {
286                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
287                 s64 want;
288                 int i;
289
290                 spin_lock(&rt_b->rt_runtime_lock);
291                 spin_lock(&rt_rq->rt_runtime_lock);
292                 if (rt_rq->rt_runtime == RUNTIME_INF ||
293                                 rt_rq->rt_runtime == rt_b->rt_runtime)
294                         goto balanced;
295                 spin_unlock(&rt_rq->rt_runtime_lock);
296
297                 want = rt_b->rt_runtime - rt_rq->rt_runtime;
298
299                 for_each_cpu_mask(i, rd->span) {
300                         struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
301                         s64 diff;
302
303                         if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
304                                 continue;
305
306                         spin_lock(&iter->rt_runtime_lock);
307                         if (want > 0) {
308                                 diff = min_t(s64, iter->rt_runtime, want);
309                                 iter->rt_runtime -= diff;
310                                 want -= diff;
311                         } else {
312                                 iter->rt_runtime -= want;
313                                 want -= want;
314                         }
315                         spin_unlock(&iter->rt_runtime_lock);
316
317                         if (!want)
318                                 break;
319                 }
320
321                 spin_lock(&rt_rq->rt_runtime_lock);
322                 BUG_ON(want);
323 balanced:
324                 rt_rq->rt_runtime = RUNTIME_INF;
325                 spin_unlock(&rt_rq->rt_runtime_lock);
326                 spin_unlock(&rt_b->rt_runtime_lock);
327         }
328 }
329
330 static void disable_runtime(struct rq *rq)
331 {
332         unsigned long flags;
333
334         spin_lock_irqsave(&rq->lock, flags);
335         __disable_runtime(rq);
336         spin_unlock_irqrestore(&rq->lock, flags);
337 }
338
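/*
 * The rq is coming back online: reset every rt_rq to its configured
 * bandwidth and clear any stale throttling state left over from the
 * time the rq was offline.
 */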
339 static void __enable_runtime(struct rq *rq)
340 {
341         struct rt_rq *rt_rq;
342
343         if (unlikely(!scheduler_running))
344                 return;
345
346         for_each_leaf_rt_rq(rt_rq, rq) {
347                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
348
349                 spin_lock(&rt_b->rt_runtime_lock);
350                 spin_lock(&rt_rq->rt_runtime_lock);
351                 rt_rq->rt_runtime = rt_b->rt_runtime;
352                 rt_rq->rt_time = 0;
353                 rt_rq->rt_throttled = 0;
354                 spin_unlock(&rt_rq->rt_runtime_lock);
355                 spin_unlock(&rt_b->rt_runtime_lock);
356         }
357 }
358
359 static void enable_runtime(struct rq *rq)
360 {
361         unsigned long flags;
362
363         spin_lock_irqsave(&rq->lock, flags);
364         __enable_runtime(rq);
365         spin_unlock_irqrestore(&rq->lock, flags);
366 }
367
368 static int balance_runtime(struct rt_rq *rt_rq)
369 {
370         int more = 0;
371
372         if (rt_rq->rt_time > rt_rq->rt_runtime) {
373                 spin_unlock(&rt_rq->rt_runtime_lock);
374                 more = do_balance_runtime(rt_rq);
375                 spin_lock(&rt_rq->rt_runtime_lock);
376         }
377
378         return more;
379 }
380 #else /* !CONFIG_SMP */
381 static inline int balance_runtime(struct rt_rq *rt_rq)
382 {
383         return 0;
384 }
385 #endif /* CONFIG_SMP */
386
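/*
 * Periodic bandwidth refresh, driven by the rt_bandwidth timer: pay off
 * the accumulated rt_time of every rt_rq in the period mask, unthrottle
 * queues that have runtime again and re-enqueue them. Returns 1 when
 * all queues are idle, which lets the caller stop the timer.
 */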
387 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
388 {
389         int i, idle = 1;
390         cpumask_t span;
391
392         if (rt_b->rt_runtime == RUNTIME_INF)
393                 return 1;
394
395         span = sched_rt_period_mask();
396         for_each_cpu_mask(i, span) {
397                 int enqueue = 0;
398                 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
399                 struct rq *rq = rq_of_rt_rq(rt_rq);
400
401                 spin_lock(&rq->lock);
402                 if (rt_rq->rt_time) {
403                         u64 runtime;
404
405                         spin_lock(&rt_rq->rt_runtime_lock);
406                         if (rt_rq->rt_throttled)
407                                 balance_runtime(rt_rq);
408                         runtime = rt_rq->rt_runtime;
409                         rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
410                         if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
411                                 rt_rq->rt_throttled = 0;
412                                 enqueue = 1;
413                         }
414                         if (rt_rq->rt_time || rt_rq->rt_nr_running)
415                                 idle = 0;
416                         spin_unlock(&rt_rq->rt_runtime_lock);
417                 } else if (rt_rq->rt_nr_running)
418                         idle = 0;
419
420                 if (enqueue)
421                         sched_rt_rq_enqueue(rt_rq);
422                 spin_unlock(&rq->lock);
423         }
424
425         return idle;
426 }
427
428 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
429 {
430 #ifdef CONFIG_RT_GROUP_SCHED
431         struct rt_rq *rt_rq = group_rt_rq(rt_se);
432
433         if (rt_rq)
434                 return rt_rq->highest_prio;
435 #endif
436
437         return rt_task_of(rt_se)->prio;
438 }
439
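/*
 * Check whether this rt_rq has exhausted its runtime for the current
 * period. We first try to borrow runtime from the siblings; if there
 * still isn't enough, the queue is throttled and dequeued.
 */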
440 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
441 {
442         u64 runtime = sched_rt_runtime(rt_rq);
443
444         if (rt_rq->rt_throttled)
445                 return rt_rq_throttled(rt_rq);
446
447         if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
448                 return 0;
449
450         balance_runtime(rt_rq);
451         runtime = sched_rt_runtime(rt_rq);
452         if (runtime == RUNTIME_INF)
453                 return 0;
454
455         if (rt_rq->rt_time > runtime) {
456                 rt_rq->rt_throttled = 1;
457                 if (rt_rq_throttled(rt_rq)) {
458                         sched_rt_rq_dequeue(rt_rq);
459                         return 1;
460                 }
461         }
462
463         return 0;
464 }
465
466 /*
467  * Update the current task's runtime statistics. Skip current tasks that
468  * are not in our scheduling class.
469  */
470 static void update_curr_rt(struct rq *rq)
471 {
472         struct task_struct *curr = rq->curr;
473         struct sched_rt_entity *rt_se = &curr->rt;
474         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
475         u64 delta_exec;
476
477         if (!task_has_rt_policy(curr))
478                 return;
479
480         delta_exec = rq->clock - curr->se.exec_start;
481         if (unlikely((s64)delta_exec < 0))
482                 delta_exec = 0;
483
484         schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
485
486         curr->se.sum_exec_runtime += delta_exec;
487         curr->se.exec_start = rq->clock;
488         cpuacct_charge(curr, delta_exec);
489
490         for_each_sched_rt_entity(rt_se) {
491                 rt_rq = rt_rq_of_se(rt_se);
492
493                 spin_lock(&rt_rq->rt_runtime_lock);
494                 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
495                         rt_rq->rt_time += delta_exec;
496                         if (sched_rt_runtime_exceeded(rt_rq))
497                                 resched_task(curr);
498                 }
499                 spin_unlock(&rt_rq->rt_runtime_lock);
500         }
501 }
502
503 static inline
504 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
505 {
506         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
507         rt_rq->rt_nr_running++;
508 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
509         if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
510 #ifdef CONFIG_SMP
511                 struct rq *rq = rq_of_rt_rq(rt_rq);
512 #endif
513
514                 rt_rq->highest_prio = rt_se_prio(rt_se);
515 #ifdef CONFIG_SMP
516                 if (rq->online)
517                         cpupri_set(&rq->rd->cpupri, rq->cpu,
518                                    rt_se_prio(rt_se));
519 #endif
520         }
521 #endif
522 #ifdef CONFIG_SMP
523         if (rt_se->nr_cpus_allowed > 1) {
524                 struct rq *rq = rq_of_rt_rq(rt_rq);
525
526                 rq->rt.rt_nr_migratory++;
527         }
528
529         update_rt_migration(rq_of_rt_rq(rt_rq));
530 #endif
531 #ifdef CONFIG_RT_GROUP_SCHED
532         if (rt_se_boosted(rt_se))
533                 rt_rq->rt_nr_boosted++;
534
535         if (rt_rq->tg)
536                 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
537 #else
538         start_rt_bandwidth(&def_rt_bandwidth);
539 #endif
540 }
541
542 static inline
543 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
544 {
545 #ifdef CONFIG_SMP
546         int highest_prio = rt_rq->highest_prio;
547 #endif
548
549         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
550         WARN_ON(!rt_rq->rt_nr_running);
551         rt_rq->rt_nr_running--;
552 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
553         if (rt_rq->rt_nr_running) {
554                 struct rt_prio_array *array;
555
556                 WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
557                 if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
558                         /* recalculate */
559                         array = &rt_rq->active;
560                         rt_rq->highest_prio =
561                                 sched_find_first_bit(array->bitmap);
562                 } /* otherwise leave rt_rq->highest_prio alone */
563         } else
564                 rt_rq->highest_prio = MAX_RT_PRIO;
565 #endif
566 #ifdef CONFIG_SMP
567         if (rt_se->nr_cpus_allowed > 1) {
568                 struct rq *rq = rq_of_rt_rq(rt_rq);
569                 rq->rt.rt_nr_migratory--;
570         }
571
572         if (rt_rq->highest_prio != highest_prio) {
573                 struct rq *rq = rq_of_rt_rq(rt_rq);
574
575                 if (rq->online)
576                         cpupri_set(&rq->rd->cpupri, rq->cpu,
577                                    rt_rq->highest_prio);
578         }
579
580         update_rt_migration(rq_of_rt_rq(rt_rq));
581 #endif /* CONFIG_SMP */
582 #ifdef CONFIG_RT_GROUP_SCHED
583         if (rt_se_boosted(rt_se))
584                 rt_rq->rt_nr_boosted--;
585
586         WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
587 #endif
588 }
589
590 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
591 {
592         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
593         struct rt_prio_array *array = &rt_rq->active;
594         struct rt_rq *group_rq = group_rt_rq(rt_se);
595         struct list_head *queue = array->queue + rt_se_prio(rt_se);
596
597         /*
598          * Don't enqueue the group if it's throttled, or when it is empty.
599          * The latter is a consequence of the former when a child group
600          * gets throttled and the current group doesn't have any other
601          * active members.
602          */
603         if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
604                 return;
605
606         list_add_tail(&rt_se->run_list, queue);
607         __set_bit(rt_se_prio(rt_se), array->bitmap);
608
609         inc_rt_tasks(rt_se, rt_rq);
610 }
611
612 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
613 {
614         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
615         struct rt_prio_array *array = &rt_rq->active;
616
617         list_del_init(&rt_se->run_list);
618         if (list_empty(array->queue + rt_se_prio(rt_se)))
619                 __clear_bit(rt_se_prio(rt_se), array->bitmap);
620
621         dec_rt_tasks(rt_se, rt_rq);
622 }
623
624 /*
625  * Because the prio of an upper entry depends on the lower
626  * entries, we must remove entries top-down.
627  */
628 static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
629 {
630         struct sched_rt_entity *back = NULL;
631
632         for_each_sched_rt_entity(rt_se) {
633                 rt_se->back = back;
634                 back = rt_se;
635         }
636
637         for (rt_se = back; rt_se; rt_se = rt_se->back) {
638                 if (on_rt_rq(rt_se))
639                         __dequeue_rt_entity(rt_se);
640         }
641 }
642
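/*
 * (Re)insert an entity and all of its parents: first pop the whole
 * stack top-down (see dequeue_rt_stack()) and then re-add each level,
 * so the per-level priority accounting is rebuilt consistently.
 */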
643 static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
644 {
645         dequeue_rt_stack(rt_se);
646         for_each_sched_rt_entity(rt_se)
647                 __enqueue_rt_entity(rt_se);
648 }
649
650 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
651 {
652         dequeue_rt_stack(rt_se);
653
654         for_each_sched_rt_entity(rt_se) {
655                 struct rt_rq *rt_rq = group_rt_rq(rt_se);
656
657                 if (rt_rq && rt_rq->rt_nr_running)
658                         __enqueue_rt_entity(rt_se);
659         }
660 }
661
662 /*
663  * Adding/removing a task to/from a priority array:
664  */
665 static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
666 {
667         struct sched_rt_entity *rt_se = &p->rt;
668
669         if (wakeup)
670                 rt_se->timeout = 0;
671
672         enqueue_rt_entity(rt_se);
673
674         inc_cpu_load(rq, p->se.load.weight);
675 }
676
677 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
678 {
679         struct sched_rt_entity *rt_se = &p->rt;
680
681         update_curr_rt(rq);
682         dequeue_rt_entity(rt_se);
683
684         dec_cpu_load(rq, p->se.load.weight);
685 }
686
687 /*
688  * Put the task at the end of the run list without the overhead of a dequeue
689  * followed by an enqueue.
690  */
691 static void
692 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
693 {
694         if (on_rt_rq(rt_se)) {
695                 struct rt_prio_array *array = &rt_rq->active;
696                 struct list_head *queue = array->queue + rt_se_prio(rt_se);
697
698                 if (head)
699                         list_move(&rt_se->run_list, queue);
700                 else
701                         list_move_tail(&rt_se->run_list, queue);
702         }
703 }
704
705 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
706 {
707         struct sched_rt_entity *rt_se = &p->rt;
708         struct rt_rq *rt_rq;
709
710         for_each_sched_rt_entity(rt_se) {
711                 rt_rq = rt_rq_of_se(rt_se);
712                 requeue_rt_entity(rt_rq, rt_se, head);
713         }
714 }
715
716 static void yield_task_rt(struct rq *rq)
717 {
718         requeue_task_rt(rq, rq->curr, 0);
719 }
720
721 #ifdef CONFIG_SMP
722 static int find_lowest_rq(struct task_struct *task);
723
724 static int select_task_rq_rt(struct task_struct *p, int sync)
725 {
726         struct rq *rq = task_rq(p);
727
728         /*
729          * If the current task is an RT task, then
730          * try to see if we can wake this RT task up on another
731          * runqueue. Otherwise simply start this RT task
732          * on its current runqueue.
733          *
734          * We want to avoid overloading runqueues, even when
735          * the waking RT task is of higher priority than the current one.
736          * RT tasks behave differently than other tasks: if
737          * one gets preempted, we try to push it off to another queue.
738          * So trying to keep a preempting RT task on the same
739          * cache-hot CPU will only force the running RT task off to
740          * a cold CPU, wasting all of the lower-priority task's cache
741          * in the hope of saving some cache for an RT task
742          * that is just being woken and will probably have a
743          * cold cache anyway.
744          */
745         if (unlikely(rt_task(rq->curr)) &&
746             (p->rt.nr_cpus_allowed > 1)) {
747                 int cpu = find_lowest_rq(p);
748
749                 return (cpu == -1) ? task_cpu(p) : cpu;
750         }
751
752         /*
753          * Otherwise, just let it ride on the affined RQ and the
754          * post-schedule router will push the preempted task away
755          */
756         return task_cpu(p);
757 }
758
759 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
760 {
761         cpumask_t mask;
762
763         if (rq->curr->rt.nr_cpus_allowed == 1)
764                 return;
765
766         if (p->rt.nr_cpus_allowed != 1
767             && cpupri_find(&rq->rd->cpupri, p, &mask))
768                 return;
769
770         if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
771                 return;
772
773         /*
774          * There appear to be other cpus that can accept
775          * current and none to run 'p', so let's reschedule
776          * to try and push current away:
777          */
778         requeue_task_rt(rq, p, 1);
779         resched_task(rq->curr);
780 }
781
782 #endif /* CONFIG_SMP */
783
784 /*
785  * Preempt the current task with a newly woken task if needed:
786  */
787 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
788 {
789         if (p->prio < rq->curr->prio) {
790                 resched_task(rq->curr);
791                 return;
792         }
793
794 #ifdef CONFIG_SMP
795         /*
796          * If:
797          *
798          * - the newly woken task is of equal priority to the current task
799          * - the newly woken task is non-migratable while current is migratable
800          * - current will be preempted on the next reschedule
801          *
802          * we should check to see if current can readily move to a different
803          * cpu.  If so, we will reschedule to allow the push logic to try
804          * to move current somewhere else, making room for our non-migratable
805          * task.
806          */
807         if (p->prio == rq->curr->prio && !need_resched())
808                 check_preempt_equal_prio(rq, p);
809 #endif
810 }
811
812 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
813                                                    struct rt_rq *rt_rq)
814 {
815         struct rt_prio_array *array = &rt_rq->active;
816         struct sched_rt_entity *next = NULL;
817         struct list_head *queue;
818         int idx;
819
820         idx = sched_find_first_bit(array->bitmap);
821         BUG_ON(idx >= MAX_RT_PRIO);
822
823         queue = array->queue + idx;
824         next = list_entry(queue->next, struct sched_rt_entity, run_list);
825
826         return next;
827 }
828
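/*
 * Pick the next task to run: walk down the group hierarchy, taking the
 * highest-priority entity at every level, until a real task is reached.
 */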
829 static struct task_struct *pick_next_task_rt(struct rq *rq)
830 {
831         struct sched_rt_entity *rt_se;
832         struct task_struct *p;
833         struct rt_rq *rt_rq;
834
835         rt_rq = &rq->rt;
836
837         if (unlikely(!rt_rq->rt_nr_running))
838                 return NULL;
839
840         if (rt_rq_throttled(rt_rq))
841                 return NULL;
842
843         do {
844                 rt_se = pick_next_rt_entity(rq, rt_rq);
845                 BUG_ON(!rt_se);
846                 rt_rq = group_rt_rq(rt_se);
847         } while (rt_rq);
848
849         p = rt_task_of(rt_se);
850         p->se.exec_start = rq->clock;
851         return p;
852 }
853
854 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
855 {
856         update_curr_rt(rq);
857         p->se.exec_start = 0;
858 }
859
860 #ifdef CONFIG_SMP
861
862 /* Only try algorithms three times */
863 #define RT_MAX_TRIES 3
864
865 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
866 static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
867
868 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
869
870 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
871 {
872         if (!task_running(rq, p) &&
873             (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
874             (p->rt.nr_cpus_allowed > 1))
875                 return 1;
876         return 0;
877 }
878
879 /* Return the second-highest-priority RT task, NULL otherwise */
880 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
881 {
882         struct task_struct *next = NULL;
883         struct sched_rt_entity *rt_se;
884         struct rt_prio_array *array;
885         struct rt_rq *rt_rq;
886         int idx;
887
888         for_each_leaf_rt_rq(rt_rq, rq) {
889                 array = &rt_rq->active;
890                 idx = sched_find_first_bit(array->bitmap);
891  next_idx:
892                 if (idx >= MAX_RT_PRIO)
893                         continue;
894                 if (next && next->prio < idx)
895                         continue;
896                 list_for_each_entry(rt_se, array->queue + idx, run_list) {
897                         struct task_struct *p = rt_task_of(rt_se);
898                         if (pick_rt_task(rq, p, cpu)) {
899                                 next = p;
900                                 break;
901                         }
902                 }
903                 if (!next) {
904                         idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
905                         goto next_idx;
906                 }
907         }
908
909         return next;
910 }
911
912 static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
913
914 static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
915 {
916         int first;
917
918         /* "this_cpu" is cheaper to preempt than a remote processor */
919         if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
920                 return this_cpu;
921
922         first = first_cpu(*mask);
923         if (first != NR_CPUS)
924                 return first;
925
926         return -1;
927 }
928
929 static int find_lowest_rq(struct task_struct *task)
930 {
931         struct sched_domain *sd;
932         cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
933         int this_cpu = smp_processor_id();
934         int cpu      = task_cpu(task);
935
936         if (task->rt.nr_cpus_allowed == 1)
937                 return -1; /* No other targets possible */
938
939         if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
940                 return -1; /* No targets found */
941
942         /*
943          * Only consider CPUs that are usable for migration.
944          * I guess we might want to change cpupri_find() to ignore those
945          * in the first place.
946          */
947         cpus_and(*lowest_mask, *lowest_mask, cpu_active_map);
948
949         /*
950          * At this point we have built a mask of cpus representing the
951          * lowest priority tasks in the system.  Now we want to elect
952          * the best one based on our affinity and topology.
953          *
954          * We prioritize the last cpu that the task executed on since
955          * it is most likely cache-hot in that location.
956          */
957         if (cpu_isset(cpu, *lowest_mask))
958                 return cpu;
959
960         /*
961          * Otherwise, we consult the sched_domains span maps to figure
962          * out which cpu is logically closest to our hot cache data.
963          */
964         if (this_cpu == cpu)
965                 this_cpu = -1; /* Skip this_cpu opt if the same */
966
967         for_each_domain(cpu, sd) {
968                 if (sd->flags & SD_WAKE_AFFINE) {
969                         cpumask_t domain_mask;
970                         int       best_cpu;
971
972                         cpus_and(domain_mask, sd->span, *lowest_mask);
973
974                         best_cpu = pick_optimal_cpu(this_cpu,
975                                                     &domain_mask);
976                         if (best_cpu != -1)
977                                 return best_cpu;
978                 }
979         }
980
981         /*
982          * And finally, if there were no matches within the domains
983          * just give the caller *something* to work with from the compatible
984          * locations.
985          */
986         return pick_optimal_cpu(this_cpu, lowest_mask);
987 }
988
989 /* Will lock the rq it finds */
990 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
991 {
992         struct rq *lowest_rq = NULL;
993         int tries;
994         int cpu;
995
996         for (tries = 0; tries < RT_MAX_TRIES; tries++) {
997                 cpu = find_lowest_rq(task);
998
999                 if ((cpu == -1) || (cpu == rq->cpu))
1000                         break;
1001
1002                 lowest_rq = cpu_rq(cpu);
1003
1004                 /* if the prio of this runqueue changed, try again */
1005                 if (double_lock_balance(rq, lowest_rq)) {
1006                         /*
1007                          * We had to unlock the run queue. In
1008                          * the meantime, the task could have
1009                          * migrated already or had its affinity changed.
1010                          * Also make sure that it wasn't scheduled on its rq.
1011                          */
1012                         if (unlikely(task_rq(task) != rq ||
1013                                      !cpu_isset(lowest_rq->cpu,
1014                                                 task->cpus_allowed) ||
1015                                      task_running(rq, task) ||
1016                                      !task->se.on_rq)) {
1017
1018                                 spin_unlock(&lowest_rq->lock);
1019                                 lowest_rq = NULL;
1020                                 break;
1021                         }
1022                 }
1023
1024                 /* If this rq is still suitable use it. */
1025                 if (lowest_rq->rt.highest_prio > task->prio)
1026                         break;
1027
1028                 /* try again */
1029                 double_unlock_balance(rq, lowest_rq);
1030                 lowest_rq = NULL;
1031         }
1032
1033         return lowest_rq;
1034 }
1035
1036 /*
1037  * If the current CPU has more than one RT task, see if the
1038  * non-running task can migrate over to a CPU that is running a task
1039  * of lesser priority.
1040  */
1041 static int push_rt_task(struct rq *rq)
1042 {
1043         struct task_struct *next_task;
1044         struct rq *lowest_rq;
1045         int ret = 0;
1046         int paranoid = RT_MAX_TRIES;
1047
1048         if (!rq->rt.overloaded)
1049                 return 0;
1050
1051         next_task = pick_next_highest_task_rt(rq, -1);
1052         if (!next_task)
1053                 return 0;
1054
1055  retry:
1056         if (unlikely(next_task == rq->curr)) {
1057                 WARN_ON(1);
1058                 return 0;
1059         }
1060
1061         /*
1062          * It's possible that next_task slipped in with a
1063          * higher priority than current. If that's the case,
1064          * just reschedule current.
1065          */
1066         if (unlikely(next_task->prio < rq->curr->prio)) {
1067                 resched_task(rq->curr);
1068                 return 0;
1069         }
1070
1071         /* We might release rq lock */
1072         get_task_struct(next_task);
1073
1074         /* find_lock_lowest_rq locks the rq if found */
1075         lowest_rq = find_lock_lowest_rq(next_task, rq);
1076         if (!lowest_rq) {
1077                 struct task_struct *task;
1078                 /*
1079                  * find_lock_lowest_rq releases rq->lock
1080                  * so it is possible that next_task has changed.
1081                  * If it has, then try again.
1082                  */
1083                 task = pick_next_highest_task_rt(rq, -1);
1084                 if (unlikely(task != next_task) && task && paranoid--) {
1085                         put_task_struct(next_task);
1086                         next_task = task;
1087                         goto retry;
1088                 }
1089                 goto out;
1090         }
1091
1092         deactivate_task(rq, next_task, 0);
1093         set_task_cpu(next_task, lowest_rq->cpu);
1094         activate_task(lowest_rq, next_task, 0);
1095
1096         resched_task(lowest_rq->curr);
1097
1098         double_unlock_balance(rq, lowest_rq);
1099
1100         ret = 1;
1101 out:
1102         put_task_struct(next_task);
1103
1104         return ret;
1105 }
1106
1107 /*
1108  * TODO: Currently we just use the second highest prio task on
1109  *       the queue, and stop when it can't migrate (or there's
1110  *       no more RT tasks).  There may be a case where a lower
1111  *       priority RT task has a different affinity than the
1112  *       higher RT task. In this case the lower RT task could
1113  *       possibly be able to migrate whereas the higher priority
1114  *       RT task could not.  We currently ignore this issue.
1115  *       Enhancements are welcome!
1116  */
1117 static void push_rt_tasks(struct rq *rq)
1118 {
1119         /* push_rt_task will return true if it moved an RT */
1120         while (push_rt_task(rq))
1121                 ;
1122 }
1123
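/*
 * This rq is about to run something of lower priority: scan the other
 * overloaded runqueues in our root domain and pull over any RT task
 * that would preempt what we are about to schedule here.
 */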
1124 static int pull_rt_task(struct rq *this_rq)
1125 {
1126         int this_cpu = this_rq->cpu, ret = 0, cpu;
1127         struct task_struct *p, *next;
1128         struct rq *src_rq;
1129
1130         if (likely(!rt_overloaded(this_rq)))
1131                 return 0;
1132
1133         next = pick_next_task_rt(this_rq);
1134
1135         for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
1136                 if (this_cpu == cpu)
1137                         continue;
1138
1139                 src_rq = cpu_rq(cpu);
1140                 /*
1141                  * We can potentially drop this_rq's lock in
1142                  * double_lock_balance, and another CPU could
1143                  * steal our next task - hence we must cause
1144                  * the caller to recalculate the next task
1145                  * in that case:
1146                  */
1147                 if (double_lock_balance(this_rq, src_rq)) {
1148                         struct task_struct *old_next = next;
1149
1150                         next = pick_next_task_rt(this_rq);
1151                         if (next != old_next)
1152                                 ret = 1;
1153                 }
1154
1155                 /*
1156                  * Are there still pullable RT tasks?
1157                  */
1158                 if (src_rq->rt.rt_nr_running <= 1)
1159                         goto skip;
1160
1161                 p = pick_next_highest_task_rt(src_rq, this_cpu);
1162
1163                 /*
1164                  * Do we have an RT task that preempts
1165                  * the to-be-scheduled task?
1166                  */
1167                 if (p && (!next || (p->prio < next->prio))) {
1168                         WARN_ON(p == src_rq->curr);
1169                         WARN_ON(!p->se.on_rq);
1170
1171                         /*
1172                          * There's a chance that p is higher in priority
1173                          * than what's currently running on its cpu.
1174                          * This just means that p is waking up and hasn't
1175                          * had a chance to schedule yet. We only pull
1176                          * p if it is of lower (or equal) priority than
1177                          * the task currently running on its runqueue, and
1178                          * the task we are about to run here is also of
1179                          * lower (or equal) priority than that task.
1180                          */
1181                         if (p->prio < src_rq->curr->prio ||
1182                             (next && next->prio < src_rq->curr->prio))
1183                                 goto skip;
1184
1185                         ret = 1;
1186
1187                         deactivate_task(src_rq, p, 0);
1188                         set_task_cpu(p, this_cpu);
1189                         activate_task(this_rq, p, 0);
1190                         /*
1191                          * We continue with the search, just in
1192                          * case there's an even higher prio task
1193                          * in another runqueue. (low likelihood
1194                          * but possible)
1195                          *
1196                          * Update next so that we won't pick a task
1197                          * on another cpu with a priority lower (or equal)
1198                          * than the one we just picked.
1199                          */
1200                         next = p;
1201
1202                 }
1203  skip:
1204                 double_unlock_balance(this_rq, src_rq);
1205         }
1206
1207         return ret;
1208 }
1209
1210 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1211 {
1212         /* Try to pull RT tasks here if we lower this rq's prio */
1213         if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
1214                 pull_rt_task(rq);
1215 }
1216
1217 static void post_schedule_rt(struct rq *rq)
1218 {
1219         /*
1220          * If we have more than one rt_task queued, then
1221          * see if we can push the other rt_tasks off to other CPUS.
1222          * Note we may release the rq lock, and since
1223          * the lock was owned by prev, we need to release it
1224          * first via finish_lock_switch and then reacquire it here.
1225          */
1226         if (unlikely(rq->rt.overloaded)) {
1227                 spin_lock_irq(&rq->lock);
1228                 push_rt_tasks(rq);
1229                 spin_unlock_irq(&rq->lock);
1230         }
1231 }
1232
1233 /*
1234  * If we are not running and we are not going to reschedule soon, we should
1235  * try to push tasks away now
1236  */
1237 static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
1238 {
1239         if (!task_running(rq, p) &&
1240             !test_tsk_need_resched(rq->curr) &&
1241             rq->rt.overloaded)
1242                 push_rt_tasks(rq);
1243 }
1244
1245 static unsigned long
1246 load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1247                 unsigned long max_load_move,
1248                 struct sched_domain *sd, enum cpu_idle_type idle,
1249                 int *all_pinned, int *this_best_prio)
1250 {
1251         /* don't touch RT tasks */
1252         return 0;
1253 }
1254
1255 static int
1256 move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1257                  struct sched_domain *sd, enum cpu_idle_type idle)
1258 {
1259         /* don't touch RT tasks */
1260         return 0;
1261 }
1262
1263 static void set_cpus_allowed_rt(struct task_struct *p,
1264                                 const cpumask_t *new_mask)
1265 {
1266         int weight = cpus_weight(*new_mask);
1267
1268         BUG_ON(!rt_task(p));
1269
1270         /*
1271          * Update the migration status of the RQ if we have an RT task
1272          * which is running AND changing its weight value.
1273          */
1274         if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
1275                 struct rq *rq = task_rq(p);
1276
1277                 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
1278                         rq->rt.rt_nr_migratory++;
1279                 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
1280                         BUG_ON(!rq->rt.rt_nr_migratory);
1281                         rq->rt.rt_nr_migratory--;
1282                 }
1283
1284                 update_rt_migration(rq);
1285         }
1286
1287         p->cpus_allowed    = *new_mask;
1288         p->rt.nr_cpus_allowed = weight;
1289 }
1290
1291 /* Assumes rq->lock is held */
1292 static void rq_online_rt(struct rq *rq)
1293 {
1294         if (rq->rt.overloaded)
1295                 rt_set_overload(rq);
1296
1297         __enable_runtime(rq);
1298
1299         cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
1300 }
1301
1302 /* Assumes rq->lock is held */
1303 static void rq_offline_rt(struct rq *rq)
1304 {
1305         if (rq->rt.overloaded)
1306                 rt_clear_overload(rq);
1307
1308         __disable_runtime(rq);
1309
1310         cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1311 }
1312
1313 /*
1314  * When switching away from the rt queue, we bring ourselves to a position
1315  * where we might want to pull RT tasks from other runqueues.
1316  */
1317 static void switched_from_rt(struct rq *rq, struct task_struct *p,
1318                            int running)
1319 {
1320         /*
1321          * If there are other RT tasks then we will reschedule
1322          * and the scheduling of the other RT tasks will handle
1323          * the balancing. But if we are the last RT task
1324          * we may need to handle the pulling of RT tasks
1325          * now.
1326          */
1327         if (!rq->rt.rt_nr_running)
1328                 pull_rt_task(rq);
1329 }
1330 #endif /* CONFIG_SMP */
1331
1332 /*
1333  * When switching a task to RT, we may overload the runqueue
1334  * with RT tasks. In this case we try to push them off to
1335  * other runqueues.
1336  */
1337 static void switched_to_rt(struct rq *rq, struct task_struct *p,
1338                            int running)
1339 {
1340         int check_resched = 1;
1341
1342         /*
1343          * If we are already running, then there's nothing
1344          * that needs to be done. But if we are not running
1345          * we may need to preempt the current running task.
1346          * If that current running task is also an RT task
1347          * then see if we can move to another run queue.
1348          */
1349         if (!running) {
1350 #ifdef CONFIG_SMP
1351                 if (rq->rt.overloaded && push_rt_task(rq) &&
1352                     /* Don't resched if we changed runqueues */
1353                     rq != task_rq(p))
1354                         check_resched = 0;
1355 #endif /* CONFIG_SMP */
1356                 if (check_resched && p->prio < rq->curr->prio)
1357                         resched_task(rq->curr);
1358         }
1359 }
1360
1361 /*
1362  * Priority of the task has changed. This may cause
1363  * us to initiate a push or pull.
1364  */
1365 static void prio_changed_rt(struct rq *rq, struct task_struct *p,
1366                             int oldprio, int running)
1367 {
1368         if (running) {
1369 #ifdef CONFIG_SMP
1370                 /*
1371                  * If our priority decreases while running, we
1372                  * may need to pull tasks to this runqueue.
1373                  */
1374                 if (oldprio < p->prio)
1375                         pull_rt_task(rq);
1376                 /*
1377                  * If there's a higher priority task waiting to run
1378                  * then reschedule. Note, the above pull_rt_task
1379                  * can release the rq lock and p could migrate.
1380                  * Only reschedule if p is still on the same runqueue.
1381                  */
1382                 if (p->prio > rq->rt.highest_prio && rq->curr == p)
1383                         resched_task(p);
1384 #else
1385                 /* For UP simply resched on drop of prio */
1386                 if (oldprio < p->prio)
1387                         resched_task(p);
1388 #endif /* CONFIG_SMP */
1389         } else {
1390                 /*
1391                  * This task is not running, but if it is
1392          * higher in priority than the currently running task
1393                  * then reschedule.
1394                  */
1395                 if (p->prio < rq->curr->prio)
1396                         resched_task(rq->curr);
1397         }
1398 }
1399
1400 static void watchdog(struct rq *rq, struct task_struct *p)
1401 {
1402         unsigned long soft, hard;
1403
1404         if (!p->signal)
1405                 return;
1406
1407         soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
1408         hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
1409
1410         if (soft != RLIM_INFINITY) {
1411                 unsigned long next;
1412
1413                 p->rt.timeout++;
1414                 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1415                 if (p->rt.timeout > next)
1416                         p->it_sched_expires = p->se.sum_exec_runtime;
1417         }
1418 }
1419
1420 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1421 {
1422         update_curr_rt(rq);
1423
1424         watchdog(rq, p);
1425
1426         /*
1427          * RR tasks need a special form of timeslice management.
1428          * FIFO tasks have no timeslices.
1429          */
1430         if (p->policy != SCHED_RR)
1431                 return;
1432
1433         if (--p->rt.time_slice)
1434                 return;
1435
1436         p->rt.time_slice = DEF_TIMESLICE;
1437
1438         /*
1439          * Requeue to the end of the queue if we are not the only element
1440          * on the queue:
1441          */
1442         if (p->rt.run_list.prev != p->rt.run_list.next) {
1443                 requeue_task_rt(rq, p, 0);
1444                 set_tsk_need_resched(p);
1445         }
1446 }
1447
1448 static void set_curr_task_rt(struct rq *rq)
1449 {
1450         struct task_struct *p = rq->curr;
1451
1452         p->se.exec_start = rq->clock;
1453 }
1454
1455 static const struct sched_class rt_sched_class = {
1456         .next                   = &fair_sched_class,
1457         .enqueue_task           = enqueue_task_rt,
1458         .dequeue_task           = dequeue_task_rt,
1459         .yield_task             = yield_task_rt,
1460 #ifdef CONFIG_SMP
1461         .select_task_rq         = select_task_rq_rt,
1462 #endif /* CONFIG_SMP */
1463
1464         .check_preempt_curr     = check_preempt_curr_rt,
1465
1466         .pick_next_task         = pick_next_task_rt,
1467         .put_prev_task          = put_prev_task_rt,
1468
1469 #ifdef CONFIG_SMP
1470         .load_balance           = load_balance_rt,
1471         .move_one_task          = move_one_task_rt,
1472         .set_cpus_allowed       = set_cpus_allowed_rt,
1473         .rq_online              = rq_online_rt,
1474         .rq_offline             = rq_offline_rt,
1475         .pre_schedule           = pre_schedule_rt,
1476         .post_schedule          = post_schedule_rt,
1477         .task_wake_up           = task_wake_up_rt,
1478         .switched_from          = switched_from_rt,
1479 #endif
1480
1481         .set_curr_task          = set_curr_task_rt,
1482         .task_tick              = task_tick_rt,
1483
1484         .prio_changed           = prio_changed_rt,
1485         .switched_to            = switched_to_rt,
1486 };
1487
1488 #ifdef CONFIG_SCHED_DEBUG
1489 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
1490
1491 static void print_rt_stats(struct seq_file *m, int cpu)
1492 {
1493         struct rt_rq *rt_rq;
1494
1495         rcu_read_lock();
1496         for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
1497                 print_rt_rq(m, cpu, rt_rq);
1498         rcu_read_unlock();
1499 }
1500 #endif /* CONFIG_SCHED_DEBUG */