/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void update_curr_rt(struct rq *rq, u64 now)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	if (!task_has_rt_policy(curr))
		return;

	delta_exec = now - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;
	if (unlikely(delta_exec > curr->se.exec_max))
		curr->se.exec_max = delta_exec;

	curr->se.sum_exec_runtime += delta_exec;
	curr->se.exec_start = now;
}
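
/*
 * Enqueue a task on the RT priority array: append it to the tail of the
 * list for its priority level and mark that level in the bitmap.
 */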
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, u64 now)
{
	struct rt_prio_array *array = &rq->rt.active;

	list_add_tail(&p->run_list, array->queue + p->prio);
	__set_bit(p->prio, array->bitmap);
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void
dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep, u64 now)
{
	struct rt_prio_array *array = &rq->rt.active;

	update_curr_rt(rq, now);

	list_del(&p->run_list);
	if (list_empty(array->queue + p->prio))
		__clear_bit(p->prio, array->bitmap);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
	struct rt_prio_array *array = &rq->rt.active;

	list_move_tail(&p->run_list, array->queue + p->prio);
}
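
/*
 * A yielding task simply moves to the tail of the run list for its
 * priority, letting other tasks queued at the same priority run first.
 */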
static void yield_task_rt(struct rq *rq, struct task_struct *p)
{
	requeue_task_rt(rq, p);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
	if (p->prio < rq->curr->prio)
		resched_task(rq->curr);
}
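
/*
 * Pick the next task to run: find the highest set bit in the priority
 * bitmap and take the first task queued at that priority level.
 */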
static struct task_struct *pick_next_task_rt(struct rq *rq, u64 now)
{
	struct rt_prio_array *array = &rq->rt.active;
	struct task_struct *next;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	if (idx >= MAX_RT_PRIO)
		return NULL;

	queue = array->queue + idx;
	next = list_entry(queue->next, struct task_struct, run_list);

	next->se.exec_start = now;

	return next;
}
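
/*
 * The task is being switched out: fold its runtime into the statistics
 * and clear its execution start timestamp.
 */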
static void put_prev_task_rt(struct rq *rq, struct task_struct *p, u64 now)
{
	update_curr_rt(rq, now);
	p->se.exec_start = 0;
}

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static struct task_struct *load_balance_start_rt(void *arg)
{
	struct rq *rq = arg;
	struct rt_prio_array *array = &rq->rt.active;
	struct list_head *head, *curr;
	struct task_struct *p;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	if (idx >= MAX_RT_PRIO)
		return NULL;

	head = array->queue + idx;
	curr = head->prev;
	p = list_entry(curr, struct task_struct, run_list);
	curr = curr->prev;

	rq->rt.rt_load_balance_idx = idx;
	rq->rt.rt_load_balance_head = head;
	rq->rt.rt_load_balance_curr = curr;
	return p;
}
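
/*
 * Return the task at the saved cursor and pre-advance the cursor, moving
 * on to the next populated priority queue once a list is exhausted.
 */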
static struct task_struct *load_balance_next_rt(void *arg)
{
	struct rq *rq = arg;
	struct rt_prio_array *array = &rq->rt.active;
	struct list_head *head, *curr;
	struct task_struct *p;
	int idx;

	idx = rq->rt.rt_load_balance_idx;
	head = rq->rt.rt_load_balance_head;
	curr = rq->rt.rt_load_balance_curr;

	/*
	 * If we arrived back to the head again then
	 * iterate to the next queue (if any):
	 */
	if (unlikely(head == curr)) {
		int next_idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);

		if (next_idx >= MAX_RT_PRIO)
			return NULL;
		idx = next_idx;
		head = array->queue + idx;
		curr = head->prev;

		rq->rt.rt_load_balance_idx = idx;
		rq->rt.rt_load_balance_head = head;
	}

	p = list_entry(curr, struct task_struct, run_list);
	curr = curr->prev;
	rq->rt.rt_load_balance_curr = curr;
	return p;
}
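
/*
 * Move RT tasks from the busiest runqueue towards this CPU, walking the
 * busiest queue's priority array via the iterators above.
 */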
static int
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
			unsigned long max_nr_move, unsigned long max_load_move,
			struct sched_domain *sd, enum cpu_idle_type idle,
			int *all_pinned, unsigned long *load_moved)
{
	int this_best_prio, best_prio, best_prio_seen = 0;
	int nr_moved;
	struct rq_iterator rt_rq_iterator;

	best_prio = sched_find_first_bit(busiest->rt.active.bitmap);
	this_best_prio = sched_find_first_bit(this_rq->rt.active.bitmap);

	/*
	 * Enable handling of the case where there is more than one task
	 * with the best priority. If the current running task is one
	 * of those with prio==best_prio we know it won't be moved
	 * and therefore it's safe to override the skip (based on load)
	 * of any task we find with that prio.
	 */
	if (busiest->curr->prio == best_prio)
		best_prio_seen = 1;

	rt_rq_iterator.start = load_balance_start_rt;
	rt_rq_iterator.next = load_balance_next_rt;
	/*
	 * Pass the 'busiest' rq argument into the
	 * load_balance_[start|next]_rt iterators:
	 */
	rt_rq_iterator.arg = busiest;

	nr_moved = balance_tasks(this_rq, this_cpu, busiest, max_nr_move,
			max_load_move, sd, idle, all_pinned, load_moved,
			this_best_prio, best_prio, best_prio_seen,
			&rt_rq_iterator);

	return nr_moved;
}
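
/*
 * Per-tick bookkeeping: round-robin tasks consume a timeslice and are
 * requeued when it runs out; FIFO tasks are left untouched.
 */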
static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->time_slice)
		return;

	p->time_slice = static_prio_timeslice(p->static_prio);
	set_tsk_need_resched(p);
	/* put it at the end of the queue: */
	requeue_task_rt(rq, p);
}

/*
 * No parent/child timeslice management necessary for RT tasks,
 * just activate them:
 */
static void task_new_rt(struct rq *rq, struct task_struct *p)
{
	activate_task(rq, p, 1);
}
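
/*
 * All the methods of the RT scheduling class:
 */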
static struct sched_class rt_sched_class __read_mostly = {
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

	.load_balance		= load_balance_rt,

	.task_tick		= task_tick_rt,
	.task_new		= task_new_rt,
};