/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void update_curr_rt(struct rq *rq, u64 now)
{
        struct task_struct *curr = rq->curr;
        u64 delta_exec;

        if (!task_has_rt_policy(curr))
                return;

        delta_exec = now - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;
        if (unlikely(delta_exec > curr->se.exec_max))
                curr->se.exec_max = delta_exec;

        curr->se.sum_exec_runtime += delta_exec;
        curr->se.exec_start = now;
}
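/*
 * Enqueue a task into the RT priority array: append it to the tail of
 * the list for its priority and mark that priority as populated in the
 * bitmap. For reference, the priority array (defined elsewhere) looks
 * roughly like this; the extra bitmap bit serves as a search delimiter:
 *
 *        struct rt_prio_array {
 *                DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1);
 *                struct list_head queue[MAX_RT_PRIO];
 *        };
 */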
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, u64 now)
{
        struct rt_prio_array *array = &rq->rt.active;

        list_add_tail(&p->run_list, array->queue + p->prio);
        __set_bit(p->prio, array->bitmap);
}
/*
 * Adding/removing a task to/from a priority array:
 */
static void
dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep, u64 now)
{
        struct rt_prio_array *array = &rq->rt.active;

        update_curr_rt(rq, now);

        list_del(&p->run_list);
        if (list_empty(array->queue + p->prio))
                __clear_bit(p->prio, array->bitmap);
}
/*
 * Put the task at the end of the run list, without the overhead of a
 * dequeue followed by an enqueue.
 */
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
        struct rt_prio_array *array = &rq->rt.active;

        list_move_tail(&p->run_list, array->queue + p->prio);
}
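/*
 * sched_yield() for an RT task simply requeues it at the tail of its
 * priority list: it keeps running if it is the only runnable task at
 * that priority, otherwise its peers at the same priority get a turn.
 */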
static void
yield_task_rt(struct rq *rq, struct task_struct *p)
{
        requeue_task_rt(rq, p);
}
/*
 * Preempt the current task with a newly woken task if needed. Note that
 * a lower ->prio value means a higher priority:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
        if (p->prio < rq->curr->prio)
                resched_task(rq->curr);
}
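/*
 * Pick the next RT task in O(1): sched_find_first_bit() returns the
 * lowest set bit in the bitmap, i.e. the highest populated priority,
 * and the task at the head of that list runs next. Returning NULL
 * tells the core scheduler that no RT task is runnable, so it falls
 * through to the next scheduling class.
 */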
static struct task_struct *pick_next_task_rt(struct rq *rq, u64 now)
{
        struct rt_prio_array *array = &rq->rt.active;
        struct task_struct *next;
        struct list_head *queue;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        if (idx >= MAX_RT_PRIO)
                return NULL;

        queue = array->queue + idx;
        next = list_entry(queue->next, struct task_struct, run_list);

        next->se.exec_start = now;

        return next;
}
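/*
 * A task is being switched out: fold the time it just ran into its
 * runtime statistics and clear its exec_start stamp.
 */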
static void put_prev_task_rt(struct rq *rq, struct task_struct *p, u64 now)
{
        update_curr_rt(rq, now);
        p->se.exec_start = 0;
}
/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static struct task_struct *load_balance_start_rt(void *arg)
{
        struct rq *rq = arg;
        struct rt_prio_array *array = &rq->rt.active;
        struct list_head *head, *curr;
        struct task_struct *p;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        if (idx >= MAX_RT_PRIO)
                return NULL;

        head = array->queue + idx;
        curr = head->prev;

        p = list_entry(curr, struct task_struct, run_list);

        curr = curr->prev;

        rq->rt.rt_load_balance_idx = idx;
        rq->rt.rt_load_balance_head = head;
        rq->rt.rt_load_balance_curr = curr;

        return p;
}
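/*
 * Continue the walk started by load_balance_start_rt(): the iterator
 * moves backwards through the current priority list (the cursor was
 * already advanced past the task handed out last time), and once it
 * wraps around to the list head it searches the bitmap for the next
 * populated priority.
 */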
static struct task_struct *load_balance_next_rt(void *arg)
{
        struct rq *rq = arg;
        struct rt_prio_array *array = &rq->rt.active;
        struct list_head *head, *curr;
        struct task_struct *p;
        int idx;

        idx = rq->rt.rt_load_balance_idx;
        head = rq->rt.rt_load_balance_head;
        curr = rq->rt.rt_load_balance_curr;

        /*
         * If we have arrived back at the head, advance to the
         * next populated queue (if any):
         */
        if (unlikely(head == curr)) {
                int next_idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);

                if (next_idx >= MAX_RT_PRIO)
                        return NULL;

                idx = next_idx;
                head = array->queue + idx;
                curr = head->prev;

                rq->rt.rt_load_balance_idx = idx;
                rq->rt.rt_load_balance_head = head;
        }

        p = list_entry(curr, struct task_struct, run_list);

        curr = curr->prev;

        rq->rt.rt_load_balance_curr = curr;

        return p;
}
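/*
 * Pull RT tasks from 'busiest' onto this runqueue. The heavy lifting is
 * done by the generic balance_tasks() pass; this function only computes
 * the best priorities on both runqueues and plugs the iterator callbacks
 * above into an rq_iterator.
 */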
static int
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                        unsigned long max_nr_move, unsigned long max_load_move,
                        struct sched_domain *sd, enum cpu_idle_type idle,
                        int *all_pinned, unsigned long *load_moved)
{
        int this_best_prio, best_prio, best_prio_seen = 0;
        int nr_moved;
        struct rq_iterator rt_rq_iterator;

        best_prio = sched_find_first_bit(busiest->rt.active.bitmap);
        this_best_prio = sched_find_first_bit(this_rq->rt.active.bitmap);

        /*
         * Enable handling of the case where there is more than one task
         * with the best priority. If the currently running task is one
         * of those with prio == best_prio, we know it won't be moved and
         * therefore it's safe to override the load-based skip of any
         * task we find with that prio.
         */
        if (busiest->curr->prio == best_prio)
                best_prio_seen = 1;

        rt_rq_iterator.start = load_balance_start_rt;
        rt_rq_iterator.next = load_balance_next_rt;
        /*
         * Pass the 'busiest' rq argument into the
         * load_balance_[start|next]_rt iterators:
         */
        rt_rq_iterator.arg = busiest;

        nr_moved = balance_tasks(this_rq, this_cpu, busiest, max_nr_move,
                        max_load_move, sd, idle, all_pinned, load_moved,
                        this_best_prio, best_prio, best_prio_seen,
                        &rt_rq_iterator);

        return nr_moved;
}
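/*
 * Per-tick bookkeeping for the RT class, invoked from the scheduler
 * tick with the runqueue locked.
 */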
static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
        /*
         * RR tasks need a special form of timeslice management.
         * FIFO tasks have no timeslices.
         */
        if (p->policy != SCHED_RR)
                return;

        if (--p->time_slice)
                return;

        p->time_slice = static_prio_timeslice(p->static_prio);
        set_tsk_need_resched(p);

        /* put it at the end of the queue: */
        requeue_task_rt(rq, p);
}
/*
 * No parent/child timeslice management is necessary for RT tasks;
 * just activate them:
 */
static void task_new_rt(struct rq *rq, struct task_struct *p)
{
        activate_task(rq, p, 1);
}
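/*
 * The method table the core scheduler dispatches through for every task
 * whose policy is SCHED_FIFO or SCHED_RR:
 */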
static struct sched_class rt_sched_class __read_mostly = {
        .enqueue_task           = enqueue_task_rt,
        .dequeue_task           = dequeue_task_rt,
        .yield_task             = yield_task_rt,

        .check_preempt_curr     = check_preempt_curr_rt,

        .pick_next_task         = pick_next_task_rt,
        .put_prev_task          = put_prev_task_rt,

        .load_balance           = load_balance_rt,

        .task_tick              = task_tick_rt,
        .task_new               = task_new_rt,
};
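/*
 * Usage sketch (userspace, for illustration only): a task typically
 * ends up in this class via sched_setscheduler(2), e.g.:
 *
 *        struct sched_param sp = { .sched_priority = 10 };
 *        sched_setscheduler(0, SCHED_FIFO, &sp);
 *
 * after which the scheduler core switches the task's sched_class to
 * rt_sched_class and the methods above take over.
 */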