/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>

/*
 * Called after updating RLIMIT_CPU to set timer expiration if necessary.
 */
void update_rlimit_cpu(unsigned long rlim_new)
{
        cputime_t cputime;

        cputime = secs_to_cputime(rlim_new);
        /*
         * Re-arm only if no prof expiry is cached, or the cached one
         * would fire later than the new, tighter limit.
         */
        if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
            cputime_gt(current->signal->it_prof_expires, cputime)) {
                spin_lock_irq(&current->sighand->siglock);
                set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
                spin_unlock_irq(&current->sighand->siglock);
        }
}

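/*
 * A CPU clock's clockid_t encodes which clock and whose: CPUCLOCK_PID()
 * extracts the target PID (0 means the caller itself), CPUCLOCK_PERTHREAD()
 * distinguishes a single thread's clock from the whole thread group's, and
 * CPUCLOCK_WHICH() selects CPUCLOCK_PROF, CPUCLOCK_VIRT, or CPUCLOCK_SCHED
 * (see <linux/posix-timers.h>).
 */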
static int check_clock(const clockid_t which_clock)
{
        int error = 0;
        struct task_struct *p;
        const pid_t pid = CPUCLOCK_PID(which_clock);

        if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        if (pid == 0)
                return 0;

        read_lock(&tasklist_lock);
        p = find_task_by_vpid(pid);
        if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
                   same_thread_group(p, current) : thread_group_leader(p))) {
                error = -EINVAL;
        }
        read_unlock(&tasklist_lock);

        return error;
}

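/*
 * A union cpu_time_count sample is kept in whichever unit its clock uses:
 * .cpu holds cputime_t ticks for CPUCLOCK_PROF and CPUCLOCK_VIRT, while
 * .sched holds u64 nanoseconds from the scheduler clock for CPUCLOCK_SCHED.
 * The helpers below convert, compare, and do arithmetic on both forms.
 */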
static inline union cpu_time_count
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
        union cpu_time_count ret;
        ret.sched = 0;          /* high half always zero when .cpu used */
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
        } else {
                ret.cpu = timespec_to_cputime(tp);
        }
        return ret;
}

static void sample_to_timespec(const clockid_t which_clock,
                               union cpu_time_count cpu,
                               struct timespec *tp)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
                *tp = ns_to_timespec(cpu.sched);
        else
                cputime_to_timespec(cpu.cpu, tp);
}

static inline int cpu_time_before(const clockid_t which_clock,
                                  union cpu_time_count now,
                                  union cpu_time_count then)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                return now.sched < then.sched;
        } else {
                return cputime_lt(now.cpu, then.cpu);
        }
}
static inline void cpu_time_add(const clockid_t which_clock,
                                union cpu_time_count *acc,
                                union cpu_time_count val)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                acc->sched += val.sched;
        } else {
                acc->cpu = cputime_add(acc->cpu, val.cpu);
        }
}
static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
                                                union cpu_time_count a,
                                                union cpu_time_count b)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                a.sched -= b.sched;
        } else {
                a.cpu = cputime_sub(a.cpu, b.cpu);
        }
        return a;
}

/*
 * Divide and limit the result to res >= 1
 *
 * This is necessary to prevent signal delivery starvation, when the result of
 * the division would be rounded down to 0.
 */
static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
{
        cputime_t res = cputime_div(time, div);

        return max_t(cputime_t, res, 1);
}

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer,
                                  union cpu_time_count now)
{
        int i;

        if (timer->it.cpu.incr.sched == 0)
                return;

        if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
                unsigned long long delta, incr;

                if (now.sched < timer->it.cpu.expires.sched)
                        return;
                incr = timer->it.cpu.incr.sched;
                delta = now.sched + incr - timer->it.cpu.expires.sched;
                /* Don't use (incr*2 < delta), incr*2 might overflow. */
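                /*
                 * Compute delta/incr in binary: double incr until it is
                 * the largest power-of-two multiple still below delta,
                 * then halve it back down, advancing the expiry time and
                 * counting 1 << i overruns for every multiple subtracted.
                 */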
                for (i = 0; incr < delta - incr; i++)
                        incr = incr << 1;
                for (; i >= 0; incr >>= 1, i--) {
                        if (delta < incr)
                                continue;
                        timer->it.cpu.expires.sched += incr;
                        timer->it_overrun += 1 << i;
                        delta -= incr;
                }
        } else {
                cputime_t delta, incr;

                if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
                        return;
                incr = timer->it.cpu.incr.cpu;
                delta = cputime_sub(cputime_add(now.cpu, incr),
                                    timer->it.cpu.expires.cpu);
                /* Don't use (incr*2 < delta), incr*2 might overflow. */
                for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
                        incr = cputime_add(incr, incr);
                for (; i >= 0; incr = cputime_halve(incr), i--) {
                        if (cputime_lt(delta, incr))
                                continue;
                        timer->it.cpu.expires.cpu =
                                cputime_add(timer->it.cpu.expires.cpu, incr);
                        timer->it_overrun += 1 << i;
                        delta = cputime_sub(delta, incr);
                }
        }
}

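/*
 * Tick sources for the tick-granularity clocks: CPUCLOCK_PROF counts
 * user plus system time, CPUCLOCK_VIRT user time only.
 */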
static inline cputime_t prof_ticks(struct task_struct *p)
{
        return cputime_add(p->utime, p->stime);
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
        return p->utime;
}

int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
        int error = check_clock(which_clock);
        if (!error) {
                tp->tv_sec = 0;
                tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
                if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                        /*
                         * If sched_clock is using a cycle counter, we
                         * don't have any idea of its true resolution
                         * exported, but it is much more than 1s/HZ.
                         */
                        tp->tv_nsec = 1;
                }
        }
        return error;
}

int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
        /*
         * You can never reset a CPU clock, but we check for other errors
         * in the call before failing with EPERM.
         */
        int error = check_clock(which_clock);
        if (error == 0) {
                error = -EPERM;
        }
        return error;
}

/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
                            union cpu_time_count *cpu)
{
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                cpu->cpu = prof_ticks(p);
                break;
        case CPUCLOCK_VIRT:
                cpu->cpu = virt_ticks(p);
                break;
        case CPUCLOCK_SCHED:
                cpu->sched = p->se.sum_exec_runtime + task_delta_exec(p);
                break;
        }
        return 0;
}

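/*
 * Sum the CPU times of all live threads in the group, then add the times
 * already accumulated in signal_struct by threads that have exited.  The
 * RCU read lock keeps the thread list stable while we walk it; if sighand
 * is gone the group has been reaped and the zeroed totals are returned.
 */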
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
        struct sighand_struct *sighand;
        struct signal_struct *sig;
        struct task_struct *t;

        *times = INIT_CPUTIME;

        rcu_read_lock();
        sighand = rcu_dereference(tsk->sighand);
        if (!sighand)
                goto out;

        sig = tsk->signal;

        t = tsk;
        do {
                times->utime = cputime_add(times->utime, t->utime);
                times->stime = cputime_add(times->stime, t->stime);
                times->sum_exec_runtime += t->se.sum_exec_runtime;

                t = next_thread(t);
        } while (t != tsk);

        times->utime = cputime_add(times->utime, sig->utime);
        times->stime = cputime_add(times->stime, sig->stime);
        times->sum_exec_runtime += sig->sum_sched_runtime;
out:
        rcu_read_unlock();
}

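/*
 * Fold a freshly summed sample into the cached group time field by
 * field, making sure the cached values never move backwards.
 */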
static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
{
        if (cputime_gt(b->utime, a->utime))
                a->utime = b->utime;

        if (cputime_gt(b->stime, a->stime))
                a->stime = b->stime;

        if (b->sum_exec_runtime > a->sum_exec_runtime)
                a->sum_exec_runtime = b->sum_exec_runtime;
}

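/*
 * Return the group's cached CPU time sample, switching the incremental
 * group accounting on at first use so subsequent ticks can update the
 * cache instead of re-summing every thread.
 */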
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
        struct task_cputime sum;
        unsigned long flags;

        spin_lock_irqsave(&cputimer->lock, flags);
        if (!cputimer->running) {
                cputimer->running = 1;
                /*
                 * The POSIX timer interface allows for absolute time expiry
                 * values through the TIMER_ABSTIME flag, therefore we have
                 * to synchronize the timer to the clock every time we start
                 * it.
                 */
                thread_group_cputime(tsk, &sum);
                update_gt_cputime(&cputimer->cputime, &sum);
        }
        *times = cputimer->cputime;
        spin_unlock_irqrestore(&cputimer->lock, flags);
}

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
                                  union cpu_time_count *cpu)
{
        struct task_cputime cputime;

        thread_group_cputime(p, &cputime);
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                cpu->cpu = cputime_add(cputime.utime, cputime.stime);
                break;
        case CPUCLOCK_VIRT:
                cpu->cpu = cputime.utime;
                break;
        case CPUCLOCK_SCHED:
                cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
                break;
        }
        return 0;
}

int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
        const pid_t pid = CPUCLOCK_PID(which_clock);
        int error = -EINVAL;
        union cpu_time_count rtn;

        if (pid == 0) {
                /*
                 * Special case constant value for our own clocks.
                 * We don't have to do any lookup to find ourselves.
                 */
                if (CPUCLOCK_PERTHREAD(which_clock)) {
                        /*
                         * Sampling just ourselves we can do with no locking.
                         */
                        error = cpu_clock_sample(which_clock,
                                                 current, &rtn);
                } else {
                        read_lock(&tasklist_lock);
                        error = cpu_clock_sample_group(which_clock,
                                                       current, &rtn);
                        read_unlock(&tasklist_lock);
                }
        } else {
                /*
                 * Find the given PID, and validate that the caller
                 * should be able to see it.
                 */
                struct task_struct *p;
                rcu_read_lock();
                p = find_task_by_vpid(pid);
                if (p) {
                        if (CPUCLOCK_PERTHREAD(which_clock)) {
                                if (same_thread_group(p, current)) {
                                        error = cpu_clock_sample(which_clock,
                                                                 p, &rtn);
                                }
                        } else {
                                read_lock(&tasklist_lock);
                                if (thread_group_leader(p) && p->signal) {
                                        error =
                                            cpu_clock_sample_group(which_clock,
                                                                   p, &rtn);
                                }
                                read_unlock(&tasklist_lock);
                        }
                }
                rcu_read_unlock();
        }

        if (error)
                return error;
        sample_to_timespec(which_clock, rtn, tp);
        return 0;
}

/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create with the new timer already locked.
 */
int posix_cpu_timer_create(struct k_itimer *new_timer)
{
        int ret = 0;
        const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
        struct task_struct *p;

        if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        INIT_LIST_HEAD(&new_timer->it.cpu.entry);
        new_timer->it.cpu.incr.sched = 0;
        new_timer->it.cpu.expires.sched = 0;

        read_lock(&tasklist_lock);
        if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
                if (pid == 0) {
                        p = current;
                } else {
                        p = find_task_by_vpid(pid);
                        if (p && !same_thread_group(p, current))
                                p = NULL;
                }
        } else {
                if (pid == 0) {
                        p = current->group_leader;
                } else {
                        p = find_task_by_vpid(pid);
                        if (p && !thread_group_leader(p))
                                p = NULL;
                }
        }
        new_timer->it.cpu.task = p;
        if (p) {
                get_task_struct(p);
        } else {
                ret = -EINVAL;
        }
        read_unlock(&tasklist_lock);

        return ret;
}

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_del(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        int ret = 0;

        if (likely(p != NULL)) {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * We raced with the reaping of the task.
                         * The deletion should have cleared us off the list.
                         */
                        BUG_ON(!list_empty(&timer->it.cpu.entry));
                } else {
                        spin_lock(&p->sighand->siglock);
                        if (timer->it.cpu.firing)
                                ret = TIMER_RETRY;
                        else
                                list_del(&timer->it.cpu.entry);
                        spin_unlock(&p->sighand->siglock);
                }
                read_unlock(&tasklist_lock);

                if (!ret)
                        put_task_struct(p);
        }

        return ret;
}

/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head,
                           cputime_t utime, cputime_t stime,
                           unsigned long long sum_exec_runtime)
{
        struct cpu_timer_list *timer, *next;
        cputime_t ptime = cputime_add(utime, stime);

        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (cputime_lt(timer->expires.cpu, ptime)) {
                        timer->expires.cpu = cputime_zero;
                } else {
                        timer->expires.cpu = cputime_sub(timer->expires.cpu,
                                                         ptime);
                }
        }

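        /*
         * head points into an array of three lists, one per clock type
         * (CPUCLOCK_PROF, _VIRT, _SCHED); step to the next clock's list.
         */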
        ++head;
        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (cputime_lt(timer->expires.cpu, utime)) {
                        timer->expires.cpu = cputime_zero;
                } else {
                        timer->expires.cpu = cputime_sub(timer->expires.cpu,
                                                         utime);
                }
        }

        ++head;
        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (timer->expires.sched < sum_exec_runtime) {
                        timer->expires.sched = 0;
                } else {
                        timer->expires.sched -= sum_exec_runtime;
                }
        }
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
        cleanup_timers(tsk->cpu_timers,
                       tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
        struct task_cputime cputime;

        thread_group_cputimer(tsk, &cputime);
        cleanup_timers(tsk->signal->cpu_timers,
                       cputime.utime, cputime.stime, cputime.sum_exec_runtime);
}

static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
{
        /*
         * That's all for this thread or process.
         * We leave our residual in expires to be reported.
         */
        put_task_struct(timer->it.cpu.task);
        timer->it.cpu.task = NULL;
        timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
                                             timer->it.cpu.expires,
                                             now);
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the tasklist_lock held
 * for reading, and interrupts disabled.
 */
static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
{
        struct task_struct *p = timer->it.cpu.task;
        struct list_head *head, *listpos;
        struct cpu_timer_list *const nt = &timer->it.cpu;
        struct cpu_timer_list *next;
        unsigned long i;

        head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
                p->cpu_timers : p->signal->cpu_timers);
        head += CPUCLOCK_WHICH(timer->it_clock);

        BUG_ON(!irqs_disabled());
        spin_lock(&p->sighand->siglock);

        listpos = head;
        if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
                list_for_each_entry(next, head, entry) {
                        if (next->expires.sched > nt->expires.sched)
                                break;
                        listpos = &next->entry;
                }
        } else {
                list_for_each_entry(next, head, entry) {
                        if (cputime_gt(next->expires.cpu, nt->expires.cpu))
                                break;
                        listpos = &next->entry;
                }
        }
        list_add(&nt->entry, listpos);

        if (listpos == head) {
                /*
                 * We are the new earliest-expiring timer.
                 * If we are a thread timer, there can always
                 * be a process timer telling us to stop earlier.
                 */

                if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                        switch (CPUCLOCK_WHICH(timer->it_clock)) {
                        default:
                                BUG();
                        case CPUCLOCK_PROF:
                                if (cputime_eq(p->cputime_expires.prof_exp,
                                               cputime_zero) ||
                                    cputime_gt(p->cputime_expires.prof_exp,
                                               nt->expires.cpu))
                                        p->cputime_expires.prof_exp =
                                                nt->expires.cpu;
                                break;
                        case CPUCLOCK_VIRT:
                                if (cputime_eq(p->cputime_expires.virt_exp,
                                               cputime_zero) ||
                                    cputime_gt(p->cputime_expires.virt_exp,
                                               nt->expires.cpu))
                                        p->cputime_expires.virt_exp =
                                                nt->expires.cpu;
                                break;
                        case CPUCLOCK_SCHED:
                                if (p->cputime_expires.sched_exp == 0 ||
                                    p->cputime_expires.sched_exp >
                                                        nt->expires.sched)
                                        p->cputime_expires.sched_exp =
                                                nt->expires.sched;
                                break;
                        }
                } else {
                        /*
                         * For a process timer, set the cached expiration time.
                         */
                        switch (CPUCLOCK_WHICH(timer->it_clock)) {
                        default:
                                BUG();
                        case CPUCLOCK_VIRT:
                                if (!cputime_eq(p->signal->it_virt_expires,
                                                cputime_zero) &&
                                    cputime_lt(p->signal->it_virt_expires,
                                               timer->it.cpu.expires.cpu))
                                        break;
                                p->signal->cputime_expires.virt_exp =
                                        timer->it.cpu.expires.cpu;
                                break;
                        case CPUCLOCK_PROF:
                                if (!cputime_eq(p->signal->it_prof_expires,
                                                cputime_zero) &&
                                    cputime_lt(p->signal->it_prof_expires,
                                               timer->it.cpu.expires.cpu))
                                        break;
                                i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
                                if (i != RLIM_INFINITY &&
                                    i <= cputime_to_secs(timer->it.cpu.expires.cpu))
                                        break;
                                p->signal->cputime_expires.prof_exp =
                                        timer->it.cpu.expires.cpu;
                                break;
                        case CPUCLOCK_SCHED:
                                p->signal->cputime_expires.sched_exp =
                                        timer->it.cpu.expires.sched;
                                break;
                        }
                }
        }

        spin_unlock(&p->sighand->siglock);
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
        if (unlikely(timer->sigq == NULL)) {
                /*
                 * This is a special case for clock_nanosleep,
                 * not a normal timer from sys_timer_create.
                 */
                wake_up_process(timer->it_process);
                timer->it.cpu.expires.sched = 0;
        } else if (timer->it.cpu.incr.sched == 0) {
                /*
                 * One-shot timer.  Clear it as soon as it's fired.
                 */
                posix_timer_event(timer, 0);
                timer->it.cpu.expires.sched = 0;
        } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
                /*
                 * The signal did not get queued because the signal
                 * was ignored, so we won't get any callback to
                 * reload the timer.  But we need to keep it
                 * ticking in case the signal is deliverable next time.
                 */
                posix_cpu_timer_schedule(timer);
        }
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_set(struct k_itimer *timer, int flags,
                        struct itimerspec *new, struct itimerspec *old)
{
        struct task_struct *p = timer->it.cpu.task;
        union cpu_time_count old_expires, new_expires, val;
        int ret;

        if (unlikely(p == NULL)) {
                /*
                 * Timer refers to a dead task's clock.
                 */
                return -ESRCH;
        }

        new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

        read_lock(&tasklist_lock);
        /*
         * We need the tasklist_lock to protect against reaping that
         * clears p->signal.  If p has just been reaped, we can no
         * longer get any information about it at all.
         */
        if (unlikely(p->signal == NULL)) {
                read_unlock(&tasklist_lock);
                put_task_struct(p);
                timer->it.cpu.task = NULL;
                return -ESRCH;
        }

        /*
         * Disarm any old timer after extracting its expiry time.
         */
        BUG_ON(!irqs_disabled());

        ret = 0;
        spin_lock(&p->sighand->siglock);
        old_expires = timer->it.cpu.expires;
        if (unlikely(timer->it.cpu.firing)) {
                timer->it.cpu.firing = -1;
                ret = TIMER_RETRY;
        } else
                list_del_init(&timer->it.cpu.entry);
        spin_unlock(&p->sighand->siglock);

        /*
         * We need to sample the current value to convert the new
         * value from relative to absolute, and to convert the old
         * value from absolute to relative.  To set a process timer,
         * we need a sample to balance the thread expiry times (in
         * arm_timer).  With an absolute time, we must check if it's
         * already passed.  In short, we need a sample.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &val);
        } else {
                cpu_clock_sample_group(timer->it_clock, p, &val);
        }

        if (old) {
                if (old_expires.sched == 0) {
                        old->it_value.tv_sec = 0;
                        old->it_value.tv_nsec = 0;
                } else {
                        /*
                         * Update the timer in case it has
                         * overrun already.  If it has,
                         * we'll report it as having overrun
                         * and with the next reloaded timer
                         * already ticking, though we are
                         * swallowing that pending
                         * notification here to install the
                         * new setting.
                         */
                        bump_cpu_timer(timer, val);
                        if (cpu_time_before(timer->it_clock, val,
                                            timer->it.cpu.expires)) {
                                old_expires = cpu_time_sub(
                                        timer->it_clock,
                                        timer->it.cpu.expires, val);
                                sample_to_timespec(timer->it_clock,
                                                   old_expires,
                                                   &old->it_value);
                        } else {
                                old->it_value.tv_nsec = 1;
                                old->it_value.tv_sec = 0;
                        }
                }
        }

        if (unlikely(ret)) {
                /*
                 * We are colliding with the timer actually firing.
                 * Punt after filling in the timer's old value, and
                 * disable this firing since we are already reporting
                 * it as an overrun (thanks to bump_cpu_timer above).
                 */
                read_unlock(&tasklist_lock);
                goto out;
        }

        if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
                cpu_time_add(timer->it_clock, &new_expires, val);
        }

        /*
         * Install the new expiry time (or zero).
         * For a timer with no notification action, we don't actually
         * arm the timer (we'll just fake it for timer_gettime).
         */
        timer->it.cpu.expires = new_expires;
        if (new_expires.sched != 0 &&
            (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
            cpu_time_before(timer->it_clock, val, new_expires)) {
                arm_timer(timer, val);
        }

        read_unlock(&tasklist_lock);

        /*
         * Install the new reload setting, and
         * set up the signal and overrun bookkeeping.
         */
        timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
                                                &new->it_interval);

        /*
         * This acts as a modification timestamp for the timer,
         * so any automatic reload attempt will punt on seeing
         * that we have reset the timer manually.
         */
        timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
                ~REQUEUE_PENDING;
        timer->it_overrun_last = 0;
        timer->it_overrun = -1;

        if (new_expires.sched != 0 &&
            (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
            !cpu_time_before(timer->it_clock, val, new_expires)) {
                /*
                 * The designated time already passed, so we notify
                 * immediately, even if the thread never runs to
                 * accumulate more time on this clock.
                 */
                cpu_timer_fire(timer);
        }

        ret = 0;
 out:
        if (old) {
                sample_to_timespec(timer->it_clock,
                                   timer->it.cpu.incr, &old->it_interval);
        }
        return ret;
}

void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
        union cpu_time_count now;
        struct task_struct *p = timer->it.cpu.task;
        int clear_dead;

        /*
         * Easy part: convert the reload time.
         */
        sample_to_timespec(timer->it_clock,
                           timer->it.cpu.incr, &itp->it_interval);

        if (timer->it.cpu.expires.sched == 0) { /* Timer not armed at all.  */
                itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
                return;
        }

        if (unlikely(p == NULL)) {
                /*
                 * This task already died and the timer will never fire.
                 * In this case, expires is actually the dead value.
                 */
        dead:
                sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
                                   &itp->it_value);
                return;
        }

        /*
         * Sample the clock to take the difference with the expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                clear_dead = p->exit_state;
        } else {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         * Call the timer disarmed, nothing else to do.
                         */
                        put_task_struct(p);
                        timer->it.cpu.task = NULL;
                        timer->it.cpu.expires.sched = 0;
                        read_unlock(&tasklist_lock);
                        goto dead;
                } else {
                        cpu_clock_sample_group(timer->it_clock, p, &now);
                        clear_dead = (unlikely(p->exit_state) &&
                                      thread_group_empty(p));
                }
                read_unlock(&tasklist_lock);
        }

        if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
                if (timer->it.cpu.incr.sched == 0 &&
                    cpu_time_before(timer->it_clock,
                                    timer->it.cpu.expires, now)) {
                        /*
                         * Do-nothing timer expired and has no reload,
                         * so it's as if it was never set.
                         */
                        timer->it.cpu.expires.sched = 0;
                        itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
                        return;
                }
                /*
                 * Account for any expirations and reloads that should
                 * have happened.
                 */
                bump_cpu_timer(timer, now);
        }

        if (unlikely(clear_dead)) {
                /*
                 * We've noticed that the thread is dead, but
                 * not yet reaped.  Take this opportunity to
                 * drop our task ref.
                 */
                clear_dead_task(timer, now);
                goto dead;
        }

        if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
                sample_to_timespec(timer->it_clock,
                                   cpu_time_sub(timer->it_clock,
                                                timer->it.cpu.expires, now),
                                   &itp->it_value);
        } else {
                /*
                 * The timer should have expired already, but the firing
                 * hasn't taken place yet.  Say it's just about to expire.
                 */
                itp->it_value.tv_nsec = 1;
                itp->it_value.tv_sec = 0;
        }
}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->cputime_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
                                struct list_head *firing)
{
        int maxfire;
        struct list_head *timers = tsk->cpu_timers;
        struct signal_struct *const sig = tsk->signal;

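        /*
         * Bound the work done per tick: fire at most 20 timers from each
         * list, caching the expiry time of the first survivor so the
         * remainder are picked up on a later pass.
         */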
        maxfire = 20;
        tsk->cputime_expires.prof_exp = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
                        tsk->cputime_expires.prof_exp = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        tsk->cputime_expires.virt_exp = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
                        tsk->cputime_expires.virt_exp = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        tsk->cputime_expires.sched_exp = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
                        tsk->cputime_expires.sched_exp = t->expires.sched;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        /*
         * Check for the special case thread timers.
         */
        if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) {
                unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
                unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;

                if (hard != RLIM_INFINITY &&
                    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        if (sig->rlim[RLIMIT_RTTIME].rlim_cur
                            < sig->rlim[RLIMIT_RTTIME].rlim_max) {
                                sig->rlim[RLIMIT_RTTIME].rlim_cur +=
                                                                USEC_PER_SEC;
                        }
                        printk(KERN_INFO
                                "RT Watchdog Timeout: %s[%d]\n",
                                tsk->comm, task_pid_nr(tsk));
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                }
        }
}

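/*
 * No process-wide timers remain armed: switch off the incremental group
 * accounting until thread_group_cputimer() is asked for a sample again.
 */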
static void stop_process_timers(struct task_struct *tsk)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
        unsigned long flags;

        if (!cputimer->running)
                return;

        spin_lock_irqsave(&cputimer->lock, flags);
        cputimer->running = 0;
        spin_unlock_irqrestore(&cputimer->lock, flags);
}

/*
 * Check for any per-process (thread group) CPU timers that have fired
 * and move them off the sig->cpu_timers[N] lists onto the firing list.
 * Per-thread timers have already been taken off by check_thread_timers.
 */
1050 static void check_process_timers(struct task_struct *tsk,
1051                                  struct list_head *firing)
1052 {
1053         int maxfire;
1054         struct signal_struct *const sig = tsk->signal;
1055         cputime_t utime, ptime, virt_expires, prof_expires;
1056         unsigned long long sum_sched_runtime, sched_expires;
1057         struct list_head *timers = sig->cpu_timers;
1058         struct task_cputime cputime;
1059
1060         /*
1061          * Don't sample the current process CPU clocks if there are no timers.
1062          */
1063         if (list_empty(&timers[CPUCLOCK_PROF]) &&
1064             cputime_eq(sig->it_prof_expires, cputime_zero) &&
1065             sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
1066             list_empty(&timers[CPUCLOCK_VIRT]) &&
1067             cputime_eq(sig->it_virt_expires, cputime_zero) &&
1068             list_empty(&timers[CPUCLOCK_SCHED])) {
1069                 stop_process_timers(tsk);
1070                 return;
1071         }
1072
1073         /*
1074          * Collect the current process totals.
1075          */
1076         thread_group_cputimer(tsk, &cputime);
1077         utime = cputime.utime;
1078         ptime = cputime_add(utime, cputime.stime);
1079         sum_sched_runtime = cputime.sum_exec_runtime;
1080         maxfire = 20;
1081         prof_expires = cputime_zero;
1082         while (!list_empty(timers)) {
1083                 struct cpu_timer_list *tl = list_first_entry(timers,
1084                                                       struct cpu_timer_list,
1085                                                       entry);
1086                 if (!--maxfire || cputime_lt(ptime, tl->expires.cpu)) {
1087                         prof_expires = tl->expires.cpu;
1088                         break;
1089                 }
1090                 tl->firing = 1;
1091                 list_move_tail(&tl->entry, firing);
1092         }
1093
1094         ++timers;
1095         maxfire = 20;
1096         virt_expires = cputime_zero;
1097         while (!list_empty(timers)) {
1098                 struct cpu_timer_list *tl = list_first_entry(timers,
1099                                                       struct cpu_timer_list,
1100                                                       entry);
1101                 if (!--maxfire || cputime_lt(utime, tl->expires.cpu)) {
1102                         virt_expires = tl->expires.cpu;
1103                         break;
1104                 }
1105                 tl->firing = 1;
1106                 list_move_tail(&tl->entry, firing);
1107         }
1108
1109         ++timers;
1110         maxfire = 20;
1111         sched_expires = 0;
1112         while (!list_empty(timers)) {
1113                 struct cpu_timer_list *tl = list_first_entry(timers,
1114                                                       struct cpu_timer_list,
1115                                                       entry);
1116                 if (!--maxfire || sum_sched_runtime < tl->expires.sched) {
1117                         sched_expires = tl->expires.sched;
1118                         break;
1119                 }
1120                 tl->firing = 1;
1121                 list_move_tail(&tl->entry, firing);
1122         }
1123
1124         /*
1125          * Check for the special case process timers.
1126          */
1127         if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
1128                 if (cputime_ge(ptime, sig->it_prof_expires)) {
1129                         /* ITIMER_PROF fires and reloads.  */
1130                         sig->it_prof_expires = sig->it_prof_incr;
1131                         if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
1132                                 sig->it_prof_expires = cputime_add(
1133                                         sig->it_prof_expires, ptime);
1134                         }
1135                         __group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
1136                 }
1137                 if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
1138                     (cputime_eq(prof_expires, cputime_zero) ||
1139                      cputime_lt(sig->it_prof_expires, prof_expires))) {
1140                         prof_expires = sig->it_prof_expires;
1141                 }
1142         }
1143         if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
1144                 if (cputime_ge(utime, sig->it_virt_expires)) {
1145                         /* ITIMER_VIRTUAL fires and reloads.  */
1146                         sig->it_virt_expires = sig->it_virt_incr;
1147                         if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
1148                                 sig->it_virt_expires = cputime_add(
1149                                         sig->it_virt_expires, utime);
1150                         }
1151                         __group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
1152                 }
1153                 if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
1154                     (cputime_eq(virt_expires, cputime_zero) ||
1155                      cputime_lt(sig->it_virt_expires, virt_expires))) {
1156                         virt_expires = sig->it_virt_expires;
1157                 }
1158         }
1159         if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
1160                 unsigned long psecs = cputime_to_secs(ptime);
1161                 cputime_t x;
1162                 if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
1163                         /*
1164                          * At the hard limit, we just die.
1165                          * No need to calculate anything else now.
1166                          */
1167                         __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
1168                         return;
1169                 }
1170                 if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
1171                         /*
1172                          * At the soft limit, send a SIGXCPU every second.
1173                          */
1174                         __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
1175                         if (sig->rlim[RLIMIT_CPU].rlim_cur
1176                             < sig->rlim[RLIMIT_CPU].rlim_max) {
1177                                 sig->rlim[RLIMIT_CPU].rlim_cur++;
1178                         }
1179                 }
1180                 x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
1181                 if (cputime_eq(prof_expires, cputime_zero) ||
1182                     cputime_lt(x, prof_expires)) {
1183                         prof_expires = x;
1184                 }
1185         }
1186
1187         if (!cputime_eq(prof_expires, cputime_zero) &&
1188             (cputime_eq(sig->cputime_expires.prof_exp, cputime_zero) ||
1189              cputime_gt(sig->cputime_expires.prof_exp, prof_expires)))
1190                 sig->cputime_expires.prof_exp = prof_expires;
1191         if (!cputime_eq(virt_expires, cputime_zero) &&
1192             (cputime_eq(sig->cputime_expires.virt_exp, cputime_zero) ||
1193              cputime_gt(sig->cputime_expires.virt_exp, virt_expires)))
1194                 sig->cputime_expires.virt_exp = virt_expires;
1195         if (sched_expires != 0 &&
1196             (sig->cputime_expires.sched_exp == 0 ||
1197              sig->cputime_expires.sched_exp > sched_expires))
1198                 sig->cputime_expires.sched_exp = sched_expires;
1199 }
1200
1201 /*
1202  * This is called from the signal code (via do_schedule_next_timer)
1203  * when the last timer signal was delivered and we have to reload the timer.
1204  */
1205 void posix_cpu_timer_schedule(struct k_itimer *timer)
1206 {
1207         struct task_struct *p = timer->it.cpu.task;
1208         union cpu_time_count now;
1209
1210         if (unlikely(p == NULL))
1211                 /*
1212                  * The task was cleaned up already, no future firings.
1213                  */
1214                 goto out;
1215
1216         /*
1217          * Fetch the current sample and update the timer's expiry time.
1218          */
1219         if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
1220                 cpu_clock_sample(timer->it_clock, p, &now);
1221                 bump_cpu_timer(timer, now);
1222                 if (unlikely(p->exit_state)) {
1223                         clear_dead_task(timer, now);
1224                         goto out;
1225                 }
1226                 read_lock(&tasklist_lock); /* arm_timer needs it.  */
1227         } else {
1228                 read_lock(&tasklist_lock);
1229                 if (unlikely(p->signal == NULL)) {
1230                         /*
1231                          * The process has been reaped.
1232                          * We can't even collect a sample any more.
1233                          */
1234                         put_task_struct(p);
1235                         timer->it.cpu.task = p = NULL;
1236                         timer->it.cpu.expires.sched = 0;
1237                         goto out_unlock;
1238                 } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
1239                         /*
1240                          * We've noticed that the thread is dead, but
1241                          * not yet reaped.  Take this opportunity to
1242                          * drop our task ref.
1243                          */
1244                         clear_dead_task(timer, now);
1245                         goto out_unlock;
1246                 }
1247                 cpu_clock_sample_group(timer->it_clock, p, &now);
1248                 bump_cpu_timer(timer, now);
1249                 /* Leave the tasklist_lock locked for the call below.  */
1250         }
1251
1252         /*
1253          * Now re-arm for the new expiry time.
1254          */
1255         arm_timer(timer, now);
1256
1257 out_unlock:
1258         read_unlock(&tasklist_lock);
1259
1260 out:
1261         timer->it_overrun_last = timer->it_overrun;
1262         timer->it_overrun = -1;
1263         ++timer->it_requeue_pending;
1264 }
1265
1266 /**
1267  * task_cputime_zero - Check a task_cputime struct for all zero fields.
1268  *
1269  * @cputime:    The struct to compare.
1270  *
1271  * Checks @cputime to see if all fields are zero.  Returns true if all fields
1272  * are zero, false if any field is nonzero.
1273  */
1274 static inline int task_cputime_zero(const struct task_cputime *cputime)
1275 {
1276         if (cputime_eq(cputime->utime, cputime_zero) &&
1277             cputime_eq(cputime->stime, cputime_zero) &&
1278             cputime->sum_exec_runtime == 0)
1279                 return 1;
1280         return 0;
1281 }
1282
1283 /**
1284  * task_cputime_expired - Compare two task_cputime entities.
1285  *
1286  * @sample:     The task_cputime structure to be checked for expiration.
1287  * @expires:    Expiration times, against which @sample will be checked.
1288  *
1289  * Checks @sample against @expires to see if any field of @sample has expired.
1290  * Returns true if any field of the former is greater than the corresponding
1291  * field of the latter if the latter field is set.  Otherwise returns false.
1292  */
1293 static inline int task_cputime_expired(const struct task_cputime *sample,
1294                                         const struct task_cputime *expires)
1295 {
1296         if (!cputime_eq(expires->utime, cputime_zero) &&
1297             cputime_ge(sample->utime, expires->utime))
1298                 return 1;
1299         if (!cputime_eq(expires->stime, cputime_zero) &&
1300             cputime_ge(cputime_add(sample->utime, sample->stime),
1301                        expires->stime))
1302                 return 1;
1303         if (expires->sum_exec_runtime != 0 &&
1304             sample->sum_exec_runtime >= expires->sum_exec_runtime)
1305                 return 1;
1306         return 0;
1307 }

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:        The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
        struct signal_struct *sig;

        /* tsk == current, ensure it is safe to use ->signal/sighand */
        if (unlikely(tsk->exit_state))
                return 0;

        if (!task_cputime_zero(&tsk->cputime_expires)) {
                struct task_cputime task_sample = {
                        .utime = tsk->utime,
                        .stime = tsk->stime,
                        .sum_exec_runtime = tsk->se.sum_exec_runtime
                };

                if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
                        return 1;
        }

        sig = tsk->signal;
        if (!task_cputime_zero(&sig->cputime_expires)) {
                struct task_cputime group_sample;

                thread_group_cputimer(tsk, &group_sample);
                if (task_cputime_expired(&group_sample, &sig->cputime_expires))
                        return 1;
        }
        return 0;
}

/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
        LIST_HEAD(firing);
        struct k_itimer *timer, *next;

        BUG_ON(!irqs_disabled());

        /*
         * The fast path checks that there are no expired thread or thread
         * group timers.  If that's so, just return.
         */
        if (!fastpath_timer_check(tsk))
                return;

        spin_lock(&tsk->sighand->siglock);
        /*
         * Here we remove from tsk->signal->cpu_timers[N] and
         * tsk->cpu_timers[N] all the timers that are firing, and
         * put them on the firing list.
         */
        check_thread_timers(tsk, &firing);
        check_process_timers(tsk, &firing);

        /*
         * We must release these locks before taking any timer's lock.
         * There is a potential race with timer deletion here, as the
         * siglock now protects our private firing list.  We have set
         * the firing flag in each timer, so that a deletion attempt
         * that gets the timer lock before we do will give it up and
         * spin until we've taken care of that timer below.
         */
        spin_unlock(&tsk->sighand->siglock);

        /*
         * Now that all the timers on our list have the firing flag,
         * no one will touch their list entries but us.  We'll take
         * each timer's lock before clearing its firing flag, so no
         * timer call will interfere.
         */
        list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
                int firing;

                spin_lock(&timer->it_lock);
                list_del_init(&timer->it.cpu.entry);
                firing = timer->it.cpu.firing;
                timer->it.cpu.firing = 0;
                /*
                 * The firing flag is -1 if we collided with a reset
                 * of the timer, which already reported this
                 * almost-firing as an overrun.  So don't generate an event.
                 */
                if (likely(firing >= 0)) {
                        cpu_timer_fire(timer);
                }
                spin_unlock(&timer->it_lock);
        }
}
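
/*
 * Locking protocol sketch for the firing path above (descriptive only):
 *
 *   siglock held:     check_thread_timers()/check_process_timers() move
 *                     expired timers onto the local firing list and set
 *                     it.cpu.firing on each of them.
 *   siglock dropped:  a concurrent posix_cpu_timer_del() that wins the
 *                     race for timer->it_lock sees firing != 0 and backs
 *                     off until the loop above clears the flag.
 *   it_lock held:     the loop unlinks the entry, reads and clears
 *                     firing, and calls cpu_timer_fire() unless firing
 *                     was -1.
 */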

/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
                                  union cpu_time_count *cpu)
{
        struct task_cputime cputime;

        thread_group_cputimer(p, &cputime);
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                cpu->cpu = cputime_add(cputime.utime, cputime.stime);
                break;
        case CPUCLOCK_VIRT:
                cpu->cpu = cputime.utime;
                break;
        case CPUCLOCK_SCHED:
                cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
                break;
        }
        return 0;
}
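
/*
 * For reference, what each clock type samples above: CPUCLOCK_PROF is
 * utime + stime, CPUCLOCK_VIRT is utime alone, and CPUCLOCK_SCHED is
 * sum_exec_runtime in nanoseconds plus the not-yet-banked runtime delta
 * from task_delta_exec().
 */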

/*
 * Set one of the process-wide special case CPU timers.
 * The tsk->sighand->siglock must be held by the caller.
 * The *newval argument is relative and we update it to be absolute, *oldval
 * is absolute and we update it to be relative.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                           cputime_t *newval, cputime_t *oldval)
{
        union cpu_time_count now;
        struct list_head *head;

        BUG_ON(clock_idx == CPUCLOCK_SCHED);
        cpu_timer_sample_group(clock_idx, tsk, &now);

        if (oldval) {
                if (!cputime_eq(*oldval, cputime_zero)) {
                        if (cputime_le(*oldval, now.cpu)) {
                                /* Just about to fire. */
                                *oldval = jiffies_to_cputime(1);
                        } else {
                                *oldval = cputime_sub(*oldval, now.cpu);
                        }
                }

                if (cputime_eq(*newval, cputime_zero))
                        return;
                *newval = cputime_add(*newval, now.cpu);

                /*
                 * If the RLIMIT_CPU timer will expire before the
                 * ITIMER_PROF timer, we have nothing else to do.
                 */
                if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
                    < cputime_to_secs(*newval))
                        return;
        }

        /*
         * Check whether there are any process timers already set to fire
         * before this one.  If so, we don't have anything more to do.
         */
        head = &tsk->signal->cpu_timers[clock_idx];
        if (list_empty(head) ||
            cputime_ge(list_first_entry(head,
                                  struct cpu_timer_list, entry)->expires.cpu,
                       *newval)) {
                switch (clock_idx) {
                case CPUCLOCK_PROF:
                        tsk->signal->cputime_expires.prof_exp = *newval;
                        break;
                case CPUCLOCK_VIRT:
                        tsk->signal->cputime_expires.virt_exp = *newval;
                        break;
                }
        }
}
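
/*
 * Illustrative caller sketch, assuming the ITIMER_PROF path in
 * kernel/itimer.c (a sketch under those assumptions, not the
 * authoritative call site):
 *
 *        cputime_t interval = timeval_to_cputime(&value->it_value);
 *        spin_lock_irq(&tsk->sighand->siglock);
 *        set_process_cpu_timer(tsk, CPUCLOCK_PROF, &interval, &oval);
 *        spin_unlock_irq(&tsk->sighand->siglock);
 *
 * On return, interval has been made absolute and oval relative, exactly
 * as the comment above describes.
 */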

static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
                            struct timespec *rqtp, struct itimerspec *it)
{
        struct k_itimer timer;
        int error;

        /*
         * Set up a temporary timer and then wait for it to go off.
         */
        memset(&timer, 0, sizeof timer);
        spin_lock_init(&timer.it_lock);
        timer.it_clock = which_clock;
        timer.it_overrun = -1;
        error = posix_cpu_timer_create(&timer);
        timer.it_process = current;
        if (!error) {
                static struct itimerspec zero_it;

                memset(it, 0, sizeof *it);
                it->it_value = *rqtp;

                spin_lock_irq(&timer.it_lock);
                error = posix_cpu_timer_set(&timer, flags, it, NULL);
                if (error) {
                        spin_unlock_irq(&timer.it_lock);
                        return error;
                }

                while (!signal_pending(current)) {
                        if (timer.it.cpu.expires.sched == 0) {
                                /*
                                 * Our timer fired and was reset.  Testing
                                 * expires.sched is enough for every clock
                                 * type, since firing zeroes the expires
                                 * union through its widest member.
                                 */
                                spin_unlock_irq(&timer.it_lock);
                                return 0;
                        }

                        /*
                         * Block until cpu_timer_fire (or a signal) wakes us.
                         */
                        __set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(&timer.it_lock);
                        schedule();
                        spin_lock_irq(&timer.it_lock);
                }

                /*
                 * We were interrupted by a signal.
                 */
                sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
                posix_cpu_timer_set(&timer, 0, &zero_it, it);
                spin_unlock_irq(&timer.it_lock);

                if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
                        /*
                         * The timer actually fired before we could disarm
                         * it, so the sleep is already complete.
                         */
                        return 0;
                }

                error = -ERESTART_RESTARTBLOCK;
        }

        return error;
}
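
/*
 * Protocol sketch for do_cpu_nanosleep() (descriptive only): the on-stack
 * timer is armed with posix_cpu_timer_set(), then the task loops in
 * TASK_INTERRUPTIBLE until either cpu_timer_fire() zeroes expires and
 * wakes it (sleep finished, return 0), or a signal arrives, in which case
 * the timer is disarmed via zero_it, the absolute expiry is written back
 * through *rqtp, and -ERESTART_RESTARTBLOCK requests a restart.
 */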

int posix_cpu_nsleep(const clockid_t which_clock, int flags,
                     struct timespec *rqtp, struct timespec __user *rmtp)
{
        struct restart_block *restart_block =
            &current_thread_info()->restart_block;
        struct itimerspec it;
        int error;

        /*
         * Diagnose required errors first.
         */
        if (CPUCLOCK_PERTHREAD(which_clock) &&
            (CPUCLOCK_PID(which_clock) == 0 ||
             CPUCLOCK_PID(which_clock) == current->pid))
                return -EINVAL;

        error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

        if (error == -ERESTART_RESTARTBLOCK) {
                if (flags & TIMER_ABSTIME)
                        return -ERESTARTNOHAND;
                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->fn = posix_cpu_nsleep_restart;
                restart_block->arg0 = which_clock;
                restart_block->arg1 = (unsigned long) rmtp;
                restart_block->arg2 = rqtp->tv_sec;
                restart_block->arg3 = rqtp->tv_nsec;
        }
        return error;
}

long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
        clockid_t which_clock = restart_block->arg0;
        struct timespec __user *rmtp;
        struct timespec t;
        struct itimerspec it;
        int error;

        rmtp = (struct timespec __user *) restart_block->arg1;
        t.tv_sec = restart_block->arg2;
        t.tv_nsec = restart_block->arg3;

        restart_block->fn = do_no_restart_syscall;
        error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

        if (error == -ERESTART_RESTARTBLOCK) {
                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->fn = posix_cpu_nsleep_restart;
                restart_block->arg0 = which_clock;
                restart_block->arg1 = (unsigned long) rmtp;
                restart_block->arg2 = t.tv_sec;
                restart_block->arg3 = t.tv_nsec;
        }
        return error;
}
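
/*
 * For illustration, a hypothetical userspace caller of this path (libc
 * names only; nothing below is defined in this file):
 *
 *        struct timespec req = { .tv_sec = 1, .tv_nsec = 0 }, rem;
 *        int err = clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &req, &rem);
 *
 * On EINTR, rem holds the CPU time still to be slept.  Because the
 * restart block stores the absolute expiry, a restarted sleep resumes
 * toward the original deadline instead of starting the interval over.
 */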

#define PROCESS_CLOCK   MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK    MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
                                    struct timespec *tp)
{
        return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
                                 struct timespec *tp)
{
        return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = PROCESS_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
                              struct timespec *rqtp,
                              struct timespec __user *rmtp)
{
        return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
/*
 * Should not be reached: posix_cpu_nsleep() installs
 * posix_cpu_nsleep_restart() as the restart handler directly.
 */
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
        return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
                                   struct timespec *tp)
{
        return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
                                struct timespec *tp)
{
        return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = THREAD_CLOCK;
        return posix_cpu_timer_create(timer);
}
/*
 * Sleeping on the per-thread CPU clock is not supported, so both the
 * nsleep and nsleep_restart entries simply fail.
 */
static int thread_cpu_nsleep(const clockid_t which_clock, int flags,
                             struct timespec *rqtp, struct timespec __user *rmtp)
{
        return -EINVAL;
}
static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
{
        return -EINVAL;
}

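/*
 * For illustration, how userspace reaches the two k_clocks registered
 * below (standard libc calls; the calling context is an assumption):
 *
 *        struct timespec ts;
 *        clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);   (whole thread group)
 *        clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);    (calling thread only)
 *
 * Both resolve through posix_cpu_clock_get() on the SCHED clock of the
 * current task, per the PROCESS_CLOCK/THREAD_CLOCK defines above.
 */
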
static __init int init_posix_cpu_timers(void)
{
        struct k_clock process = {
                .clock_getres = process_cpu_clock_getres,
                .clock_get = process_cpu_clock_get,
                .clock_set = do_posix_clock_nosettime,
                .timer_create = process_cpu_timer_create,
                .nsleep = process_cpu_nsleep,
                .nsleep_restart = process_cpu_nsleep_restart,
        };
        struct k_clock thread = {
                .clock_getres = thread_cpu_clock_getres,
                .clock_get = thread_cpu_clock_get,
                .clock_set = do_posix_clock_nosettime,
                .timer_create = thread_cpu_timer_create,
                .nsleep = thread_cpu_nsleep,
                .nsleep_restart = thread_cpu_nsleep_restart,
        };

        register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
        register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

        return 0;
}
__initcall(init_posix_cpu_timers);