linux-2.6: kernel/posix-cpu-timers.c
/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <asm/uaccess.h>
#include <linux/errno.h>

static int check_clock(const clockid_t which_clock)
{
	int error = 0;
	struct task_struct *p;
	const pid_t pid = CPUCLOCK_PID(which_clock);

	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	if (pid == 0)
		return 0;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
		   same_thread_group(p, current) : thread_group_leader(p))) {
		error = -EINVAL;
	}
	read_unlock(&tasklist_lock);

	return error;
}
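/*
 * Editorial note (not in the original source): a CPU clockid_t encodes
 * both a target PID and a clock type.  CPUCLOCK_PID() extracts the PID
 * (0 means "the caller"), CPUCLOCK_WHICH() selects PROF, VIRT or SCHED,
 * and CPUCLOCK_PERTHREAD() distinguishes a single thread's clock from a
 * whole thread group's.  So a hypothetical clockid meaning "my own
 * process-wide profiling clock" decomposes as PID == 0,
 * WHICH == CPUCLOCK_PROF, PERTHREAD == 0, which check_clock() above
 * accepts without any task lookup.
 */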

static inline union cpu_time_count
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
	union cpu_time_count ret;
	ret.sched = 0;		/* high half always zero when .cpu used */
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
	} else {
		ret.cpu = timespec_to_cputime(tp);
	}
	return ret;
}
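/*
 * Editorial note: union cpu_time_count carries a sample in one of two
 * forms.  The SCHED clock uses .sched, a u64 nanosecond count from the
 * scheduler; the PROF and VIRT clocks use .cpu, an opaque cputime_t in
 * clock-tick granularity.  Zeroing .sched first means a zeroed .cpu
 * value is also visible as .sched == 0, which the "is this timer
 * armed?" checks below rely on.
 */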

static void sample_to_timespec(const clockid_t which_clock,
			       union cpu_time_count cpu,
			       struct timespec *tp)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		tp->tv_sec = div_long_long_rem(cpu.sched,
					       NSEC_PER_SEC, &tp->tv_nsec);
	} else {
		cputime_to_timespec(cpu.cpu, tp);
	}
}

static inline int cpu_time_before(const clockid_t which_clock,
				  union cpu_time_count now,
				  union cpu_time_count then)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		return now.sched < then.sched;
	} else {
		return cputime_lt(now.cpu, then.cpu);
	}
}
static inline void cpu_time_add(const clockid_t which_clock,
				union cpu_time_count *acc,
				union cpu_time_count val)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		acc->sched += val.sched;
	} else {
		acc->cpu = cputime_add(acc->cpu, val.cpu);
	}
}
static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
						union cpu_time_count a,
						union cpu_time_count b)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		a.sched -= b.sched;
	} else {
		a.cpu = cputime_sub(a.cpu, b.cpu);
	}
	return a;
}

/*
 * Divide and limit the result to res >= 1
 *
 * This is necessary to prevent signal delivery starvation, when the result of
 * the division would be rounded down to 0.
 */
static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
{
	cputime_t res = cputime_div(time, div);

	return max_t(cputime_t, res, 1);
}
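/*
 * Editorial example (illustrative): splitting a 5-tick expiry across 8
 * threads would divide down to 0 ticks per thread, which would set each
 * thread's expiry to its current clock value and re-trip the check on
 * every tick; clamping to 1 tick keeps every per-thread expiry strictly
 * in the future, which is the starvation the comment above refers to.
 */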

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer,
			   union cpu_time_count now)
{
	int i;

	if (timer->it.cpu.incr.sched == 0)
		return;

	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
		unsigned long long delta, incr;

		if (now.sched < timer->it.cpu.expires.sched)
			return;
		incr = timer->it.cpu.incr.sched;
		delta = now.sched + incr - timer->it.cpu.expires.sched;
		/* Don't use (incr*2 < delta), incr*2 might overflow. */
		for (i = 0; incr < delta - incr; i++)
			incr = incr << 1;
		for (; i >= 0; incr >>= 1, i--) {
			if (delta < incr)
				continue;
			timer->it.cpu.expires.sched += incr;
			timer->it_overrun += 1 << i;
			delta -= incr;
		}
	} else {
		cputime_t delta, incr;

		if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
			return;
		incr = timer->it.cpu.incr.cpu;
		delta = cputime_sub(cputime_add(now.cpu, incr),
				    timer->it.cpu.expires.cpu);
		/* Don't use (incr*2 < delta), incr*2 might overflow. */
		for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
			incr = cputime_add(incr, incr);
		for (; i >= 0; incr = cputime_halve(incr), i--) {
			if (cputime_lt(delta, incr))
				continue;
			timer->it.cpu.expires.cpu =
				cputime_add(timer->it.cpu.expires.cpu, incr);
			timer->it_overrun += 1 << i;
			delta = cputime_sub(delta, incr);
		}
	}
}
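/*
 * Editorial worked example (not in the original source): suppose a
 * SCHED timer has expires == 10, incr == 3 and the sample is now == 20.
 * Then delta = 20 + 3 - 10 = 13.  The first loop doubles incr to 12
 * (i == 2); the second loop adds 12 (4 periods, it_overrun += 4) and
 * skips the 6 and 3 steps, leaving expires == 22, the first period
 * boundary strictly after "now".  Counting overruns this way costs
 * O(log(missed periods)) instead of one iteration per missed period.
 */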

static inline cputime_t prof_ticks(struct task_struct *p)
{
	return cputime_add(p->utime, p->stime);
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
	return p->utime;
}
static inline unsigned long long sched_ns(struct task_struct *p)
{
	return task_sched_runtime(p);
}

int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
	int error = check_clock(which_clock);
	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}
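/*
 * Editorial usage sketch (userspace, illustrative only): this is what a
 * caller probing the resolution reaches, e.g.:
 *
 *	struct timespec res;
 *	clock_getres(CLOCK_PROCESS_CPUTIME_ID, &res);
 *
 * Tick-granularity clocks report 1s/HZ rounded up; clockids selecting
 * CPUCLOCK_SCHED report a nominal 1 ns.
 */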

int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	int error = check_clock(which_clock);
	if (error == 0) {
		error = -EPERM;
	}
	return error;
}


/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
			    union cpu_time_count *cpu)
{
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		cpu->cpu = prof_ticks(p);
		break;
	case CPUCLOCK_VIRT:
		cpu->cpu = virt_ticks(p);
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = sched_ns(p);
		break;
	}
	return 0;
}

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading, and with
 * p->sighand->siglock held.
 */
static int cpu_clock_sample_group_locked(unsigned int clock_idx,
					 struct task_struct *p,
					 union cpu_time_count *cpu)
{
	struct task_struct *t = p;
	switch (clock_idx) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		cpu->cpu = cputime_add(p->signal->utime, p->signal->stime);
		do {
			cpu->cpu = cputime_add(cpu->cpu, prof_ticks(t));
			t = next_thread(t);
		} while (t != p);
		break;
	case CPUCLOCK_VIRT:
		cpu->cpu = p->signal->utime;
		do {
			cpu->cpu = cputime_add(cpu->cpu, virt_ticks(t));
			t = next_thread(t);
		} while (t != p);
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = p->signal->sum_sched_runtime;
		/* Add in each other live thread.  */
		while ((t = next_thread(t)) != p) {
			cpu->sched += t->se.sum_exec_runtime;
		}
		cpu->sched += sched_ns(p);
		break;
	}
	return 0;
}
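/*
 * Editorial note: a group sample is the signal_struct totals, which
 * accumulate the CPU time of threads that have already exited, plus the
 * current counts of every live thread in the group.  The SCHED case
 * reads p itself through sched_ns() rather than sum_exec_runtime so the
 * currently-running leader's time is as up to date as possible.
 */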

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  union cpu_time_count *cpu)
{
	int ret;
	unsigned long flags;
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = cpu_clock_sample_group_locked(CPUCLOCK_WHICH(which_clock), p,
					    cpu);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	return ret;
}


int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
	const pid_t pid = CPUCLOCK_PID(which_clock);
	int error = -EINVAL;
	union cpu_time_count rtn;

	if (pid == 0) {
		/*
		 * Special case constant value for our own clocks.
		 * We don't have to do any lookup to find ourselves.
		 */
		if (CPUCLOCK_PERTHREAD(which_clock)) {
			/*
			 * Sampling just ourselves we can do with no locking.
			 */
			error = cpu_clock_sample(which_clock,
						 current, &rtn);
		} else {
			read_lock(&tasklist_lock);
			error = cpu_clock_sample_group(which_clock,
						       current, &rtn);
			read_unlock(&tasklist_lock);
		}
	} else {
		/*
		 * Find the given PID, and validate that the caller
		 * should be able to see it.
		 */
		struct task_struct *p;
		rcu_read_lock();
		p = find_task_by_pid(pid);
		if (p) {
			if (CPUCLOCK_PERTHREAD(which_clock)) {
				if (same_thread_group(p, current)) {
					error = cpu_clock_sample(which_clock,
								 p, &rtn);
				}
			} else {
				read_lock(&tasklist_lock);
				if (thread_group_leader(p) && p->signal) {
					error =
					    cpu_clock_sample_group(which_clock,
								   p, &rtn);
				}
				read_unlock(&tasklist_lock);
			}
		}
		rcu_read_unlock();
	}

	if (error)
		return error;
	sample_to_timespec(which_clock, rtn, tp);
	return 0;
}
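/*
 * Editorial usage sketch (userspace, illustrative only): this is the
 * kernel side of, e.g.,
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
 *
 * which takes the pid == 0, CPUCLOCK_PERTHREAD() branch above and
 * samples the calling thread with no locks taken.
 */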


/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create with the new timer already locked.
 */
int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	int ret = 0;
	const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	INIT_LIST_HEAD(&new_timer->it.cpu.entry);
	new_timer->it.cpu.incr.sched = 0;
	new_timer->it.cpu.expires.sched = 0;

	read_lock(&tasklist_lock);
	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
		if (pid == 0) {
			p = current;
		} else {
			p = find_task_by_pid(pid);
			if (p && !same_thread_group(p, current))
				p = NULL;
		}
	} else {
		if (pid == 0) {
			p = current->group_leader;
		} else {
			p = find_task_by_pid(pid);
			if (p && !thread_group_leader(p))
				p = NULL;
		}
	}
	new_timer->it.cpu.task = p;
	if (p) {
		get_task_struct(p);
	} else {
		ret = -EINVAL;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
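/*
 * Editorial usage sketch (userspace, illustrative only): a timer backed
 * by this code might be created with
 *
 *	timer_t tid;
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGALRM,
 *	};
 *	timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid);
 *
 * The task reference taken here pins the target task_struct until
 * posix_cpu_timer_del() or one of the dead-task cleanup paths drops it.
 */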

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_del(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	int ret = 0;

	if (likely(p != NULL)) {
		read_lock(&tasklist_lock);
		if (unlikely(p->signal == NULL)) {
			/*
			 * We raced with the reaping of the task.
			 * The deletion should have cleared us off the list.
			 */
			BUG_ON(!list_empty(&timer->it.cpu.entry));
		} else {
			spin_lock(&p->sighand->siglock);
			if (timer->it.cpu.firing)
				ret = TIMER_RETRY;
			else
				list_del(&timer->it.cpu.entry);
			spin_unlock(&p->sighand->siglock);
		}
		read_unlock(&tasklist_lock);

		if (!ret)
			put_task_struct(p);
	}

	return ret;
}

/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head,
			   cputime_t utime, cputime_t stime,
			   unsigned long long sum_exec_runtime)
{
	struct cpu_timer_list *timer, *next;
	cputime_t ptime = cputime_add(utime, stime);

	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (cputime_lt(timer->expires.cpu, ptime)) {
			timer->expires.cpu = cputime_zero;
		} else {
			timer->expires.cpu = cputime_sub(timer->expires.cpu,
							 ptime);
		}
	}

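	/*
	 * Editorial note: head points into a cpu_timers[CPUCLOCK_MAX]
	 * array ordered PROF, VIRT, SCHED, so each ++head below steps
	 * on to the next clock's timer list.
	 */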
	++head;
	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (cputime_lt(timer->expires.cpu, utime)) {
			timer->expires.cpu = cputime_zero;
		} else {
			timer->expires.cpu = cputime_sub(timer->expires.cpu,
							 utime);
		}
	}

	++head;
	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (timer->expires.sched < sum_exec_runtime) {
			timer->expires.sched = 0;
		} else {
			timer->expires.sched -= sum_exec_runtime;
		}
	}
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cleanup_timers(tsk->cpu_timers,
		       tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	cleanup_timers(tsk->signal->cpu_timers,
		       cputime_add(tsk->utime, tsk->signal->utime),
		       cputime_add(tsk->stime, tsk->signal->stime),
		       tsk->se.sum_exec_runtime + tsk->signal->sum_sched_runtime);
}


/*
 * Set the expiry times of all the threads in the process so one of them
 * will go off before the process cumulative expiry total is reached.
 */
static void process_timer_rebalance(struct task_struct *p,
				    unsigned int clock_idx,
				    union cpu_time_count expires,
				    union cpu_time_count val)
{
	cputime_t ticks, left;
	unsigned long long ns, nsleft;
	struct task_struct *t = p;
	unsigned int nthreads = atomic_read(&p->signal->live);

	if (!nthreads)
		return;

	switch (clock_idx) {
	default:
		BUG();
		break;
	case CPUCLOCK_PROF:
		left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
					    nthreads);
		do {
			if (likely(!(t->flags & PF_EXITING))) {
				ticks = cputime_add(prof_ticks(t), left);
				if (cputime_eq(t->it_prof_expires,
					       cputime_zero) ||
				    cputime_gt(t->it_prof_expires, ticks)) {
					t->it_prof_expires = ticks;
				}
			}
			t = next_thread(t);
		} while (t != p);
		break;
	case CPUCLOCK_VIRT:
		left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
					    nthreads);
		do {
			if (likely(!(t->flags & PF_EXITING))) {
				ticks = cputime_add(virt_ticks(t), left);
				if (cputime_eq(t->it_virt_expires,
					       cputime_zero) ||
				    cputime_gt(t->it_virt_expires, ticks)) {
					t->it_virt_expires = ticks;
				}
			}
			t = next_thread(t);
		} while (t != p);
		break;
	case CPUCLOCK_SCHED:
		nsleft = expires.sched - val.sched;
		do_div(nsleft, nthreads);
		nsleft = max_t(unsigned long long, nsleft, 1);
		do {
			if (likely(!(t->flags & PF_EXITING))) {
				ns = t->se.sum_exec_runtime + nsleft;
				if (t->it_sched_expires == 0 ||
				    t->it_sched_expires > ns) {
					t->it_sched_expires = ns;
				}
			}
			t = next_thread(t);
		} while (t != p);
		break;
	}
}
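/*
 * Editorial worked example (illustrative): if a process-wide PROF timer
 * has 30ms of CPU time left before it should fire and the group has 3
 * live threads, each thread's it_prof_expires is pulled in to its own
 * current prof_ticks() + 10ms.  Whichever thread burns its 10ms share
 * first trips the per-thread check, which then re-examines the
 * process-wide total under the siglock.
 */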

static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
{
	/*
	 * That's all for this thread or process.
	 * We leave our residual in expires to be reported.
	 */
	put_task_struct(timer->it.cpu.task);
	timer->it.cpu.task = NULL;
	timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
					     timer->it.cpu.expires,
					     now);
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the tasklist_lock held
 * for reading, and interrupts disabled.
 */
static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
{
	struct task_struct *p = timer->it.cpu.task;
	struct list_head *head, *listpos;
	struct cpu_timer_list *const nt = &timer->it.cpu;
	struct cpu_timer_list *next;
	unsigned long i;

	head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
		p->cpu_timers : p->signal->cpu_timers);
	head += CPUCLOCK_WHICH(timer->it_clock);

	BUG_ON(!irqs_disabled());
	spin_lock(&p->sighand->siglock);

	listpos = head;
	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
		list_for_each_entry(next, head, entry) {
			if (next->expires.sched > nt->expires.sched)
				break;
			listpos = &next->entry;
		}
	} else {
		list_for_each_entry(next, head, entry) {
			if (cputime_gt(next->expires.cpu, nt->expires.cpu))
				break;
			listpos = &next->entry;
		}
	}
	list_add(&nt->entry, listpos);

	if (listpos == head) {
		/*
		 * We are the new earliest-expiring timer.
		 * If we are a thread timer, there can always
		 * be a process timer telling us to stop earlier.
		 */

		if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
			switch (CPUCLOCK_WHICH(timer->it_clock)) {
			default:
				BUG();
			case CPUCLOCK_PROF:
				if (cputime_eq(p->it_prof_expires,
					       cputime_zero) ||
				    cputime_gt(p->it_prof_expires,
					       nt->expires.cpu))
					p->it_prof_expires = nt->expires.cpu;
				break;
			case CPUCLOCK_VIRT:
				if (cputime_eq(p->it_virt_expires,
					       cputime_zero) ||
				    cputime_gt(p->it_virt_expires,
					       nt->expires.cpu))
					p->it_virt_expires = nt->expires.cpu;
				break;
			case CPUCLOCK_SCHED:
				if (p->it_sched_expires == 0 ||
				    p->it_sched_expires > nt->expires.sched)
					p->it_sched_expires = nt->expires.sched;
				break;
			}
		} else {
			/*
			 * For a process timer, we must balance
			 * all the live threads' expirations.
			 */
			switch (CPUCLOCK_WHICH(timer->it_clock)) {
			default:
				BUG();
			case CPUCLOCK_VIRT:
				if (!cputime_eq(p->signal->it_virt_expires,
						cputime_zero) &&
				    cputime_lt(p->signal->it_virt_expires,
					       timer->it.cpu.expires.cpu))
					break;
				goto rebalance;
			case CPUCLOCK_PROF:
				if (!cputime_eq(p->signal->it_prof_expires,
						cputime_zero) &&
				    cputime_lt(p->signal->it_prof_expires,
					       timer->it.cpu.expires.cpu))
					break;
				i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
				if (i != RLIM_INFINITY &&
				    i <= cputime_to_secs(timer->it.cpu.expires.cpu))
					break;
				goto rebalance;
			case CPUCLOCK_SCHED:
			rebalance:
				process_timer_rebalance(
					timer->it.cpu.task,
					CPUCLOCK_WHICH(timer->it_clock),
					timer->it.cpu.expires, now);
				break;
			}
		}
	}

	spin_unlock(&p->sighand->siglock);
}
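/*
 * Editorial note: keeping each list sorted by expiry means the tick-time
 * checks below (check_thread_timers/check_process_timers) only ever look
 * at the head of each list and can stop at the first unexpired entry.
 */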

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires.sched = 0;
	} else if (timer->it.cpu.incr.sched == 0) {
		/*
		 * One-shot timer.  Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires.sched = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer.  But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_schedule(timer);
	}
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_set(struct k_itimer *timer, int flags,
			struct itimerspec *new, struct itimerspec *old)
{
	struct task_struct *p = timer->it.cpu.task;
	union cpu_time_count old_expires, new_expires, val;
	int ret;

	if (unlikely(p == NULL)) {
		/*
		 * Timer refers to a dead task's clock.
		 */
		return -ESRCH;
	}

	new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

	read_lock(&tasklist_lock);
	/*
	 * We need the tasklist_lock to protect against reaping that
	 * clears p->signal.  If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(p->signal == NULL)) {
		read_unlock(&tasklist_lock);
		put_task_struct(p);
		timer->it.cpu.task = NULL;
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	BUG_ON(!irqs_disabled());

	ret = 0;
	spin_lock(&p->sighand->siglock);
	old_expires = timer->it.cpu.expires;
	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else
		list_del_init(&timer->it.cpu.entry);
	spin_unlock(&p->sighand->siglock);

	/*
	 * We need to sample the current value to convert the new
	 * value from relative to absolute, and to convert the
	 * old value from absolute to relative.  To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer).  With an absolute time, we must
	 * check if it's already passed.  In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &val);
	} else {
		cpu_clock_sample_group(timer->it_clock, p, &val);
	}

	if (old) {
		if (old_expires.sched == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has
			 * overrun already.  If it has,
			 * we'll report it as having overrun
			 * and with the next reloaded timer
			 * already ticking, though we are
			 * swallowing that pending
			 * notification here to install the
			 * new setting.
			 */
			bump_cpu_timer(timer, val);
			if (cpu_time_before(timer->it_clock, val,
					    timer->it.cpu.expires)) {
				old_expires = cpu_time_sub(
					timer->it_clock,
					timer->it.cpu.expires, val);
				sample_to_timespec(timer->it_clock,
						   old_expires,
						   &old->it_value);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		read_unlock(&tasklist_lock);
		goto out;
	}

	if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
		cpu_time_add(timer->it_clock, &new_expires, val);
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	timer->it.cpu.expires = new_expires;
	if (new_expires.sched != 0 &&
	    (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
	    cpu_time_before(timer->it_clock, val, new_expires)) {
		arm_timer(timer, val);
	}

	read_unlock(&tasklist_lock);

	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
						&new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires.sched != 0 &&
	    (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
	    !cpu_time_before(timer->it_clock, val, new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
 out:
	if (old) {
		sample_to_timespec(timer->it_clock,
				   timer->it.cpu.incr, &old->it_interval);
	}
	return ret;
}
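/*
 * Editorial usage sketch (userspace, illustrative only): arming a
 * periodic 10ms CPU-time timer relative to the current clock value:
 *
 *	struct itimerspec its = {
 *		.it_value    = { .tv_sec = 0, .tv_nsec = 10000000 },
 *		.it_interval = { .tv_sec = 0, .tv_nsec = 10000000 },
 *	};
 *	timer_settime(tid, 0, &its, NULL);
 *
 * Passing TIMER_ABSTIME instead of 0 would skip the cpu_time_add()
 * step above and treat it_value as an absolute reading of the clock.
 */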

void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
	union cpu_time_count now;
	struct task_struct *p = timer->it.cpu.task;
	int clear_dead;

	/*
	 * Easy part: convert the reload time.
	 */
	sample_to_timespec(timer->it_clock,
			   timer->it.cpu.incr, &itp->it_interval);

	if (timer->it.cpu.expires.sched == 0) {	/* Timer not armed at all.  */
		itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
		return;
	}

	if (unlikely(p == NULL)) {
		/*
		 * This task already died and the timer will never fire.
		 * In this case, expires is actually the dead value.
		 */
	dead:
		sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
				   &itp->it_value);
		return;
	}

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		clear_dead = p->exit_state;
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->signal == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Call the timer disarmed, nothing else to do.
			 */
			put_task_struct(p);
			timer->it.cpu.task = NULL;
			timer->it.cpu.expires.sched = 0;
			read_unlock(&tasklist_lock);
			goto dead;
		} else {
			cpu_clock_sample_group(timer->it_clock, p, &now);
			clear_dead = (unlikely(p->exit_state) &&
				      thread_group_empty(p));
		}
		read_unlock(&tasklist_lock);
	}

	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		if (timer->it.cpu.incr.sched == 0 &&
		    cpu_time_before(timer->it_clock,
				    timer->it.cpu.expires, now)) {
			/*
			 * Do-nothing timer expired and has no reload,
			 * so it's as if it was never set.
			 */
			timer->it.cpu.expires.sched = 0;
			itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
			return;
		}
		/*
		 * Account for any expirations and reloads that should
		 * have happened.
		 */
		bump_cpu_timer(timer, now);
	}

	if (unlikely(clear_dead)) {
		/*
		 * We've noticed that the thread is dead, but
		 * not yet reaped.  Take this opportunity to
		 * drop our task ref.
		 */
		clear_dead_task(timer, now);
		goto dead;
	}

	if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
		sample_to_timespec(timer->it_clock,
				   cpu_time_sub(timer->it_clock,
						timer->it.cpu.expires, now),
				   &itp->it_value);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet.  Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	int maxfire;
	struct list_head *timers = tsk->cpu_timers;
	struct signal_struct *const sig = tsk->signal;

	maxfire = 20;
	tsk->it_prof_expires = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
			tsk->it_prof_expires = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	tsk->it_virt_expires = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
			tsk->it_virt_expires = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	tsk->it_sched_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
			tsk->it_sched_expires = t->expires.sched;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	/*
	 * Check for the special case thread timers.
	 */
	if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) {
		unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
		unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;

		if (hard != RLIM_INFINITY &&
		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (sig->rlim[RLIMIT_RTTIME].rlim_cur
			    < sig->rlim[RLIMIT_RTTIME].rlim_max) {
				sig->rlim[RLIMIT_RTTIME].rlim_cur +=
								USEC_PER_SEC;
			}
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
		}
	}
}
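/*
 * Editorial note: maxfire caps each pass at 20 expirations so a burst of
 * already-expired timers can't keep the tick handler looping; anything
 * past the cap simply fires on a later tick.  Because arm_timer() keeps
 * the lists expiry-ordered, the first unexpired entry also supplies the
 * new cached it_*_expires value for this thread.
 */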

/*
 * Check for any per-process CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers[N] lists onto the firing list.
 * Per-thread timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	int maxfire;
	struct signal_struct *const sig = tsk->signal;
	cputime_t utime, stime, ptime, virt_expires, prof_expires;
	unsigned long long sum_sched_runtime, sched_expires;
	struct task_struct *t;
	struct list_head *timers = sig->cpu_timers;

	/*
	 * Don't sample the current process CPU clocks if there are no timers.
	 */
	if (list_empty(&timers[CPUCLOCK_PROF]) &&
	    cputime_eq(sig->it_prof_expires, cputime_zero) &&
	    sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
	    list_empty(&timers[CPUCLOCK_VIRT]) &&
	    cputime_eq(sig->it_virt_expires, cputime_zero) &&
	    list_empty(&timers[CPUCLOCK_SCHED]))
		return;

	/*
	 * Collect the current process totals.
	 */
	utime = sig->utime;
	stime = sig->stime;
	sum_sched_runtime = sig->sum_sched_runtime;
	t = tsk;
	do {
		utime = cputime_add(utime, t->utime);
		stime = cputime_add(stime, t->stime);
		sum_sched_runtime += t->se.sum_exec_runtime;
		t = next_thread(t);
	} while (t != tsk);
	ptime = cputime_add(utime, stime);

	maxfire = 20;
	prof_expires = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(ptime, t->expires.cpu)) {
			prof_expires = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	virt_expires = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(utime, t->expires.cpu)) {
			virt_expires = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	sched_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || sum_sched_runtime < t->expires.sched) {
			sched_expires = t->expires.sched;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	/*
	 * Check for the special case process timers.
	 */
	if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
		if (cputime_ge(ptime, sig->it_prof_expires)) {
			/* ITIMER_PROF fires and reloads.  */
			sig->it_prof_expires = sig->it_prof_incr;
			if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
				sig->it_prof_expires = cputime_add(
					sig->it_prof_expires, ptime);
			}
			__group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
		}
		if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
		    (cputime_eq(prof_expires, cputime_zero) ||
		     cputime_lt(sig->it_prof_expires, prof_expires))) {
			prof_expires = sig->it_prof_expires;
		}
	}
	if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
		if (cputime_ge(utime, sig->it_virt_expires)) {
			/* ITIMER_VIRTUAL fires and reloads.  */
			sig->it_virt_expires = sig->it_virt_incr;
			if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
				sig->it_virt_expires = cputime_add(
					sig->it_virt_expires, utime);
			}
			__group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
		}
		if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
		    (cputime_eq(virt_expires, cputime_zero) ||
		     cputime_lt(sig->it_virt_expires, virt_expires))) {
			virt_expires = sig->it_virt_expires;
		}
	}
	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
		unsigned long psecs = cputime_to_secs(ptime);
		cputime_t x;
		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (sig->rlim[RLIMIT_CPU].rlim_cur
			    < sig->rlim[RLIMIT_CPU].rlim_max) {
				sig->rlim[RLIMIT_CPU].rlim_cur++;
			}
		}
		x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
		if (cputime_eq(prof_expires, cputime_zero) ||
		    cputime_lt(x, prof_expires)) {
			prof_expires = x;
		}
	}

	if (!cputime_eq(prof_expires, cputime_zero) ||
	    !cputime_eq(virt_expires, cputime_zero) ||
	    sched_expires != 0) {
		/*
		 * Rebalance the threads' expiry times for the remaining
		 * process CPU timers.
		 */

		cputime_t prof_left, virt_left, ticks;
		unsigned long long sched_left, sched;
		const unsigned int nthreads = atomic_read(&sig->live);

		if (!nthreads)
			return;

		prof_left = cputime_sub(prof_expires, utime);
		prof_left = cputime_sub(prof_left, stime);
		prof_left = cputime_div_non_zero(prof_left, nthreads);
		virt_left = cputime_sub(virt_expires, utime);
		virt_left = cputime_div_non_zero(virt_left, nthreads);
		if (sched_expires) {
			sched_left = sched_expires - sum_sched_runtime;
			do_div(sched_left, nthreads);
			sched_left = max_t(unsigned long long, sched_left, 1);
		} else {
			sched_left = 0;
		}
		t = tsk;
		do {
			if (unlikely(t->flags & PF_EXITING))
				continue;

			ticks = cputime_add(cputime_add(t->utime, t->stime),
					    prof_left);
			if (!cputime_eq(prof_expires, cputime_zero) &&
			    (cputime_eq(t->it_prof_expires, cputime_zero) ||
			     cputime_gt(t->it_prof_expires, ticks))) {
				t->it_prof_expires = ticks;
			}

			ticks = cputime_add(t->utime, virt_left);
			if (!cputime_eq(virt_expires, cputime_zero) &&
			    (cputime_eq(t->it_virt_expires, cputime_zero) ||
			     cputime_gt(t->it_virt_expires, ticks))) {
				t->it_virt_expires = ticks;
			}

			sched = t->se.sum_exec_runtime + sched_left;
			if (sched_expires && (t->it_sched_expires == 0 ||
					      t->it_sched_expires > sched)) {
				t->it_sched_expires = sched;
			}
		} while ((t = next_thread(t)) != tsk);
	}
}

/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	union cpu_time_count now;

	if (unlikely(p == NULL))
		/*
		 * The task was cleaned up already, no future firings.
		 */
		goto out;

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state)) {
			clear_dead_task(timer, now);
			goto out;
		}
		read_lock(&tasklist_lock); /* arm_timer needs it.  */
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->signal == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			put_task_struct(p);
			timer->it.cpu.task = p = NULL;
			timer->it.cpu.expires.sched = 0;
			goto out_unlock;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			/*
			 * We've noticed that the thread is dead, but
			 * not yet reaped.  Take this opportunity to
			 * drop our task ref.
			 */
			clear_dead_task(timer, now);
			goto out_unlock;
		}
		cpu_clock_sample_group(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		/* Leave the tasklist_lock locked for the call below.  */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	arm_timer(timer, now);

out_unlock:
	read_unlock(&tasklist_lock);

out:
	timer->it_overrun_last = timer->it_overrun;
	timer->it_overrun = -1;
	++timer->it_requeue_pending;
}
1314
/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
        LIST_HEAD(firing);
        struct k_itimer *timer, *next;

        BUG_ON(!irqs_disabled());

#define UNEXPIRED(clock) \
                (cputime_eq(tsk->it_##clock##_expires, cputime_zero) || \
                 cputime_lt(clock##_ticks(tsk), tsk->it_##clock##_expires))

        if (UNEXPIRED(prof) && UNEXPIRED(virt) &&
            (tsk->it_sched_expires == 0 ||
             tsk->se.sum_exec_runtime < tsk->it_sched_expires))
                return;

#undef  UNEXPIRED

        /*
         * Double-check with locks held.
         */
        read_lock(&tasklist_lock);
        if (likely(tsk->signal != NULL)) {
                spin_lock(&tsk->sighand->siglock);

                /*
                 * Take all expired timers off tsk->cpu_timers[N] and
                 * tsk->signal->cpu_timers[N] and move them onto the
                 * local firing list.
                 */
                check_thread_timers(tsk, &firing);
                check_process_timers(tsk, &firing);

                /*
                 * We must release these locks before taking any timer's lock.
                 * There is a potential race with timer deletion here, as the
                 * siglock now protects our private firing list.  We have set
                 * the firing flag in each timer, so that a deletion attempt
                 * that gets the timer lock before we do will give it up and
                 * spin until we've taken care of that timer below.
                 */
                spin_unlock(&tsk->sighand->siglock);
        }
        read_unlock(&tasklist_lock);

        /*
         * Now that all the timers on our list have the firing flag,
         * no one will touch their list entries but us.  We'll take
         * each timer's lock before clearing its firing flag, so no
         * timer call will interfere.
         */
        list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
                int cpu_firing;  /* local copy; distinct from the "firing" list head */

                spin_lock(&timer->it_lock);
                list_del_init(&timer->it.cpu.entry);
                cpu_firing = timer->it.cpu.firing;
                timer->it.cpu.firing = 0;
                /*
                 * The firing flag is -1 if we collided with a reset
                 * of the timer, which already reported this
                 * almost-firing as an overrun.  So don't generate an event.
                 */
                if (likely(cpu_firing >= 0)) {
                        cpu_timer_fire(timer);
                }
                spin_unlock(&timer->it_lock);
        }
}

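/*
 * Illustrative user-space sketch (compiled out; not kernel code):
 * ITIMER_VIRTUAL is one of the expiry sources checked above -- when
 * virt_ticks(tsk) crosses it_virt_expires, the firing path delivers
 * SIGVTALRM.  The helper name and the 10ms period are arbitrary.
 */
#if 0
#include <signal.h>
#include <sys/time.h>

static volatile sig_atomic_t vt_expirations;

static void on_vtalrm(int sig)
{
        vt_expirations++;       /* one count per virtual-timer expiry */
}

static int demo_itimer_virtual(void)
{
        struct itimerval itv = { 0 };

        signal(SIGVTALRM, on_vtalrm);
        /* Expire after 10ms of user-mode CPU time, re-arming every 10ms. */
        itv.it_value.tv_usec = 10 * 1000;
        itv.it_interval.tv_usec = 10 * 1000;
        return setitimer(ITIMER_VIRTUAL, &itv, NULL);
}
#endif
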
/*
 * Set one of the process-wide special case CPU timers.
 * The caller must hold the tasklist_lock and tsk->sighand->siglock.
 * For the RLIMIT_CPU timer, oldval is NULL and *newval is absolute.
 * For the ITIMER_* timers, oldval is non-NULL: *newval arrives relative
 * and is converted to absolute here, while *oldval arrives absolute and
 * is converted to relative.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                           cputime_t *newval, cputime_t *oldval)
{
        union cpu_time_count now;
        struct list_head *head;

        BUG_ON(clock_idx == CPUCLOCK_SCHED);
        cpu_clock_sample_group_locked(clock_idx, tsk, &now);

        if (oldval) {
                if (!cputime_eq(*oldval, cputime_zero)) {
                        if (cputime_le(*oldval, now.cpu)) {
                                /* Just about to fire. */
                                *oldval = jiffies_to_cputime(1);
                        } else {
                                *oldval = cputime_sub(*oldval, now.cpu);
                        }
                }

                if (cputime_eq(*newval, cputime_zero))
                        return;
                *newval = cputime_add(*newval, now.cpu);

                /*
                 * If the RLIMIT_CPU timer will expire before the
                 * ITIMER_PROF timer, we have nothing else to do.
                 */
                if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
                    < cputime_to_secs(*newval))
                        return;
        }

        /*
         * Check whether there are any process timers already set to fire
         * before this one.  If so, we don't have anything more to do.
         */
        head = &tsk->signal->cpu_timers[clock_idx];
        if (list_empty(head) ||
            cputime_ge(list_first_entry(head,
                                  struct cpu_timer_list, entry)->expires.cpu,
                       *newval)) {
                /*
                 * Rejigger each thread's expiry time so that one will
                 * notice before we hit the process-cumulative expiry time.
                 */
                union cpu_time_count expires = { .sched = 0 };
                expires.cpu = *newval;
                process_timer_rebalance(tsk, clock_idx, expires, now);
        }
}

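/*
 * Illustrative user-space sketch (compiled out; not kernel code):
 * RLIMIT_CPU is the oldval == NULL caller of set_process_cpu_timer()
 * above.  When the soft limit is reached the process gets SIGXCPU; at
 * the hard limit it gets SIGKILL.  The helper name and the one/two
 * second limits are arbitrary.
 */
#if 0
#include <signal.h>
#include <sys/resource.h>

static void on_xcpu(int sig)
{
        /* Soft CPU-time limit reached; the hard limit is still ahead. */
}

static int demo_rlimit_cpu(void)
{
        struct rlimit rl = { .rlim_cur = 1, .rlim_max = 2 };    /* seconds */

        signal(SIGXCPU, on_xcpu);
        return setrlimit(RLIMIT_CPU, &rl);
}
#endif
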
static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
                            struct timespec *rqtp, struct itimerspec *it)
{
        struct k_itimer timer;
        int error;

        /*
         * Set up a temporary timer and then wait for it to go off.
         */
        memset(&timer, 0, sizeof timer);
        spin_lock_init(&timer.it_lock);
        timer.it_clock = which_clock;
        timer.it_overrun = -1;
        error = posix_cpu_timer_create(&timer);
        timer.it_process = current;
        if (!error) {
                static struct itimerspec zero_it;       /* all zero: disarms the timer */

                memset(it, 0, sizeof *it);
                it->it_value = *rqtp;

                spin_lock_irq(&timer.it_lock);
                error = posix_cpu_timer_set(&timer, flags, it, NULL);
                if (error) {
                        spin_unlock_irq(&timer.it_lock);
                        return error;
                }

                while (!signal_pending(current)) {
                        if (timer.it.cpu.expires.sched == 0) {
                                /*
                                 * Our timer fired and was reset.
                                 */
                                spin_unlock_irq(&timer.it_lock);
                                return 0;
                        }

                        /*
                         * Block until cpu_timer_fire (or a signal) wakes us.
                         */
                        __set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(&timer.it_lock);
                        schedule();
                        spin_lock_irq(&timer.it_lock);
                }

                /*
                 * We were interrupted by a signal.
                 */
                sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
                posix_cpu_timer_set(&timer, 0, &zero_it, it);
                spin_unlock_irq(&timer.it_lock);

                if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
                        /*
                         * It actually did fire already.
                         */
                        return 0;
                }

                error = -ERESTART_RESTARTBLOCK;
        }

        return error;
}

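/*
 * Illustrative user-space sketch (compiled out; not kernel code): the
 * usual way into do_cpu_nanosleep() is clock_nanosleep(2) on a CPU
 * clock.  Note that clock_nanosleep() returns the error number
 * directly rather than setting errno.  The helper name and the 50ms
 * request are arbitrary.
 */
#if 0
#include <errno.h>
#include <time.h>

static int demo_cpu_nanosleep(void)
{
        /* Block until this process has consumed 50ms more CPU time. */
        struct timespec rq = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000 };
        struct timespec rem;
        int err;

        err = clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &rq, &rem);
        /* On EINTR, "rem" holds the CPU time still to be consumed. */
        return err;
}
#endif
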
int posix_cpu_nsleep(const clockid_t which_clock, int flags,
                     struct timespec *rqtp, struct timespec __user *rmtp)
{
        struct restart_block *restart_block =
            &current_thread_info()->restart_block;
        struct itimerspec it;
        int error;

        /*
         * Diagnose required errors first.
         */
        if (CPUCLOCK_PERTHREAD(which_clock) &&
            (CPUCLOCK_PID(which_clock) == 0 ||
             CPUCLOCK_PID(which_clock) == current->pid))
                return -EINVAL;

        error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

        if (error == -ERESTART_RESTARTBLOCK) {
                if (flags & TIMER_ABSTIME)
                        return -ERESTARTNOHAND;
                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->fn = posix_cpu_nsleep_restart;
                restart_block->arg0 = which_clock;
                restart_block->arg1 = (unsigned long) rmtp;
                restart_block->arg2 = rqtp->tv_sec;
                restart_block->arg3 = rqtp->tv_nsec;
        }
        return error;
}

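/*
 * Illustrative user-space sketch (compiled out; not kernel code): with
 * TIMER_ABSTIME no restart block is set up (-ERESTARTNOHAND above),
 * because an interrupted absolute sleep needs no remaining-time
 * bookkeeping -- it can simply be reissued with the same target.  The
 * helper name and the one-second deadline are arbitrary.
 */
#if 0
#include <errno.h>
#include <time.h>

static int demo_abs_cpu_sleep(void)
{
        struct timespec deadline;
        int err;

        clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &deadline);
        deadline.tv_sec += 1;   /* one more second of process CPU time */
        do {
                err = clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID,
                                      TIMER_ABSTIME, &deadline, NULL);
        } while (err == EINTR);
        return err;
}
#endif
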
long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
        clockid_t which_clock = restart_block->arg0;
        struct timespec __user *rmtp;
        struct timespec t;
        struct itimerspec it;
        int error;

        rmtp = (struct timespec __user *) restart_block->arg1;
        t.tv_sec = restart_block->arg2;
        t.tv_nsec = restart_block->arg3;

        restart_block->fn = do_no_restart_syscall;
        error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

        if (error == -ERESTART_RESTARTBLOCK) {
                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->fn = posix_cpu_nsleep_restart;
                restart_block->arg0 = which_clock;
                restart_block->arg1 = (unsigned long) rmtp;
                restart_block->arg2 = t.tv_sec;
                restart_block->arg3 = t.tv_nsec;
        }
        return error;
}

#define PROCESS_CLOCK   MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK    MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
                                    struct timespec *tp)
{
        return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
                                 struct timespec *tp)
{
        return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = PROCESS_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
                              struct timespec *rqtp,
                              struct timespec __user *rmtp)
{
        return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
        return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
                                   struct timespec *tp)
{
        return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
                                struct timespec *tp)
{
        return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = THREAD_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int thread_cpu_nsleep(const clockid_t which_clock, int flags,
                             struct timespec *rqtp, struct timespec __user *rmtp)
{
        return -EINVAL;
}
static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
{
        return -EINVAL;
}

static __init int init_posix_cpu_timers(void)
{
        struct k_clock process = {
                .clock_getres = process_cpu_clock_getres,
                .clock_get = process_cpu_clock_get,
                .clock_set = do_posix_clock_nosettime,
                .timer_create = process_cpu_timer_create,
                .nsleep = process_cpu_nsleep,
                .nsleep_restart = process_cpu_nsleep_restart,
        };
        struct k_clock thread = {
                .clock_getres = thread_cpu_clock_getres,
                .clock_get = thread_cpu_clock_get,
                .clock_set = do_posix_clock_nosettime,
                .timer_create = thread_cpu_timer_create,
                .nsleep = thread_cpu_nsleep,
                .nsleep_restart = thread_cpu_nsleep_restart,
        };

        register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
        register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

        return 0;
}
__initcall(init_posix_cpu_timers);
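
/*
 * Illustrative user-space sketch (compiled out; not kernel code): the
 * two clocks registered above are what clock_getres(2) and
 * clock_gettime(2) see as CLOCK_PROCESS_CPUTIME_ID and
 * CLOCK_THREAD_CPUTIME_ID.  Since thread_cpu_nsleep() rejects sleeping,
 * clock_nanosleep() on the thread clock fails with EINVAL.  The helper
 * name is arbitrary.
 */
#if 0
#include <stdio.h>
#include <time.h>

static int demo_query_cpu_clocks(void)
{
        struct timespec res, now;

        if (clock_getres(CLOCK_PROCESS_CPUTIME_ID, &res) != 0)
                return -1;
        clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &now);
        printf("process CPU clock: res %ldns, now %ld.%09lds\n",
               res.tv_nsec, (long)now.tv_sec, now.tv_nsec);

        clock_gettime(CLOCK_THREAD_CPUTIME_ID, &now);
        printf("thread CPU clock: now %ld.%09lds\n",
               (long)now.tv_sec, now.tv_nsec);
        return 0;
}
#endif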