        read_lock(&tasklist_lock);
        p = find_task_by_pid(pid);
-       if (!p || (CPUCLOCK_PERTHREAD(which_clock) ?
-                  p->tgid != current->tgid : p->tgid != pid)) {
+       if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
+                  same_thread_group(p, current) : thread_group_leader(p))) {
                error = -EINVAL;
        }
        read_unlock(&tasklist_lock);
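
This hunk is check_clock()'s validation of the pid encoded in the clockid:
the open-coded tgid comparisons give way to the sched.h helpers. For
reference, a sketch of those helpers as defined around this time (an
assumption on my part; same_thread_group() was later rewritten to compare
p->signal instead of p->tgid):

static inline int same_thread_group(struct task_struct *p1,
                                    struct task_struct *p2)
{
        return p1->tgid == p2->tgid;
}

#define thread_group_leader(p)  (p == p->group_leader)

The helpers insulate callers from how thread-group membership is actually
represented, which is the point of the change. posix_cpu_clock_get() gets
the same treatment when sampling another task's clock:
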
        p = find_task_by_pid(pid);
        if (p) {
                if (CPUCLOCK_PERTHREAD(which_clock)) {
-                       if (p->tgid == current->tgid) {
+                       if (same_thread_group(p, current)) {
                                error = cpu_clock_sample(which_clock,
                                                         p, &rtn);
                        }
                } else {
                        read_lock(&tasklist_lock);
-                       if (p->tgid == pid && p->signal) {
+                       if (thread_group_leader(p) && p->signal) {
                                error =
                                    cpu_clock_sample_group(which_clock,
                                                           p, &rtn);
                        }
                        read_unlock(&tasklist_lock);
                }
        }
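
Sampling a single thread's clock needs no tasklist_lock, since the caller may
only name a thread in its own group; the group-wide sum takes the read lock so
the group cannot change underneath it. From userspace this path sits behind
the POSIX clock API. A minimal sketch, not part of the patch, that samples
another process's CPU clock:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <time.h>

int main(int argc, char **argv)
{
        clockid_t cid;
        struct timespec ts;
        pid_t pid = argc > 1 ? (pid_t)atoi(argv[1]) : 0; /* 0 means self */
        int err;

        /* Encode the target pid into a CPU clockid ... */
        err = clock_getcpuclockid(pid, &cid);
        if (err != 0) {
                fprintf(stderr, "clock_getcpuclockid: %s\n", strerror(err));
                return 1;
        }
        /* ... and sample it; the kernel lands in posix_cpu_clock_get(). */
        if (clock_gettime(cid, &ts) != 0) {
                perror("clock_gettime");
                return 1;
        }
        printf("pid %d: %ld.%09lds of CPU time\n",
               (int)pid, (long)ts.tv_sec, ts.tv_nsec);
        return 0;
}

posix_cpu_timer_create() carries the same pair of checks when a timer, rather
than a one-shot sample, is attached to the clock:
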
        if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
                if (pid == 0) {
                        p = current;
                } else {
                        p = find_task_by_pid(pid);
-                       if (p && p->tgid != current->tgid)
+                       if (p && !same_thread_group(p, current))
                                p = NULL;
                }
        } else {
                if (pid == 0) {
                        p = current->group_leader;
                } else {
                        p = find_task_by_pid(pid);
-                       if (p && p->tgid != pid)
+                       if (p && !thread_group_leader(p))
                                p = NULL;
                }
        }
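
A pid of 0 short-circuits to the caller (or its group leader); anything else
must name a thread in the caller's group, or a group leader, respectively.
Such a timer is created through the standard POSIX timer API; a minimal
sketch (the two-second expiry is an arbitrary choice):

#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        timer_t tid;
        struct sigevent sev = {
                .sigev_notify = SIGEV_SIGNAL,
                .sigev_signo  = SIGALRM,
        };
        struct itimerspec its = {
                .it_value.tv_sec = 2,   /* one-shot: 2s of process CPU time */
        };

        /* pid 0, process-wide clock: the kernel picks current->group_leader. */
        if (timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid) != 0) {
                perror("timer_create");
                return 1;
        }
        if (timer_settime(tid, 0, &its, NULL) != 0) {
                perror("timer_settime");
                return 1;
        }

        for (;;)        /* burn CPU; SIGALRM's default action ends us */
                ;
}

The remaining hunks appear to come from a separate change, the SCHED_FIFO/
SCHED_RR watchdog, which teaches check_thread_timers() to enforce
RLIMIT_RTTIME:
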
static void check_thread_timers(struct task_struct *tsk,
                                struct list_head *firing)
{
        int maxfire;
        struct list_head *timers = tsk->cpu_timers;
+       struct signal_struct *const sig = tsk->signal;

        maxfire = 20;
        tsk->it_prof_expires = cputime_zero;

        ...
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }
+
+       /*
+        * Check for the special case thread timers.
+        */
+       if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) {
+               unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
+               unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;
+
+               if (hard != RLIM_INFINITY &&
+                   tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
+                       /*
+                        * At the hard limit, we just die.
+                        * No need to calculate anything else now.
+                        */
+                       __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
+                       return;
+               }
+               if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
+                       /*
+                        * At the soft limit, send a SIGXCPU every second.
+                        */
+                       if (sig->rlim[RLIMIT_RTTIME].rlim_cur
+                           < sig->rlim[RLIMIT_RTTIME].rlim_max) {
+                               sig->rlim[RLIMIT_RTTIME].rlim_cur +=
+                                                               USEC_PER_SEC;
+                       }
+                       __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
+               }
+       }
}
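
tsk->rt.timeout counts scheduler ticks a realtime task has run without
blocking, while RLIMIT_RTTIME is specified in microseconds, hence the
DIV_ROUND_UP(limit, USEC_PER_SEC/HZ) conversion: at HZ=1000 a tick is 1000us,
so a 500000us soft limit becomes 500 ticks. Note the soft-limit branch also
bumps rlim_cur by USEC_PER_SEC, which is what makes SIGXCPU repeat roughly
once per second until the hard limit delivers SIGKILL. A userspace sketch
that trips both limits (the limit values are arbitrary, and SCHED_FIFO
requires privilege):

#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

static void on_xcpu(int sig)
{
        (void)sig;
        (void)write(STDOUT_FILENO, "SIGXCPU\n", 8); /* async-signal-safe */
}

int main(void)
{
        struct sched_param sp = { .sched_priority = 1 };
        struct rlimit rl = {
                .rlim_cur = 500000,     /* soft: SIGXCPU after 0.5s of RT CPU */
                .rlim_max = 2000000,    /* hard: SIGKILL after 2s */
        };

        signal(SIGXCPU, on_xcpu);       /* default action would terminate */

        if (sched_setscheduler(0, SCHED_FIFO, &sp) != 0) {
                perror("sched_setscheduler");
                return 1;
        }
        if (setrlimit(RLIMIT_RTTIME, &rl) != 0) {
                perror("setrlimit");
                return 1;
        }

        for (;;)        /* spin without blocking, so rt.timeout keeps growing */
                ;
}

Expect a handful of SIGXCPU lines and then a hard kill; per getrlimit(2),
making a blocking system call resets the consumed-time count.
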
/*