/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh,   IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <net/genetlink.h>
#include <asm/atomic.h>
/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100+6*NR_CPUS)
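/*
 * Rough sizing argument: a cpulist string such as "0-3,8,12-15" needs
 * only a handful of characters per possible CPU, so 6*NR_CPUS plus some
 * slack easily covers any mask userspace may send.
 */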
static DEFINE_PER_CPU(__u32, taskstats_seqnum) = { 0 };
static int family_registered;
kmem_cache_t *taskstats_cache;
static struct genl_family family = {
	.id		= GENL_ID_GENERATE,
	.name		= TASKSTATS_GENL_NAME,
	.version	= TASKSTATS_GENL_VERSION,
	.maxattr	= TASKSTATS_CMD_ATTR_MAX,
};
static struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
	[TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK]  = { .type = NLA_STRING },
	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },
};
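/*
 * Illustrative userspace sketch (assumes the libnl-3 genl API; not part
 * of the kernel interface itself): querying the stats of a single pid
 * amounts to resolving the family by name and sending TASKSTATS_CMD_GET
 * with one of the attributes above.
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	struct nl_msg *msg = nlmsg_alloc();
 *	int fam;
 *
 *	genl_connect(sk);
 *	fam = genl_ctrl_resolve(sk, TASKSTATS_GENL_NAME);
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
 *		    TASKSTATS_CMD_GET, TASKSTATS_GENL_VERSION);
 *	nla_put_u32(msg, TASKSTATS_CMD_ATTR_PID, some_pid);
 *	nl_send_auto(sk, msg);
 *
 * The reply carries a TASKSTATS_TYPE_AGGR_PID nest holding
 * TASKSTATS_TYPE_PID and TASKSTATS_TYPE_STATS.
 */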
struct listener {
	struct list_head list;
	pid_t pid;
	char valid;
};
struct listener_list {
	struct rw_semaphore sem;
	struct list_head list;
};

static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
	REGISTER,
	DEREGISTER,
	CPU_DONT_CARE
};
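/*
 * Every possible CPU owns a list of listeners interested in the exit
 * data of tasks that die on that CPU.  REGISTER/DEREGISTER requests
 * edit these lists and take the per-CPU semaphore for writing, as does
 * the send path when it prunes dead listeners; taskstats_exit_alloc()
 * only takes it for reading to check whether anyone is listening at all.
 */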
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
			void **replyp, size_t size)
{
	/* If new attributes are added, please revisit this allocation */
	skb = nlmsg_new(size);

	if (!info) {
		int seq = get_cpu_var(taskstats_seqnum)++;
		put_cpu_var(taskstats_seqnum);

		reply = genlmsg_put(skb, 0, seq,
				family.id, 0, 0, cmd, family.version);
	} else
		reply = genlmsg_put(skb, info->snd_pid, info->snd_seq,
				family.id, 0, 0, cmd, family.version);
/*
 * Send taskstats data in @skb to listener with nl_pid @pid
 */
static int send_reply(struct sk_buff *skb, pid_t pid)
{
	struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
	void *reply = genlmsg_data(genlhdr);
	int rc;

	rc = genlmsg_end(skb, reply);

	return genlmsg_unicast(skb, pid);
}
/*
 * Send taskstats data in @skb to listeners registered for @cpu's exit data
 */
static int send_cpu_listeners(struct sk_buff *skb, unsigned int cpu)
{
	struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
	struct listener_list *listeners;
	struct listener *s, *tmp;
	struct sk_buff *skb_next, *skb_cur = skb;
	void *reply = genlmsg_data(genlhdr);

	rc = genlmsg_end(skb, reply);

	listeners = &per_cpu(listener_array, cpu);
	down_write(&listeners->sem);
	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
		if (!list_is_last(&s->list, &listeners->list)) {
			skb_next = skb_clone(skb_cur, GFP_KERNEL);
		}
		ret = genlmsg_unicast(skb_cur, s->pid);
		if (ret == -ECONNREFUSED) {
			s->valid = 0;
		}
		skb_cur = skb_next;
	}
	up_write(&listeners->sem);
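/*
 * genlmsg_unicast() consumes the skb it is handed, so each listener
 * except the last on the list gets its own clone of @skb and the
 * original goes to the final recipient.  A listener whose socket has
 * gone away (-ECONNREFUSED) is marked invalid so it can be cleaned up.
 */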
static int fill_pid(pid_t pid, struct task_struct *pidtsk,
		struct taskstats *stats)
{
	int rc = 0;
	struct task_struct *tsk = pidtsk;

	if (!pidtsk) {
		read_lock(&tasklist_lock);
		tsk = find_task_by_pid(pid);
		if (!tsk) {
			read_unlock(&tasklist_lock);
			return -ESRCH;
		}
		get_task_struct(tsk);
		read_unlock(&tasklist_lock);
	} else
		get_task_struct(tsk);

	/*
	 * Each accounting subsystem adds calls to its functions to
	 * fill in relevant parts of struct taskstats as follows
	 *
	 *	rc = per-task-foo(stats, tsk);
	 */
	rc = delayacct_add_tsk(stats, tsk);
	stats->version = TASKSTATS_VERSION;

	/* Define err: label here if needed */
	put_task_struct(tsk);
	return rc;
}
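/*
 * fill_pid() serves both the exit path, where the caller already holds
 * a task pointer, and userspace queries, where the task is looked up
 * and pinned under tasklist_lock; either way the reference is dropped
 * once the delay accounting data has been copied into @stats.
 */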
static int fill_tgid(pid_t tgid, struct task_struct *tgidtsk,
		struct taskstats *stats)
{
	struct task_struct *tsk, *first;
	unsigned long flags;

	/*
	 * Add additional stats from live tasks except zombie thread group
	 * leaders who are already counted with the dead tasks
	 */
	first = tgidtsk;
	if (!first) {
		read_lock(&tasklist_lock);
		first = find_task_by_pid(tgid);
		if (!first) {
			read_unlock(&tasklist_lock);
			return -ESRCH;
		}
		get_task_struct(first);
		read_unlock(&tasklist_lock);
	} else
		get_task_struct(first);

	/* Start with stats from dead tasks */
	spin_lock_irqsave(&first->signal->stats_lock, flags);
	if (first->signal->stats)
		memcpy(stats, first->signal->stats, sizeof(*stats));
	spin_unlock_irqrestore(&first->signal->stats_lock, flags);

	tsk = first;
	read_lock(&tasklist_lock);
	do {
		if (tsk->exit_state == EXIT_ZOMBIE && thread_group_leader(tsk))
			continue;
		/*
		 * Accounting subsystem can call its functions here to
		 * fill in relevant parts of struct taskstats as follows
		 *
		 *	per-task-foo(stats, tsk);
		 */
		delayacct_add_tsk(stats, tsk);
	} while_each_thread(first, tsk);
	read_unlock(&tasklist_lock);

	stats->version = TASKSTATS_VERSION;
	/*
	 * Accounting subsystems can also add calls here to modify
	 * fields of taskstats.
	 */
	return 0;
}
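/*
 * The per-tgid figure is therefore the sum of two parts: whatever has
 * already been folded into first->signal->stats by threads that exited
 * earlier (see fill_tgid_exit() below) plus the current totals of every
 * thread that is still alive.
 */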
static void fill_tgid_exit(struct task_struct *tsk)
{
	unsigned long flags;

	spin_lock_irqsave(&tsk->signal->stats_lock, flags);
	if (!tsk->signal->stats)
		goto ret;
	/*
	 * Each accounting subsystem calls its functions here to
	 * accumulate its per-task stats for tsk, into the per-tgid structure
	 *
	 *	per-task-foo(tsk->signal->stats, tsk);
	 */
	delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
	spin_unlock_irqrestore(&tsk->signal->stats_lock, flags);
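/*
 * This is where the accumulator used by fill_tgid() gets filled: each
 * exiting thread folds its own delays into tsk->signal->stats, so the
 * group-wide totals still cover threads that have already gone away.
 */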
static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
{
	struct listener_list *listeners;
	struct listener *s, *tmp;
	unsigned int cpu;
	cpumask_t mask = *maskp;

	if (!cpus_subset(mask, cpu_possible_map))
		return -EINVAL;

	if (isadd == REGISTER) {
		for_each_cpu_mask(cpu, mask) {
			s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
					 cpu_to_node(cpu));
			s->pid = pid;
			INIT_LIST_HEAD(&s->list);

			listeners = &per_cpu(listener_array, cpu);
			down_write(&listeners->sem);
			list_add(&s->list, &listeners->list);
			up_write(&listeners->sem);
		}
		return 0;
	}

	/* Deregister or cleanup */
	for_each_cpu_mask(cpu, mask) {
		listeners = &per_cpu(listener_array, cpu);
		down_write(&listeners->sem);
		list_for_each_entry_safe(s, tmp, &listeners->list, list)
			if (s->pid == pid) {
				list_del(&s->list);
				kfree(s);
			}
		up_write(&listeners->sem);
	}
	return 0;
}
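/*
 * The cpumask attribute is an ASCII cpulist, e.g. "0-3,8" means
 * CPUs 0, 1, 2, 3 and 8.
 */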
static int parse(struct nlattr *na, cpumask_t *mask)
{
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;

	data = kmalloc(len, GFP_KERNEL);

	nla_strlcpy(data, na, len);
	ret = cpulist_parse(data, *mask);
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *rep_skb;
	struct taskstats stats;

	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], &mask);
	if (rc < 0)
		return rc;
	if (rc == 0)
		return add_del_listener(info->snd_pid, &mask, REGISTER);

	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], &mask);
	if (rc < 0)
		return rc;
	if (rc == 0)
		return add_del_listener(info->snd_pid, &mask, DEREGISTER);

	/*
	 * Size includes space for nested attributes
	 */
	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);

	memset(&stats, 0, sizeof(stats));
	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, &reply, size);
	if (rc < 0)
		return rc;

	if (info->attrs[TASKSTATS_CMD_ATTR_PID]) {
		u32 pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
		rc = fill_pid(pid, NULL, &stats);

		na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID);
		NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, pid);
		NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
				stats);
	} else if (info->attrs[TASKSTATS_CMD_ATTR_TGID]) {
		u32 tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
		rc = fill_tgid(tgid, NULL, &stats);

		na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID);
		NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, tgid);
		NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
				stats);
	}

	nla_nest_end(rep_skb, na);

	return send_reply(rep_skb, info->snd_pid);

nla_put_failure:
	return genlmsg_cancel(rep_skb, reply);
void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu)
{
	struct listener_list *listeners;
	struct taskstats *tmp;

	/*
	 * This is the cpu on which the task is exiting currently and will
	 * be the one for which the exit event is sent, even if the cpu
	 * on which this function is running changes later.
	 */
	*mycpu = raw_smp_processor_id();

	tmp = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);

	listeners = &per_cpu(listener_array, *mycpu);
	down_read(&listeners->sem);
	if (!list_empty(&listeners->list)) {
		*ptidstats = tmp;
		tmp = NULL;
	}
	up_read(&listeners->sem);
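/*
 * The buffer is allocated before taking the semaphore, but it is only
 * handed back to the exit path when at least one listener is registered
 * on this CPU; with nobody listening, no exit data gets built or sent.
 */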
/* Send pid data out on exit */
void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats,
			int group_dead, unsigned int mycpu)
{
	struct sk_buff *rep_skb;

	if (!family_registered || !tidstats)
		return;

	spin_lock_irqsave(&tsk->signal->stats_lock, flags);
	is_thread_group = tsk->signal->stats ? 1 : 0;
	spin_unlock_irqrestore(&tsk->signal->stats_lock, flags);

	/*
	 * Size includes space for nested attributes
	 */
	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);

	if (is_thread_group)
		size = 2 * size;	/* PID + STATS + TGID + STATS */

	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, &reply, size);

	rc = fill_pid(tsk->pid, tsk, tidstats);

	na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID);
	NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, (u32)tsk->pid);
	NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
			*tidstats);
	nla_nest_end(rep_skb, na);

	if (!is_thread_group)
		goto send;

	/*
	 * tsk has/had a thread group so fill the tsk->signal->stats structure
	 * Doesn't matter if tsk is the leader or the last group member leaving
	 */
	na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID);
	NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, (u32)tsk->tgid);
	/* No locking needed for tsk->signal->stats since group is dead */
	NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
			*tsk->signal->stats);
	nla_nest_end(rep_skb, na);

send:
	send_cpu_listeners(rep_skb, mycpu);
	return;

nla_put_failure:
	genlmsg_cancel(rep_skb, reply);
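/*
 * Each exit event thus carries a TASKSTATS_TYPE_AGGR_PID nest with the
 * dying task's own stats; when the whole thread group is gone
 * (group_dead), a TASKSTATS_TYPE_AGGR_TGID nest with the accumulated
 * per-group stats is appended before the message goes out to this CPU's
 * listeners.
 */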
static struct genl_ops taskstats_ops = {
	.cmd		= TASKSTATS_CMD_GET,
	.doit		= taskstats_user_cmd,
	.policy		= taskstats_cmd_get_policy,
};
/* Needed early in initialization */
void __init taskstats_init_early(void)
{
	unsigned int i;

	taskstats_cache = kmem_cache_create("taskstats_cache",
						sizeof(struct taskstats),
						0, SLAB_PANIC, NULL, NULL);
	for_each_possible_cpu(i) {
		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
		init_rwsem(&(per_cpu(listener_array, i).sem));
	}
}
static int __init taskstats_init(void)
{
	rc = genl_register_family(&family);

	rc = genl_register_ops(&family, &taskstats_ops);
	if (rc < 0)
		goto err;

	family_registered = 1;
	return 0;
err:
	genl_unregister_family(&family);
/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);