/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 * (C) Balbir Singh, IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/cgroupstats.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <net/genetlink.h>
#include <asm/atomic.h>
/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100+6*NR_CPUS)
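/*
 * The cpumask attribute is passed as a cpulist-format string,
 * e.g. "0-3,8"; it is parsed with cpulist_parse() in parse() below.
 */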
static DEFINE_PER_CPU(__u32, taskstats_seqnum) = { 0 };
static int family_registered;
struct kmem_cache *taskstats_cache;
static struct genl_family family = {
        .id = GENL_ID_GENERATE,
        .name = TASKSTATS_GENL_NAME,
        .version = TASKSTATS_GENL_VERSION,
        .maxattr = TASKSTATS_CMD_ATTR_MAX,
};
static struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1]
__read_mostly = {
        [TASKSTATS_CMD_ATTR_PID] = { .type = NLA_U32 },
        [TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
        [TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
        [TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },
};
static struct nla_policy
cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] __read_mostly = {
        [CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};
struct listener {
        struct list_head list;
        pid_t pid;
        char valid;
};
struct listener_list {
        struct rw_semaphore sem;
        struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);
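/*
 * Exit data for a task is delivered to the listeners registered for the
 * CPU on which the task exits; each per-CPU list is protected by its own
 * rw_semaphore.
 */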
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
        /*
         * If new attributes are added, please revisit this allocation
         */
        skb = genlmsg_new(size, GFP_KERNEL);
                int seq = get_cpu_var(taskstats_seqnum)++;
                put_cpu_var(taskstats_seqnum);

                reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
                reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
/*
 * Send taskstats data in @skb to listener with nl_pid @pid
 */
static int send_reply(struct sk_buff *skb, pid_t pid)
        struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
        void *reply = genlmsg_data(genlhdr);

        rc = genlmsg_end(skb, reply);

        return genlmsg_unicast(skb, pid);
/*
 * Send taskstats data in @skb to listeners registered for @cpu's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb,
                               struct listener_list *listeners)
        struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
        struct listener *s, *tmp;
        struct sk_buff *skb_next, *skb_cur = skb;
        void *reply = genlmsg_data(genlhdr);
        int rc, delcount = 0;

        rc = genlmsg_end(skb, reply);
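        /*
         * Unicast one copy of the message to every registered listener:
         * all but the last listener receive a clone of the skb and the
         * last one consumes the original, so no extra copy is left over.
         * Listeners whose sockets are gone (-ECONNREFUSED) are dropped
         * from the list further down, under the write side of the
         * semaphore.
         */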
        down_read(&listeners->sem);
        list_for_each_entry(s, &listeners->list, list) {
                if (!list_is_last(&s->list, &listeners->list)) {
                        skb_next = skb_clone(skb_cur, GFP_KERNEL);
                rc = genlmsg_unicast(skb_cur, s->pid);
                if (rc == -ECONNREFUSED) {
        up_read(&listeners->sem);

        /* Delete invalidated entries */
        down_write(&listeners->sem);
        list_for_each_entry_safe(s, tmp, &listeners->list, list) {
        up_write(&listeners->sem);
static int fill_pid(pid_t pid, struct task_struct *tsk,
                struct taskstats *stats)
                tsk = find_task_by_pid(pid);
                        get_task_struct(tsk);
                get_task_struct(tsk);

        memset(stats, 0, sizeof(*stats));
        /*
         * Each accounting subsystem adds calls to its functions to
         * fill in relevant parts of struct taskstats as follows
         *
         *      per-task-foo(stats, tsk);
         */
        delayacct_add_tsk(stats, tsk);

        /* fill in basic acct fields */
        stats->version = TASKSTATS_VERSION;
        stats->nvcsw = tsk->nvcsw;
        stats->nivcsw = tsk->nivcsw;
        bacct_add_tsk(stats, tsk);

        /* fill in extended acct fields */
        xacct_add_tsk(stats, tsk);

        /* Define err: label here if needed */
        put_task_struct(tsk);
static int fill_tgid(pid_t tgid, struct task_struct *first,
                struct taskstats *stats)
        struct task_struct *tsk;

        /*
         * Add additional stats from live tasks except zombie thread group
         * leaders who are already counted with the dead tasks
         */
                first = find_task_by_pid(tgid);

        if (!first || !lock_task_sighand(first, &flags))

        if (first->signal->stats)
                memcpy(stats, first->signal->stats, sizeof(*stats));
                memset(stats, 0, sizeof(*stats));

                /*
                 * Accounting subsystem can call its functions here to
                 * fill in relevant parts of struct taskstats as follows
                 *
                 *      per-task-foo(stats, tsk);
                 */
                delayacct_add_tsk(stats, tsk);

                stats->nvcsw += tsk->nvcsw;
                stats->nivcsw += tsk->nivcsw;
        } while_each_thread(first, tsk);

        unlock_task_sighand(first, &flags);

        stats->version = TASKSTATS_VERSION;
        /*
         * Accounting subsystems can also add calls here to modify
         * fields of taskstats.
         */
static void fill_tgid_exit(struct task_struct *tsk)
        spin_lock_irqsave(&tsk->sighand->siglock, flags);
        if (!tsk->signal->stats)

        /*
         * Each accounting subsystem calls its functions here to
         * accumulate its per-task stats for tsk, into the per-tgid structure
         *
         *      per-task-foo(tsk->signal->stats, tsk);
         */
        delayacct_add_tsk(tsk->signal->stats, tsk);

        spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
        struct listener_list *listeners;
        struct listener *s, *tmp;

        cpumask_t mask = *maskp;

        if (!cpus_subset(mask, cpu_possible_map))

        if (isadd == REGISTER) {
                for_each_cpu_mask(cpu, mask) {
                        s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
                        INIT_LIST_HEAD(&s->list);

                        listeners = &per_cpu(listener_array, cpu);
                        down_write(&listeners->sem);
                        list_add(&s->list, &listeners->list);
                        up_write(&listeners->sem);

        /* Deregister or cleanup */

        for_each_cpu_mask(cpu, mask) {
                listeners = &per_cpu(listener_array, cpu);
                down_write(&listeners->sem);
                list_for_each_entry_safe(s, tmp, &listeners->list, list) {
                up_write(&listeners->sem);
static int parse(struct nlattr *na, cpumask_t *mask)
        if (len > TASKSTATS_CPUMASK_MAXLEN)
        data = kmalloc(len, GFP_KERNEL);
        nla_strlcpy(data, na, len);
        ret = cpulist_parse(data, *mask);
static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
        struct nlattr *na, *ret;

        aggr = (type == TASKSTATS_TYPE_PID)
                        ? TASKSTATS_TYPE_AGGR_PID
                        : TASKSTATS_TYPE_AGGR_TGID;

        na = nla_nest_start(skb, aggr);
        if (nla_put(skb, type, sizeof(pid), &pid) < 0)
        ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
        nla_nest_end(skb, na);

        return nla_data(ret);
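/*
 * The reply built above nests the queried pid/tgid plus a reserved
 * TASKSTATS_TYPE_STATS attribute inside TASKSTATS_TYPE_AGGR_PID (or
 * _TGID); callers fill the returned struct taskstats in place before
 * the message is sent.
 */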
static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
        struct sk_buff *rep_skb;
        struct cgroupstats *stats;

        na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];

        fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
        file = fget_light(fd, &fput_needed);

        size = nla_total_size(sizeof(struct cgroupstats));

        rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,

        na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
                        sizeof(struct cgroupstats));
        stats = nla_data(na);
        memset(stats, 0, sizeof(*stats));

        rc = cgroupstats_build(stats, file->f_dentry);

        fput_light(file, fput_needed);
        return send_reply(rep_skb, info->snd_pid);

        fput_light(file, fput_needed);
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
        struct sk_buff *rep_skb;
        struct taskstats *stats;

        rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], &mask);
                return add_del_listener(info->snd_pid, &mask, REGISTER);

        rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], &mask);
                return add_del_listener(info->snd_pid, &mask, DEREGISTER);

        /*
         * Size includes space for nested attributes
         */
        size = nla_total_size(sizeof(u32)) +
                nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);

        rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);

        if (info->attrs[TASKSTATS_CMD_ATTR_PID]) {
                u32 pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
                stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);

                rc = fill_pid(pid, NULL, stats);

        } else if (info->attrs[TASKSTATS_CMD_ATTR_TGID]) {
                u32 tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
                stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);

                rc = fill_tgid(tgid, NULL, stats);

        return send_reply(rep_skb, info->snd_pid);
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
        struct signal_struct *sig = tsk->signal;
        struct taskstats *stats;

        if (sig->stats || thread_group_empty(tsk))

        /* No problem if kmem_cache_zalloc() fails */
        stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);
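        /*
         * The allocation is done without holding the siglock; if another
         * thread has already installed sig->stats by the time the lock
         * is taken, the spare allocation is freed below.
         */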
        spin_lock_irq(&tsk->sighand->siglock);
        spin_unlock_irq(&tsk->sighand->siglock);

        kmem_cache_free(taskstats_cache, stats);
/* Send pid data out on exit */
void taskstats_exit(struct task_struct *tsk, int group_dead)
        struct listener_list *listeners;
        struct taskstats *stats;
        struct sk_buff *rep_skb;

        if (!family_registered)

        /*
         * Size includes space for nested attributes
         */
        size = nla_total_size(sizeof(u32)) +
                nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);

        is_thread_group = !!taskstats_tgid_alloc(tsk);
        if (is_thread_group) {
                /* PID + STATS + TGID + STATS */
                /* fill the tsk->signal->stats structure */

        listeners = &__raw_get_cpu_var(listener_array);
        if (list_empty(&listeners->list))

        rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);

        stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, tsk->pid);

        rc = fill_pid(tsk->pid, tsk, stats);

        /*
         * Doesn't matter if tsk is the leader or the last group member leaving
         */
        if (!is_thread_group || !group_dead)

        stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tsk->tgid);

        memcpy(stats, tsk->signal->stats, sizeof(*stats));

        send_cpu_listeners(rep_skb, listeners);
static struct genl_ops taskstats_ops = {
        .cmd = TASKSTATS_CMD_GET,
        .doit = taskstats_user_cmd,
        .policy = taskstats_cmd_get_policy,
};

static struct genl_ops cgroupstats_ops = {
        .cmd = CGROUPSTATS_CMD_GET,
        .doit = cgroupstats_user_cmd,
        .policy = cgroupstats_cmd_get_policy,
};
/* Needed early in initialization */
void __init taskstats_init_early(void)
        taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
        for_each_possible_cpu(i) {
                INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
                init_rwsem(&(per_cpu(listener_array, i).sem));
static int __init taskstats_init(void)
        rc = genl_register_family(&family);

        rc = genl_register_ops(&family, &taskstats_ops);

        rc = genl_register_ops(&family, &cgroupstats_ops);

        family_registered = 1;
        printk("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);

        genl_unregister_ops(&family, &taskstats_ops);
        genl_unregister_family(&family);
/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);