/*
 * Workqueue statistical tracer.
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <trace/workqueue.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include "trace_stat.h"
#include "trace.h"
/* A cpu workqueue thread */
struct cpu_workqueue_stats {
        struct list_head        list;
/* Useful to know if we print the cpu headers */
        bool                    first_entry;
        int                     cpu;
        pid_t                   pid;
/* Can be inserted from interrupt or user context, need to be atomic */
        atomic_t                inserted;
/*
 * Doesn't need to be atomic: works are serialized in a single workqueue
 * thread on a single CPU.
 */
        unsigned int            executed;
};
/* List of workqueue threads on one cpu */
struct workqueue_global_stats {
        struct list_head        list;
        spinlock_t              lock;
};
/* Don't need a global lock because allocated before the workqueues, and
 * never freed.
 */
static struct workqueue_global_stats *all_workqueue_stat;
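
/*
 * The probes below hook the workqueue tracepoints: creation adds a
 * cpu_workqueue_stats node to its cpu's list, insertion and execution
 * bump that node's counters, and destruction unlinks and frees it.
 */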
/* Insertion of a work */
static void
probe_workqueue_insertion(struct task_struct *wq_thread,
                          struct work_struct *work)
{
        int cpu = cpumask_first(&wq_thread->cpus_allowed);
        struct cpu_workqueue_stats *node, *next;
        unsigned long flags;

        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
        list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list,
                                 list) {
                if (node->pid == wq_thread->pid) {
                        atomic_inc(&node->inserted);
                        goto found;
                }
        }
        pr_debug("trace_workqueue: entry not found\n");
found:
        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
}
/* Execution of a work */
static void
probe_workqueue_execution(struct task_struct *wq_thread,
                          struct work_struct *work)
{
        int cpu = cpumask_first(&wq_thread->cpus_allowed);
        struct cpu_workqueue_stats *node, *next;
        unsigned long flags;

        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
        list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list,
                                 list) {
                if (node->pid == wq_thread->pid) {
                        node->executed++;
                        goto found;
                }
        }
        pr_debug("trace_workqueue: entry not found\n");
found:
        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
}
/* Creation of a cpu workqueue thread */
static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
{
        struct cpu_workqueue_stats *cws;
        unsigned long flags;

        WARN_ON(cpu < 0 || cpu >= num_possible_cpus());

        /* Workqueues are sometimes created in atomic context */
        cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
        if (!cws) {
                pr_warning("trace_workqueue: not enough memory\n");
                return;
        }
        tracing_record_cmdline(wq_thread);

        INIT_LIST_HEAD(&cws->list);
        cws->cpu = cpu;
        cws->pid = wq_thread->pid;

        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
        if (list_empty(&all_workqueue_stat[cpu].list))
                cws->first_entry = true;
        list_add_tail(&cws->list, &all_workqueue_stat[cpu].list);
        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
}
/* Destruction of a cpu workqueue thread */
static void probe_workqueue_destruction(struct task_struct *wq_thread)
{
        /* A workqueue thread only executes on one cpu */
        int cpu = cpumask_first(&wq_thread->cpus_allowed);
        struct cpu_workqueue_stats *node, *next;
        unsigned long flags;

        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
        list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list,
                                 list) {
                if (node->pid == wq_thread->pid) {
                        list_del(&node->list);
                        kfree(node);
                        goto found;
                }
        }
        pr_debug("trace_workqueue: no entry found to destroy\n");
found:
        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
}
static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
{
        unsigned long flags;
        struct cpu_workqueue_stats *ret = NULL;

        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
        if (!list_empty(&all_workqueue_stat[cpu].list))
                ret = list_entry(all_workqueue_stat[cpu].list.next,
                                 struct cpu_workqueue_stats, list);
        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);

        return ret;
}
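
/*
 * Iterator callbacks for the stat tracer core: stat_start() returns the
 * first entry, then stat_next() is called repeatedly until it returns
 * NULL, with stat_show() printing one line per entry in between.
 */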
static void *workqueue_stat_start(void)
{
        int cpu;
        void *ret = NULL;

        for_each_possible_cpu(cpu) {
                ret = workqueue_stat_start_cpu(cpu);
                if (ret)
                        return ret;
        }
        return NULL;
}
static void *workqueue_stat_next(void *prev, int idx)
{
        struct cpu_workqueue_stats *prev_cws = prev;
        int cpu = prev_cws->cpu;
        unsigned long flags;
        void *ret = NULL;

        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
        if (list_is_last(&prev_cws->list, &all_workqueue_stat[cpu].list)) {
                spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
                for (++cpu; cpu < num_possible_cpus(); cpu++) {
                        ret = workqueue_stat_start_cpu(cpu);
                        if (ret)
                                return ret;
                }
                return NULL;
        }
        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);

        return list_entry(prev_cws->list.next, struct cpu_workqueue_stats,
                          list);
}
static int workqueue_stat_show(struct seq_file *s, void *p)
{
        struct cpu_workqueue_stats *cws = p;
        unsigned long flags;
        int cpu = cws->cpu;

        seq_printf(s, "%3d %6d     %6u       %s\n", cws->cpu,
                   atomic_read(&cws->inserted),
                   cws->executed,
                   trace_find_cmdline(cws->pid));

        /* Print a blank line after the first entry of each cpu's list */
        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
        if (&cws->list == all_workqueue_stat[cpu].list.next)
                seq_printf(s, "\n");
        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);

        return 0;
}
static int workqueue_stat_headers(struct seq_file *s)
{
        seq_printf(s, "# CPU  INSERTED  EXECUTED   NAME\n");
        seq_printf(s, "# |      |         |          |\n\n");

        return 0;
}
struct tracer_stat workqueue_stats __read_mostly = {
        .name = "workqueues",
        .stat_start = workqueue_stat_start,
        .stat_next = workqueue_stat_next,
        .stat_show = workqueue_stat_show,
        .stat_headers = workqueue_stat_headers
};
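
/*
 * Once registered, the stats are exposed through the tracing debugfs
 * tree. A sketch of the expected output, with illustrative values (the
 * path assumes debugfs is mounted on /sys/kernel/debug):
 *
 *   # cat /sys/kernel/debug/tracing/trace_stat/workqueues
 *   # CPU  INSERTED  EXECUTED   NAME
 *   # |      |         |          |
 *
 *       0    157        157       events/0
 *       0     12         12       kblockd/0
 */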
int __init stat_workqueue_init(void)
{
        if (register_stat_tracer(&workqueue_stats)) {
                pr_warning("Unable to register workqueue stat tracer\n");
                return 1;
        }

        return 0;
}
fs_initcall(stat_workqueue_init);
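
/*
 * Note: only the stat file registration waits until fs_initcall time;
 * the tracepoint probes themselves are hooked much earlier, by the
 * early initcall below, so that no workqueue creation is missed.
 */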
/*
 * Workqueues are created very early, just after pre-smp initcalls.
 * So we must register our tracepoints at this stage.
 */
int __init trace_workqueue_early_init(void)
{
        int ret, cpu;

        ret = register_trace_workqueue_insertion(probe_workqueue_insertion);
        if (ret)
                goto out;

        ret = register_trace_workqueue_execution(probe_workqueue_execution);
        if (ret)
                goto no_insertion;

        ret = register_trace_workqueue_creation(probe_workqueue_creation);
        if (ret)
                goto no_execution;

        ret = register_trace_workqueue_destruction(probe_workqueue_destruction);
        if (ret)
                goto no_creation;

        all_workqueue_stat = kmalloc(sizeof(struct workqueue_global_stats)
                                     * num_possible_cpus(), GFP_KERNEL);
        if (!all_workqueue_stat) {
                pr_warning("trace_workqueue: not enough memory\n");
                goto no_destruction;
        }

        for_each_possible_cpu(cpu) {
                spin_lock_init(&all_workqueue_stat[cpu].lock);
                INIT_LIST_HEAD(&all_workqueue_stat[cpu].list);
        }

        return 0;
no_destruction:
        unregister_trace_workqueue_destruction(probe_workqueue_destruction);
no_creation:
        unregister_trace_workqueue_creation(probe_workqueue_creation);
no_execution:
        unregister_trace_workqueue_execution(probe_workqueue_execution);
no_insertion:
        unregister_trace_workqueue_insertion(probe_workqueue_insertion);
out:
        pr_warning("trace_workqueue: unable to trace workqueues\n");

        return 1;
}
early_initcall(trace_workqueue_early_init);