/*
 * Workqueue statistical tracer.
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */


#include <trace/workqueue.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include "trace_stat.h"
#include "trace.h"

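/*
 * <trace/workqueue.h> declares the workqueue tracepoints probed below.
 * As a rough sketch (macro names and signatures vary between kernel
 * versions, so treat this as illustrative only), one declaration looks like:
 *
 *	DECLARE_TRACE(workqueue_insertion,
 *		TPPROTO(struct task_struct *wq_thread, struct work_struct *work),
 *		TPARGS(wq_thread, work));
 *
 * which provides the register_trace_workqueue_insertion() /
 * unregister_trace_workqueue_insertion() pair used in
 * trace_workqueue_early_init().
 */
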
/* A cpu workqueue thread */
struct cpu_workqueue_stats {
        struct list_head            list;
/* Useful to know if we print the cpu headers */
        bool                        first_entry;
        int                         cpu;
        pid_t                       pid;
/* Can be inserted from interrupt or user context, needs to be atomic */
        atomic_t                    inserted;
/*
 *  Doesn't need to be atomic: work items are serialized in a single
 *  workqueue thread on a single CPU.
 */
        unsigned int                executed;
};

/* List of workqueue threads on one cpu */
struct workqueue_global_stats {
        struct list_head        list;
        spinlock_t              lock;
};

/* No need for a global lock: this is allocated before the workqueues are
 * created and is never freed.
 */
static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
#define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu))

/* Insertion of a work */
static void
probe_workqueue_insertion(struct task_struct *wq_thread,
                          struct work_struct *work)
{
        int cpu = cpumask_first(&wq_thread->cpus_allowed);
        struct cpu_workqueue_stats *node, *next;
        unsigned long flags;

        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
        list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
                                                        list) {
                if (node->pid == wq_thread->pid) {
                        atomic_inc(&node->inserted);
                        goto found;
                }
        }
        pr_debug("trace_workqueue: entry not found\n");
found:
        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

/* Execution of a work */
static void
probe_workqueue_execution(struct task_struct *wq_thread,
                          struct work_struct *work)
{
        int cpu = cpumask_first(&wq_thread->cpus_allowed);
        struct cpu_workqueue_stats *node, *next;
        unsigned long flags;

        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
        list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
                                                        list) {
                if (node->pid == wq_thread->pid) {
                        node->executed++;
                        goto found;
                }
        }
        pr_debug("trace_workqueue: entry not found\n");
found:
        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

/* Creation of a cpu workqueue thread */
static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
{
        struct cpu_workqueue_stats *cws;
        unsigned long flags;

        WARN_ON(cpu < 0 || cpu >= num_possible_cpus());

        /* Workqueues are sometimes created in atomic context */
        cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
        if (!cws) {
                pr_warning("trace_workqueue: not enough memory\n");
                return;
        }
        tracing_record_cmdline(wq_thread);

        INIT_LIST_HEAD(&cws->list);
        cws->cpu = cpu;

        cws->pid = wq_thread->pid;

        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
        if (list_empty(&workqueue_cpu_stat(cpu)->list))
                cws->first_entry = true;
        list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

/* Destruction of a cpu workqueue thread */
static void probe_workqueue_destruction(struct task_struct *wq_thread)
{
        /* A workqueue thread only executes on one cpu */
        int cpu = cpumask_first(&wq_thread->cpus_allowed);
        struct cpu_workqueue_stats *node, *next;
        unsigned long flags;

        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
        list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
                                                        list) {
                if (node->pid == wq_thread->pid) {
                        list_del(&node->list);
                        kfree(node);
                        goto found;
                }
        }

        pr_debug("trace_workqueue: couldn't find workqueue to destroy\n");
found:
        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

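/* Return the first stat entry on @cpu's list, or NULL if the list is empty */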
static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
{
        unsigned long flags;
        struct cpu_workqueue_stats *ret = NULL;

        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);

        if (!list_empty(&workqueue_cpu_stat(cpu)->list))
                ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
                                 struct cpu_workqueue_stats, list);

        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

        return ret;
}

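/*
 * Stat tracer callback: start iterating at the first entry of the first
 * non-empty per-cpu list, or return NULL if no workqueue thread is tracked.
 */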
static void *workqueue_stat_start(void)
{
        int cpu;
        void *ret = NULL;

        for_each_possible_cpu(cpu) {
                ret = workqueue_stat_start_cpu(cpu);
                if (ret)
                        return ret;
        }
        return NULL;
}

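/*
 * Stat tracer callback: return the entry following @prev, moving on to the
 * next possible cpu once the current per-cpu list is exhausted.
 */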
static void *workqueue_stat_next(void *prev, int idx)
{
        struct cpu_workqueue_stats *prev_cws = prev;
        int cpu = prev_cws->cpu;
        unsigned long flags;
        void *ret = NULL;

        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
        if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
                spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
                for (++cpu; cpu < num_possible_cpus(); cpu++) {
                        ret = workqueue_stat_start_cpu(cpu);
                        if (ret)
                                return ret;
                }
                return NULL;
        }
        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

        return list_entry(prev_cws->list.next, struct cpu_workqueue_stats,
                          list);
}

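/* Stat tracer callback: print one line of statistics per workqueue thread */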
static int workqueue_stat_show(struct seq_file *s, void *p)
{
        struct cpu_workqueue_stats *cws = p;
        unsigned long flags;
        int cpu = cws->cpu;

        seq_printf(s, "%3d %6d     %6u       %s\n", cws->cpu,
                   atomic_read(&cws->inserted),
                   cws->executed,
                   trace_find_cmdline(cws->pid));

        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
        if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
                seq_printf(s, "\n");
        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

        return 0;
}

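/* Stat tracer callback: print the column headers */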
static int workqueue_stat_headers(struct seq_file *s)
{
        seq_printf(s, "# CPU  INSERTED  EXECUTED   NAME\n");
        seq_printf(s, "# |      |         |          |\n\n");
        return 0;
}

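/*
 * The callbacks above are wired together by the stat tracer entry below.
 * The statistics are typically exposed through the tracing debugfs
 * directory (e.g. trace_stat/workqueues).  Given the format strings in
 * workqueue_stat_headers() and workqueue_stat_show(), the output would
 * look roughly like (values and names made up for illustration):
 *
 * # CPU  INSERTED  EXECUTED   NAME
 * # |      |         |          |
 *
 *   0    398        398       events/0
 *   0      3          3       khelper
 */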
struct tracer_stat workqueue_stats __read_mostly = {
        .name = "workqueues",
        .stat_start = workqueue_stat_start,
        .stat_next = workqueue_stat_next,
        .stat_show = workqueue_stat_show,
        .stat_headers = workqueue_stat_headers
};

int __init stat_workqueue_init(void)
{
        if (register_stat_tracer(&workqueue_stats)) {
                pr_warning("Unable to register workqueue stat tracer\n");
                return 1;
        }

        return 0;
}
fs_initcall(stat_workqueue_init);

/*
 * Workqueues are created very early, just after pre-smp initcalls.
 * So we must register our tracepoints at this stage.
 */
int __init trace_workqueue_early_init(void)
{
        int ret, cpu;

        ret = register_trace_workqueue_insertion(probe_workqueue_insertion);
        if (ret)
                goto out;

        ret = register_trace_workqueue_execution(probe_workqueue_execution);
        if (ret)
                goto no_insertion;

        ret = register_trace_workqueue_creation(probe_workqueue_creation);
        if (ret)
                goto no_execution;

        ret = register_trace_workqueue_destruction(probe_workqueue_destruction);
        if (ret)
                goto no_creation;

        for_each_possible_cpu(cpu) {
                spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
                INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
        }

        return 0;

no_creation:
        unregister_trace_workqueue_creation(probe_workqueue_creation);
no_execution:
        unregister_trace_workqueue_execution(probe_workqueue_execution);
no_insertion:
        unregister_trace_workqueue_insertion(probe_workqueue_insertion);
out:
        pr_warning("trace_workqueue: unable to trace workqueues\n");

        return 1;
}
early_initcall(trace_workqueue_early_init);