/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Robert Richter <robert.richter@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

#define OP_BUFFER_FLAGS	0

/*
 * Read and write access to the ring buffer uses spin locking. Thus,
 * writing to the buffer by the NMI handler (x86) could also occur
 * during critical sections when reading the buffer. To avoid this,
 * there are 2 buffers for independent read and write access. Read
 * access is in process context only, write access only in the NMI
 * handler. If the read buffer runs empty, both buffers are swapped
 * atomically. There is potentially a small window during swapping
 * where the buffers are disabled and samples could be lost.
 *
 * Using 2 buffers adds a little overhead, but the solution is clear
 * and does not require changes in the ring buffer implementation. It
 * can be changed to a single buffer solution when the ring buffer
 * access is implemented as non-locking atomic code.
 */
static struct ring_buffer *op_ring_buffer_read;
static struct ring_buffer *op_ring_buffer_write;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);

#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

unsigned long oprofile_get_cpu_buffer_size(void)
{
	return oprofile_cpu_buffer_size;
}

void oprofile_cpu_buffer_inc_smpl_lost(void)
{
	struct oprofile_cpu_buffer *cpu_buf
		= &__get_cpu_var(cpu_buffer);

	cpu_buf->sample_lost_overflow++;
}

void free_cpu_buffers(void)
{
	if (op_ring_buffer_read)
		ring_buffer_free(op_ring_buffer_read);
	op_ring_buffer_read = NULL;
	if (op_ring_buffer_write)
		ring_buffer_free(op_ring_buffer_write);
	op_ring_buffer_write = NULL;
}

int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = oprofile_cpu_buffer_size;

	op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer_read)
		goto fail;
	op_ring_buffer_write = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer_write)
		goto fail;

	for_each_possible_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		b->last_task = NULL;
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}
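
/*
 * Illustrative sketch (not compiled): callers outside this file, such
 * as the profiler setup/shutdown paths, are expected to pair the
 * functions above roughly as follows. The names example_setup() and
 * example_shutdown() are hypothetical.
 */
#if 0
static int example_setup(void)
{
	if (alloc_cpu_buffers())
		return -ENOMEM;		/* ring buffer allocation failed */
	start_cpu_work();		/* schedule periodic sync_buffer() work */
	return 0;
}

static void example_shutdown(void)
{
	end_cpu_work();			/* cancel and flush the per-cpu work */
	free_cpu_buffers();		/* NULL-safe, frees both ring buffers */
}
#endif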

void start_cpu_work(void)
{
	int i;

	work_enabled = 1;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		/*
		 * Spread the work by 1 jiffy per cpu so they don't all
		 * fire at once.
		 */
		schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
	}
}

void end_cpu_work(void)
{
	int i;

	work_enabled = 0;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		cancel_delayed_work(&b->work);
	}

	flush_scheduled_work();
}

/*
 * This function prepares the cpu buffer to write a sample.
 *
 * Struct op_entry is used during operations on the ring buffer while
 * struct op_sample contains the data that is stored in the ring
 * buffer. The entry passed in does not need to be initialized. The
 * function reserves a data array of the given size. Call
 * op_cpu_buffer_write_commit() after preparing the sample. On error
 * NULL is returned, otherwise a pointer to the sample.
 */
struct op_sample
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
{
	entry->event = ring_buffer_lock_reserve
		(op_ring_buffer_write, sizeof(struct op_sample) +
		 size * sizeof(entry->sample->data[0]), &entry->irq_flags);
	if (entry->event)
		entry->sample = ring_buffer_event_data(entry->event);
	else
		entry->sample = NULL;

	if (!entry->sample)
		return NULL;

	entry->size = size;
	entry->data = entry->sample->data;

	return entry->sample;
}

int op_cpu_buffer_write_commit(struct op_entry *entry)
{
	return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
					 entry->irq_flags);
}
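
/*
 * Illustrative sketch (not compiled): a writer, typically in NMI
 * context, reserves a sample with room for one extra data word, fills
 * it and commits it, much like op_add_code() below does. The function
 * name and parameter values are hypothetical.
 */
#if 0
static void example_write_sample(unsigned long pc, unsigned long event,
				 unsigned long extra)
{
	struct op_entry entry;
	struct op_sample *sample;

	sample = op_cpu_buffer_write_reserve(&entry, 1);
	if (!sample)
		return;				/* buffer full, sample is dropped */

	sample->eip = pc;
	sample->event = event;
	op_cpu_buffer_add_data(&entry, extra);	/* stores into entry.data[0] */

	op_cpu_buffer_write_commit(&entry);
}
#endif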

struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
{
	struct ring_buffer_event *e;
	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
	if (e)
		goto event;
	if (ring_buffer_swap_cpu(op_ring_buffer_read,
				 op_ring_buffer_write,
				 cpu))
		return NULL;
	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
	if (e)
		goto event;
	return NULL;

event:
	entry->event = e;
	entry->sample = ring_buffer_event_data(e);
	entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
		/ sizeof(entry->sample->data[0]);
	entry->data = entry->sample->data;
	return entry->sample;
}

unsigned long op_cpu_buffer_entries(int cpu)
{
	return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
		+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
}
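
/*
 * Illustrative sketch (not compiled): the read side runs in process
 * context and drains one CPU's samples until both buffers are empty,
 * which is when op_cpu_buffer_read_entry() returns NULL. The function
 * example_drain_cpu() and the helper process_word() are hypothetical.
 */
#if 0
static void example_drain_cpu(int cpu)
{
	struct op_entry entry;
	struct op_sample *sample;
	unsigned long i;

	while ((sample = op_cpu_buffer_read_entry(&entry, cpu))) {
		/* consume sample->eip, sample->event and the data words */
		for (i = 0; i < entry.size; i++)
			process_word(entry.data[i]);
	}
}
#endif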

static int
op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
	    int is_kernel, struct task_struct *task)
{
	struct op_entry entry;
	struct op_sample *sample;
	unsigned long flags;
	int size;

	flags = 0;

	if (backtrace)
		flags |= TRACE_BEGIN;

	/* notice a switch from user->kernel or vice versa */
	is_kernel = !!is_kernel;
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		flags |= KERNEL_CTX_SWITCH;
		if (is_kernel)
			flags |= IS_KERNEL;
	}

	/* notice a task switch */
	if (cpu_buf->last_task != task) {
		cpu_buf->last_task = task;
		flags |= USER_CTX_SWITCH;
	}

	if (!flags)
		/* nothing to do */
		return 0;

	if (flags & USER_CTX_SWITCH)
		size = 1;
	else
		size = 0;

	sample = op_cpu_buffer_write_reserve(&entry, size);
	if (!sample)
		return -ENOMEM;

	sample->eip = ESCAPE_CODE;
	sample->event = flags;

	if (size)
		op_cpu_buffer_add_data(&entry, (unsigned long)task);

	op_cpu_buffer_write_commit(&entry);

	return 0;
}

static inline int
op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
	      unsigned long pc, unsigned long event)
{
	struct op_entry entry;
	struct op_sample *sample;

	sample = op_cpu_buffer_write_reserve(&entry, 0);
	if (!sample)
		return -ENOMEM;

	sample->eip = pc;
	sample->event = event;

	return op_cpu_buffer_write_commit(&entry);
}

/*
 * This must be safe from any context.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes
 */
static int
log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
	   unsigned long backtrace, int is_kernel, unsigned long event)
{
	cpu_buf->sample_received++;

	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		return 0;
	}

	if (op_add_code(cpu_buf, backtrace, is_kernel, current))
		goto fail;

	if (op_add_sample(cpu_buf, pc, event))
		goto fail;

	return 1;

fail:
	cpu_buf->sample_lost_overflow++;
	return 0;
}

static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 1;
}

static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 0;
}

static inline void
__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			  unsigned long event, int is_kernel)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
	unsigned long backtrace = oprofile_backtrace_depth;

	/*
	 * if log_sample() fails we can't backtrace since we lost the
	 * source of this event
	 */
	if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event))
		/* failed */
		return;

	if (!backtrace)
		return;

	oprofile_begin_trace(cpu_buf);
	oprofile_ops.backtrace(regs, backtrace);
	oprofile_end_trace(cpu_buf);
}

void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			     unsigned long event, int is_kernel)
{
	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel = !user_mode(regs);
	unsigned long pc = profile_pc(regs);

	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
}
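
/*
 * Illustrative sketch (not compiled): architecture code typically
 * calls oprofile_add_sample() from its counter-overflow interrupt or
 * NMI handler, passing the interrupted registers and the number of
 * the counter that overflowed. example_overflow_handler() is a
 * hypothetical name.
 */
#if 0
static void example_overflow_handler(struct pt_regs * const regs, int counter)
{
	/* pc and kernel/user mode are derived from regs internally */
	oprofile_add_sample(regs, counter);
}
#endif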

/*
 * Add samples with data to the ring buffer.
 *
 * Use oprofile_add_data(&entry, val) to add data and
 * oprofile_write_commit(&entry) to commit the sample.
 */
void
oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
		       unsigned long pc, int code, int size)
{
	struct op_sample *sample;
	int is_kernel = !user_mode(regs);
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	cpu_buf->sample_received++;

	/* no backtraces for samples with data */
	if (op_add_code(cpu_buf, 0, is_kernel, current))
		goto fail;

	sample = op_cpu_buffer_write_reserve(entry, size + 2);
	if (!sample)
		goto fail;
	sample->eip = ESCAPE_CODE;
	sample->event = 0;		/* no flags */

	op_cpu_buffer_add_data(entry, code);
	op_cpu_buffer_add_data(entry, pc);

	return;

fail:
	entry->event = NULL;
	cpu_buf->sample_lost_overflow++;
}

int oprofile_add_data(struct op_entry *entry, unsigned long val)
{
	if (!entry->event)
		return 0;
	return op_cpu_buffer_add_data(entry, val);
}

int oprofile_write_commit(struct op_entry *entry)
{
	if (!entry->event)
		return -EINVAL;
	return op_cpu_buffer_write_commit(entry);
}
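
/*
 * Illustrative sketch (not compiled): a driver emitting a sample with
 * extra payload reserves space for the payload, adds the data words
 * and commits. If the reserve failed, entry->event is NULL and the
 * later calls become no-ops, so no explicit error check is needed.
 * EXAMPLE_CODE, the function name and the payload values are
 * hypothetical.
 */
#if 0
#define EXAMPLE_CODE 13

static void example_add_sample_with_data(struct pt_regs * const regs,
					 unsigned long pc,
					 unsigned long val0, unsigned long val1)
{
	struct op_entry entry;

	oprofile_write_reserve(&entry, regs, pc, EXAMPLE_CODE, 2);
	oprofile_add_data(&entry, val0);
	oprofile_add_data(&entry, val1);
	oprofile_write_commit(&entry);
}
#endif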

void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
	log_sample(cpu_buf, pc, 0, is_kernel, event);
}

void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!cpu_buf->tracing)
		return;

	/*
	 * A broken frame can give an eip with the same value as an
	 * escape code; abort the trace if we get it.
	 */
	if (pc == ESCAPE_CODE)
		goto fail;

	if (op_add_sample(cpu_buf, pc, 0))
		goto fail;

	return;
fail:
	cpu_buf->tracing = 0;
	cpu_buf->backtrace_aborted++;
	return;
}

/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu
 */
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);
	if (b->cpu != smp_processor_id()) {
		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
		       smp_processor_id(), b->cpu);

		if (!cpu_online(b->cpu)) {
			cancel_delayed_work(&b->work);
			return;
		}
	}
	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}