/**
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 *
 * This is the core of the buffer management. Each
 * CPU buffer is processed and entered into the
 * global event buffer. Such processing is necessary
 * in several circumstances, mentioned below.
 *
 * The processing does the job of converting the
 * transitory EIP value into a persistent dentry/offset
 * value that the profiler can record at its leisure.
 *
 * See fs/dcookies.c for a description of the dentry/offset
 * objects.
 */

#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/dcookies.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/fs.h>

#include "oprofile_stats.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"

static LIST_HEAD(dying_tasks);
static LIST_HEAD(dead_tasks);
static cpumask_t marked_cpus = CPU_MASK_NONE;
static DEFINE_SPINLOCK(task_mortuary);
static void process_task_mortuary(void);

/* Take ownership of the task struct and place it on the
 * list for processing. Only after two full buffer syncs
 * does the task eventually get freed, because by then
 * we are sure we will not reference it again.
 */
static int task_free_notify(struct notifier_block * self, unsigned long val, void * data)
{
	struct task_struct * task = data;
	spin_lock(&task_mortuary);
	list_add(&task->tasks, &dying_tasks);
	spin_unlock(&task_mortuary);
	return NOTIFY_OK;
}

/* The task is on its way out. A sync of the buffer means we can catch
 * any remaining samples for this task.
 */
static int task_exit_notify(struct notifier_block * self, unsigned long val, void * data)
{
	/* To avoid latency problems, we only process the current CPU,
	 * hoping that most samples for the task are on this CPU.
	 */
	sync_buffer(raw_smp_processor_id());
	return 0;
}

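/* Note: samples for this task still sitting in other CPUs' buffers are
 * not synced here; the task_mortuary machinery below only guarantees
 * that the task_struct stays valid until those buffers have been
 * processed, not that such late samples can still be attributed.
 */
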
/* The task is about to try a do_munmap(). We peek at what it's going to
 * do, and if it's an executable region, process the samples first, so
 * we don't lose any. This does not have to be exact, it's a QoI issue
 * only.
 */
static int munmap_notify(struct notifier_block * self, unsigned long val, void * data)
{
	unsigned long addr = (unsigned long)data;
	struct mm_struct * mm = current->mm;
	struct vm_area_struct * mpnt;

	down_read(&mm->mmap_sem);

	mpnt = find_vma(mm, addr);
	if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
		up_read(&mm->mmap_sem);
		/* To avoid latency problems, we only process the current CPU,
		 * hoping that most samples for the task are on this CPU.
		 */
		sync_buffer(raw_smp_processor_id());
		return 0;
	}

	up_read(&mm->mmap_sem);
	return 0;
}

/* We need to be told about new modules so we don't attribute to a previously
 * loaded module, or drop the samples on the floor.
 */
static int module_load_notify(struct notifier_block * self, unsigned long val, void * data)
{
#ifdef CONFIG_MODULES
	if (val != MODULE_STATE_COMING)
		return 0;

	/* FIXME: should we process all CPU buffers ? */
	down(&buffer_sem);
	add_event_entry(ESCAPE_CODE);
	add_event_entry(MODULE_LOADED_CODE);
	up(&buffer_sem);
#endif
	return 0;
}

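/* Only an escape sequence is emitted here; the module's name and address
 * range are not placed in the event stream. Presumably the daemon reacts
 * to MODULE_LOADED_CODE by re-reading the module list (e.g. /proc/modules)
 * to refresh its view of kernel address ranges.
 */
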
static struct notifier_block task_free_nb = {
	.notifier_call	= task_free_notify,
};

static struct notifier_block task_exit_nb = {
	.notifier_call	= task_exit_notify,
};

static struct notifier_block munmap_nb = {
	.notifier_call	= munmap_notify,
};

static struct notifier_block module_load_nb = {
	.notifier_call	= module_load_notify,
};

static void end_sync(void)
{
	end_cpu_work();
	/* make sure we don't leak task structs */
	process_task_mortuary();
	process_task_mortuary();
}

int sync_start(void)
{
	int err;

	start_cpu_work();

	err = task_handoff_register(&task_free_nb);
	if (err)
		goto out1;
	err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
	if (err)
		goto out2;
	err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
	if (err)
		goto out3;
	err = register_module_notifier(&module_load_nb);
	if (err)
		goto out4;
out:
	return err;
	/* unwind the registrations in reverse order on failure */
out4:
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
out3:
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
	task_handoff_unregister(&task_free_nb);
out1:
	end_sync();
	goto out;
}

void sync_stop(void)
{
	unregister_module_notifier(&module_load_nb);
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
	task_handoff_unregister(&task_free_nb);
	end_sync();
}

/* Optimisation. We can manage without taking the dcookie sem
 * because we cannot reach this code without at least one
 * dcookie user still being registered (namely, the reader
 * of the event buffer).
 */
static inline unsigned long fast_get_dcookie(struct dentry * dentry,
	struct vfsmount * vfsmnt)
{
	unsigned long cookie;

	/* A cached cookie means get_dcookie() already pinned this dentry. */
	if (dentry->d_cookie)
		return (unsigned long)dentry;
	get_dcookie(dentry, vfsmnt, &cookie);
	return cookie;
}

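/* A dcookie is simply the kernel address of a pinned dentry; userspace
 * (the oprofile daemon) can resolve it back to a path later via the
 * lookup_dcookie(2) system call implemented in fs/dcookies.c (unrelated
 * to the static helper of the same name below).
 */
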
/* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
 * which corresponds loosely to "application name". This is
 * not strictly necessary but allows oprofile to associate
 * shared-library samples with particular applications.
 */
static unsigned long get_exec_dcookie(struct mm_struct * mm)
{
	unsigned long cookie = NO_COOKIE;
	struct vm_area_struct * vma;

	if (!mm)
		goto out;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!vma->vm_file)
			continue;
		if (!(vma->vm_flags & VM_EXECUTABLE))
			continue;
		cookie = fast_get_dcookie(vma->vm_file->f_dentry,
			vma->vm_file->f_vfsmnt);
		break;
	}

out:
	return cookie;
}

/* Convert the EIP value of a sample into a persistent dentry/offset
 * pair that can then be added to the global event buffer. We make
 * sure to do this lookup before a mm->mmap modification happens so
 * we don't lose track.
 */
static unsigned long lookup_dcookie(struct mm_struct * mm, unsigned long addr, off_t * offset)
{
	unsigned long cookie = NO_COOKIE;
	struct vm_area_struct * vma;

	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
		if (addr < vma->vm_start || addr >= vma->vm_end)
			continue;

		if (vma->vm_file) {
			cookie = fast_get_dcookie(vma->vm_file->f_dentry,
				vma->vm_file->f_vfsmnt);
			*offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
				vma->vm_start;
		} else {
			/* must be an anonymous map */
			*offset = addr;
		}
		break;
	}

	if (!vma)
		cookie = INVALID_COOKIE;
	return cookie;
}

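/* Worked example (illustrative values, assuming PAGE_SHIFT == 12): a
 * sample at addr 0x400123 in a file-backed vma with vm_start == 0x400000
 * and vm_pgoff == 2 yields
 *   *offset = (2 << 12) + 0x400123 - 0x400000 = 0x2123,
 * i.e. the sample's byte offset within the mapped file.
 */
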
static unsigned long last_cookie = INVALID_COOKIE;

static void add_cpu_switch(int i)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CPU_SWITCH_CODE);
	add_event_entry(i);
	last_cookie = INVALID_COOKIE;
}

static void add_kernel_ctx_switch(unsigned int in_kernel)
{
	add_event_entry(ESCAPE_CODE);
	if (in_kernel)
		add_event_entry(KERNEL_ENTER_SWITCH_CODE);
	else
		add_event_entry(KERNEL_EXIT_SWITCH_CODE);
}

static void
add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_SWITCH_CODE);
	add_event_entry(task->pid);
	add_event_entry(cookie);
	/* Another code for daemon back-compat */
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_TGID_CODE);
	add_event_entry(task->tgid);
}

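/* In the event stream, metadata is escaped while samples are bare pairs,
 * so a user context switch followed by one sample reads:
 *   ESCAPE_CODE, CTX_SWITCH_CODE, pid, cookie,
 *   ESCAPE_CODE, CTX_TGID_CODE, tgid,
 *   offset, event
 * ESCAPE_CODE is what lets the daemon tell metadata apart from sample
 * data (see is_code() below).
 */
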
static void add_cookie_switch(unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(COOKIE_SWITCH_CODE);
	add_event_entry(cookie);
}

static void add_trace_begin(void)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(TRACE_BEGIN_CODE);
}

static void add_sample_entry(unsigned long offset, unsigned long event)
{
	add_event_entry(offset);
	add_event_entry(event);
}

static int add_us_sample(struct mm_struct * mm, struct op_sample * s)
{
	unsigned long cookie;
	off_t offset;

	cookie = lookup_dcookie(mm, s->eip, &offset);

	if (cookie == INVALID_COOKIE) {
		atomic_inc(&oprofile_stats.sample_lost_no_mapping);
		return 0;
	}

	/* Emit a cookie switch only when the binary changes, so runs of
	 * samples from the same file stay compact. */
	if (cookie != last_cookie) {
		add_cookie_switch(cookie);
		last_cookie = cookie;
	}

	add_sample_entry(offset, s->event);
	return 1;
}

/* Add a sample to the global event buffer. If possible the
 * sample is converted into a persistent dentry/offset pair
 * for later lookup from userspace.
 */
static int
add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
{
	if (in_kernel) {
		/* Kernel EIPs are persistent as-is; no dcookie needed. */
		add_sample_entry(s->eip, s->event);
		return 1;
	} else if (mm) {
		return add_us_sample(mm, s);
	} else {
		atomic_inc(&oprofile_stats.sample_lost_no_mm);
	}
	return 0;
}

static void release_mm(struct mm_struct * mm)
{
	if (!mm)
		return;
	up_read(&mm->mmap_sem);
	mmput(mm);
}

static struct mm_struct * take_tasks_mm(struct task_struct * task)
{
	struct mm_struct * mm = get_task_mm(task);
	if (mm)
		down_read(&mm->mmap_sem);
	return mm;
}

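/* take_tasks_mm() and release_mm() pair up: the mm is pinned by
 * get_task_mm() and mmap_sem is held for the whole batch of lookups, so
 * mm->mmap cannot change underneath lookup_dcookie(), as its comment
 * requires.
 */
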
static inline int is_code(unsigned long val)
{
	return val == ESCAPE_CODE;
}

386 /* "acquire" as many cpu buffer slots as we can */
387 static unsigned long get_slots(struct oprofile_cpu_buffer * b)
389 unsigned long head = b->head_pos;
390 unsigned long tail = b->tail_pos;
393 * Subtle. This resets the persistent last_task
394 * and in_kernel values used for switching notes.
395 * BUT, there is a small window between reading
396 * head_pos, and this call, that means samples
397 * can appear at the new head position, but not
398 * be prefixed with the notes for switching
399 * kernel mode or a task switch. This small hole
400 * can lead to mis-attribution or samples where
401 * we don't know if it's in the kernel or not,
402 * at the start of an event buffer.
409 return head + (b->buffer_size - tail);
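/* Ring arithmetic, with illustrative numbers: for buffer_size == 512,
 * tail == 510 and head == 2 the head has wrapped, giving
 * 2 + (512 - 510) = 4 pending slots; with head == 20 and tail == 10 the
 * simple difference 10 applies.
 */
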
static void increment_tail(struct oprofile_cpu_buffer * b)
{
	unsigned long new_tail = b->tail_pos + 1;

	/* ensure the reads of the slot complete before it is released */
	rmb();

	if (new_tail < b->buffer_size)
		b->tail_pos = new_tail;
	else
		b->tail_pos = 0;
}

/* Move tasks along towards death. Any tasks on dead_tasks
 * will definitely have no remaining references in any
 * CPU buffers at this point, because we use two lists,
 * and to have reached the list, it must have gone through
 * one full sync already.
 */
static void process_task_mortuary(void)
{
	struct list_head * pos;
	struct list_head * pos2;
	struct task_struct * task;

	spin_lock(&task_mortuary);

	/* Tasks that have survived two full syncs can finally be freed. */
	list_for_each_safe(pos, pos2, &dead_tasks) {
		task = list_entry(pos, struct task_struct, tasks);
		list_del(&task->tasks);
		free_task(task);
	}

	/* Tasks that have survived one sync move one step closer. */
	list_for_each_safe(pos, pos2, &dying_tasks) {
		task = list_entry(pos, struct task_struct, tasks);
		list_del(&task->tasks);
		list_add_tail(&task->tasks, &dead_tasks);
	}

	spin_unlock(&task_mortuary);
}

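/* Timeline: a task handed off via task_free_notify() during sync N waits
 * on dying_tasks; the mortuary pass after sync N+1 moves it to
 * dead_tasks; the pass after sync N+2 finally frees it. This is the
 * "two full buffer syncs" promised above.
 */
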
static void mark_done(int cpu)
{
	int i;

	cpu_set(cpu, marked_cpus);

	for_each_online_cpu(i) {
		if (!cpu_isset(i, marked_cpus))
			return;
	}

	/* All CPUs have been processed at least once,
	 * we can process the mortuary once.
	 */
	process_task_mortuary();

	cpus_clear(marked_cpus);
}

/* FIXME: this is not sufficient if we implement syscall barrier backtrace
 * traversal; the code switches to sb_sample_start at the first kernel
 * enter/exit switch, so we need a fifth state and some special handling
 * in sync_buffer().
 */
typedef enum {
	sb_bt_ignore = -2,
	sb_buffer_start,
	sb_bt_start,
	sb_sample_start,
} sync_buffer_state;

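/* The explicit numbering is load-bearing: sb_bt_ignore (-2) and
 * sb_buffer_start (-1) sort below sb_bt_start (0), so the single test
 * "state >= sb_bt_start" in sync_buffer() accepts samples in both the
 * backtrace and normal-sample states.
 */
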
/* Sync one of the CPU's buffers into the global event buffer.
 * Here we need to go through each batch of samples punctuated
 * by context switch notes, taking the task's mmap_sem and doing
 * lookup in task->mm->mmap to convert EIP into dcookie/offset
 * values.
 */
void sync_buffer(int cpu)
{
	struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[cpu];
	struct mm_struct *mm = NULL;
	struct task_struct * new;
	unsigned long cookie = 0;
	int in_kernel = 1;
	unsigned int i;
	sync_buffer_state state = sb_buffer_start;
	unsigned long available;

	down(&buffer_sem);

	add_cpu_switch(cpu);

	/* Remember, only we can modify tail_pos */

	available = get_slots(cpu_buf);

	for (i = 0; i < available; ++i) {
		struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos];

		if (is_code(s->eip)) {
			if (s->event <= CPU_IS_KERNEL) {
				/* kernel/userspace switch */
				in_kernel = s->event;
				if (state == sb_buffer_start)
					state = sb_sample_start;
				add_kernel_ctx_switch(s->event);
			} else if (s->event == CPU_TRACE_BEGIN) {
				state = sb_bt_start;
				add_trace_begin();
			} else {
				struct mm_struct * oldmm = mm;

				/* userspace context switch */
				new = (struct task_struct *)s->event;

				release_mm(oldmm);
				mm = take_tasks_mm(new);
				if (mm != oldmm)
					cookie = get_exec_dcookie(mm);
				add_user_ctx_switch(new, cookie);
			}
		} else {
			if (state >= sb_bt_start &&
			    !add_sample(mm, s, in_kernel)) {
				if (state == sb_bt_start) {
					state = sb_bt_ignore;
					atomic_inc(&oprofile_stats.bt_lost_no_mapping);
				}
			}
		}

		increment_tail(cpu_buf);
	}
	release_mm(mm);

	mark_done(cpu);

	up(&buffer_sem);
}

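/* sync_buffer() is the sole consumer of each CPU buffer ("only we can
 * modify tail_pos"). It runs from the task-exit and munmap notifiers
 * above and, presumably, from the periodic work started by
 * start_cpu_work(); buffer_sem serializes these callers against each
 * other and against the event-buffer reader.
 */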