2 * Performance counter core code
4 * Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
8 * For licensing details see kernel-base/COPYING
13 #include <linux/cpu.h>
14 #include <linux/smp.h>
15 #include <linux/file.h>
16 #include <linux/poll.h>
17 #include <linux/sysfs.h>
18 #include <linux/ptrace.h>
19 #include <linux/percpu.h>
20 #include <linux/vmstat.h>
21 #include <linux/hardirq.h>
22 #include <linux/rculist.h>
23 #include <linux/uaccess.h>
24 #include <linux/syscalls.h>
25 #include <linux/anon_inodes.h>
26 #include <linux/kernel_stat.h>
27 #include <linux/perf_counter.h>
29 #include <asm/irq_regs.h>
32 * Each CPU has a list of per CPU counters:
34 DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
36 int perf_max_counters __read_mostly = 1;
37 static int perf_reserved_percpu __read_mostly;
38 static int perf_overcommit __read_mostly = 1;
41 * Mutex for (sysadmin-configurable) counter reservations:
43 static DEFINE_MUTEX(perf_resource_mutex);
46 * Architecture provided APIs - weak aliases:
48 extern __weak const struct hw_perf_counter_ops *
49 hw_perf_counter_init(struct perf_counter *counter)
54 u64 __weak hw_perf_save_disable(void) { return 0; }
55 void __weak hw_perf_restore(u64 ctrl) { barrier(); }
56 void __weak hw_perf_counter_setup(int cpu) { barrier(); }
57 int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
58 struct perf_cpu_context *cpuctx,
59 struct perf_counter_context *ctx, int cpu)
64 void __weak perf_counter_print_debug(void) { }
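/*
 * Illustrative sketch (not part of this file): the hooks above are weak
 * symbols, so an architecture overrides them simply by providing strong
 * definitions with the same signatures in its own code. The helpers used
 * below are made up for the example; no real PMU interface is implied.
 */
#if 0
u64 hw_perf_save_disable(void)
{
	u64 prev = example_read_pmu_global_enable();	/* hypothetical */

	example_write_pmu_global_enable(0);		/* hypothetical */
	return prev;
}

void hw_perf_restore(u64 ctrl)
{
	example_write_pmu_global_enable(ctrl);		/* hypothetical */
}
#endif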
67 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
69 struct perf_counter *group_leader = counter->group_leader;
72 * Depending on whether it is a standalone or sibling counter,
73 * add it straight to the context's counter list, or to the group
74 * leader's sibling list:
76 if (counter->group_leader == counter)
77 list_add_tail(&counter->list_entry, &ctx->counter_list);
79 list_add_tail(&counter->list_entry, &group_leader->sibling_list);
80 group_leader->nr_siblings++;
83 list_add_rcu(&counter->event_entry, &ctx->event_list);
87 list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
89 struct perf_counter *sibling, *tmp;
91 list_del_init(&counter->list_entry);
92 list_del_rcu(&counter->event_entry);
94 if (counter->group_leader != counter)
95 counter->group_leader->nr_siblings--;
98 * If this was a group counter with sibling counters then
99 * upgrade the siblings to singleton counters by adding them
100 * to the context list directly:
102 list_for_each_entry_safe(sibling, tmp,
103 &counter->sibling_list, list_entry) {
105 list_move_tail(&sibling->list_entry, &ctx->counter_list);
106 sibling->group_leader = sibling;
111 counter_sched_out(struct perf_counter *counter,
112 struct perf_cpu_context *cpuctx,
113 struct perf_counter_context *ctx)
115 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
118 counter->state = PERF_COUNTER_STATE_INACTIVE;
119 counter->hw_ops->disable(counter);
122 if (!is_software_counter(counter))
123 cpuctx->active_oncpu--;
125 if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
126 cpuctx->exclusive = 0;
130 group_sched_out(struct perf_counter *group_counter,
131 struct perf_cpu_context *cpuctx,
132 struct perf_counter_context *ctx)
134 struct perf_counter *counter;
136 if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
139 counter_sched_out(group_counter, cpuctx, ctx);
142 * Schedule out siblings (if any):
144 list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
145 counter_sched_out(counter, cpuctx, ctx);
147 if (group_counter->hw_event.exclusive)
148 cpuctx->exclusive = 0;
152 * Cross CPU call to remove a performance counter
154 * We disable the counter on the hardware level first. After that we
155 * remove it from the context list.
157 static void __perf_counter_remove_from_context(void *info)
159 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
160 struct perf_counter *counter = info;
161 struct perf_counter_context *ctx = counter->ctx;
166 * If this is a task context, we need to check whether it is
167 * the current task context of this cpu. If not it has been
168 * scheduled out before the smp call arrived.
170 if (ctx->task && cpuctx->task_ctx != ctx)
173 curr_rq_lock_irq_save(&flags);
174 spin_lock(&ctx->lock);
176 counter_sched_out(counter, cpuctx, ctx);
178 counter->task = NULL;
182 * Protect the list operation against NMI by disabling the
183 * counters on a global level. NOP for non NMI based counters.
185 perf_flags = hw_perf_save_disable();
186 list_del_counter(counter, ctx);
187 hw_perf_restore(perf_flags);
* Allow more per task counters with respect to the reservation:
194 cpuctx->max_pertask =
195 min(perf_max_counters - ctx->nr_counters,
196 perf_max_counters - perf_reserved_percpu);
199 spin_unlock(&ctx->lock);
200 curr_rq_unlock_irq_restore(&flags);
205 * Remove the counter from a task's (or a CPU's) list of counters.
207 * Must be called with counter->mutex and ctx->mutex held.
* CPU counters are removed with an smp call. For task counters we only
210 * call when the task is on a CPU.
212 static void perf_counter_remove_from_context(struct perf_counter *counter)
214 struct perf_counter_context *ctx = counter->ctx;
215 struct task_struct *task = ctx->task;
219 * Per cpu counters are removed via an smp call and
* the removal is always successful.
222 smp_call_function_single(counter->cpu,
223 __perf_counter_remove_from_context,
229 task_oncpu_function_call(task, __perf_counter_remove_from_context,
232 spin_lock_irq(&ctx->lock);
234 * If the context is active we need to retry the smp call.
236 if (ctx->nr_active && !list_empty(&counter->list_entry)) {
237 spin_unlock_irq(&ctx->lock);
* The lock prevents this context from being scheduled in, so we
* can remove the counter safely if the call above did not succeed.
246 if (!list_empty(&counter->list_entry)) {
248 list_del_counter(counter, ctx);
249 counter->task = NULL;
251 spin_unlock_irq(&ctx->lock);
255 * Cross CPU call to disable a performance counter
257 static void __perf_counter_disable(void *info)
259 struct perf_counter *counter = info;
260 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
261 struct perf_counter_context *ctx = counter->ctx;
* If this is a per-task counter, we need to check whether this
266 * counter's task is the current task on this cpu.
268 if (ctx->task && cpuctx->task_ctx != ctx)
271 curr_rq_lock_irq_save(&flags);
272 spin_lock(&ctx->lock);
275 * If the counter is on, turn it off.
276 * If it is in error state, leave it in error state.
278 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
279 if (counter == counter->group_leader)
280 group_sched_out(counter, cpuctx, ctx);
282 counter_sched_out(counter, cpuctx, ctx);
283 counter->state = PERF_COUNTER_STATE_OFF;
286 spin_unlock(&ctx->lock);
287 curr_rq_unlock_irq_restore(&flags);
293 static void perf_counter_disable(struct perf_counter *counter)
295 struct perf_counter_context *ctx = counter->ctx;
296 struct task_struct *task = ctx->task;
300 * Disable the counter on the cpu that it's on
302 smp_call_function_single(counter->cpu, __perf_counter_disable,
308 task_oncpu_function_call(task, __perf_counter_disable, counter);
310 spin_lock_irq(&ctx->lock);
312 * If the counter is still active, we need to retry the cross-call.
314 if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
315 spin_unlock_irq(&ctx->lock);
320 * Since we have the lock this context can't be scheduled
321 * in, so we can change the state safely.
323 if (counter->state == PERF_COUNTER_STATE_INACTIVE)
324 counter->state = PERF_COUNTER_STATE_OFF;
326 spin_unlock_irq(&ctx->lock);
330 * Disable a counter and all its children.
332 static void perf_counter_disable_family(struct perf_counter *counter)
334 struct perf_counter *child;
336 perf_counter_disable(counter);
339 * Lock the mutex to protect the list of children
341 mutex_lock(&counter->mutex);
342 list_for_each_entry(child, &counter->child_list, child_list)
343 perf_counter_disable(child);
344 mutex_unlock(&counter->mutex);
348 counter_sched_in(struct perf_counter *counter,
349 struct perf_cpu_context *cpuctx,
350 struct perf_counter_context *ctx,
353 if (counter->state <= PERF_COUNTER_STATE_OFF)
356 counter->state = PERF_COUNTER_STATE_ACTIVE;
357 counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
359 * The new state must be visible before we turn it on in the hardware:
363 if (counter->hw_ops->enable(counter)) {
364 counter->state = PERF_COUNTER_STATE_INACTIVE;
369 if (!is_software_counter(counter))
370 cpuctx->active_oncpu++;
373 if (counter->hw_event.exclusive)
374 cpuctx->exclusive = 1;
380 * Return 1 for a group consisting entirely of software counters,
381 * 0 if the group contains any hardware counters.
383 static int is_software_only_group(struct perf_counter *leader)
385 struct perf_counter *counter;
387 if (!is_software_counter(leader))
390 list_for_each_entry(counter, &leader->sibling_list, list_entry)
391 if (!is_software_counter(counter))
398 * Work out whether we can put this counter group on the CPU now.
400 static int group_can_go_on(struct perf_counter *counter,
401 struct perf_cpu_context *cpuctx,
405 * Groups consisting entirely of software counters can always go on.
407 if (is_software_only_group(counter))
410 * If an exclusive group is already on, no other hardware
411 * counters can go on.
413 if (cpuctx->exclusive)
416 * If this group is exclusive and there are already
417 * counters on the CPU, it can't go on.
419 if (counter->hw_event.exclusive && cpuctx->active_oncpu)
* Otherwise, try to add it if all previous groups were able to go on.
429 * Cross CPU call to install and enable a performance counter
431 static void __perf_install_in_context(void *info)
433 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
434 struct perf_counter *counter = info;
435 struct perf_counter_context *ctx = counter->ctx;
436 struct perf_counter *leader = counter->group_leader;
437 int cpu = smp_processor_id();
443 * If this is a task context, we need to check whether it is
444 * the current task context of this cpu. If not it has been
445 * scheduled out before the smp call arrived.
447 if (ctx->task && cpuctx->task_ctx != ctx)
450 curr_rq_lock_irq_save(&flags);
451 spin_lock(&ctx->lock);
454 * Protect the list operation against NMI by disabling the
455 * counters on a global level. NOP for non NMI based counters.
457 perf_flags = hw_perf_save_disable();
459 list_add_counter(counter, ctx);
461 counter->prev_state = PERF_COUNTER_STATE_OFF;
464 * Don't put the counter on if it is disabled or if
465 * it is in a group and the group isn't on.
467 if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
468 (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
472 * An exclusive counter can't go on if there are already active
473 * hardware counters, and no hardware counter can go on if there
474 * is already an exclusive counter on.
476 if (!group_can_go_on(counter, cpuctx, 1))
479 err = counter_sched_in(counter, cpuctx, ctx, cpu);
483 * This counter couldn't go on. If it is in a group
484 * then we have to pull the whole group off.
485 * If the counter group is pinned then put it in error state.
487 if (leader != counter)
488 group_sched_out(leader, cpuctx, ctx);
489 if (leader->hw_event.pinned)
490 leader->state = PERF_COUNTER_STATE_ERROR;
493 if (!err && !ctx->task && cpuctx->max_pertask)
494 cpuctx->max_pertask--;
497 hw_perf_restore(perf_flags);
499 spin_unlock(&ctx->lock);
500 curr_rq_unlock_irq_restore(&flags);
504 * Attach a performance counter to a context
506 * First we add the counter to the list with the hardware enable bit
507 * in counter->hw_config cleared.
* If the counter is attached to a task which is on a CPU we use an smp
510 * call to enable it in the task context. The task might have been
511 * scheduled away, but we check this in the smp call again.
513 * Must be called with ctx->mutex held.
516 perf_install_in_context(struct perf_counter_context *ctx,
517 struct perf_counter *counter,
520 struct task_struct *task = ctx->task;
524 * Per cpu counters are installed via an smp call and
* the install is always successful.
527 smp_call_function_single(cpu, __perf_install_in_context,
532 counter->task = task;
534 task_oncpu_function_call(task, __perf_install_in_context,
537 spin_lock_irq(&ctx->lock);
* If the context is active we need to retry the smp call.
541 if (ctx->is_active && list_empty(&counter->list_entry)) {
542 spin_unlock_irq(&ctx->lock);
* The lock prevents this context from being scheduled in, so we
* can add the counter safely if the call above did not succeed.
551 if (list_empty(&counter->list_entry)) {
552 list_add_counter(counter, ctx);
555 spin_unlock_irq(&ctx->lock);
559 * Cross CPU call to enable a performance counter
561 static void __perf_counter_enable(void *info)
563 struct perf_counter *counter = info;
564 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
565 struct perf_counter_context *ctx = counter->ctx;
566 struct perf_counter *leader = counter->group_leader;
* If this is a per-task counter, we need to check whether this
572 * counter's task is the current task on this cpu.
574 if (ctx->task && cpuctx->task_ctx != ctx)
577 curr_rq_lock_irq_save(&flags);
578 spin_lock(&ctx->lock);
580 counter->prev_state = counter->state;
581 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
583 counter->state = PERF_COUNTER_STATE_INACTIVE;
586 * If the counter is in a group and isn't the group leader,
587 * then don't put it on unless the group is on.
589 if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
592 if (!group_can_go_on(counter, cpuctx, 1))
595 err = counter_sched_in(counter, cpuctx, ctx,
600 * If this counter can't go on and it's part of a
601 * group, then the whole group has to come off.
603 if (leader != counter)
604 group_sched_out(leader, cpuctx, ctx);
605 if (leader->hw_event.pinned)
606 leader->state = PERF_COUNTER_STATE_ERROR;
610 spin_unlock(&ctx->lock);
611 curr_rq_unlock_irq_restore(&flags);
617 static void perf_counter_enable(struct perf_counter *counter)
619 struct perf_counter_context *ctx = counter->ctx;
620 struct task_struct *task = ctx->task;
624 * Enable the counter on the cpu that it's on
626 smp_call_function_single(counter->cpu, __perf_counter_enable,
631 spin_lock_irq(&ctx->lock);
632 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
636 * If the counter is in error state, clear that first.
637 * That way, if we see the counter in error state below, we
638 * know that it has gone back into error state, as distinct
639 * from the task having been scheduled away before the
640 * cross-call arrived.
642 if (counter->state == PERF_COUNTER_STATE_ERROR)
643 counter->state = PERF_COUNTER_STATE_OFF;
646 spin_unlock_irq(&ctx->lock);
647 task_oncpu_function_call(task, __perf_counter_enable, counter);
649 spin_lock_irq(&ctx->lock);
652 * If the context is active and the counter is still off,
653 * we need to retry the cross-call.
655 if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
659 * Since we have the lock this context can't be scheduled
660 * in, so we can change the state safely.
662 if (counter->state == PERF_COUNTER_STATE_OFF)
663 counter->state = PERF_COUNTER_STATE_INACTIVE;
665 spin_unlock_irq(&ctx->lock);
669 * Enable a counter and all its children.
671 static void perf_counter_enable_family(struct perf_counter *counter)
673 struct perf_counter *child;
675 perf_counter_enable(counter);
678 * Lock the mutex to protect the list of children
680 mutex_lock(&counter->mutex);
681 list_for_each_entry(child, &counter->child_list, child_list)
682 perf_counter_enable(child);
683 mutex_unlock(&counter->mutex);
686 void __perf_counter_sched_out(struct perf_counter_context *ctx,
687 struct perf_cpu_context *cpuctx)
689 struct perf_counter *counter;
692 spin_lock(&ctx->lock);
694 if (likely(!ctx->nr_counters))
697 flags = hw_perf_save_disable();
698 if (ctx->nr_active) {
699 list_for_each_entry(counter, &ctx->counter_list, list_entry)
700 group_sched_out(counter, cpuctx, ctx);
702 hw_perf_restore(flags);
704 spin_unlock(&ctx->lock);
708 * Called from scheduler to remove the counters of the current task,
709 * with interrupts disabled.
711 * We stop each counter and update the counter value in counter->count.
713 * This does not protect us against NMI, but disable()
714 * sets the disabled bit in the control field of counter _before_
* accessing the counter control register. If an NMI hits, then it will
716 * not restart the counter.
718 void perf_counter_task_sched_out(struct task_struct *task, int cpu)
720 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
721 struct perf_counter_context *ctx = &task->perf_counter_ctx;
722 struct pt_regs *regs;
724 if (likely(!cpuctx->task_ctx))
727 regs = task_pt_regs(task);
728 perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs);
729 __perf_counter_sched_out(ctx, cpuctx);
731 cpuctx->task_ctx = NULL;
734 static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
736 __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
740 group_sched_in(struct perf_counter *group_counter,
741 struct perf_cpu_context *cpuctx,
742 struct perf_counter_context *ctx,
745 struct perf_counter *counter, *partial_group;
748 if (group_counter->state == PERF_COUNTER_STATE_OFF)
751 ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
753 return ret < 0 ? ret : 0;
755 group_counter->prev_state = group_counter->state;
756 if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
760 * Schedule in siblings as one group (if any):
762 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
763 counter->prev_state = counter->state;
764 if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
765 partial_group = counter;
774 * Groups can be scheduled in as one unit only, so undo any
775 * partial group before returning:
777 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
778 if (counter == partial_group)
780 counter_sched_out(counter, cpuctx, ctx);
782 counter_sched_out(group_counter, cpuctx, ctx);
788 __perf_counter_sched_in(struct perf_counter_context *ctx,
789 struct perf_cpu_context *cpuctx, int cpu)
791 struct perf_counter *counter;
795 spin_lock(&ctx->lock);
797 if (likely(!ctx->nr_counters))
800 flags = hw_perf_save_disable();
803 * First go through the list and put on any pinned groups
804 * in order to give them the best chance of going on.
806 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
807 if (counter->state <= PERF_COUNTER_STATE_OFF ||
808 !counter->hw_event.pinned)
810 if (counter->cpu != -1 && counter->cpu != cpu)
813 if (group_can_go_on(counter, cpuctx, 1))
814 group_sched_in(counter, cpuctx, ctx, cpu);
817 * If this pinned group hasn't been scheduled,
818 * put it in error state.
820 if (counter->state == PERF_COUNTER_STATE_INACTIVE)
821 counter->state = PERF_COUNTER_STATE_ERROR;
824 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
826 * Ignore counters in OFF or ERROR state, and
827 * ignore pinned counters since we did them already.
829 if (counter->state <= PERF_COUNTER_STATE_OFF ||
830 counter->hw_event.pinned)
* Listen to the 'cpu' scheduling filter constraint of counters:
837 if (counter->cpu != -1 && counter->cpu != cpu)
840 if (group_can_go_on(counter, cpuctx, can_add_hw)) {
841 if (group_sched_in(counter, cpuctx, ctx, cpu))
845 hw_perf_restore(flags);
847 spin_unlock(&ctx->lock);
851 * Called from scheduler to add the counters of the current task
852 * with interrupts disabled.
854 * We restore the counter value and then enable it.
856 * This does not protect us against NMI, but enable()
857 * sets the enabled bit in the control field of counter _before_
* accessing the counter control register. If an NMI hits, then it will
859 * keep the counter running.
861 void perf_counter_task_sched_in(struct task_struct *task, int cpu)
863 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
864 struct perf_counter_context *ctx = &task->perf_counter_ctx;
866 __perf_counter_sched_in(ctx, cpuctx, cpu);
867 cpuctx->task_ctx = ctx;
870 static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
872 struct perf_counter_context *ctx = &cpuctx->ctx;
874 __perf_counter_sched_in(ctx, cpuctx, cpu);
877 int perf_counter_task_disable(void)
879 struct task_struct *curr = current;
880 struct perf_counter_context *ctx = &curr->perf_counter_ctx;
881 struct perf_counter *counter;
886 if (likely(!ctx->nr_counters))
889 curr_rq_lock_irq_save(&flags);
890 cpu = smp_processor_id();
892 /* force the update of the task clock: */
893 __task_delta_exec(curr, 1);
895 perf_counter_task_sched_out(curr, cpu);
897 spin_lock(&ctx->lock);
900 * Disable all the counters:
902 perf_flags = hw_perf_save_disable();
904 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
905 if (counter->state != PERF_COUNTER_STATE_ERROR)
906 counter->state = PERF_COUNTER_STATE_OFF;
909 hw_perf_restore(perf_flags);
911 spin_unlock(&ctx->lock);
913 curr_rq_unlock_irq_restore(&flags);
918 int perf_counter_task_enable(void)
920 struct task_struct *curr = current;
921 struct perf_counter_context *ctx = &curr->perf_counter_ctx;
922 struct perf_counter *counter;
927 if (likely(!ctx->nr_counters))
930 curr_rq_lock_irq_save(&flags);
931 cpu = smp_processor_id();
933 /* force the update of the task clock: */
934 __task_delta_exec(curr, 1);
936 perf_counter_task_sched_out(curr, cpu);
938 spin_lock(&ctx->lock);
941 * Disable all the counters:
943 perf_flags = hw_perf_save_disable();
945 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
if (counter->state > PERF_COUNTER_STATE_OFF)
	continue;
counter->state = PERF_COUNTER_STATE_INACTIVE;
949 counter->hw_event.disabled = 0;
951 hw_perf_restore(perf_flags);
953 spin_unlock(&ctx->lock);
955 perf_counter_task_sched_in(curr, cpu);
957 curr_rq_unlock_irq_restore(&flags);
963 * Round-robin a context's counters:
965 static void rotate_ctx(struct perf_counter_context *ctx)
967 struct perf_counter *counter;
970 if (!ctx->nr_counters)
973 spin_lock(&ctx->lock);
975 * Rotate the first entry last (works just fine for group counters too):
977 perf_flags = hw_perf_save_disable();
978 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
979 list_move_tail(&counter->list_entry, &ctx->counter_list);
982 hw_perf_restore(perf_flags);
984 spin_unlock(&ctx->lock);
987 void perf_counter_task_tick(struct task_struct *curr, int cpu)
989 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
990 struct perf_counter_context *ctx = &curr->perf_counter_ctx;
991 const int rotate_percpu = 0;
994 perf_counter_cpu_sched_out(cpuctx);
995 perf_counter_task_sched_out(curr, cpu);
998 rotate_ctx(&cpuctx->ctx);
1002 perf_counter_cpu_sched_in(cpuctx, cpu);
1003 perf_counter_task_sched_in(curr, cpu);
1007 * Cross CPU call to read the hardware counter
1009 static void __read(void *info)
1011 struct perf_counter *counter = info;
1012 unsigned long flags;
1014 curr_rq_lock_irq_save(&flags);
1015 counter->hw_ops->read(counter);
1016 curr_rq_unlock_irq_restore(&flags);
1019 static u64 perf_counter_read(struct perf_counter *counter)
1022 * If counter is enabled and currently active on a CPU, update the
1023 * value in the counter structure:
1025 if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
1026 smp_call_function_single(counter->oncpu,
1027 __read, counter, 1);
1030 return atomic64_read(&counter->count);
1033 static void put_context(struct perf_counter_context *ctx)
1036 put_task_struct(ctx->task);
1039 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1041 struct perf_cpu_context *cpuctx;
1042 struct perf_counter_context *ctx;
1043 struct task_struct *task;
1046 * If cpu is not a wildcard then this is a percpu counter:
1049 /* Must be root to operate on a CPU counter: */
1050 if (!capable(CAP_SYS_ADMIN))
1051 return ERR_PTR(-EACCES);
1053 if (cpu < 0 || cpu > num_possible_cpus())
1054 return ERR_PTR(-EINVAL);
* We could be clever and allow attaching a counter to an
* offline CPU and activate it when the CPU comes up, but
* that's for later.
1061 if (!cpu_isset(cpu, cpu_online_map))
1062 return ERR_PTR(-ENODEV);
1064 cpuctx = &per_cpu(perf_cpu_context, cpu);
1074 task = find_task_by_vpid(pid);
1076 get_task_struct(task);
1080 return ERR_PTR(-ESRCH);
1082 ctx = &task->perf_counter_ctx;
1085 /* Reuse ptrace permission checks for now. */
1086 if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
1088 return ERR_PTR(-EACCES);
1094 static void free_counter_rcu(struct rcu_head *head)
1096 struct perf_counter *counter;
1098 counter = container_of(head, struct perf_counter, rcu_head);
1102 static void free_counter(struct perf_counter *counter)
1104 if (counter->destroy)
1105 counter->destroy(counter);
1107 call_rcu(&counter->rcu_head, free_counter_rcu);
1111 * Called when the last reference to the file is gone.
1113 static int perf_release(struct inode *inode, struct file *file)
1115 struct perf_counter *counter = file->private_data;
1116 struct perf_counter_context *ctx = counter->ctx;
1118 file->private_data = NULL;
1120 mutex_lock(&ctx->mutex);
1121 mutex_lock(&counter->mutex);
1123 perf_counter_remove_from_context(counter);
1125 mutex_unlock(&counter->mutex);
1126 mutex_unlock(&ctx->mutex);
1128 free_counter(counter);
* Read the performance counter - simple non-blocking version for now
1138 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1142 if (count < sizeof(cntval))
1146 * Return end-of-file for a read on a counter that is in
1147 * error state (i.e. because it was pinned but it couldn't be
1148 * scheduled on to the CPU at some point).
1150 if (counter->state == PERF_COUNTER_STATE_ERROR)
1153 mutex_lock(&counter->mutex);
1154 cntval = perf_counter_read(counter);
1155 mutex_unlock(&counter->mutex);
1157 return put_user(cntval, (u64 __user *) buf) ? -EFAULT : sizeof(cntval);
1161 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1163 struct perf_counter *counter = file->private_data;
1165 return perf_read_hw(counter, buf, count);
1168 static unsigned int perf_poll(struct file *file, poll_table *wait)
1170 struct perf_counter *counter = file->private_data;
1171 struct perf_mmap_data *data;
1172 unsigned int events;
1175 data = rcu_dereference(counter->data);
1177 events = atomic_xchg(&data->wakeup, 0);
1182 poll_wait(file, &counter->waitq, wait);
1187 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1189 struct perf_counter *counter = file->private_data;
1193 case PERF_COUNTER_IOC_ENABLE:
1194 perf_counter_enable_family(counter);
1196 case PERF_COUNTER_IOC_DISABLE:
1197 perf_counter_disable_family(counter);
1205 static void __perf_counter_update_userpage(struct perf_counter *counter,
1206 struct perf_mmap_data *data)
1208 struct perf_counter_mmap_page *userpg = data->user_page;
* Disable preemption so as not to let the corresponding user-space
* spin too long if we get preempted.
1217 userpg->index = counter->hw.idx;
1218 userpg->offset = atomic64_read(&counter->count);
1219 if (counter->state == PERF_COUNTER_STATE_ACTIVE)
1220 userpg->offset -= atomic64_read(&counter->hw.prev_count);
1222 userpg->data_head = atomic_read(&data->head);
1228 void perf_counter_update_userpage(struct perf_counter *counter)
1230 struct perf_mmap_data *data;
1233 data = rcu_dereference(counter->data);
1235 __perf_counter_update_userpage(counter, data);
1239 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1241 struct perf_counter *counter = vma->vm_file->private_data;
1242 struct perf_mmap_data *data;
1243 int ret = VM_FAULT_SIGBUS;
1246 data = rcu_dereference(counter->data);
if (vmf->pgoff == 0) {
	vmf->page = virt_to_page(data->user_page);
} else {
	int nr = vmf->pgoff - 1;

	if ((unsigned)nr >= data->nr_pages)
		goto unlock;

	vmf->page = virt_to_page(data->data_pages[nr]);
}
1260 get_page(vmf->page);
1268 static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
1270 struct perf_mmap_data *data;
1274 WARN_ON(atomic_read(&counter->mmap_count));
1276 size = sizeof(struct perf_mmap_data);
1277 size += nr_pages * sizeof(void *);
1279 data = kzalloc(size, GFP_KERNEL);
1283 data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
1284 if (!data->user_page)
1285 goto fail_user_page;
1287 for (i = 0; i < nr_pages; i++) {
1288 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
1289 if (!data->data_pages[i])
1290 goto fail_data_pages;
1293 data->nr_pages = nr_pages;
1295 rcu_assign_pointer(counter->data, data);
1300 for (i--; i >= 0; i--)
1301 free_page((unsigned long)data->data_pages[i]);
1303 free_page((unsigned long)data->user_page);
1312 static void __perf_mmap_data_free(struct rcu_head *rcu_head)
1314 struct perf_mmap_data *data = container_of(rcu_head,
1315 struct perf_mmap_data, rcu_head);
1318 free_page((unsigned long)data->user_page);
1319 for (i = 0; i < data->nr_pages; i++)
1320 free_page((unsigned long)data->data_pages[i]);
1324 static void perf_mmap_data_free(struct perf_counter *counter)
1326 struct perf_mmap_data *data = counter->data;
1328 WARN_ON(atomic_read(&counter->mmap_count));
1330 rcu_assign_pointer(counter->data, NULL);
1331 call_rcu(&data->rcu_head, __perf_mmap_data_free);
1334 static void perf_mmap_open(struct vm_area_struct *vma)
1336 struct perf_counter *counter = vma->vm_file->private_data;
1338 atomic_inc(&counter->mmap_count);
1341 static void perf_mmap_close(struct vm_area_struct *vma)
1343 struct perf_counter *counter = vma->vm_file->private_data;
1345 if (atomic_dec_and_mutex_lock(&counter->mmap_count,
1346 &counter->mmap_mutex)) {
1347 perf_mmap_data_free(counter);
1348 mutex_unlock(&counter->mmap_mutex);
1352 static struct vm_operations_struct perf_mmap_vmops = {
1353 .open = perf_mmap_open,
1354 .close = perf_mmap_close,
1355 .fault = perf_mmap_fault,
1358 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
1360 struct perf_counter *counter = file->private_data;
1361 unsigned long vma_size;
1362 unsigned long nr_pages;
1363 unsigned long locked, lock_limit;
1366 if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
1369 vma_size = vma->vm_end - vma->vm_start;
1370 nr_pages = (vma_size / PAGE_SIZE) - 1;
1372 if (nr_pages == 0 || !is_power_of_2(nr_pages))
1375 if (vma_size != PAGE_SIZE * (1 + nr_pages))
1378 if (vma->vm_pgoff != 0)
1381 locked = vma_size >> PAGE_SHIFT;
1382 locked += vma->vm_mm->locked_vm;
1384 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
1385 lock_limit >>= PAGE_SHIFT;
1387 if ((locked > lock_limit) && !capable(CAP_IPC_LOCK))
1390 mutex_lock(&counter->mmap_mutex);
1391 if (atomic_inc_not_zero(&counter->mmap_count))
1394 WARN_ON(counter->data);
1395 ret = perf_mmap_data_alloc(counter, nr_pages);
1397 atomic_set(&counter->mmap_count, 1);
1399 mutex_unlock(&counter->mmap_mutex);
1401 vma->vm_flags &= ~VM_MAYWRITE;
1402 vma->vm_flags |= VM_RESERVED;
1403 vma->vm_ops = &perf_mmap_vmops;
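/*
 * Illustrative user-space sketch (not part of this file): mapping a
 * counter fd as required by perf_mmap() above - read-only, MAP_SHARED,
 * one control page followed by a power-of-two number of data pages -
 * and peeking at data_head in the control page, which
 * __perf_counter_update_userpage() keeps up to date. Only the fields
 * used above are relied upon; everything else is an assumption.
 */
#if 0
#include <unistd.h>
#include <sys/mman.h>
#include <linux/perf_counter.h>

static void *example_map_counter(int fd, unsigned int nr_data_pages)
{
	size_t len = (1 + nr_data_pages) * sysconf(_SC_PAGESIZE);

	/* nr_data_pages must be a power of two, offset must be 0 */
	return mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
}

static unsigned int example_data_head(void *base)
{
	struct perf_counter_mmap_page *pc = base;

	return pc->data_head;	/* written by the kernel side above */
}
#endif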
1408 static const struct file_operations perf_fops = {
1409 .release = perf_release,
1412 .unlocked_ioctl = perf_ioctl,
1413 .compat_ioctl = perf_ioctl,
1421 struct perf_output_handle {
1422 struct perf_counter *counter;
1423 struct perf_mmap_data *data;
1424 unsigned int offset;
1429 static int perf_output_begin(struct perf_output_handle *handle,
1430 struct perf_counter *counter, unsigned int size)
1432 struct perf_mmap_data *data;
1433 unsigned int offset, head;
1436 data = rcu_dereference(counter->data);
1440 if (!data->nr_pages)
1444 offset = head = atomic_read(&data->head);
1446 } while (atomic_cmpxchg(&data->head, offset, head) != offset);
1448 handle->counter = counter;
1449 handle->data = data;
1450 handle->offset = offset;
1451 handle->head = head;
1452 handle->wakeup = (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT);
1462 static void perf_output_copy(struct perf_output_handle *handle,
1463 void *buf, unsigned int len)
1465 unsigned int pages_mask;
1466 unsigned int offset;
1470 offset = handle->offset;
1471 pages_mask = handle->data->nr_pages - 1;
1472 pages = handle->data->data_pages;
1475 unsigned int page_offset;
1478 nr = (offset >> PAGE_SHIFT) & pages_mask;
1479 page_offset = offset & (PAGE_SIZE - 1);
1480 size = min_t(unsigned int, PAGE_SIZE - page_offset, len);
1482 memcpy(pages[nr] + page_offset, buf, size);
1489 handle->offset = offset;
1491 WARN_ON_ONCE(handle->offset > handle->head);
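/*
 * Worked example of the wrap-around above (illustrative numbers): with
 * nr_pages = 4 (pages_mask = 3) and PAGE_SIZE = 4096, a copy starting at
 * offset 16380 puts its first 4 bytes into page (16380 >> PAGE_SHIFT) & 3
 * = 3 at page_offset 4092, then continues at offset 16384, i.e. page
 * (16384 >> PAGE_SHIFT) & 3 = 0, page_offset 0 - the data pages form a
 * 2^n-page ring indexed modulo nr_pages.
 */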
1494 #define perf_output_put(handle, x) \
1495 perf_output_copy((handle), &(x), sizeof(x))
1497 static void perf_output_end(struct perf_output_handle *handle, int nmi)
1499 if (handle->wakeup) {
1500 (void)atomic_xchg(&handle->data->wakeup, POLL_IN);
1501 __perf_counter_update_userpage(handle->counter, handle->data);
1503 handle->counter->wakeup_pending = 1;
1504 set_perf_counter_pending();
1506 wake_up(&handle->counter->waitq);
1511 static int perf_output_write(struct perf_counter *counter, int nmi,
1512 void *buf, ssize_t size)
1514 struct perf_output_handle handle;
1517 ret = perf_output_begin(&handle, counter, size);
1521 perf_output_copy(&handle, buf, size);
1522 perf_output_end(&handle, nmi);
1528 static void perf_output_simple(struct perf_counter *counter,
1529 int nmi, struct pt_regs *regs)
struct {
	struct perf_event_header header;
	u64 ip;
} event;
1536 event.header.type = PERF_EVENT_IP;
1537 event.header.size = sizeof(event);
1538 event.ip = instruction_pointer(regs);
1540 perf_output_write(counter, nmi, &event, sizeof(event));
1543 static void perf_output_group(struct perf_counter *counter, int nmi)
1545 struct perf_output_handle handle;
1546 struct perf_event_header header;
1547 struct perf_counter *leader, *sub;
struct {
	u64 event;
	u64 counter;
} entry;

size = sizeof(header) + counter->nr_siblings * sizeof(entry);
1557 ret = perf_output_begin(&handle, counter, size);
header.type = PERF_EVENT_GROUP;
header.size = size;

perf_output_put(&handle, header);
1566 leader = counter->group_leader;
1567 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
1569 sub->hw_ops->read(sub);
1571 entry.event = sub->hw_event.config;
1572 entry.counter = atomic64_read(&sub->count);
1574 perf_output_put(&handle, entry);
1577 perf_output_end(&handle, nmi);
1580 void perf_counter_output(struct perf_counter *counter,
1581 int nmi, struct pt_regs *regs)
1583 switch (counter->hw_event.record_type) {
1584 case PERF_RECORD_SIMPLE:
1587 case PERF_RECORD_IRQ:
1588 perf_output_simple(counter, nmi, regs);
1591 case PERF_RECORD_GROUP:
1592 perf_output_group(counter, nmi);
1598 * Generic software counter infrastructure
1601 static void perf_swcounter_update(struct perf_counter *counter)
1603 struct hw_perf_counter *hwc = &counter->hw;
1608 prev = atomic64_read(&hwc->prev_count);
1609 now = atomic64_read(&hwc->count);
1610 if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
1615 atomic64_add(delta, &counter->count);
1616 atomic64_sub(delta, &hwc->period_left);
1619 static void perf_swcounter_set_period(struct perf_counter *counter)
1621 struct hw_perf_counter *hwc = &counter->hw;
1622 s64 left = atomic64_read(&hwc->period_left);
1623 s64 period = hwc->irq_period;
if (unlikely(left <= -period)) {
	left = period;
	atomic64_set(&hwc->period_left, left);
}

if (unlikely(left <= 0)) {
	left += period;
	atomic64_add(period, &hwc->period_left);
}
1635 atomic64_set(&hwc->prev_count, -left);
1636 atomic64_set(&hwc->count, -left);
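/*
 * Worked example (illustrative): with hwc->irq_period = 10000 and
 * period_left = 10000, 'left' is 10000 and both prev_count and count
 * start at -10000. perf_swcounter_add() below uses
 * atomic64_add_negative(), so after 10000 further events the count
 * reaches zero, the result is no longer negative, and
 * perf_swcounter_overflow() emits a sample and re-arms the period.
 */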
1639 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
1641 struct perf_counter *counter;
1642 struct pt_regs *regs;
1644 counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
1645 counter->hw_ops->read(counter);
1647 regs = get_irq_regs();
1649 * In case we exclude kernel IPs or are somehow not in interrupt
1650 * context, provide the next best thing, the user IP.
1652 if ((counter->hw_event.exclude_kernel || !regs) &&
1653 !counter->hw_event.exclude_user)
1654 regs = task_pt_regs(current);
1657 perf_counter_output(counter, 0, regs);
1659 hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period));
1661 return HRTIMER_RESTART;
1664 static void perf_swcounter_overflow(struct perf_counter *counter,
1665 int nmi, struct pt_regs *regs)
1667 perf_swcounter_update(counter);
1668 perf_swcounter_set_period(counter);
1669 perf_counter_output(counter, nmi, regs);
1672 static int perf_swcounter_match(struct perf_counter *counter,
1673 enum perf_event_types type,
1674 u32 event, struct pt_regs *regs)
1676 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
1679 if (perf_event_raw(&counter->hw_event))
1682 if (perf_event_type(&counter->hw_event) != type)
1685 if (perf_event_id(&counter->hw_event) != event)
1688 if (counter->hw_event.exclude_user && user_mode(regs))
1691 if (counter->hw_event.exclude_kernel && !user_mode(regs))
1697 static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
1698 int nmi, struct pt_regs *regs)
1700 int neg = atomic64_add_negative(nr, &counter->hw.count);
1701 if (counter->hw.irq_period && !neg)
1702 perf_swcounter_overflow(counter, nmi, regs);
1705 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
1706 enum perf_event_types type, u32 event,
1707 u64 nr, int nmi, struct pt_regs *regs)
1709 struct perf_counter *counter;
1711 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
1715 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
1716 if (perf_swcounter_match(counter, type, event, regs))
1717 perf_swcounter_add(counter, nr, nmi, regs);
static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
	if (in_nmi())
		return &cpuctx->recursion[3];

	if (in_irq())
		return &cpuctx->recursion[2];

	if (in_softirq())
		return &cpuctx->recursion[1];

	return &cpuctx->recursion[0];
1736 static void __perf_swcounter_event(enum perf_event_types type, u32 event,
1737 u64 nr, int nmi, struct pt_regs *regs)
1739 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
1740 int *recursion = perf_swcounter_recursion_context(cpuctx);
1748 perf_swcounter_ctx_event(&cpuctx->ctx, type, event, nr, nmi, regs);
1749 if (cpuctx->task_ctx) {
1750 perf_swcounter_ctx_event(cpuctx->task_ctx, type, event,
1758 put_cpu_var(perf_cpu_context);
1761 void perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs)
1763 __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs);
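/*
 * Illustrative sketch (not part of this file): how another kernel path
 * would feed a software event into the machinery above, mirroring the
 * PERF_COUNT_CONTEXT_SWITCHES call in perf_counter_task_sched_out().
 * The function below is hypothetical; only perf_swcounter_event() and
 * PERF_COUNT_PAGE_FAULTS come from the real interface.
 */
#if 0
static void example_account_page_fault(struct pt_regs *regs)
{
	/* one page fault, not in NMI context, at the faulting regs */
	perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs);
}
#endif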
1766 static void perf_swcounter_read(struct perf_counter *counter)
1768 perf_swcounter_update(counter);
1771 static int perf_swcounter_enable(struct perf_counter *counter)
1773 perf_swcounter_set_period(counter);
1777 static void perf_swcounter_disable(struct perf_counter *counter)
1779 perf_swcounter_update(counter);
1782 static const struct hw_perf_counter_ops perf_ops_generic = {
1783 .enable = perf_swcounter_enable,
1784 .disable = perf_swcounter_disable,
1785 .read = perf_swcounter_read,
1789 * Software counter: cpu wall time clock
1792 static void cpu_clock_perf_counter_update(struct perf_counter *counter)
1794 int cpu = raw_smp_processor_id();
1798 now = cpu_clock(cpu);
1799 prev = atomic64_read(&counter->hw.prev_count);
1800 atomic64_set(&counter->hw.prev_count, now);
1801 atomic64_add(now - prev, &counter->count);
1804 static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
1806 struct hw_perf_counter *hwc = &counter->hw;
1807 int cpu = raw_smp_processor_id();
1809 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
1810 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1811 hwc->hrtimer.function = perf_swcounter_hrtimer;
1812 if (hwc->irq_period) {
1813 __hrtimer_start_range_ns(&hwc->hrtimer,
1814 ns_to_ktime(hwc->irq_period), 0,
1815 HRTIMER_MODE_REL, 0);
1821 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
1823 hrtimer_cancel(&counter->hw.hrtimer);
1824 cpu_clock_perf_counter_update(counter);
1827 static void cpu_clock_perf_counter_read(struct perf_counter *counter)
1829 cpu_clock_perf_counter_update(counter);
1832 static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
1833 .enable = cpu_clock_perf_counter_enable,
1834 .disable = cpu_clock_perf_counter_disable,
1835 .read = cpu_clock_perf_counter_read,
1839 * Software counter: task time clock
1843 * Called from within the scheduler:
1845 static u64 task_clock_perf_counter_val(struct perf_counter *counter, int update)
1847 struct task_struct *curr = counter->task;
1850 delta = __task_delta_exec(curr, update);
1852 return curr->se.sum_exec_runtime + delta;
1855 static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
1860 prev = atomic64_read(&counter->hw.prev_count);
1862 atomic64_set(&counter->hw.prev_count, now);
1866 atomic64_add(delta, &counter->count);
1869 static int task_clock_perf_counter_enable(struct perf_counter *counter)
1871 struct hw_perf_counter *hwc = &counter->hw;
1873 atomic64_set(&hwc->prev_count, task_clock_perf_counter_val(counter, 0));
1874 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1875 hwc->hrtimer.function = perf_swcounter_hrtimer;
1876 if (hwc->irq_period) {
1877 __hrtimer_start_range_ns(&hwc->hrtimer,
1878 ns_to_ktime(hwc->irq_period), 0,
1879 HRTIMER_MODE_REL, 0);
1885 static void task_clock_perf_counter_disable(struct perf_counter *counter)
1887 hrtimer_cancel(&counter->hw.hrtimer);
1888 task_clock_perf_counter_update(counter,
1889 task_clock_perf_counter_val(counter, 0));
1892 static void task_clock_perf_counter_read(struct perf_counter *counter)
1894 task_clock_perf_counter_update(counter,
1895 task_clock_perf_counter_val(counter, 1));
1898 static const struct hw_perf_counter_ops perf_ops_task_clock = {
1899 .enable = task_clock_perf_counter_enable,
1900 .disable = task_clock_perf_counter_disable,
1901 .read = task_clock_perf_counter_read,
1905 * Software counter: cpu migrations
1908 static inline u64 get_cpu_migrations(struct perf_counter *counter)
1910 struct task_struct *curr = counter->ctx->task;
1913 return curr->se.nr_migrations;
1914 return cpu_nr_migrations(smp_processor_id());
1917 static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
1922 prev = atomic64_read(&counter->hw.prev_count);
1923 now = get_cpu_migrations(counter);
1925 atomic64_set(&counter->hw.prev_count, now);
1929 atomic64_add(delta, &counter->count);
1932 static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
1934 cpu_migrations_perf_counter_update(counter);
1937 static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
1939 if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
1940 atomic64_set(&counter->hw.prev_count,
1941 get_cpu_migrations(counter));
1945 static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
1947 cpu_migrations_perf_counter_update(counter);
1950 static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
1951 .enable = cpu_migrations_perf_counter_enable,
1952 .disable = cpu_migrations_perf_counter_disable,
1953 .read = cpu_migrations_perf_counter_read,
1956 #ifdef CONFIG_EVENT_PROFILE
1957 void perf_tpcounter_event(int event_id)
struct pt_regs *regs = get_irq_regs();

if (!regs)
	regs = task_pt_regs(current);
1964 __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs);
1967 extern int ftrace_profile_enable(int);
1968 extern void ftrace_profile_disable(int);
1970 static void tp_perf_counter_destroy(struct perf_counter *counter)
1972 ftrace_profile_disable(perf_event_id(&counter->hw_event));
1975 static const struct hw_perf_counter_ops *
1976 tp_perf_counter_init(struct perf_counter *counter)
1978 int event_id = perf_event_id(&counter->hw_event);
1981 ret = ftrace_profile_enable(event_id);
1985 counter->destroy = tp_perf_counter_destroy;
1986 counter->hw.irq_period = counter->hw_event.irq_period;
1988 return &perf_ops_generic;
1991 static const struct hw_perf_counter_ops *
1992 tp_perf_counter_init(struct perf_counter *counter)
1998 static const struct hw_perf_counter_ops *
1999 sw_perf_counter_init(struct perf_counter *counter)
2001 struct perf_counter_hw_event *hw_event = &counter->hw_event;
2002 const struct hw_perf_counter_ops *hw_ops = NULL;
2003 struct hw_perf_counter *hwc = &counter->hw;
2006 * Software counters (currently) can't in general distinguish
2007 * between user, kernel and hypervisor events.
2008 * However, context switches and cpu migrations are considered
* to be kernel events, and page faults are never hypervisor events.
2012 switch (perf_event_id(&counter->hw_event)) {
2013 case PERF_COUNT_CPU_CLOCK:
2014 hw_ops = &perf_ops_cpu_clock;
2016 if (hw_event->irq_period && hw_event->irq_period < 10000)
2017 hw_event->irq_period = 10000;
2019 case PERF_COUNT_TASK_CLOCK:
2021 * If the user instantiates this as a per-cpu counter,
2022 * use the cpu_clock counter instead.
2024 if (counter->ctx->task)
2025 hw_ops = &perf_ops_task_clock;
2027 hw_ops = &perf_ops_cpu_clock;
2029 if (hw_event->irq_period && hw_event->irq_period < 10000)
2030 hw_event->irq_period = 10000;
2032 case PERF_COUNT_PAGE_FAULTS:
2033 case PERF_COUNT_PAGE_FAULTS_MIN:
2034 case PERF_COUNT_PAGE_FAULTS_MAJ:
2035 case PERF_COUNT_CONTEXT_SWITCHES:
2036 hw_ops = &perf_ops_generic;
2038 case PERF_COUNT_CPU_MIGRATIONS:
2039 if (!counter->hw_event.exclude_kernel)
2040 hw_ops = &perf_ops_cpu_migrations;
2045 hwc->irq_period = hw_event->irq_period;
2051 * Allocate and initialize a counter structure
2053 static struct perf_counter *
2054 perf_counter_alloc(struct perf_counter_hw_event *hw_event,
2056 struct perf_counter_context *ctx,
2057 struct perf_counter *group_leader,
2060 const struct hw_perf_counter_ops *hw_ops;
2061 struct perf_counter *counter;
2063 counter = kzalloc(sizeof(*counter), gfpflags);
2068 * Single counters are their own group leaders, with an
2069 * empty sibling list:
2072 group_leader = counter;
2074 mutex_init(&counter->mutex);
2075 INIT_LIST_HEAD(&counter->list_entry);
2076 INIT_LIST_HEAD(&counter->event_entry);
2077 INIT_LIST_HEAD(&counter->sibling_list);
2078 init_waitqueue_head(&counter->waitq);
2080 mutex_init(&counter->mmap_mutex);
2082 INIT_LIST_HEAD(&counter->child_list);
2085 counter->hw_event = *hw_event;
2086 counter->wakeup_pending = 0;
2087 counter->group_leader = group_leader;
2088 counter->hw_ops = NULL;
2091 counter->state = PERF_COUNTER_STATE_INACTIVE;
2092 if (hw_event->disabled)
2093 counter->state = PERF_COUNTER_STATE_OFF;
2097 if (perf_event_raw(hw_event)) {
2098 hw_ops = hw_perf_counter_init(counter);
2102 switch (perf_event_type(hw_event)) {
2103 case PERF_TYPE_HARDWARE:
2104 hw_ops = hw_perf_counter_init(counter);
2107 case PERF_TYPE_SOFTWARE:
2108 hw_ops = sw_perf_counter_init(counter);
2111 case PERF_TYPE_TRACEPOINT:
2112 hw_ops = tp_perf_counter_init(counter);
2121 counter->hw_ops = hw_ops;
2127 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
* @hw_event_uptr: event type attributes for monitoring/sampling
* @pid: target pid
* @cpu: target cpu
* @group_fd: group leader counter fd
2134 SYSCALL_DEFINE5(perf_counter_open,
2135 const struct perf_counter_hw_event __user *, hw_event_uptr,
2136 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
2138 struct perf_counter *counter, *group_leader;
2139 struct perf_counter_hw_event hw_event;
2140 struct perf_counter_context *ctx;
2141 struct file *counter_file = NULL;
2142 struct file *group_file = NULL;
2143 int fput_needed = 0;
2144 int fput_needed2 = 0;
2147 /* for future expandability... */
2151 if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
2155 * Get the target context (task or percpu):
2157 ctx = find_get_context(pid, cpu);
2159 return PTR_ERR(ctx);
2162 * Look up the group leader (we will attach this counter to it):
2164 group_leader = NULL;
2165 if (group_fd != -1) {
2167 group_file = fget_light(group_fd, &fput_needed);
2169 goto err_put_context;
2170 if (group_file->f_op != &perf_fops)
2171 goto err_put_context;
2173 group_leader = group_file->private_data;
2175 * Do not allow a recursive hierarchy (this new sibling
2176 * becoming part of another group-sibling):
2178 if (group_leader->group_leader != group_leader)
2179 goto err_put_context;
* Do not allow attaching to a group in a different
2182 * task or CPU context:
2184 if (group_leader->ctx != ctx)
2185 goto err_put_context;
2187 * Only a group leader can be exclusive or pinned
2189 if (hw_event.exclusive || hw_event.pinned)
2190 goto err_put_context;
2194 counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
2197 goto err_put_context;
2199 ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
2201 goto err_free_put_context;
2203 counter_file = fget_light(ret, &fput_needed2);
2205 goto err_free_put_context;
2207 counter->filp = counter_file;
2208 mutex_lock(&ctx->mutex);
2209 perf_install_in_context(ctx, counter, cpu);
2210 mutex_unlock(&ctx->mutex);
2212 fput_light(counter_file, fput_needed2);
2215 fput_light(group_file, fput_needed);
2219 err_free_put_context:
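/*
 * Illustrative user-space sketch (not part of this file): opening a
 * counter via the syscall above, enabling it over the ioctl interface
 * and reading the 64-bit value served by perf_read_hw(). The syscall
 * number macro is assumed to come from the architecture's unistd.h;
 * the wrapper name and the event encoding placed in hw_event.config
 * are assumptions for illustration only.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>

static int example_counter_open(struct perf_counter_hw_event *hw_event,
				pid_t pid, int cpu, int group_fd,
				unsigned long flags)
{
	return syscall(__NR_perf_counter_open, hw_event, pid, cpu,
		       group_fd, flags);
}

static int example_measure_self(uint64_t *count)
{
	struct perf_counter_hw_event hw_event;
	int fd;

	memset(&hw_event, 0, sizeof(hw_event));
	/* event selection: the config encoding is defined by the header */
	hw_event.config = PERF_COUNT_CPU_CLOCK;	/* assumed encoding */
	hw_event.disabled = 1;			/* start disabled */

	/* current task (pid 0), any cpu (-1), no group leader (-1) */
	fd = example_counter_open(&hw_event, 0, -1, -1, 0);
	if (fd < 0)
		return fd;

	ioctl(fd, PERF_COUNTER_IOC_ENABLE);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_COUNTER_IOC_DISABLE);

	read(fd, count, sizeof(*count));	/* one u64, see perf_read_hw() */
	return close(fd);
}
#endif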
2229 * Initialize the perf_counter context in a task_struct:
2232 __perf_counter_init_context(struct perf_counter_context *ctx,
2233 struct task_struct *task)
2235 memset(ctx, 0, sizeof(*ctx));
2236 spin_lock_init(&ctx->lock);
2237 mutex_init(&ctx->mutex);
2238 INIT_LIST_HEAD(&ctx->counter_list);
2239 INIT_LIST_HEAD(&ctx->event_list);
2244 * inherit a counter from parent task to child task:
2246 static struct perf_counter *
2247 inherit_counter(struct perf_counter *parent_counter,
2248 struct task_struct *parent,
2249 struct perf_counter_context *parent_ctx,
2250 struct task_struct *child,
2251 struct perf_counter *group_leader,
2252 struct perf_counter_context *child_ctx)
2254 struct perf_counter *child_counter;
2257 * Instead of creating recursive hierarchies of counters,
2258 * we link inherited counters back to the original parent,
* which has a filp for sure, which we use as the reference count:
2262 if (parent_counter->parent)
2263 parent_counter = parent_counter->parent;
2265 child_counter = perf_counter_alloc(&parent_counter->hw_event,
2266 parent_counter->cpu, child_ctx,
2267 group_leader, GFP_KERNEL);
2272 * Link it up in the child's context:
2274 child_counter->task = child;
2275 list_add_counter(child_counter, child_ctx);
2276 child_ctx->nr_counters++;
2278 child_counter->parent = parent_counter;
2280 * inherit into child's child as well:
2282 child_counter->hw_event.inherit = 1;
2285 * Get a reference to the parent filp - we will fput it
2286 * when the child counter exits. This is safe to do because
2287 * we are in the parent and we know that the filp still
2288 * exists and has a nonzero count:
2290 atomic_long_inc(&parent_counter->filp->f_count);
2293 * Link this into the parent counter's child list
2295 mutex_lock(&parent_counter->mutex);
2296 list_add_tail(&child_counter->child_list, &parent_counter->child_list);
2299 * Make the child state follow the state of the parent counter,
2300 * not its hw_event.disabled bit. We hold the parent's mutex,
2301 * so we won't race with perf_counter_{en,dis}able_family.
2303 if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
2304 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
2306 child_counter->state = PERF_COUNTER_STATE_OFF;
2308 mutex_unlock(&parent_counter->mutex);
2310 return child_counter;
2313 static int inherit_group(struct perf_counter *parent_counter,
2314 struct task_struct *parent,
2315 struct perf_counter_context *parent_ctx,
2316 struct task_struct *child,
2317 struct perf_counter_context *child_ctx)
2319 struct perf_counter *leader;
2320 struct perf_counter *sub;
2322 leader = inherit_counter(parent_counter, parent, parent_ctx,
2323 child, NULL, child_ctx);
2326 list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
2327 if (!inherit_counter(sub, parent, parent_ctx,
2328 child, leader, child_ctx))
2334 static void sync_child_counter(struct perf_counter *child_counter,
2335 struct perf_counter *parent_counter)
2337 u64 parent_val, child_val;
2339 parent_val = atomic64_read(&parent_counter->count);
2340 child_val = atomic64_read(&child_counter->count);
2343 * Add back the child's count to the parent's count:
2345 atomic64_add(child_val, &parent_counter->count);
2348 * Remove this counter from the parent's list
2350 mutex_lock(&parent_counter->mutex);
2351 list_del_init(&child_counter->child_list);
2352 mutex_unlock(&parent_counter->mutex);
* Release the parent counter, if this was the last reference to it.
2358 fput(parent_counter->filp);
2362 __perf_counter_exit_task(struct task_struct *child,
2363 struct perf_counter *child_counter,
2364 struct perf_counter_context *child_ctx)
2366 struct perf_counter *parent_counter;
2367 struct perf_counter *sub, *tmp;
2370 * If we do not self-reap then we have to wait for the
2371 * child task to unschedule (it will happen for sure),
2372 * so that its counter is at its final count. (This
2373 * condition triggers rarely - child tasks usually get
2374 * off their CPU before the parent has a chance to
2375 * get this far into the reaping action)
2377 if (child != current) {
2378 wait_task_inactive(child, 0);
2379 list_del_init(&child_counter->list_entry);
2381 struct perf_cpu_context *cpuctx;
2382 unsigned long flags;
2386 * Disable and unlink this counter.
2388 * Be careful about zapping the list - IRQ/NMI context
2389 * could still be processing it:
2391 curr_rq_lock_irq_save(&flags);
2392 perf_flags = hw_perf_save_disable();
2394 cpuctx = &__get_cpu_var(perf_cpu_context);
2396 group_sched_out(child_counter, cpuctx, child_ctx);
2398 list_del_init(&child_counter->list_entry);
2400 child_ctx->nr_counters--;
2402 hw_perf_restore(perf_flags);
2403 curr_rq_unlock_irq_restore(&flags);
2406 parent_counter = child_counter->parent;
2408 * It can happen that parent exits first, and has counters
2409 * that are still around due to the child reference. These
2410 * counters need to be zapped - but otherwise linger.
2412 if (parent_counter) {
2413 sync_child_counter(child_counter, parent_counter);
2414 list_for_each_entry_safe(sub, tmp, &child_counter->sibling_list,
2417 sync_child_counter(sub, sub->parent);
2421 free_counter(child_counter);
2426 * When a child task exits, feed back counter values to parent counters.
2428 * Note: we may be running in child context, but the PID is not hashed
2429 * anymore so new counters will not be added.
2431 void perf_counter_exit_task(struct task_struct *child)
2433 struct perf_counter *child_counter, *tmp;
2434 struct perf_counter_context *child_ctx;
2436 child_ctx = &child->perf_counter_ctx;
2438 if (likely(!child_ctx->nr_counters))
2441 list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
2443 __perf_counter_exit_task(child, child_counter, child_ctx);
2447 * Initialize the perf_counter context in task_struct
2449 void perf_counter_init_task(struct task_struct *child)
2451 struct perf_counter_context *child_ctx, *parent_ctx;
2452 struct perf_counter *counter;
2453 struct task_struct *parent = current;
2455 child_ctx = &child->perf_counter_ctx;
2456 parent_ctx = &parent->perf_counter_ctx;
2458 __perf_counter_init_context(child_ctx, child);
2461 * This is executed from the parent task context, so inherit
2462 * counters that have been marked for cloning:
2465 if (likely(!parent_ctx->nr_counters))
2469 * Lock the parent list. No need to lock the child - not PID
2470 * hashed yet and not running, so nobody can access it.
2472 mutex_lock(&parent_ctx->mutex);
* We don't have to disable NMIs - we are only looking at
2476 * the list, not manipulating it:
2478 list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) {
2479 if (!counter->hw_event.inherit)
2482 if (inherit_group(counter, parent,
2483 parent_ctx, child, child_ctx))
2487 mutex_unlock(&parent_ctx->mutex);
2490 static void __cpuinit perf_counter_init_cpu(int cpu)
2492 struct perf_cpu_context *cpuctx;
2494 cpuctx = &per_cpu(perf_cpu_context, cpu);
2495 __perf_counter_init_context(&cpuctx->ctx, NULL);
2497 mutex_lock(&perf_resource_mutex);
2498 cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
2499 mutex_unlock(&perf_resource_mutex);
2501 hw_perf_counter_setup(cpu);
2504 #ifdef CONFIG_HOTPLUG_CPU
2505 static void __perf_counter_exit_cpu(void *info)
2507 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
2508 struct perf_counter_context *ctx = &cpuctx->ctx;
2509 struct perf_counter *counter, *tmp;
2511 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
2512 __perf_counter_remove_from_context(counter);
2514 static void perf_counter_exit_cpu(int cpu)
2516 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
2517 struct perf_counter_context *ctx = &cpuctx->ctx;
2519 mutex_lock(&ctx->mutex);
2520 smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
2521 mutex_unlock(&ctx->mutex);
2524 static inline void perf_counter_exit_cpu(int cpu) { }
2527 static int __cpuinit
2528 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
2530 unsigned int cpu = (long)hcpu;
2534 case CPU_UP_PREPARE:
2535 case CPU_UP_PREPARE_FROZEN:
2536 perf_counter_init_cpu(cpu);
2539 case CPU_DOWN_PREPARE:
2540 case CPU_DOWN_PREPARE_FROZEN:
2541 perf_counter_exit_cpu(cpu);
2551 static struct notifier_block __cpuinitdata perf_cpu_nb = {
2552 .notifier_call = perf_cpu_notify,
2555 static int __init perf_counter_init(void)
2557 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
2558 (void *)(long)smp_processor_id());
2559 register_cpu_notifier(&perf_cpu_nb);
2563 early_initcall(perf_counter_init);
2565 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
2567 return sprintf(buf, "%d\n", perf_reserved_percpu);
2571 perf_set_reserve_percpu(struct sysdev_class *class,
2575 struct perf_cpu_context *cpuctx;
2579 err = strict_strtoul(buf, 10, &val);
2582 if (val > perf_max_counters)
2585 mutex_lock(&perf_resource_mutex);
2586 perf_reserved_percpu = val;
2587 for_each_online_cpu(cpu) {
2588 cpuctx = &per_cpu(perf_cpu_context, cpu);
2589 spin_lock_irq(&cpuctx->ctx.lock);
2590 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
2591 perf_max_counters - perf_reserved_percpu);
2592 cpuctx->max_pertask = mpt;
2593 spin_unlock_irq(&cpuctx->ctx.lock);
2595 mutex_unlock(&perf_resource_mutex);
2600 static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
2602 return sprintf(buf, "%d\n", perf_overcommit);
2606 perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
2611 err = strict_strtoul(buf, 10, &val);
2617 mutex_lock(&perf_resource_mutex);
2618 perf_overcommit = val;
2619 mutex_unlock(&perf_resource_mutex);
2624 static SYSDEV_CLASS_ATTR(
2627 perf_show_reserve_percpu,
2628 perf_set_reserve_percpu
2631 static SYSDEV_CLASS_ATTR(
2634 perf_show_overcommit,
2638 static struct attribute *perfclass_attrs[] = {
2639 &attr_reserve_percpu.attr,
2640 &attr_overcommit.attr,
2644 static struct attribute_group perfclass_attr_group = {
2645 .attrs = perfclass_attrs,
2646 .name = "perf_counters",
2649 static int __init perf_counter_sysfs_init(void)
2651 return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
2652 &perfclass_attr_group);
2654 device_initcall(perf_counter_sysfs_init);