2 * Performance counter core code
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
9 * For licensing details see kernel-base/COPYING
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/file.h>
17 #include <linux/poll.h>
18 #include <linux/sysfs.h>
19 #include <linux/dcache.h>
20 #include <linux/percpu.h>
21 #include <linux/ptrace.h>
22 #include <linux/vmstat.h>
23 #include <linux/hardirq.h>
24 #include <linux/rculist.h>
25 #include <linux/uaccess.h>
26 #include <linux/syscalls.h>
27 #include <linux/anon_inodes.h>
28 #include <linux/kernel_stat.h>
29 #include <linux/perf_counter.h>
31 #include <asm/irq_regs.h>
34 * Each CPU has a list of per CPU counters:
36 DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
38 int perf_max_counters __read_mostly = 1;
39 static int perf_reserved_percpu __read_mostly;
40 static int perf_overcommit __read_mostly = 1;
42 static atomic_t nr_counters __read_mostly;
43 static atomic_t nr_mmap_counters __read_mostly;
44 static atomic_t nr_comm_counters __read_mostly;
47 * perf counter paranoia level:
48 * 0 - not paranoid
49 * 1 - disallow cpu counters to unpriv
50 * 2 - disallow kernel profiling to unpriv
52 int sysctl_perf_counter_paranoid __read_mostly;
54 static inline bool perf_paranoid_cpu(void)
56 return sysctl_perf_counter_paranoid > 0;
59 static inline bool perf_paranoid_kernel(void)
61 return sysctl_perf_counter_paranoid > 1;
64 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
67 * max perf counter sample rate
69 int sysctl_perf_counter_sample_rate __read_mostly = 100000;
71 static atomic64_t perf_counter_id;
74 * Lock for (sysadmin-configurable) counter reservations:
76 static DEFINE_SPINLOCK(perf_resource_lock);
79 * Architecture provided APIs - weak aliases:
81 extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
86 void __weak hw_perf_disable(void) { barrier(); }
87 void __weak hw_perf_enable(void) { barrier(); }
89 void __weak hw_perf_counter_setup(int cpu) { barrier(); }
92 hw_perf_group_sched_in(struct perf_counter *group_leader,
93 struct perf_cpu_context *cpuctx,
94 struct perf_counter_context *ctx, int cpu)
99 void __weak perf_counter_print_debug(void) { }
101 static DEFINE_PER_CPU(int, disable_count);
103 void __perf_disable(void)
105 __get_cpu_var(disable_count)++;
108 bool __perf_enable(void)
110 return !--__get_cpu_var(disable_count);
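/*
 * perf_disable()/perf_enable() nest: disable_count tracks the per-CPU
 * nesting depth, and __perf_enable() reports true only when the count
 * drops back to zero, i.e. on the outermost enable.
 */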
113 void perf_disable(void)
119 void perf_enable(void)
125 static void get_ctx(struct perf_counter_context *ctx)
127 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
130 static void free_ctx(struct rcu_head *head)
132 struct perf_counter_context *ctx;
134 ctx = container_of(head, struct perf_counter_context, rcu_head);
138 static void put_ctx(struct perf_counter_context *ctx)
140 if (atomic_dec_and_test(&ctx->refcount)) {
142 put_ctx(ctx->parent_ctx);
144 put_task_struct(ctx->task);
145 call_rcu(&ctx->rcu_head, free_ctx);
149 static void unclone_ctx(struct perf_counter_context *ctx)
151 if (ctx->parent_ctx) {
152 put_ctx(ctx->parent_ctx);
153 ctx->parent_ctx = NULL;
158 * Get the perf_counter_context for a task and lock it.
159 * This has to cope with the fact that until it is locked,
160 * the context could get moved to another task.
162 static struct perf_counter_context *
163 perf_lock_task_context(struct task_struct *task, unsigned long *flags)
165 struct perf_counter_context *ctx;
169 ctx = rcu_dereference(task->perf_counter_ctxp);
172 * If this context is a clone of another, it might
173 * get swapped for another underneath us by
174 * perf_counter_task_sched_out, though the
175 * rcu_read_lock() protects us from any context
176 * getting freed. Lock the context and check if it
177 * got swapped before we could get the lock, and retry
178 * if so. If we locked the right context, then it
179 * can't get swapped on us any more.
181 spin_lock_irqsave(&ctx->lock, *flags);
182 if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
183 spin_unlock_irqrestore(&ctx->lock, *flags);
187 if (!atomic_inc_not_zero(&ctx->refcount)) {
188 spin_unlock_irqrestore(&ctx->lock, *flags);
197 * Get the context for a task and increment its pin_count so it
198 * can't get swapped to another task. This also increments its
199 * reference count so that the context can't get freed.
201 static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
203 struct perf_counter_context *ctx;
206 ctx = perf_lock_task_context(task, &flags);
209 spin_unlock_irqrestore(&ctx->lock, flags);
214 static void perf_unpin_context(struct perf_counter_context *ctx)
218 spin_lock_irqsave(&ctx->lock, flags);
220 spin_unlock_irqrestore(&ctx->lock, flags);
225 * Add a counter to the lists for its context.
226 * Must be called with ctx->mutex and ctx->lock held.
229 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
231 struct perf_counter *group_leader = counter->group_leader;
234 * Depending on whether it is a standalone or sibling counter,
235 * add it straight to the context's counter list, or to the group
236 * leader's sibling list:
238 if (group_leader == counter)
239 list_add_tail(&counter->list_entry, &ctx->counter_list);
241 list_add_tail(&counter->list_entry, &group_leader->sibling_list);
242 group_leader->nr_siblings++;
245 list_add_rcu(&counter->event_entry, &ctx->event_list);
247 if (counter->attr.inherit_stat)
252 * Remove a counter from the lists for its context.
253 * Must be called with ctx->mutex and ctx->lock held.
256 list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
258 struct perf_counter *sibling, *tmp;
260 if (list_empty(&counter->list_entry))
263 if (counter->attr.inherit_stat)
266 list_del_init(&counter->list_entry);
267 list_del_rcu(&counter->event_entry);
269 if (counter->group_leader != counter)
270 counter->group_leader->nr_siblings--;
273 * If this was a group counter with sibling counters then
274 * upgrade the siblings to singleton counters by adding them
275 * to the context list directly:
277 list_for_each_entry_safe(sibling, tmp,
278 &counter->sibling_list, list_entry) {
280 list_move_tail(&sibling->list_entry, &ctx->counter_list);
281 sibling->group_leader = sibling;
286 counter_sched_out(struct perf_counter *counter,
287 struct perf_cpu_context *cpuctx,
288 struct perf_counter_context *ctx)
290 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
293 counter->state = PERF_COUNTER_STATE_INACTIVE;
294 counter->tstamp_stopped = ctx->time;
295 counter->pmu->disable(counter);
298 if (!is_software_counter(counter))
299 cpuctx->active_oncpu--;
301 if (counter->attr.exclusive || !cpuctx->active_oncpu)
302 cpuctx->exclusive = 0;
306 group_sched_out(struct perf_counter *group_counter,
307 struct perf_cpu_context *cpuctx,
308 struct perf_counter_context *ctx)
310 struct perf_counter *counter;
312 if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
315 counter_sched_out(group_counter, cpuctx, ctx);
318 * Schedule out siblings (if any):
320 list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
321 counter_sched_out(counter, cpuctx, ctx);
323 if (group_counter->attr.exclusive)
324 cpuctx->exclusive = 0;
328 * Cross CPU call to remove a performance counter
330 * We disable the counter on the hardware level first. After that we
331 * remove it from the context list.
333 static void __perf_counter_remove_from_context(void *info)
335 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
336 struct perf_counter *counter = info;
337 struct perf_counter_context *ctx = counter->ctx;
340 * If this is a task context, we need to check whether it is
341 * the current task context of this cpu. If not it has been
342 * scheduled out before the smp call arrived.
344 if (ctx->task && cpuctx->task_ctx != ctx)
347 spin_lock(&ctx->lock);
349 * Protect the list operation against NMI by disabling the
350 * counters on a global level.
354 counter_sched_out(counter, cpuctx, ctx);
356 list_del_counter(counter, ctx);
360 * Allow more per task counters with respect to the reservation:
363 cpuctx->max_pertask =
364 min(perf_max_counters - ctx->nr_counters,
365 perf_max_counters - perf_reserved_percpu);
369 spin_unlock(&ctx->lock);
374 * Remove the counter from a task's (or a CPU's) list of counters.
376 * Must be called with ctx->mutex held.
378 * CPU counters are removed with a smp call. For task counters we only
379 * call when the task is on a CPU.
381 * If counter->ctx is a cloned context, callers must make sure that
382 * every task struct that counter->ctx->task could possibly point to
383 * remains valid. This is OK when called from perf_release since
384 * that only calls us on the top-level context, which can't be a clone.
385 * When called from perf_counter_exit_task, it's OK because the
386 * context has been detached from its task.
388 static void perf_counter_remove_from_context(struct perf_counter *counter)
390 struct perf_counter_context *ctx = counter->ctx;
391 struct task_struct *task = ctx->task;
395 * Per cpu counters are removed via an smp call and
396 * the removal is always successful.
398 smp_call_function_single(counter->cpu,
399 __perf_counter_remove_from_context,
405 task_oncpu_function_call(task, __perf_counter_remove_from_context,
408 spin_lock_irq(&ctx->lock);
410 * If the context is active we need to retry the smp call.
412 if (ctx->nr_active && !list_empty(&counter->list_entry)) {
413 spin_unlock_irq(&ctx->lock);
418 * The lock prevents this context from being scheduled in, so we
419 * can remove the counter safely if the call above did not succeed.
422 if (!list_empty(&counter->list_entry)) {
423 list_del_counter(counter, ctx);
425 spin_unlock_irq(&ctx->lock);
428 static inline u64 perf_clock(void)
430 return cpu_clock(smp_processor_id());
434 * Update the record of the current time in a context.
436 static void update_context_time(struct perf_counter_context *ctx)
438 u64 now = perf_clock();
440 ctx->time += now - ctx->timestamp;
441 ctx->timestamp = now;
445 * Update the total_time_enabled and total_time_running fields for a counter.
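 * total_time_enabled accumulates all time since the counter was enabled
 * in its context, while total_time_running stops at tstamp_stopped
 * whenever the counter is not actually on the PMU.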
447 static void update_counter_times(struct perf_counter *counter)
449 struct perf_counter_context *ctx = counter->ctx;
452 if (counter->state < PERF_COUNTER_STATE_INACTIVE)
455 counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
457 if (counter->state == PERF_COUNTER_STATE_INACTIVE)
458 run_end = counter->tstamp_stopped;
462 counter->total_time_running = run_end - counter->tstamp_running;
466 * Update total_time_enabled and total_time_running for all counters in a group.
468 static void update_group_times(struct perf_counter *leader)
470 struct perf_counter *counter;
472 update_counter_times(leader);
473 list_for_each_entry(counter, &leader->sibling_list, list_entry)
474 update_counter_times(counter);
478 * Cross CPU call to disable a performance counter
480 static void __perf_counter_disable(void *info)
482 struct perf_counter *counter = info;
483 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
484 struct perf_counter_context *ctx = counter->ctx;
487 * If this is a per-task counter, need to check whether this
488 * counter's task is the current task on this cpu.
490 if (ctx->task && cpuctx->task_ctx != ctx)
493 spin_lock(&ctx->lock);
496 * If the counter is on, turn it off.
497 * If it is in error state, leave it in error state.
499 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
500 update_context_time(ctx);
501 update_counter_times(counter);
502 if (counter == counter->group_leader)
503 group_sched_out(counter, cpuctx, ctx);
505 counter_sched_out(counter, cpuctx, ctx);
506 counter->state = PERF_COUNTER_STATE_OFF;
509 spin_unlock(&ctx->lock);
515 * If counter->ctx is a cloned context, callers must make sure that
516 * every task struct that counter->ctx->task could possibly point to
517 * remains valid. This condition is satisfied when called through
518 * perf_counter_for_each_child or perf_counter_for_each because they
519 * hold the top-level counter's child_mutex, so any descendant that
520 * goes to exit will block in sync_child_counter.
521 * When called from perf_pending_counter it's OK because counter->ctx
522 * is the current context on this CPU and preemption is disabled,
523 * hence we can't get into perf_counter_task_sched_out for this context.
525 static void perf_counter_disable(struct perf_counter *counter)
527 struct perf_counter_context *ctx = counter->ctx;
528 struct task_struct *task = ctx->task;
532 * Disable the counter on the cpu that it's on
534 smp_call_function_single(counter->cpu, __perf_counter_disable,
540 task_oncpu_function_call(task, __perf_counter_disable, counter);
542 spin_lock_irq(&ctx->lock);
544 * If the counter is still active, we need to retry the cross-call.
546 if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
547 spin_unlock_irq(&ctx->lock);
552 * Since we have the lock this context can't be scheduled
553 * in, so we can change the state safely.
555 if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
556 update_counter_times(counter);
557 counter->state = PERF_COUNTER_STATE_OFF;
560 spin_unlock_irq(&ctx->lock);
564 counter_sched_in(struct perf_counter *counter,
565 struct perf_cpu_context *cpuctx,
566 struct perf_counter_context *ctx,
569 if (counter->state <= PERF_COUNTER_STATE_OFF)
572 counter->state = PERF_COUNTER_STATE_ACTIVE;
573 counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
575 * The new state must be visible before we turn it on in the hardware:
579 if (counter->pmu->enable(counter)) {
580 counter->state = PERF_COUNTER_STATE_INACTIVE;
585 counter->tstamp_running += ctx->time - counter->tstamp_stopped;
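/*
 * Only hardware counters consume PMU slots, so software counters do
 * not bump active_oncpu; an exclusive counter marks the whole PMU as
 * exclusively owned for as long as it is scheduled in.
 */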
587 if (!is_software_counter(counter))
588 cpuctx->active_oncpu++;
591 if (counter->attr.exclusive)
592 cpuctx->exclusive = 1;
598 group_sched_in(struct perf_counter *group_counter,
599 struct perf_cpu_context *cpuctx,
600 struct perf_counter_context *ctx,
603 struct perf_counter *counter, *partial_group;
606 if (group_counter->state == PERF_COUNTER_STATE_OFF)
609 ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
611 return ret < 0 ? ret : 0;
613 if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
617 * Schedule in siblings as one group (if any):
619 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
620 if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
621 partial_group = counter;
630 * Groups can be scheduled in as one unit only, so undo any
631 * partial group before returning:
633 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
634 if (counter == partial_group)
636 counter_sched_out(counter, cpuctx, ctx);
638 counter_sched_out(group_counter, cpuctx, ctx);
644 * Return 1 for a group consisting entirely of software counters,
645 * 0 if the group contains any hardware counters.
647 static int is_software_only_group(struct perf_counter *leader)
649 struct perf_counter *counter;
651 if (!is_software_counter(leader))
654 list_for_each_entry(counter, &leader->sibling_list, list_entry)
655 if (!is_software_counter(counter))
662 * Work out whether we can put this counter group on the CPU now.
664 static int group_can_go_on(struct perf_counter *counter,
665 struct perf_cpu_context *cpuctx,
669 * Groups consisting entirely of software counters can always go on.
671 if (is_software_only_group(counter))
674 * If an exclusive group is already on, no other hardware
675 * counters can go on.
677 if (cpuctx->exclusive)
680 * If this group is exclusive and there are already
681 * counters on the CPU, it can't go on.
683 if (counter->attr.exclusive && cpuctx->active_oncpu)
686 * Otherwise, try to add it if all previous groups were able to go on.
692 static void add_counter_to_ctx(struct perf_counter *counter,
693 struct perf_counter_context *ctx)
695 list_add_counter(counter, ctx);
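/*
 * Start all three timestamps at the current context time so the
 * counter's enabled/running totals begin at zero.
 */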
696 counter->tstamp_enabled = ctx->time;
697 counter->tstamp_running = ctx->time;
698 counter->tstamp_stopped = ctx->time;
702 * Cross CPU call to install and enable a performance counter
704 * Must be called with ctx->mutex held
706 static void __perf_install_in_context(void *info)
708 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
709 struct perf_counter *counter = info;
710 struct perf_counter_context *ctx = counter->ctx;
711 struct perf_counter *leader = counter->group_leader;
712 int cpu = smp_processor_id();
716 * If this is a task context, we need to check whether it is
717 * the current task context of this cpu. If not it has been
718 * scheduled out before the smp call arrived.
719 * Or possibly this is the right context but it isn't
720 * on this cpu because it had no counters.
722 if (ctx->task && cpuctx->task_ctx != ctx) {
723 if (cpuctx->task_ctx || ctx->task != current)
725 cpuctx->task_ctx = ctx;
728 spin_lock(&ctx->lock);
730 update_context_time(ctx);
733 * Protect the list operation against NMI by disabling the
734 * counters on a global level. NOP for non NMI based counters.
738 add_counter_to_ctx(counter, ctx);
741 * Don't put the counter on if it is disabled or if
742 * it is in a group and the group isn't on.
744 if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
745 (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
749 * An exclusive counter can't go on if there are already active
750 * hardware counters, and no hardware counter can go on if there
751 * is already an exclusive counter on.
753 if (!group_can_go_on(counter, cpuctx, 1))
756 err = counter_sched_in(counter, cpuctx, ctx, cpu);
760 * This counter couldn't go on. If it is in a group
761 * then we have to pull the whole group off.
762 * If the counter group is pinned then put it in error state.
764 if (leader != counter)
765 group_sched_out(leader, cpuctx, ctx);
766 if (leader->attr.pinned) {
767 update_group_times(leader);
768 leader->state = PERF_COUNTER_STATE_ERROR;
772 if (!err && !ctx->task && cpuctx->max_pertask)
773 cpuctx->max_pertask--;
778 spin_unlock(&ctx->lock);
782 * Attach a performance counter to a context
784 * First we add the counter to the list with the hardware enable bit
785 * in counter->hw_config cleared.
787 * If the counter is attached to a task which is on a CPU we use a smp
788 * call to enable it in the task context. The task might have been
789 * scheduled away, but we check this in the smp call again.
791 * Must be called with ctx->mutex held.
794 perf_install_in_context(struct perf_counter_context *ctx,
795 struct perf_counter *counter,
798 struct task_struct *task = ctx->task;
802 * Per cpu counters are installed via an smp call and
803 * the install is always successful.
805 smp_call_function_single(cpu, __perf_install_in_context,
811 task_oncpu_function_call(task, __perf_install_in_context,
814 spin_lock_irq(&ctx->lock);
816 * we need to retry the smp call.
818 if (ctx->is_active && list_empty(&counter->list_entry)) {
819 spin_unlock_irq(&ctx->lock);
824 * The lock prevents this context from being scheduled in, so we
825 * can add the counter safely if the call above did not succeed.
828 if (list_empty(&counter->list_entry))
829 add_counter_to_ctx(counter, ctx);
830 spin_unlock_irq(&ctx->lock);
834 * Cross CPU call to enable a performance counter
836 static void __perf_counter_enable(void *info)
838 struct perf_counter *counter = info;
839 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
840 struct perf_counter_context *ctx = counter->ctx;
841 struct perf_counter *leader = counter->group_leader;
845 * If this is a per-task counter, need to check whether this
846 * counter's task is the current task on this cpu.
848 if (ctx->task && cpuctx->task_ctx != ctx) {
849 if (cpuctx->task_ctx || ctx->task != current)
851 cpuctx->task_ctx = ctx;
854 spin_lock(&ctx->lock);
856 update_context_time(ctx);
858 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
860 counter->state = PERF_COUNTER_STATE_INACTIVE;
861 counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
864 * If the counter is in a group and isn't the group leader,
865 * then don't put it on unless the group is on.
867 if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
870 if (!group_can_go_on(counter, cpuctx, 1)) {
874 if (counter == leader)
875 err = group_sched_in(counter, cpuctx, ctx,
878 err = counter_sched_in(counter, cpuctx, ctx,
885 * If this counter can't go on and it's part of a
886 * group, then the whole group has to come off.
888 if (leader != counter)
889 group_sched_out(leader, cpuctx, ctx);
890 if (leader->attr.pinned) {
891 update_group_times(leader);
892 leader->state = PERF_COUNTER_STATE_ERROR;
897 spin_unlock(&ctx->lock);
903 * If counter->ctx is a cloned context, callers must make sure that
904 * every task struct that counter->ctx->task could possibly point to
905 * remains valid. This condition is satisfied when called through
906 * perf_counter_for_each_child or perf_counter_for_each as described
907 * for perf_counter_disable.
909 static void perf_counter_enable(struct perf_counter *counter)
911 struct perf_counter_context *ctx = counter->ctx;
912 struct task_struct *task = ctx->task;
916 * Enable the counter on the cpu that it's on
918 smp_call_function_single(counter->cpu, __perf_counter_enable,
923 spin_lock_irq(&ctx->lock);
924 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
928 * If the counter is in error state, clear that first.
929 * That way, if we see the counter in error state below, we
930 * know that it has gone back into error state, as distinct
931 * from the task having been scheduled away before the
932 * cross-call arrived.
934 if (counter->state == PERF_COUNTER_STATE_ERROR)
935 counter->state = PERF_COUNTER_STATE_OFF;
938 spin_unlock_irq(&ctx->lock);
939 task_oncpu_function_call(task, __perf_counter_enable, counter);
941 spin_lock_irq(&ctx->lock);
944 * If the context is active and the counter is still off,
945 * we need to retry the cross-call.
947 if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
951 * Since we have the lock this context can't be scheduled
952 * in, so we can change the state safely.
954 if (counter->state == PERF_COUNTER_STATE_OFF) {
955 counter->state = PERF_COUNTER_STATE_INACTIVE;
956 counter->tstamp_enabled =
957 ctx->time - counter->total_time_enabled;
960 spin_unlock_irq(&ctx->lock);
963 static int perf_counter_refresh(struct perf_counter *counter, int refresh)
966 * not supported on inherited counters
968 if (counter->attr.inherit)
971 atomic_add(refresh, &counter->event_limit);
972 perf_counter_enable(counter);
977 void __perf_counter_sched_out(struct perf_counter_context *ctx,
978 struct perf_cpu_context *cpuctx)
980 struct perf_counter *counter;
982 spin_lock(&ctx->lock);
984 if (likely(!ctx->nr_counters))
986 update_context_time(ctx);
989 if (ctx->nr_active) {
990 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
991 if (counter != counter->group_leader)
992 counter_sched_out(counter, cpuctx, ctx);
994 group_sched_out(counter, cpuctx, ctx);
999 spin_unlock(&ctx->lock);
1003 * Test whether two contexts are equivalent, i.e. whether they
1004 * have both been cloned from the same version of the same context
1005 * and they both have the same number of enabled counters.
1006 * If the number of enabled counters is the same, then the set
1007 * of enabled counters should be the same, because these are both
1008 * inherited contexts, therefore we can't access individual counters
1009 * in them directly with an fd; we can only enable/disable all
1010 * counters via prctl, or enable/disable all counters in a family
1011 * via ioctl, which will have the same effect on both contexts.
1013 static int context_equiv(struct perf_counter_context *ctx1,
1014 struct perf_counter_context *ctx2)
1016 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1017 && ctx1->parent_gen == ctx2->parent_gen
1018 && !ctx1->pin_count && !ctx2->pin_count;
1021 static void __perf_counter_read(void *counter);
1023 static void __perf_counter_sync_stat(struct perf_counter *counter,
1024 struct perf_counter *next_counter)
1028 if (!counter->attr.inherit_stat)
1032 * Update the counter value: we cannot use perf_counter_read()
1033 * because we're in the middle of a context switch and have IRQs
1034 * disabled, which upsets smp_call_function_single(). However,
1035 * we know the counter must be on the current CPU, so we can
1036 * read it directly.
1038 switch (counter->state) {
1039 case PERF_COUNTER_STATE_ACTIVE:
1040 __perf_counter_read(counter);
1043 case PERF_COUNTER_STATE_INACTIVE:
1044 update_counter_times(counter);
1052 * In order to keep per-task stats reliable we need to flip the counter
1053 * values when we flip the contexts.
1055 value = atomic64_read(&next_counter->count);
1056 value = atomic64_xchg(&counter->count, value);
1057 atomic64_set(&next_counter->count, value);
1059 swap(counter->total_time_enabled, next_counter->total_time_enabled);
1060 swap(counter->total_time_running, next_counter->total_time_running);
1063 * Since we swizzled the values, update the user visible data too.
1065 perf_counter_update_userpage(counter);
1066 perf_counter_update_userpage(next_counter);
1069 #define list_next_entry(pos, member) \
1070 list_entry(pos->member.next, typeof(*pos), member)
1072 static void perf_counter_sync_stat(struct perf_counter_context *ctx,
1073 struct perf_counter_context *next_ctx)
1075 struct perf_counter *counter, *next_counter;
1080 counter = list_first_entry(&ctx->event_list,
1081 struct perf_counter, event_entry);
1083 next_counter = list_first_entry(&next_ctx->event_list,
1084 struct perf_counter, event_entry);
1086 while (&counter->event_entry != &ctx->event_list &&
1087 &next_counter->event_entry != &next_ctx->event_list) {
1089 __perf_counter_sync_stat(counter, next_counter);
1091 counter = list_next_entry(counter, event_entry);
1092 next_counter = list_next_entry(next_counter, event_entry);
1097 * Called from scheduler to remove the counters of the current task,
1098 * with interrupts disabled.
1100 * We stop each counter and update the counter value in counter->count.
1102 * This does not protect us against NMI, but disable()
1103 * sets the disabled bit in the control field of counter _before_
1104 * accessing the counter control register. If a NMI hits, then it will
1105 * not restart the counter.
1107 void perf_counter_task_sched_out(struct task_struct *task,
1108 struct task_struct *next, int cpu)
1110 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1111 struct perf_counter_context *ctx = task->perf_counter_ctxp;
1112 struct perf_counter_context *next_ctx;
1113 struct perf_counter_context *parent;
1114 struct pt_regs *regs;
1117 regs = task_pt_regs(task);
1118 perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
1120 if (likely(!ctx || !cpuctx->task_ctx))
1123 update_context_time(ctx);
1126 parent = rcu_dereference(ctx->parent_ctx);
1127 next_ctx = next->perf_counter_ctxp;
1128 if (parent && next_ctx &&
1129 rcu_dereference(next_ctx->parent_ctx) == parent) {
1131 * Looks like the two contexts are clones, so we might be
1132 * able to optimize the context switch. We lock both
1133 * contexts and check that they are clones under the
1134 * lock (including re-checking that neither has been
1135 * uncloned in the meantime). It doesn't matter which
1136 * order we take the locks because no other cpu could
1137 * be trying to lock both of these tasks.
1139 spin_lock(&ctx->lock);
1140 spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
1141 if (context_equiv(ctx, next_ctx)) {
1143 * XXX do we need a memory barrier of sorts
1144 * wrt the rcu_dereference() of perf_counter_ctxp
1146 task->perf_counter_ctxp = next_ctx;
1147 next->perf_counter_ctxp = ctx;
1149 next_ctx->task = task;
1152 perf_counter_sync_stat(ctx, next_ctx);
1154 spin_unlock(&next_ctx->lock);
1155 spin_unlock(&ctx->lock);
1160 __perf_counter_sched_out(ctx, cpuctx);
1161 cpuctx->task_ctx = NULL;
1166 * Called with IRQs disabled
1168 static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
1170 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1172 if (!cpuctx->task_ctx)
1175 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1178 __perf_counter_sched_out(ctx, cpuctx);
1179 cpuctx->task_ctx = NULL;
1183 * Called with IRQs disabled
1185 static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
1187 __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
1191 __perf_counter_sched_in(struct perf_counter_context *ctx,
1192 struct perf_cpu_context *cpuctx, int cpu)
1194 struct perf_counter *counter;
1197 spin_lock(&ctx->lock);
1199 if (likely(!ctx->nr_counters))
1202 ctx->timestamp = perf_clock();
1207 * First go through the list and put on any pinned groups
1208 * in order to give them the best chance of going on.
1210 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1211 if (counter->state <= PERF_COUNTER_STATE_OFF ||
1212 !counter->attr.pinned)
1214 if (counter->cpu != -1 && counter->cpu != cpu)
1217 if (counter != counter->group_leader)
1218 counter_sched_in(counter, cpuctx, ctx, cpu);
1220 if (group_can_go_on(counter, cpuctx, 1))
1221 group_sched_in(counter, cpuctx, ctx, cpu);
1225 * If this pinned group hasn't been scheduled,
1226 * put it in error state.
1228 if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1229 update_group_times(counter);
1230 counter->state = PERF_COUNTER_STATE_ERROR;
1234 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1236 * Ignore counters in OFF or ERROR state, and
1237 * ignore pinned counters since we did them already.
1239 if (counter->state <= PERF_COUNTER_STATE_OFF ||
1240 counter->attr.pinned)
1244 * Listen to the 'cpu' scheduling filter constraint
1247 if (counter->cpu != -1 && counter->cpu != cpu)
1250 if (counter != counter->group_leader) {
1251 if (counter_sched_in(counter, cpuctx, ctx, cpu))
1254 if (group_can_go_on(counter, cpuctx, can_add_hw)) {
1255 if (group_sched_in(counter, cpuctx, ctx, cpu))
1262 spin_unlock(&ctx->lock);
1266 * Called from scheduler to add the counters of the current task
1267 * with interrupts disabled.
1269 * We restore the counter value and then enable it.
1271 * This does not protect us against NMI, but enable()
1272 * sets the enabled bit in the control field of counter _before_
1273 * accessing the counter control register. If a NMI hits, then it will
1274 * keep the counter running.
1276 void perf_counter_task_sched_in(struct task_struct *task, int cpu)
1278 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1279 struct perf_counter_context *ctx = task->perf_counter_ctxp;
1283 if (cpuctx->task_ctx == ctx)
1285 __perf_counter_sched_in(ctx, cpuctx, cpu);
1286 cpuctx->task_ctx = ctx;
1289 static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
1291 struct perf_counter_context *ctx = &cpuctx->ctx;
1293 __perf_counter_sched_in(ctx, cpuctx, cpu);
1296 #define MAX_INTERRUPTS (~0ULL)
1298 static void perf_log_throttle(struct perf_counter *counter, int enable);
1299 static void perf_log_period(struct perf_counter *counter, u64 period);
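/*
 * Re-derive the sample period from the interrupts observed over the
 * last frequency window: estimate the event rate, compute the period
 * that would yield attr.sample_freq samples per second, and move
 * sample_period only 1/8th of the way there to damp oscillation.
 */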
1301 static void perf_adjust_period(struct perf_counter *counter, u64 events)
1303 struct hw_perf_counter *hwc = &counter->hw;
1304 u64 period, sample_period;
1307 events *= hwc->sample_period;
1308 period = div64_u64(events, counter->attr.sample_freq);
1310 delta = (s64)(period - hwc->sample_period);
1311 delta = (delta + 7) / 8; /* low pass filter */
1313 sample_period = hwc->sample_period + delta;
1318 perf_log_period(counter, sample_period);
1320 hwc->sample_period = sample_period;
1323 static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
1325 struct perf_counter *counter;
1326 struct hw_perf_counter *hwc;
1327 u64 interrupts, freq;
1329 spin_lock(&ctx->lock);
1330 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1331 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
1336 interrupts = hwc->interrupts;
1337 hwc->interrupts = 0;
1340 * unthrottle counters on the tick
1342 if (interrupts == MAX_INTERRUPTS) {
1343 perf_log_throttle(counter, 1);
1344 counter->pmu->unthrottle(counter);
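/*
 * Estimate the throttled counter's interrupt rate at twice the sysctl
 * limit, so the frequency adjustment below widens its sample period.
 */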
1345 interrupts = 2*sysctl_perf_counter_sample_rate/HZ;
1348 if (!counter->attr.freq || !counter->attr.sample_freq)
1352 * if the specified freq < HZ then we need to skip ticks
1354 if (counter->attr.sample_freq < HZ) {
1355 freq = counter->attr.sample_freq;
1357 hwc->freq_count += freq;
1358 hwc->freq_interrupts += interrupts;
1360 if (hwc->freq_count < HZ)
1363 interrupts = hwc->freq_interrupts;
1364 hwc->freq_interrupts = 0;
1365 hwc->freq_count -= HZ;
1369 perf_adjust_period(counter, freq * interrupts);
1372 * In order to avoid being stalled by an (accidental) huge
1373 * sample period, force reset the sample period if we didn't
1374 * get any events in this freq period.
1378 counter->pmu->disable(counter);
1379 atomic64_set(&hwc->period_left, 0);
1380 counter->pmu->enable(counter);
1384 spin_unlock(&ctx->lock);
1388 * Round-robin a context's counters:
1390 static void rotate_ctx(struct perf_counter_context *ctx)
1392 struct perf_counter *counter;
1394 if (!ctx->nr_counters)
1397 spin_lock(&ctx->lock);
1399 * Rotate the first entry last (works just fine for group counters too):
1402 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1403 list_move_tail(&counter->list_entry, &ctx->counter_list);
1408 spin_unlock(&ctx->lock);
1411 void perf_counter_task_tick(struct task_struct *curr, int cpu)
1413 struct perf_cpu_context *cpuctx;
1414 struct perf_counter_context *ctx;
1416 if (!atomic_read(&nr_counters))
1419 cpuctx = &per_cpu(perf_cpu_context, cpu);
1420 ctx = curr->perf_counter_ctxp;
1422 perf_ctx_adjust_freq(&cpuctx->ctx);
1424 perf_ctx_adjust_freq(ctx);
1426 perf_counter_cpu_sched_out(cpuctx);
1428 __perf_counter_task_sched_out(ctx);
1430 rotate_ctx(&cpuctx->ctx);
1434 perf_counter_cpu_sched_in(cpuctx, cpu);
1436 perf_counter_task_sched_in(curr, cpu);
1440 * Enable all of a task's counters that have been marked enable-on-exec.
1441 * This expects task == current.
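 *
 * The whole context is scheduled out, counters marked enable_on_exec
 * are flipped to INACTIVE (and the context uncloned if any changed),
 * then the context is scheduled back in so they start counting.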
1443 static void perf_counter_enable_on_exec(struct task_struct *task)
1445 struct perf_counter_context *ctx;
1446 struct perf_counter *counter;
1447 unsigned long flags;
1450 local_irq_save(flags);
1451 ctx = task->perf_counter_ctxp;
1452 if (!ctx || !ctx->nr_counters)
1455 __perf_counter_task_sched_out(ctx);
1457 spin_lock(&ctx->lock);
1459 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1460 if (!counter->attr.enable_on_exec)
1462 counter->attr.enable_on_exec = 0;
1463 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
1465 counter->state = PERF_COUNTER_STATE_INACTIVE;
1466 counter->tstamp_enabled =
1467 ctx->time - counter->total_time_enabled;
1472 * Unclone this context if we enabled any counter.
1477 spin_unlock(&ctx->lock);
1479 perf_counter_task_sched_in(task, smp_processor_id());
1481 local_irq_restore(flags);
1485 * Cross CPU call to read the hardware counter
1487 static void __perf_counter_read(void *info)
1489 struct perf_counter *counter = info;
1490 struct perf_counter_context *ctx = counter->ctx;
1491 unsigned long flags;
1493 local_irq_save(flags);
1495 update_context_time(ctx);
1496 counter->pmu->read(counter);
1497 update_counter_times(counter);
1498 local_irq_restore(flags);
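/*
 * Read the up-to-date value of a counter: an ACTIVE counter is
 * refreshed via an IPI to the CPU it is running on, an INACTIVE one
 * only needs its time accounting updated, and the cached value in
 * counter->count is returned either way.
 */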
1501 static u64 perf_counter_read(struct perf_counter *counter)
1504 * If counter is enabled and currently active on a CPU, update the
1505 * value in the counter structure:
1507 if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
1508 smp_call_function_single(counter->oncpu,
1509 __perf_counter_read, counter, 1);
1510 } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1511 update_counter_times(counter);
1514 return atomic64_read(&counter->count);
1518 * Initialize the perf_counter context in a task_struct:
1521 __perf_counter_init_context(struct perf_counter_context *ctx,
1522 struct task_struct *task)
1524 memset(ctx, 0, sizeof(*ctx));
1525 spin_lock_init(&ctx->lock);
1526 mutex_init(&ctx->mutex);
1527 INIT_LIST_HEAD(&ctx->counter_list);
1528 INIT_LIST_HEAD(&ctx->event_list);
1529 atomic_set(&ctx->refcount, 1);
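/*
 * Find and pin the context for the given pid/cpu: either the per-CPU
 * context (cpu != -1, root only when paranoid) or the target task's
 * context; if the task has none yet, a fresh one is allocated and
 * installed with cmpxchg() so that two racing callers share it.
 */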
1533 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1535 struct perf_counter_context *ctx;
1536 struct perf_cpu_context *cpuctx;
1537 struct task_struct *task;
1538 unsigned long flags;
1542 * If cpu is not a wildcard then this is a percpu counter:
1545 /* Must be root to operate on a CPU counter: */
1546 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1547 return ERR_PTR(-EACCES);
1549 if (cpu < 0 || cpu > num_possible_cpus())
1550 return ERR_PTR(-EINVAL);
1553 * We could be clever and allow attaching a counter to an
1554 * offline CPU and activate it when the CPU comes up, but
1555 * that's for later.
1557 if (!cpu_isset(cpu, cpu_online_map))
1558 return ERR_PTR(-ENODEV);
1560 cpuctx = &per_cpu(perf_cpu_context, cpu);
1571 task = find_task_by_vpid(pid);
1573 get_task_struct(task);
1577 return ERR_PTR(-ESRCH);
1580 * Can't attach counters to a dying task.
1583 if (task->flags & PF_EXITING)
1586 /* Reuse ptrace permission checks for now. */
1588 if (!ptrace_may_access(task, PTRACE_MODE_READ))
1592 ctx = perf_lock_task_context(task, &flags);
1595 spin_unlock_irqrestore(&ctx->lock, flags);
1599 ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
1603 __perf_counter_init_context(ctx, task);
1605 if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
1607 * We raced with some other task; use
1608 * the context they set.
1613 get_task_struct(task);
1616 put_task_struct(task);
1620 put_task_struct(task);
1621 return ERR_PTR(err);
1624 static void free_counter_rcu(struct rcu_head *head)
1626 struct perf_counter *counter;
1628 counter = container_of(head, struct perf_counter, rcu_head);
1630 put_pid_ns(counter->ns);
1634 static void perf_pending_sync(struct perf_counter *counter);
1636 static void free_counter(struct perf_counter *counter)
1638 perf_pending_sync(counter);
1640 if (!counter->parent) {
1641 atomic_dec(&nr_counters);
1642 if (counter->attr.mmap)
1643 atomic_dec(&nr_mmap_counters);
1644 if (counter->attr.comm)
1645 atomic_dec(&nr_comm_counters);
1648 if (counter->destroy)
1649 counter->destroy(counter);
1651 put_ctx(counter->ctx);
1652 call_rcu(&counter->rcu_head, free_counter_rcu);
1656 * Called when the last reference to the file is gone.
1658 static int perf_release(struct inode *inode, struct file *file)
1660 struct perf_counter *counter = file->private_data;
1661 struct perf_counter_context *ctx = counter->ctx;
1663 file->private_data = NULL;
1665 WARN_ON_ONCE(ctx->parent_ctx);
1666 mutex_lock(&ctx->mutex);
1667 perf_counter_remove_from_context(counter);
1668 mutex_unlock(&ctx->mutex);
1670 mutex_lock(&counter->owner->perf_counter_mutex);
1671 list_del_init(&counter->owner_entry);
1672 mutex_unlock(&counter->owner->perf_counter_mutex);
1673 put_task_struct(counter->owner);
1675 free_counter(counter);
1681 * Read the performance counter - simple non-blocking version for now
1684 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1690 * Return end-of-file for a read on a counter that is in
1691 * error state (i.e. because it was pinned but it couldn't be
1692 * scheduled on to the CPU at some point).
1694 if (counter->state == PERF_COUNTER_STATE_ERROR)
1697 WARN_ON_ONCE(counter->ctx->parent_ctx);
1698 mutex_lock(&counter->child_mutex);
1699 values[0] = perf_counter_read(counter);
1701 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1702 values[n++] = counter->total_time_enabled +
1703 atomic64_read(&counter->child_total_time_enabled);
1704 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1705 values[n++] = counter->total_time_running +
1706 atomic64_read(&counter->child_total_time_running);
1707 if (counter->attr.read_format & PERF_FORMAT_ID)
1708 values[n++] = counter->id;
1709 mutex_unlock(&counter->child_mutex);
1711 if (count < n * sizeof(u64))
1713 count = n * sizeof(u64);
1715 if (copy_to_user(buf, values, count))
1722 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1724 struct perf_counter *counter = file->private_data;
1726 return perf_read_hw(counter, buf, count);
1729 static unsigned int perf_poll(struct file *file, poll_table *wait)
1731 struct perf_counter *counter = file->private_data;
1732 struct perf_mmap_data *data;
1733 unsigned int events = POLLHUP;
1736 data = rcu_dereference(counter->data);
1738 events = atomic_xchg(&data->poll, 0);
1741 poll_wait(file, &counter->waitq, wait);
1746 static void perf_counter_reset(struct perf_counter *counter)
1748 (void)perf_counter_read(counter);
1749 atomic64_set(&counter->count, 0);
1750 perf_counter_update_userpage(counter);
1754 * Holding the top-level counter's child_mutex means that any
1755 * descendant process that has inherited this counter will block
1756 * in sync_child_counter if it goes to exit, thus satisfying the
1757 * task existence requirements of perf_counter_enable/disable.
1759 static void perf_counter_for_each_child(struct perf_counter *counter,
1760 void (*func)(struct perf_counter *))
1762 struct perf_counter *child;
1764 WARN_ON_ONCE(counter->ctx->parent_ctx);
1765 mutex_lock(&counter->child_mutex);
1767 list_for_each_entry(child, &counter->child_list, child_list)
1769 mutex_unlock(&counter->child_mutex);
1772 static void perf_counter_for_each(struct perf_counter *counter,
1773 void (*func)(struct perf_counter *))
1775 struct perf_counter_context *ctx = counter->ctx;
1776 struct perf_counter *sibling;
1778 WARN_ON_ONCE(ctx->parent_ctx);
1779 mutex_lock(&ctx->mutex);
1780 counter = counter->group_leader;
1782 perf_counter_for_each_child(counter, func);
1784 list_for_each_entry(sibling, &counter->sibling_list, list_entry)
1785 perf_counter_for_each_child(counter, func);
1786 mutex_unlock(&ctx->mutex);
1789 static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
1791 struct perf_counter_context *ctx = counter->ctx;
1796 if (!counter->attr.sample_period)
1799 size = copy_from_user(&value, arg, sizeof(value));
1800 if (size)
1806 spin_lock_irq(&ctx->lock);
1807 if (counter->attr.freq) {
1808 if (value > sysctl_perf_counter_sample_rate) {
1813 counter->attr.sample_freq = value;
1815 perf_log_period(counter, value);
1817 counter->attr.sample_period = value;
1818 counter->hw.sample_period = value;
1821 spin_unlock_irq(&ctx->lock);
1826 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1828 struct perf_counter *counter = file->private_data;
1829 void (*func)(struct perf_counter *);
1833 case PERF_COUNTER_IOC_ENABLE:
1834 func = perf_counter_enable;
1836 case PERF_COUNTER_IOC_DISABLE:
1837 func = perf_counter_disable;
1839 case PERF_COUNTER_IOC_RESET:
1840 func = perf_counter_reset;
1843 case PERF_COUNTER_IOC_REFRESH:
1844 return perf_counter_refresh(counter, arg);
1846 case PERF_COUNTER_IOC_PERIOD:
1847 return perf_counter_period(counter, (u64 __user *)arg);
1853 if (flags & PERF_IOC_FLAG_GROUP)
1854 perf_counter_for_each(counter, func);
1856 perf_counter_for_each_child(counter, func);
1861 int perf_counter_task_enable(void)
1863 struct perf_counter *counter;
1865 mutex_lock(&current->perf_counter_mutex);
1866 list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
1867 perf_counter_for_each_child(counter, perf_counter_enable);
1868 mutex_unlock(&current->perf_counter_mutex);
1873 int perf_counter_task_disable(void)
1875 struct perf_counter *counter;
1877 mutex_lock(&current->perf_counter_mutex);
1878 list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
1879 perf_counter_for_each_child(counter, perf_counter_disable);
1880 mutex_unlock(&current->perf_counter_mutex);
1885 static int perf_counter_index(struct perf_counter *counter)
1887 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
1890 return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET;
1894 * Callers need to ensure there can be no nesting of this function, otherwise
1895 * the seqlock logic goes bad. We cannot serialize this because the arch
1896 * code calls this from NMI context.
1898 void perf_counter_update_userpage(struct perf_counter *counter)
1900 struct perf_counter_mmap_page *userpg;
1901 struct perf_mmap_data *data;
1904 data = rcu_dereference(counter->data);
1908 userpg = data->user_page;
1911 * Disable preemption so as to not let the corresponding user-space
1912 * spin too long if we get preempted.
1917 userpg->index = perf_counter_index(counter);
1918 userpg->offset = atomic64_read(&counter->count);
1919 if (counter->state == PERF_COUNTER_STATE_ACTIVE)
1920 userpg->offset -= atomic64_read(&counter->hw.prev_count);
1922 userpg->time_enabled = counter->total_time_enabled +
1923 atomic64_read(&counter->child_total_time_enabled);
1925 userpg->time_running = counter->total_time_running +
1926 atomic64_read(&counter->child_total_time_running);
1935 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1937 struct perf_counter *counter = vma->vm_file->private_data;
1938 struct perf_mmap_data *data;
1939 int ret = VM_FAULT_SIGBUS;
1941 if (vmf->flags & FAULT_FLAG_MKWRITE) {
1942 if (vmf->pgoff == 0)
1948 data = rcu_dereference(counter->data);
1952 if (vmf->pgoff == 0) {
1953 vmf->page = virt_to_page(data->user_page);
1955 int nr = vmf->pgoff - 1;
1957 if ((unsigned)nr > data->nr_pages)
1960 if (vmf->flags & FAULT_FLAG_WRITE)
1963 vmf->page = virt_to_page(data->data_pages[nr]);
1966 get_page(vmf->page);
1967 vmf->page->mapping = vma->vm_file->f_mapping;
1968 vmf->page->index = vmf->pgoff;
1977 static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
1979 struct perf_mmap_data *data;
1983 WARN_ON(atomic_read(&counter->mmap_count));
1985 size = sizeof(struct perf_mmap_data);
1986 size += nr_pages * sizeof(void *);
1988 data = kzalloc(size, GFP_KERNEL);
1992 data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
1993 if (!data->user_page)
1994 goto fail_user_page;
1996 for (i = 0; i < nr_pages; i++) {
1997 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
1998 if (!data->data_pages[i])
1999 goto fail_data_pages;
2002 data->nr_pages = nr_pages;
2003 atomic_set(&data->lock, -1);
2005 rcu_assign_pointer(counter->data, data);
2010 for (i--; i >= 0; i--)
2011 free_page((unsigned long)data->data_pages[i]);
2013 free_page((unsigned long)data->user_page);
2022 static void perf_mmap_free_page(unsigned long addr)
2024 struct page *page = virt_to_page(addr);
2026 page->mapping = NULL;
2030 static void __perf_mmap_data_free(struct rcu_head *rcu_head)
2032 struct perf_mmap_data *data;
2035 data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
2037 perf_mmap_free_page((unsigned long)data->user_page);
2038 for (i = 0; i < data->nr_pages; i++)
2039 perf_mmap_free_page((unsigned long)data->data_pages[i]);
2044 static void perf_mmap_data_free(struct perf_counter *counter)
2046 struct perf_mmap_data *data = counter->data;
2048 WARN_ON(atomic_read(&counter->mmap_count));
2050 rcu_assign_pointer(counter->data, NULL);
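/*
 * Free via RCU so that a perf_mmap_fault() or output path still
 * holding an rcu_read_lock()-protected reference to counter->data
 * cannot touch freed pages.
 */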
2051 call_rcu(&data->rcu_head, __perf_mmap_data_free);
2054 static void perf_mmap_open(struct vm_area_struct *vma)
2056 struct perf_counter *counter = vma->vm_file->private_data;
2058 atomic_inc(&counter->mmap_count);
2061 static void perf_mmap_close(struct vm_area_struct *vma)
2063 struct perf_counter *counter = vma->vm_file->private_data;
2065 WARN_ON_ONCE(counter->ctx->parent_ctx);
2066 if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) {
2067 struct user_struct *user = current_user();
2069 atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
2070 vma->vm_mm->locked_vm -= counter->data->nr_locked;
2071 perf_mmap_data_free(counter);
2072 mutex_unlock(&counter->mmap_mutex);
2076 static struct vm_operations_struct perf_mmap_vmops = {
2077 .open = perf_mmap_open,
2078 .close = perf_mmap_close,
2079 .fault = perf_mmap_fault,
2080 .page_mkwrite = perf_mmap_fault,
2083 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2085 struct perf_counter *counter = file->private_data;
2086 unsigned long user_locked, user_lock_limit;
2087 struct user_struct *user = current_user();
2088 unsigned long locked, lock_limit;
2089 unsigned long vma_size;
2090 unsigned long nr_pages;
2091 long user_extra, extra;
2094 if (!(vma->vm_flags & VM_SHARED))
2097 vma_size = vma->vm_end - vma->vm_start;
2098 nr_pages = (vma_size / PAGE_SIZE) - 1;
2101 * If we have data pages ensure they're a power-of-two number, so we
2102 * can do bitmasks instead of modulo.
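 * The mapping itself is one control page (struct perf_counter_mmap_page)
 * followed by those nr_pages data pages.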
2104 if (nr_pages != 0 && !is_power_of_2(nr_pages))
2107 if (vma_size != PAGE_SIZE * (1 + nr_pages))
2110 if (vma->vm_pgoff != 0)
2113 WARN_ON_ONCE(counter->ctx->parent_ctx);
2114 mutex_lock(&counter->mmap_mutex);
2115 if (atomic_inc_not_zero(&counter->mmap_count)) {
2116 if (nr_pages != counter->data->nr_pages)
2121 user_extra = nr_pages + 1;
2122 user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
2125 * Increase the limit linearly with more CPUs:
2127 user_lock_limit *= num_online_cpus();
2129 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
2132 if (user_locked > user_lock_limit)
2133 extra = user_locked - user_lock_limit;
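/*
 * Pages beyond the per-user sysctl_perf_counter_mlock allowance are
 * charged against the task's RLIMIT_MEMLOCK instead.
 */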
2135 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
2136 lock_limit >>= PAGE_SHIFT;
2137 locked = vma->vm_mm->locked_vm + extra;
2139 if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
2144 WARN_ON(counter->data);
2145 ret = perf_mmap_data_alloc(counter, nr_pages);
2149 atomic_set(&counter->mmap_count, 1);
2150 atomic_long_add(user_extra, &user->locked_vm);
2151 vma->vm_mm->locked_vm += extra;
2152 counter->data->nr_locked = extra;
2153 if (vma->vm_flags & VM_WRITE)
2154 counter->data->writable = 1;
2157 mutex_unlock(&counter->mmap_mutex);
2159 vma->vm_flags |= VM_RESERVED;
2160 vma->vm_ops = &perf_mmap_vmops;
2165 static int perf_fasync(int fd, struct file *filp, int on)
2167 struct inode *inode = filp->f_path.dentry->d_inode;
2168 struct perf_counter *counter = filp->private_data;
2171 mutex_lock(&inode->i_mutex);
2172 retval = fasync_helper(fd, filp, on, &counter->fasync);
2173 mutex_unlock(&inode->i_mutex);
2181 static const struct file_operations perf_fops = {
2182 .release = perf_release,
2185 .unlocked_ioctl = perf_ioctl,
2186 .compat_ioctl = perf_ioctl,
2188 .fasync = perf_fasync,
2192 * Perf counter wakeup
2194 * If there's data, ensure we set the poll() state and publish everything
2195 * to user-space before waking everybody up.
2198 void perf_counter_wakeup(struct perf_counter *counter)
2200 wake_up_all(&counter->waitq);
2202 if (counter->pending_kill) {
2203 kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
2204 counter->pending_kill = 0;
2211 * Handle the case where we need to wake up from NMI (or rq->lock) context.
2213 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
2214 * single linked list and use cmpxchg() to add entries lockless.
2217 static void perf_pending_counter(struct perf_pending_entry *entry)
2219 struct perf_counter *counter = container_of(entry,
2220 struct perf_counter, pending);
2222 if (counter->pending_disable) {
2223 counter->pending_disable = 0;
2224 perf_counter_disable(counter);
2227 if (counter->pending_wakeup) {
2228 counter->pending_wakeup = 0;
2229 perf_counter_wakeup(counter);
2233 #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
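/*
 * PENDING_TAIL terminates the per-CPU list; an entry whose ->next is
 * NULL is not queued, and the cmpxchg() in perf_pending_queue() claims
 * it by swapping NULL for PENDING_TAIL before linking it in.
 */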
2235 static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
2239 static void perf_pending_queue(struct perf_pending_entry *entry,
2240 void (*func)(struct perf_pending_entry *))
2242 struct perf_pending_entry **head;
2244 if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
2249 head = &get_cpu_var(perf_pending_head);
2252 entry->next = *head;
2253 } while (cmpxchg(head, entry->next, entry) != entry->next);
2255 set_perf_counter_pending();
2257 put_cpu_var(perf_pending_head);
2260 static int __perf_pending_run(void)
2262 struct perf_pending_entry *list;
2265 list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
2266 while (list != PENDING_TAIL) {
2267 void (*func)(struct perf_pending_entry *);
2268 struct perf_pending_entry *entry = list;
2275 * Ensure we observe the unqueue before we issue the wakeup,
2276 * so that we won't be waiting forever.
2277 * -- see perf_not_pending().
2288 static inline int perf_not_pending(struct perf_counter *counter)
2291 * If we flush on whatever cpu we run, there is a chance we don't need to wait.
2295 __perf_pending_run();
2299 * Ensure we see the proper queue state before going to sleep
2300 * so that we do not miss the wakeup. -- see __perf_pending_run().
2303 return counter->pending.next == NULL;
2306 static void perf_pending_sync(struct perf_counter *counter)
2308 wait_event(counter->waitq, perf_not_pending(counter));
2311 void perf_counter_do_pending(void)
2313 __perf_pending_run();
2317 * Callchain support -- arch specific
2320 __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2329 struct perf_output_handle {
2330 struct perf_counter *counter;
2331 struct perf_mmap_data *data;
2333 unsigned long offset;
2337 unsigned long flags;
2340 static bool perf_output_space(struct perf_mmap_data *data,
2341 unsigned int offset, unsigned int head)
2346 if (!data->writable)
2349 mask = (data->nr_pages << PAGE_SHIFT) - 1;
2351 * Userspace could choose to issue a mb() before updating the tail
2352 * pointer, so that all reads complete before the write is issued.
2355 tail = ACCESS_ONCE(data->user_page->data_tail);
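/*
 * Measure how far the old and new head are ahead of the reader's
 * tail; if the new head would wrap past the tail there is no room
 * and the write must be dropped.
 */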
2358 offset = (offset - tail) & mask;
2359 head = (head - tail) & mask;
2361 if ((int)(head - offset) < 0)
2367 static void perf_output_wakeup(struct perf_output_handle *handle)
2369 atomic_set(&handle->data->poll, POLL_IN);
2372 handle->counter->pending_wakeup = 1;
2373 perf_pending_queue(&handle->counter->pending,
2374 perf_pending_counter);
2376 perf_counter_wakeup(handle->counter);
2380 * Curious locking construct.
2382 * We need to ensure a later event doesn't publish a head when a former
2383 * event isn't done writing. However since we need to deal with NMIs we
2384 * cannot fully serialize things.
2386 * What we do is serialize between CPUs so we only have to deal with NMI
2387 * nesting on a single CPU.
2389 * We only publish the head (and generate a wakeup) when the outer-most event completes.
2392 static void perf_output_lock(struct perf_output_handle *handle)
2394 struct perf_mmap_data *data = handle->data;
2399 local_irq_save(handle->flags);
2400 cpu = smp_processor_id();
2402 if (in_nmi() && atomic_read(&data->lock) == cpu)
2405 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2411 static void perf_output_unlock(struct perf_output_handle *handle)
2413 struct perf_mmap_data *data = handle->data;
2417 data->done_head = data->head;
2419 if (!handle->locked)
2424 * The xchg implies a full barrier that ensures all writes are done
2425 * before we publish the new head, matched by a rmb() in userspace when
2426 * reading this position.
2428 while ((head = atomic_long_xchg(&data->done_head, 0)))
2429 data->user_page->data_head = head;
2432 * NMI can happen here, which means we can miss a done_head update.
2435 cpu = atomic_xchg(&data->lock, -1);
2436 WARN_ON_ONCE(cpu != smp_processor_id());
2439 * Therefore we have to validate we did not indeed do so.
2441 if (unlikely(atomic_long_read(&data->done_head))) {
2443 * Since we had it locked, we can lock it again.
2445 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2451 if (atomic_xchg(&data->wakeup, 0))
2452 perf_output_wakeup(handle);
2454 local_irq_restore(handle->flags);
2457 static void perf_output_copy(struct perf_output_handle *handle,
2458 const void *buf, unsigned int len)
2460 unsigned int pages_mask;
2461 unsigned int offset;
2465 offset = handle->offset;
2466 pages_mask = handle->data->nr_pages - 1;
2467 pages = handle->data->data_pages;
2470 unsigned int page_offset;
2473 nr = (offset >> PAGE_SHIFT) & pages_mask;
2474 page_offset = offset & (PAGE_SIZE - 1);
2475 size = min_t(unsigned int, PAGE_SIZE - page_offset, len);
2477 memcpy(pages[nr] + page_offset, buf, size);
2484 handle->offset = offset;
2487 * Check we didn't copy past our reservation window, taking the
2488 * possible unsigned int wrap into account.
2490 WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
2493 #define perf_output_put(handle, x) \
2494 perf_output_copy((handle), &(x), sizeof(x))
2496 static int perf_output_begin(struct perf_output_handle *handle,
2497 struct perf_counter *counter, unsigned int size,
2498 int nmi, int sample)
2500 struct perf_mmap_data *data;
2501 unsigned int offset, head;
2504 struct perf_event_header header;
2510 * For inherited counters we send all the output towards the parent.
2512 if (counter->parent)
2513 counter = counter->parent;
2516 data = rcu_dereference(counter->data);
2520 handle->data = data;
2521 handle->counter = counter;
2523 handle->sample = sample;
2525 if (!data->nr_pages)
2528 have_lost = atomic_read(&data->lost);
2530 size += sizeof(lost_event);
2532 perf_output_lock(handle);
2535 offset = head = atomic_long_read(&data->head);
2537 if (unlikely(!perf_output_space(data, offset, head)))
2539 } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
2541 handle->offset = offset;
2542 handle->head = head;
2544 if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
2545 atomic_set(&data->wakeup, 1);
2548 lost_event.header.type = PERF_EVENT_LOST;
2549 lost_event.header.misc = 0;
2550 lost_event.header.size = sizeof(lost_event);
2551 lost_event.id = counter->id;
2552 lost_event.lost = atomic_xchg(&data->lost, 0);
2554 perf_output_put(handle, lost_event);
2560 atomic_inc(&data->lost);
2561 perf_output_unlock(handle);
2568 static void perf_output_end(struct perf_output_handle *handle)
2570 struct perf_counter *counter = handle->counter;
2571 struct perf_mmap_data *data = handle->data;
2573 int wakeup_events = counter->attr.wakeup_events;
2575 if (handle->sample && wakeup_events) {
2576 int events = atomic_inc_return(&data->events);
2577 if (events >= wakeup_events) {
2578 atomic_sub(wakeup_events, &data->events);
2579 atomic_set(&data->wakeup, 1);
2583 perf_output_unlock(handle);
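/*
 * All of the event writers below follow the same pattern; a condensed
 * sketch (total_size and the payload names are placeholders):
 *
 *	struct perf_output_handle handle;
 *	int ret;
 *
 *	ret = perf_output_begin(&handle, counter, total_size, nmi, sample);
 *	if (ret)
 *		return;
 *	perf_output_put(&handle, header);
 *	perf_output_copy(&handle, payload, payload_size);
 *	perf_output_end(&handle);
 *
 * total_size must cover everything written before perf_output_end(),
 * otherwise the WARN_ON_ONCE() in perf_output_copy() triggers.
 */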
2587 static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p)
2590 * only top level counters have the pid namespace they were created in
2592 if (counter->parent)
2593 counter = counter->parent;
2595 return task_tgid_nr_ns(p, counter->ns);
2598 static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
2601 * only top level counters have the pid namespace they were created in
2603 if (counter->parent)
2604 counter = counter->parent;
2606 return task_pid_nr_ns(p, counter->ns);
2609 static void perf_counter_output(struct perf_counter *counter, int nmi,
2610 struct perf_sample_data *data)
2613 u64 sample_type = counter->attr.sample_type;
2614 struct perf_output_handle handle;
2615 struct perf_event_header header;
2624 struct perf_callchain_entry *callchain = NULL;
2625 int callchain_size = 0;
2631 header.type = PERF_EVENT_SAMPLE;
2632 header.size = sizeof(header);
2635 header.misc |= perf_misc_flags(data->regs);
2637 if (sample_type & PERF_SAMPLE_IP) {
2638 ip = perf_instruction_pointer(data->regs);
2639 header.size += sizeof(ip);
2642 if (sample_type & PERF_SAMPLE_TID) {
2643 /* namespace issues */
2644 tid_entry.pid = perf_counter_pid(counter, current);
2645 tid_entry.tid = perf_counter_tid(counter, current);
2647 header.size += sizeof(tid_entry);
2650 if (sample_type & PERF_SAMPLE_TIME) {
2652 * Maybe do better on x86 and provide cpu_clock_nmi()
2654 time = sched_clock();
2656 header.size += sizeof(u64);
2659 if (sample_type & PERF_SAMPLE_ADDR)
2660 header.size += sizeof(u64);
2662 if (sample_type & PERF_SAMPLE_ID)
2663 header.size += sizeof(u64);
2665 if (sample_type & PERF_SAMPLE_CPU) {
2666 header.size += sizeof(cpu_entry);
2668 cpu_entry.cpu = raw_smp_processor_id();
2669 cpu_entry.reserved = 0;
2672 if (sample_type & PERF_SAMPLE_PERIOD)
2673 header.size += sizeof(u64);
2675 if (sample_type & PERF_SAMPLE_GROUP) {
2676 header.size += sizeof(u64) +
2677 counter->nr_siblings * sizeof(group_entry);
2680 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2681 callchain = perf_callchain(data->regs);
2684 callchain_size = (1 + callchain->nr) * sizeof(u64);
2685 header.size += callchain_size;
2687 header.size += sizeof(u64);
2690 ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
2694 perf_output_put(&handle, header);
2696 if (sample_type & PERF_SAMPLE_IP)
2697 perf_output_put(&handle, ip);
2699 if (sample_type & PERF_SAMPLE_TID)
2700 perf_output_put(&handle, tid_entry);
2702 if (sample_type & PERF_SAMPLE_TIME)
2703 perf_output_put(&handle, time);
2705 if (sample_type & PERF_SAMPLE_ADDR)
2706 perf_output_put(&handle, data->addr);
2708 if (sample_type & PERF_SAMPLE_ID)
2709 perf_output_put(&handle, counter->id);
2711 if (sample_type & PERF_SAMPLE_CPU)
2712 perf_output_put(&handle, cpu_entry);
2714 if (sample_type & PERF_SAMPLE_PERIOD)
2715 perf_output_put(&handle, data->period);
2718 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
2720 if (sample_type & PERF_SAMPLE_GROUP) {
2721 struct perf_counter *leader, *sub;
2722 u64 nr = counter->nr_siblings;
2724 perf_output_put(&handle, nr);
2726 leader = counter->group_leader;
2727 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
2729 sub->pmu->read(sub);
2731 group_entry.id = sub->id;
2732 group_entry.counter = atomic64_read(&sub->count);
2734 perf_output_put(&handle, group_entry);
2738 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2740 perf_output_copy(&handle, callchain, callchain_size);
2743 perf_output_put(&handle, nr);
2747 perf_output_end(&handle);
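/*
 * Illustrative record layout: for sample_type == PERF_SAMPLE_IP |
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME the sample consists of the 8 byte
 * perf_event_header, a u64 ip, the { u32 pid; u32 tid; } pair and a u64
 * timestamp - 32 bytes total, in exactly the order the sizes were
 * accounted above.
 */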
2754 struct perf_read_event {
2755 struct perf_event_header header;
2764 perf_counter_read_event(struct perf_counter *counter,
2765 struct task_struct *task)
2767 struct perf_output_handle handle;
2768 struct perf_read_event event = {
2770 .type = PERF_EVENT_READ,
2772 .size = sizeof(event) - sizeof(event.format),
2774 .pid = perf_counter_pid(counter, task),
2775 .tid = perf_counter_tid(counter, task),
2776 .value = atomic64_read(&counter->count),
2780 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
2781 event.header.size += sizeof(u64);
2782 event.format[i++] = counter->total_time_enabled;
2785 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
2786 event.header.size += sizeof(u64);
2787 event.format[i++] = counter->total_time_running;
2790 if (counter->attr.read_format & PERF_FORMAT_ID) {
2793 event.header.size += sizeof(u64);
2794 if (counter->parent)
2795 id = counter->parent->id;
2799 event.format[i++] = id;
2802 ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
2806 perf_output_copy(&handle, &event, event.header.size);
2807 perf_output_end(&handle);
2814 struct perf_fork_event {
2815 struct task_struct *task;
2818 struct perf_event_header header;
2825 static void perf_counter_fork_output(struct perf_counter *counter,
2826 struct perf_fork_event *fork_event)
2828 struct perf_output_handle handle;
2829 int size = fork_event->event.header.size;
2830 struct task_struct *task = fork_event->task;
2831 int ret = perf_output_begin(&handle, counter, size, 0, 0);
2836 fork_event->event.pid = perf_counter_pid(counter, task);
2837 fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);
2839 perf_output_put(&handle, fork_event->event);
2840 perf_output_end(&handle);
2843 static int perf_counter_fork_match(struct perf_counter *counter)
2845 if (counter->attr.comm || counter->attr.mmap)
2851 static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
2852 struct perf_fork_event *fork_event)
2854 struct perf_counter *counter;
2856 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2860 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2861 if (perf_counter_fork_match(counter))
2862 perf_counter_fork_output(counter, fork_event);
2867 static void perf_counter_fork_event(struct perf_fork_event *fork_event)
2869 struct perf_cpu_context *cpuctx;
2870 struct perf_counter_context *ctx;
2872 cpuctx = &get_cpu_var(perf_cpu_context);
2873 perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
2874 put_cpu_var(perf_cpu_context);
2878 * doesn't really matter which of the child contexts the
2879 * events end up in.
2881 ctx = rcu_dereference(current->perf_counter_ctxp);
2883 perf_counter_fork_ctx(ctx, fork_event);
2887 void perf_counter_fork(struct task_struct *task)
2889 struct perf_fork_event fork_event;
2891 if (!atomic_read(&nr_comm_counters) &&
2892 !atomic_read(&nr_mmap_counters))
2895 fork_event = (struct perf_fork_event){
2899 .type = PERF_EVENT_FORK,
2901 .size = sizeof(fork_event.event),
2908 perf_counter_fork_event(&fork_event);
2915 struct perf_comm_event {
2916 struct task_struct *task;
2921 struct perf_event_header header;
2928 static void perf_counter_comm_output(struct perf_counter *counter,
2929 struct perf_comm_event *comm_event)
2931 struct perf_output_handle handle;
2932 int size = comm_event->event.header.size;
2933 int ret = perf_output_begin(&handle, counter, size, 0, 0);
2938 comm_event->event.pid = perf_counter_pid(counter, comm_event->task);
2939 comm_event->event.tid = perf_counter_tid(counter, comm_event->task);
2941 perf_output_put(&handle, comm_event->event);
2942 perf_output_copy(&handle, comm_event->comm,
2943 comm_event->comm_size);
2944 perf_output_end(&handle);
2947 static int perf_counter_comm_match(struct perf_counter *counter)
2949 if (counter->attr.comm)
2955 static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
2956 struct perf_comm_event *comm_event)
2958 struct perf_counter *counter;
2960 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2964 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2965 if (perf_counter_comm_match(counter))
2966 perf_counter_comm_output(counter, comm_event);
2971 static void perf_counter_comm_event(struct perf_comm_event *comm_event)
2973 struct perf_cpu_context *cpuctx;
2974 struct perf_counter_context *ctx;
2976 char comm[TASK_COMM_LEN];
2978 memset(comm, 0, sizeof(comm));
2979 strncpy(comm, comm_event->task->comm, sizeof(comm));
2980 size = ALIGN(strlen(comm)+1, sizeof(u64));
2982 comm_event->comm = comm;
2983 comm_event->comm_size = size;
2985 comm_event->event.header.size = sizeof(comm_event->event) + size;
2987 cpuctx = &get_cpu_var(perf_cpu_context);
2988 perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
2989 put_cpu_var(perf_cpu_context);
2993 * doesn't really matter which of the child contexts the
2994 * events end up in.
2996 ctx = rcu_dereference(current->perf_counter_ctxp);
2998 perf_counter_comm_ctx(ctx, comm_event);
3002 void perf_counter_comm(struct task_struct *task)
3004 struct perf_comm_event comm_event;
3006 if (task->perf_counter_ctxp)
3007 perf_counter_enable_on_exec(task);
3009 if (!atomic_read(&nr_comm_counters))
3012 comm_event = (struct perf_comm_event){
3018 .type = PERF_EVENT_COMM,
3027 perf_counter_comm_event(&comm_event);
3034 struct perf_mmap_event {
3035 struct vm_area_struct *vma;
3037 const char *file_name;
3041 struct perf_event_header header;
3051 static void perf_counter_mmap_output(struct perf_counter *counter,
3052 struct perf_mmap_event *mmap_event)
3054 struct perf_output_handle handle;
3055 int size = mmap_event->event.header.size;
3056 int ret = perf_output_begin(&handle, counter, size, 0, 0);
3061 mmap_event->event.pid = perf_counter_pid(counter, current);
3062 mmap_event->event.tid = perf_counter_tid(counter, current);
3064 perf_output_put(&handle, mmap_event->event);
3065 perf_output_copy(&handle, mmap_event->file_name,
3066 mmap_event->file_size);
3067 perf_output_end(&handle);
3070 static int perf_counter_mmap_match(struct perf_counter *counter,
3071 struct perf_mmap_event *mmap_event)
3073 if (counter->attr.mmap)
3079 static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
3080 struct perf_mmap_event *mmap_event)
3082 struct perf_counter *counter;
3084 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3088 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
3089 if (perf_counter_mmap_match(counter, mmap_event))
3090 perf_counter_mmap_output(counter, mmap_event);
3095 static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
3097 struct perf_cpu_context *cpuctx;
3098 struct perf_counter_context *ctx;
3099 struct vm_area_struct *vma = mmap_event->vma;
3100 struct file *file = vma->vm_file;
3106 memset(tmp, 0, sizeof(tmp));
3110 * d_path works from the end of the buffer backwards, so we
3111 * need to add enough zero bytes after the string to handle
3112 * the 64bit alignment we do later.
3114 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
3116 name = strncpy(tmp, "//enomem", sizeof(tmp));
3119 name = d_path(&file->f_path, buf, PATH_MAX);
3121 name = strncpy(tmp, "//toolong", sizeof(tmp));
3125 if (arch_vma_name(mmap_event->vma)) {
3126 name = strncpy(tmp, arch_vma_name(mmap_event->vma),
3132 name = strncpy(tmp, "[vdso]", sizeof(tmp));
3136 name = strncpy(tmp, "//anon", sizeof(tmp));
3141 size = ALIGN(strlen(name)+1, sizeof(u64));
3143 mmap_event->file_name = name;
3144 mmap_event->file_size = size;
3146 mmap_event->event.header.size = sizeof(mmap_event->event) + size;
3148 cpuctx = &get_cpu_var(perf_cpu_context);
3149 perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
3150 put_cpu_var(perf_cpu_context);
3154 * doesn't really matter which of the child contexts the
3155 * events end up in.
3157 ctx = rcu_dereference(current->perf_counter_ctxp);
3159 perf_counter_mmap_ctx(ctx, mmap_event);
3165 void __perf_counter_mmap(struct vm_area_struct *vma)
3167 struct perf_mmap_event mmap_event;
3169 if (!atomic_read(&nr_mmap_counters))
3172 mmap_event = (struct perf_mmap_event){
3178 .type = PERF_EVENT_MMAP,
3184 .start = vma->vm_start,
3185 .len = vma->vm_end - vma->vm_start,
3186 .pgoff = vma->vm_pgoff,
3190 perf_counter_mmap_event(&mmap_event);
3194 * Log sample_period changes so that analyzing tools can re-normalize the event flow.
3199 struct perf_event_header header;
3205 static void perf_log_period(struct perf_counter *counter, u64 period)
3207 struct perf_output_handle handle;
3208 struct freq_event event;
3211 if (counter->hw.sample_period == period)
3214 if (counter->attr.sample_type & PERF_SAMPLE_PERIOD)
3217 event = (struct freq_event) {
3219 .type = PERF_EVENT_PERIOD,
3221 .size = sizeof(event),
3223 .time = sched_clock(),
3228 ret = perf_output_begin(&handle, counter, sizeof(event), 1, 0);
3232 perf_output_put(&handle, event);
3233 perf_output_end(&handle);
3237 * IRQ throttle logging
3240 static void perf_log_throttle(struct perf_counter *counter, int enable)
3242 struct perf_output_handle handle;
3246 struct perf_event_header header;
3249 } throttle_event = {
3251 .type = PERF_EVENT_THROTTLE + 1,
3253 .size = sizeof(throttle_event),
3255 .time = sched_clock(),
3259 ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
3263 perf_output_put(&handle, throttle_event);
3264 perf_output_end(&handle);
3268 * Generic counter overflow handling, sampling.
3271 int perf_counter_overflow(struct perf_counter *counter, int nmi,
3272 struct perf_sample_data *data)
3274 int events = atomic_read(&counter->event_limit);
3275 int throttle = counter->pmu->unthrottle != NULL;
3276 struct hw_perf_counter *hwc = &counter->hw;
3282 if (hwc->interrupts != MAX_INTERRUPTS) {
3284 if (HZ * hwc->interrupts >
3285 (u64)sysctl_perf_counter_sample_rate) {
3286 hwc->interrupts = MAX_INTERRUPTS;
3287 perf_log_throttle(counter, 0);
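/*
 * Illustrative numbers: with HZ == 1000 (HZ is config dependent) and the
 * default sysctl_perf_counter_sample_rate of 100000, the check above
 * throttles a counter once it has taken more than 100 interrupts since
 * the interrupt count was last reset from the timer tick.
 */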
3292 * Keep re-disabling the counter even though on the previous
3293 * pass we disabled it - just in case we raced with a
3294 * sched-in and the counter got enabled again:
3300 if (counter->attr.freq) {
3301 u64 now = sched_clock();
3302 s64 delta = now - hwc->freq_stamp;
3304 hwc->freq_stamp = now;
3306 if (delta > 0 && delta < TICK_NSEC)
3307 perf_adjust_period(counter, NSEC_PER_SEC / (int)delta);
3311 * XXX event_limit might not quite work as expected on inherited counters
3315 counter->pending_kill = POLL_IN;
3316 if (events && atomic_dec_and_test(&counter->event_limit)) {
3318 counter->pending_kill = POLL_HUP;
3320 counter->pending_disable = 1;
3321 perf_pending_queue(&counter->pending,
3322 perf_pending_counter);
3324 perf_counter_disable(counter);
3327 perf_counter_output(counter, nmi, data);
3332 * Generic software counter infrastructure
3335 static void perf_swcounter_update(struct perf_counter *counter)
3337 struct hw_perf_counter *hwc = &counter->hw;
3342 prev = atomic64_read(&hwc->prev_count);
3343 now = atomic64_read(&hwc->count);
3344 if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
3349 atomic64_add(delta, &counter->count);
3350 atomic64_sub(delta, &hwc->period_left);
3353 static void perf_swcounter_set_period(struct perf_counter *counter)
3355 struct hw_perf_counter *hwc = &counter->hw;
3356 s64 left = atomic64_read(&hwc->period_left);
3357 s64 period = hwc->sample_period;
3359 if (unlikely(left <= -period)) {
3361 atomic64_set(&hwc->period_left, left);
3362 hwc->last_period = period;
3365 if (unlikely(left <= 0)) {
3367 atomic64_add(period, &hwc->period_left);
3368 hwc->last_period = period;
3371 atomic64_set(&hwc->prev_count, -left);
3372 atomic64_set(&hwc->count, -left);
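/*
 * Note that prev_count and count are primed to -left: the counter counts
 * up towards zero, and once atomic64_add_negative() in perf_swcounter_add()
 * returns false a full period has elapsed and an overflow is generated.
 */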
3375 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
3377 enum hrtimer_restart ret = HRTIMER_RESTART;
3378 struct perf_sample_data data;
3379 struct perf_counter *counter;
3382 counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
3383 counter->pmu->read(counter);
3386 data.regs = get_irq_regs();
3388 * In case we exclude kernel IPs or are somehow not in interrupt
3389 * context, provide the next best thing, the user IP.
3391 if ((counter->attr.exclude_kernel || !data.regs) &&
3392 !counter->attr.exclude_user)
3393 data.regs = task_pt_regs(current);
3396 if (perf_counter_overflow(counter, 0, &data))
3397 ret = HRTIMER_NORESTART;
3400 period = max_t(u64, 10000, counter->hw.sample_period);
3401 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
3406 static void perf_swcounter_overflow(struct perf_counter *counter,
3407 int nmi, struct perf_sample_data *data)
3409 data->period = counter->hw.last_period;
3411 perf_swcounter_update(counter);
3412 perf_swcounter_set_period(counter);
3413 if (perf_counter_overflow(counter, nmi, data))
3414 /* soft-disable the counter */
3418 static int perf_swcounter_is_counting(struct perf_counter *counter)
3420 struct perf_counter_context *ctx;
3421 unsigned long flags;
3424 if (counter->state == PERF_COUNTER_STATE_ACTIVE)
3427 if (counter->state != PERF_COUNTER_STATE_INACTIVE)
3431 * If the counter is inactive, it could be just because
3432 * its task is scheduled out, or because it's in a group
3433 * which could not go on the PMU. We want to count in
3434 * the first case but not the second. If the context is
3435 * currently active then an inactive software counter must
3436 * be the second case. If it's not currently active then
3437 * we need to know whether the counter was active when the
3438 * context was last active, which we can determine by
3439 * comparing counter->tstamp_stopped with ctx->time.
3441 * We are within an RCU read-side critical section,
3442 * which protects the existence of *ctx.
3445 spin_lock_irqsave(&ctx->lock, flags);
3447 /* Re-check state now we have the lock */
3448 if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
3449 counter->ctx->is_active ||
3450 counter->tstamp_stopped < ctx->time)
3452 spin_unlock_irqrestore(&ctx->lock, flags);
3456 static int perf_swcounter_match(struct perf_counter *counter,
3457 enum perf_type_id type,
3458 u32 event, struct pt_regs *regs)
3460 if (!perf_swcounter_is_counting(counter))
3463 if (counter->attr.type != type)
3465 if (counter->attr.config != event)
3469 if (counter->attr.exclude_user && user_mode(regs))
3472 if (counter->attr.exclude_kernel && !user_mode(regs))
3479 static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
3480 int nmi, struct perf_sample_data *data)
3482 int neg = atomic64_add_negative(nr, &counter->hw.count);
3484 if (counter->hw.sample_period && !neg && data->regs)
3485 perf_swcounter_overflow(counter, nmi, data);
3488 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
3489 enum perf_type_id type,
3490 u32 event, u64 nr, int nmi,
3491 struct perf_sample_data *data)
3493 struct perf_counter *counter;
3495 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3499 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
3500 if (perf_swcounter_match(counter, type, event, data->regs))
3501 perf_swcounter_add(counter, nr, nmi, data);
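/*
 * Software events can fire from NMI, hardirq, softirq and plain task
 * context.  One recursion flag per level (indices 3..0 below) stops a
 * counter update from recursing into itself while still letting, say,
 * an NMI count in the middle of a softirq update.
 */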
3506 static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
3509 return &cpuctx->recursion[3];
3512 return &cpuctx->recursion[2];
3515 return &cpuctx->recursion[1];
3517 return &cpuctx->recursion[0];
3520 static void do_perf_swcounter_event(enum perf_type_id type, u32 event,
3522 struct perf_sample_data *data)
3524 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
3525 int *recursion = perf_swcounter_recursion_context(cpuctx);
3526 struct perf_counter_context *ctx;
3534 perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
3538 * doesn't really matter which of the child contexts the
3539 * events end up in.
3541 ctx = rcu_dereference(current->perf_counter_ctxp);
3543 perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data);
3550 put_cpu_var(perf_cpu_context);
3553 void __perf_swcounter_event(u32 event, u64 nr, int nmi,
3554 struct pt_regs *regs, u64 addr)
3556 struct perf_sample_data data = {
3561 do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, &data);
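/*
 * A typical call site (illustrative) is an architecture fault handler
 * counting page faults, normally via the perf_swcounter_event() wrapper
 * in <linux/perf_counter.h>, which checks perf_swcounter_enabled[] before
 * ending up here:
 *
 *	perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 */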
3564 static void perf_swcounter_read(struct perf_counter *counter)
3566 perf_swcounter_update(counter);
3569 static int perf_swcounter_enable(struct perf_counter *counter)
3571 perf_swcounter_set_period(counter);
3575 static void perf_swcounter_disable(struct perf_counter *counter)
3577 perf_swcounter_update(counter);
3580 static const struct pmu perf_ops_generic = {
3581 .enable = perf_swcounter_enable,
3582 .disable = perf_swcounter_disable,
3583 .read = perf_swcounter_read,
3587 * Software counter: cpu wall time clock
3590 static void cpu_clock_perf_counter_update(struct perf_counter *counter)
3592 int cpu = raw_smp_processor_id();
3596 now = cpu_clock(cpu);
3597 prev = atomic64_read(&counter->hw.prev_count);
3598 atomic64_set(&counter->hw.prev_count, now);
3599 atomic64_add(now - prev, &counter->count);
3602 static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
3604 struct hw_perf_counter *hwc = &counter->hw;
3605 int cpu = raw_smp_processor_id();
3607 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
3608 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3609 hwc->hrtimer.function = perf_swcounter_hrtimer;
3610 if (hwc->sample_period) {
3611 u64 period = max_t(u64, 10000, hwc->sample_period);
3612 __hrtimer_start_range_ns(&hwc->hrtimer,
3613 ns_to_ktime(period), 0,
3614 HRTIMER_MODE_REL, 0);
3620 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
3622 if (counter->hw.sample_period)
3623 hrtimer_cancel(&counter->hw.hrtimer);
3624 cpu_clock_perf_counter_update(counter);
3627 static void cpu_clock_perf_counter_read(struct perf_counter *counter)
3629 cpu_clock_perf_counter_update(counter);
3632 static const struct pmu perf_ops_cpu_clock = {
3633 .enable = cpu_clock_perf_counter_enable,
3634 .disable = cpu_clock_perf_counter_disable,
3635 .read = cpu_clock_perf_counter_read,
3639 * Software counter: task time clock
3642 static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
3647 prev = atomic64_xchg(&counter->hw.prev_count, now);
3649 atomic64_add(delta, &counter->count);
3652 static int task_clock_perf_counter_enable(struct perf_counter *counter)
3654 struct hw_perf_counter *hwc = &counter->hw;
3657 now = counter->ctx->time;
3659 atomic64_set(&hwc->prev_count, now);
3660 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3661 hwc->hrtimer.function = perf_swcounter_hrtimer;
3662 if (hwc->sample_period) {
3663 u64 period = max_t(u64, 10000, hwc->sample_period);
3664 __hrtimer_start_range_ns(&hwc->hrtimer,
3665 ns_to_ktime(period), 0,
3666 HRTIMER_MODE_REL, 0);
3672 static void task_clock_perf_counter_disable(struct perf_counter *counter)
3674 if (counter->hw.sample_period)
3675 hrtimer_cancel(&counter->hw.hrtimer);
3676 task_clock_perf_counter_update(counter, counter->ctx->time);
3680 static void task_clock_perf_counter_read(struct perf_counter *counter)
3685 update_context_time(counter->ctx);
3686 time = counter->ctx->time;
3688 u64 now = perf_clock();
3689 u64 delta = now - counter->ctx->timestamp;
3690 time = counter->ctx->time + delta;
3693 task_clock_perf_counter_update(counter, time);
3696 static const struct pmu perf_ops_task_clock = {
3697 .enable = task_clock_perf_counter_enable,
3698 .disable = task_clock_perf_counter_disable,
3699 .read = task_clock_perf_counter_read,
3702 #ifdef CONFIG_EVENT_PROFILE
3703 void perf_tpcounter_event(int event_id)
3705 struct perf_sample_data data = {
3706 .regs = get_irq_regs(),
3711 data.regs = task_pt_regs(current);
3713 do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data);
3715 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
3717 extern int ftrace_profile_enable(int);
3718 extern void ftrace_profile_disable(int);
3720 static void tp_perf_counter_destroy(struct perf_counter *counter)
3722 ftrace_profile_disable(counter->attr.config);
3725 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3727 if (ftrace_profile_enable(counter->attr.config))
3730 counter->destroy = tp_perf_counter_destroy;
3732 return &perf_ops_generic;
3735 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3741 atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
3743 static void sw_perf_counter_destroy(struct perf_counter *counter)
3745 u64 event = counter->attr.config;
3747 WARN_ON(counter->parent);
3749 atomic_dec(&perf_swcounter_enabled[event]);
3752 static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
3754 const struct pmu *pmu = NULL;
3755 u64 event = counter->attr.config;
3758 * Software counters (currently) can't in general distinguish
3759 * between user, kernel and hypervisor events.
3760 * However, context switches and cpu migrations are considered
3761 * to be kernel events, and page faults are never hypervisor events.
3765 case PERF_COUNT_SW_CPU_CLOCK:
3766 pmu = &perf_ops_cpu_clock;
3769 case PERF_COUNT_SW_TASK_CLOCK:
3771 * If the user instantiates this as a per-cpu counter,
3772 * use the cpu_clock counter instead.
3774 if (counter->ctx->task)
3775 pmu = &perf_ops_task_clock;
3777 pmu = &perf_ops_cpu_clock;
3780 case PERF_COUNT_SW_PAGE_FAULTS:
3781 case PERF_COUNT_SW_PAGE_FAULTS_MIN:
3782 case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
3783 case PERF_COUNT_SW_CONTEXT_SWITCHES:
3784 case PERF_COUNT_SW_CPU_MIGRATIONS:
3785 if (!counter->parent) {
3786 atomic_inc(&perf_swcounter_enabled[event]);
3787 counter->destroy = sw_perf_counter_destroy;
3789 pmu = &perf_ops_generic;
3797 * Allocate and initialize a counter structure
3799 static struct perf_counter *
3800 perf_counter_alloc(struct perf_counter_attr *attr,
3802 struct perf_counter_context *ctx,
3803 struct perf_counter *group_leader,
3804 struct perf_counter *parent_counter,
3807 const struct pmu *pmu;
3808 struct perf_counter *counter;
3809 struct hw_perf_counter *hwc;
3812 counter = kzalloc(sizeof(*counter), gfpflags);
3814 return ERR_PTR(-ENOMEM);
3817 * Single counters are their own group leaders, with an
3818 * empty sibling list:
3821 group_leader = counter;
3823 mutex_init(&counter->child_mutex);
3824 INIT_LIST_HEAD(&counter->child_list);
3826 INIT_LIST_HEAD(&counter->list_entry);
3827 INIT_LIST_HEAD(&counter->event_entry);
3828 INIT_LIST_HEAD(&counter->sibling_list);
3829 init_waitqueue_head(&counter->waitq);
3831 mutex_init(&counter->mmap_mutex);
3834 counter->attr = *attr;
3835 counter->group_leader = group_leader;
3836 counter->pmu = NULL;
3838 counter->oncpu = -1;
3840 counter->parent = parent_counter;
3842 counter->ns = get_pid_ns(current->nsproxy->pid_ns);
3843 counter->id = atomic64_inc_return(&perf_counter_id);
3845 counter->state = PERF_COUNTER_STATE_INACTIVE;
3848 counter->state = PERF_COUNTER_STATE_OFF;
3853 hwc->sample_period = attr->sample_period;
3854 if (attr->freq && attr->sample_freq)
3855 hwc->sample_period = 1;
3857 atomic64_set(&hwc->period_left, hwc->sample_period);
3860 * we currently do not support PERF_SAMPLE_GROUP on inherited counters
3862 if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
3865 switch (attr->type) {
3867 case PERF_TYPE_HARDWARE:
3868 case PERF_TYPE_HW_CACHE:
3869 pmu = hw_perf_counter_init(counter);
3872 case PERF_TYPE_SOFTWARE:
3873 pmu = sw_perf_counter_init(counter);
3876 case PERF_TYPE_TRACEPOINT:
3877 pmu = tp_perf_counter_init(counter);
3887 else if (IS_ERR(pmu))
3892 put_pid_ns(counter->ns);
3894 return ERR_PTR(err);
3899 if (!counter->parent) {
3900 atomic_inc(&nr_counters);
3901 if (counter->attr.mmap)
3902 atomic_inc(&nr_mmap_counters);
3903 if (counter->attr.comm)
3904 atomic_inc(&nr_comm_counters);
3910 static int perf_copy_attr(struct perf_counter_attr __user *uattr,
3911 struct perf_counter_attr *attr)
3916 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
3920 * zero the full structure, so that a short copy leaves the remainder zeroed.
3922 memset(attr, 0, sizeof(*attr));
3924 ret = get_user(size, &uattr->size);
3928 if (size > PAGE_SIZE) /* silly large */
3931 if (!size) /* abi compat */
3932 size = PERF_ATTR_SIZE_VER0;
3934 if (size < PERF_ATTR_SIZE_VER0)
3938 * If we're handed a bigger struct than we know of,
3939 * ensure all the unknown bits are 0.
3941 if (size > sizeof(*attr)) {
3943 unsigned long __user *addr;
3944 unsigned long __user *end;
3946 addr = PTR_ALIGN((void __user *)uattr + sizeof(*attr),
3947 sizeof(unsigned long));
3948 end = PTR_ALIGN((void __user *)uattr + size,
3949 sizeof(unsigned long));
3951 for (; addr < end; addr += sizeof(unsigned long)) {
3952 ret = get_user(val, addr);
3960 ret = copy_from_user(attr, uattr, size);
3965 * If the type exists, the corresponding creation will verify the attr->config.
3968 if (attr->type >= PERF_TYPE_MAX)
3971 if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
3974 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
3977 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
3984 put_user(sizeof(*attr), &uattr->size);
3990 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
3992 * @attr_uptr: event type attributes for monitoring/sampling
3995 * @group_fd: group leader counter fd
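/*
 * Illustrative userspace usage - there is no libc wrapper, so a raw
 * syscall(2) with the x86 __NR_perf_counter_open number is assumed and
 * error handling is omitted:
 *
 *	struct perf_counter_attr attr;
 *	u64 count;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size   = sizeof(attr);
 *	attr.type   = PERF_TYPE_HARDWARE;
 *	attr.config = PERF_COUNT_HW_CPU_CYCLES;
 *
 *	fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
 *						pid 0 = current task, cpu -1 = any
 *	read(fd, &count, sizeof(count));
 */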
3997 SYSCALL_DEFINE5(perf_counter_open,
3998 struct perf_counter_attr __user *, attr_uptr,
3999 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
4001 struct perf_counter *counter, *group_leader;
4002 struct perf_counter_attr attr;
4003 struct perf_counter_context *ctx;
4004 struct file *counter_file = NULL;
4005 struct file *group_file = NULL;
4006 int fput_needed = 0;
4007 int fput_needed2 = 0;
4010 /* for future expandability... */
4014 ret = perf_copy_attr(attr_uptr, &attr);
4018 if (!attr.exclude_kernel) {
4019 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
4024 if (attr.sample_freq > sysctl_perf_counter_sample_rate)
4029 * Get the target context (task or percpu):
4031 ctx = find_get_context(pid, cpu);
4033 return PTR_ERR(ctx);
4036 * Look up the group leader (we will attach this counter to it):
4038 group_leader = NULL;
4039 if (group_fd != -1) {
4041 group_file = fget_light(group_fd, &fput_needed);
4043 goto err_put_context;
4044 if (group_file->f_op != &perf_fops)
4045 goto err_put_context;
4047 group_leader = group_file->private_data;
4049 * Do not allow a recursive hierarchy (this new sibling
4050 * becoming part of another group-sibling):
4052 if (group_leader->group_leader != group_leader)
4053 goto err_put_context;
4055 * Do not allow attaching to a group in a different
4056 * task or CPU context:
4058 if (group_leader->ctx != ctx)
4059 goto err_put_context;
4061 * Only a group leader can be exclusive or pinned
4063 if (attr.exclusive || attr.pinned)
4064 goto err_put_context;
4067 counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
4069 ret = PTR_ERR(counter);
4070 if (IS_ERR(counter))
4071 goto err_put_context;
4073 ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
4075 goto err_free_put_context;
4077 counter_file = fget_light(ret, &fput_needed2);
4079 goto err_free_put_context;
4081 counter->filp = counter_file;
4082 WARN_ON_ONCE(ctx->parent_ctx);
4083 mutex_lock(&ctx->mutex);
4084 perf_install_in_context(ctx, counter, cpu);
4086 mutex_unlock(&ctx->mutex);
4088 counter->owner = current;
4089 get_task_struct(current);
4090 mutex_lock(&current->perf_counter_mutex);
4091 list_add_tail(&counter->owner_entry, &current->perf_counter_list);
4092 mutex_unlock(&current->perf_counter_mutex);
4094 fput_light(counter_file, fput_needed2);
4097 fput_light(group_file, fput_needed);
4101 err_free_put_context:
4111 * inherit a counter from parent task to child task:
4113 static struct perf_counter *
4114 inherit_counter(struct perf_counter *parent_counter,
4115 struct task_struct *parent,
4116 struct perf_counter_context *parent_ctx,
4117 struct task_struct *child,
4118 struct perf_counter *group_leader,
4119 struct perf_counter_context *child_ctx)
4121 struct perf_counter *child_counter;
4124 * Instead of creating recursive hierarchies of counters,
4125 * we link inherited counters back to the original parent,
4126 * which is guaranteed to have a filp that we use as the reference count:
4129 if (parent_counter->parent)
4130 parent_counter = parent_counter->parent;
4132 child_counter = perf_counter_alloc(&parent_counter->attr,
4133 parent_counter->cpu, child_ctx,
4134 group_leader, parent_counter,
4136 if (IS_ERR(child_counter))
4137 return child_counter;
4141 * Make the child state follow the state of the parent counter,
4142 * not its attr.disabled bit. We hold the parent's mutex,
4143 * so we won't race with perf_counter_{en, dis}able_family.
4145 if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
4146 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
4148 child_counter->state = PERF_COUNTER_STATE_OFF;
4150 if (parent_counter->attr.freq)
4151 child_counter->hw.sample_period = parent_counter->hw.sample_period;
4154 * Link it up in the child's context:
4156 add_counter_to_ctx(child_counter, child_ctx);
4159 * Get a reference to the parent filp - we will fput it
4160 * when the child counter exits. This is safe to do because
4161 * we are in the parent and we know that the filp still
4162 * exists and has a nonzero count:
4164 atomic_long_inc(&parent_counter->filp->f_count);
4167 * Link this into the parent counter's child list
4169 WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
4170 mutex_lock(&parent_counter->child_mutex);
4171 list_add_tail(&child_counter->child_list, &parent_counter->child_list);
4172 mutex_unlock(&parent_counter->child_mutex);
4174 return child_counter;
4177 static int inherit_group(struct perf_counter *parent_counter,
4178 struct task_struct *parent,
4179 struct perf_counter_context *parent_ctx,
4180 struct task_struct *child,
4181 struct perf_counter_context *child_ctx)
4183 struct perf_counter *leader;
4184 struct perf_counter *sub;
4185 struct perf_counter *child_ctr;
4187 leader = inherit_counter(parent_counter, parent, parent_ctx,
4188 child, NULL, child_ctx);
4190 return PTR_ERR(leader);
4191 list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
4192 child_ctr = inherit_counter(sub, parent, parent_ctx,
4193 child, leader, child_ctx);
4194 if (IS_ERR(child_ctr))
4195 return PTR_ERR(child_ctr);
4200 static void sync_child_counter(struct perf_counter *child_counter,
4201 struct task_struct *child)
4203 struct perf_counter *parent_counter = child_counter->parent;
4206 if (child_counter->attr.inherit_stat)
4207 perf_counter_read_event(child_counter, child);
4209 child_val = atomic64_read(&child_counter->count);
4212 * Add back the child's count to the parent's count:
4214 atomic64_add(child_val, &parent_counter->count);
4215 atomic64_add(child_counter->total_time_enabled,
4216 &parent_counter->child_total_time_enabled);
4217 atomic64_add(child_counter->total_time_running,
4218 &parent_counter->child_total_time_running);
4221 * Remove this counter from the parent's list
4223 WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
4224 mutex_lock(&parent_counter->child_mutex);
4225 list_del_init(&child_counter->child_list);
4226 mutex_unlock(&parent_counter->child_mutex);
4229 * Release the parent counter, if this was the last reference to it.
4232 fput(parent_counter->filp);
4236 __perf_counter_exit_task(struct perf_counter *child_counter,
4237 struct perf_counter_context *child_ctx,
4238 struct task_struct *child)
4240 struct perf_counter *parent_counter;
4242 update_counter_times(child_counter);
4243 perf_counter_remove_from_context(child_counter);
4245 parent_counter = child_counter->parent;
4247 * It can happen that the parent exits first, and has counters
4248 * that are still around due to the child reference. These
4249 * counters need to be zapped - but otherwise linger.
4251 if (parent_counter) {
4252 sync_child_counter(child_counter, child);
4253 free_counter(child_counter);
4258 * When a child task exits, feed back counter values to parent counters.
4260 void perf_counter_exit_task(struct task_struct *child)
4262 struct perf_counter *child_counter, *tmp;
4263 struct perf_counter_context *child_ctx;
4264 unsigned long flags;
4266 if (likely(!child->perf_counter_ctxp))
4269 local_irq_save(flags);
4271 * We can't reschedule here because interrupts are disabled,
4272 * and either the child is current or it is a task that can't be
4273 * scheduled, so we are now safe from rescheduling changing our context.
4276 child_ctx = child->perf_counter_ctxp;
4277 __perf_counter_task_sched_out(child_ctx);
4280 * Take the context lock here so that if find_get_context is
4281 * reading child->perf_counter_ctxp, we wait until it has
4282 * incremented the context's refcount before we do put_ctx below.
4284 spin_lock(&child_ctx->lock);
4285 child->perf_counter_ctxp = NULL;
4287 * If this context is a clone, unclone it so it can't get
4288 * swapped to another process while we're removing all
4289 * the counters from it.
4291 unclone_ctx(child_ctx);
4292 spin_unlock(&child_ctx->lock);
4293 local_irq_restore(flags);
4296 * We can recurse on the same lock type through:
4298 * __perf_counter_exit_task()
4299 * sync_child_counter()
4300 * fput(parent_counter->filp)
4302 * mutex_lock(&ctx->mutex)
4304 * But since it's the parent context it won't be the same instance.
4306 mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
4309 list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
4311 __perf_counter_exit_task(child_counter, child_ctx, child);
4314 * If the last counter was a group counter, it will have appended all
4315 * its siblings to the list, but we obtained 'tmp' before that, so it
4316 * will still point to the list head terminating the iteration.
4318 if (!list_empty(&child_ctx->counter_list))
4321 mutex_unlock(&child_ctx->mutex);
4327 * free an unexposed, unused context as created by inheritance by
4328 * init_task below, used by fork() in case of failure.
4330 void perf_counter_free_task(struct task_struct *task)
4332 struct perf_counter_context *ctx = task->perf_counter_ctxp;
4333 struct perf_counter *counter, *tmp;
4338 mutex_lock(&ctx->mutex);
4340 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
4341 struct perf_counter *parent = counter->parent;
4343 if (WARN_ON_ONCE(!parent))
4346 mutex_lock(&parent->child_mutex);
4347 list_del_init(&counter->child_list);
4348 mutex_unlock(&parent->child_mutex);
4352 list_del_counter(counter, ctx);
4353 free_counter(counter);
4356 if (!list_empty(&ctx->counter_list))
4359 mutex_unlock(&ctx->mutex);
4365 * Initialize the perf_counter context in task_struct
4367 int perf_counter_init_task(struct task_struct *child)
4369 struct perf_counter_context *child_ctx, *parent_ctx;
4370 struct perf_counter_context *cloned_ctx;
4371 struct perf_counter *counter;
4372 struct task_struct *parent = current;
4373 int inherited_all = 1;
4376 child->perf_counter_ctxp = NULL;
4378 mutex_init(&child->perf_counter_mutex);
4379 INIT_LIST_HEAD(&child->perf_counter_list);
4381 if (likely(!parent->perf_counter_ctxp))
4385 * This is executed from the parent task context, so inherit
4386 * counters that have been marked for cloning.
4387 * First allocate and initialize a context for the child.
4390 child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
4394 __perf_counter_init_context(child_ctx, child);
4395 child->perf_counter_ctxp = child_ctx;
4396 get_task_struct(child);
4399 * If the parent's context is a clone, pin it so it won't get swapped under us.
4402 parent_ctx = perf_pin_task_context(parent);
4405 * No need to check if parent_ctx != NULL here; since we saw
4406 * it non-NULL earlier, the only reason for it to become NULL
4407 * is if we exit, and since we're currently in the middle of
4408 * a fork we can't be exiting at the same time.
4412 * Lock the parent list. No need to lock the child - not PID
4413 * hashed yet and not running, so nobody can access it.
4415 mutex_lock(&parent_ctx->mutex);
4418 * We don't have to disable NMIs - we are only looking at
4419 * the list, not manipulating it:
4421 list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
4422 if (counter != counter->group_leader)
4425 if (!counter->attr.inherit) {
4430 ret = inherit_group(counter, parent, parent_ctx,
4438 if (inherited_all) {
4440 * Mark the child context as a clone of the parent
4441 * context, or of whatever the parent is a clone of.
4442 * Note that if the parent is a clone, it could get
4443 * uncloned at any point, but that doesn't matter
4444 * because the list of counters and the generation
4445 * count can't have changed since we took the mutex.
4447 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
4449 child_ctx->parent_ctx = cloned_ctx;
4450 child_ctx->parent_gen = parent_ctx->parent_gen;
4452 child_ctx->parent_ctx = parent_ctx;
4453 child_ctx->parent_gen = parent_ctx->generation;
4455 get_ctx(child_ctx->parent_ctx);
4458 mutex_unlock(&parent_ctx->mutex);
4460 perf_unpin_context(parent_ctx);
4465 static void __cpuinit perf_counter_init_cpu(int cpu)
4467 struct perf_cpu_context *cpuctx;
4469 cpuctx = &per_cpu(perf_cpu_context, cpu);
4470 __perf_counter_init_context(&cpuctx->ctx, NULL);
4472 spin_lock(&perf_resource_lock);
4473 cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
4474 spin_unlock(&perf_resource_lock);
4476 hw_perf_counter_setup(cpu);
4479 #ifdef CONFIG_HOTPLUG_CPU
4480 static void __perf_counter_exit_cpu(void *info)
4482 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
4483 struct perf_counter_context *ctx = &cpuctx->ctx;
4484 struct perf_counter *counter, *tmp;
4486 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
4487 __perf_counter_remove_from_context(counter);
4489 static void perf_counter_exit_cpu(int cpu)
4491 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4492 struct perf_counter_context *ctx = &cpuctx->ctx;
4494 mutex_lock(&ctx->mutex);
4495 smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
4496 mutex_unlock(&ctx->mutex);
4499 static inline void perf_counter_exit_cpu(int cpu) { }
4502 static int __cpuinit
4503 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
4505 unsigned int cpu = (long)hcpu;
4509 case CPU_UP_PREPARE:
4510 case CPU_UP_PREPARE_FROZEN:
4511 perf_counter_init_cpu(cpu);
4514 case CPU_DOWN_PREPARE:
4515 case CPU_DOWN_PREPARE_FROZEN:
4516 perf_counter_exit_cpu(cpu);
4527 * This has to have a higher priority than migration_notifier in sched.c.
4529 static struct notifier_block __cpuinitdata perf_cpu_nb = {
4530 .notifier_call = perf_cpu_notify,
4534 void __init perf_counter_init(void)
4536 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
4537 (void *)(long)smp_processor_id());
4538 register_cpu_notifier(&perf_cpu_nb);
4541 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
4543 return sprintf(buf, "%d\n", perf_reserved_percpu);
4547 perf_set_reserve_percpu(struct sysdev_class *class,
4551 struct perf_cpu_context *cpuctx;
4555 err = strict_strtoul(buf, 10, &val);
4558 if (val > perf_max_counters)
4561 spin_lock(&perf_resource_lock);
4562 perf_reserved_percpu = val;
4563 for_each_online_cpu(cpu) {
4564 cpuctx = &per_cpu(perf_cpu_context, cpu);
4565 spin_lock_irq(&cpuctx->ctx.lock);
4566 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
4567 perf_max_counters - perf_reserved_percpu);
4568 cpuctx->max_pertask = mpt;
4569 spin_unlock_irq(&cpuctx->ctx.lock);
4571 spin_unlock(&perf_resource_lock);
4576 static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
4578 return sprintf(buf, "%d\n", perf_overcommit);
4582 perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
4587 err = strict_strtoul(buf, 10, &val);
4593 spin_lock(&perf_resource_lock);
4594 perf_overcommit = val;
4595 spin_unlock(&perf_resource_lock);
4600 static SYSDEV_CLASS_ATTR(
4603 perf_show_reserve_percpu,
4604 perf_set_reserve_percpu
4607 static SYSDEV_CLASS_ATTR(
4610 perf_show_overcommit,
4614 static struct attribute *perfclass_attrs[] = {
4615 &attr_reserve_percpu.attr,
4616 &attr_overcommit.attr,
4620 static struct attribute_group perfclass_attr_group = {
4621 .attrs = perfclass_attrs,
4622 .name = "perf_counters",
4625 static int __init perf_counter_sysfs_init(void)
4627 return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
4628 &perfclass_attr_group);
4630 device_initcall(perf_counter_sysfs_init);
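/*
 * With the attribute group registered on the cpu sysdev class, the two
 * knobs above should show up as (paths illustrative):
 *
 *	/sys/devices/system/cpu/perf_counters/reserve_percpu
 *	/sys/devices/system/cpu/perf_counters/overcommit
 *
 * Writing e.g. 2 to reserve_percpu sets two counters per CPU aside for
 * system-wide use by lowering each CPU's per-task maximum accordingly.
 */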