perf_counter: new output ABI - part 1
[linux-2.6] / kernel / perf_counter.c
1 /*
2  * Performance counter core code
3  *
4  *  Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
6  *
7  *
8  *  For licensing details see kernel-base/COPYING
9  */
10
11 #include <linux/fs.h>
12 #include <linux/cpu.h>
13 #include <linux/smp.h>
14 #include <linux/file.h>
15 #include <linux/poll.h>
16 #include <linux/sysfs.h>
17 #include <linux/ptrace.h>
18 #include <linux/percpu.h>
19 #include <linux/uaccess.h>
20 #include <linux/syscalls.h>
21 #include <linux/anon_inodes.h>
22 #include <linux/kernel_stat.h>
23 #include <linux/perf_counter.h>
24 #include <linux/mm.h>
25 #include <linux/vmstat.h>
26 #include <linux/rculist.h>
27 #include <linux/hardirq.h>
28
29 #include <asm/irq_regs.h>
30
31 /*
32  * Each CPU has a list of per CPU counters:
33  */
34 DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
35
36 int perf_max_counters __read_mostly = 1;
37 static int perf_reserved_percpu __read_mostly;
38 static int perf_overcommit __read_mostly = 1;
39
40 /*
41  * Mutex for (sysadmin-configurable) counter reservations:
42  */
43 static DEFINE_MUTEX(perf_resource_mutex);
44
45 /*
46  * Architecture provided APIs - weak aliases:
47  */
48 extern __weak const struct hw_perf_counter_ops *
49 hw_perf_counter_init(struct perf_counter *counter)
50 {
51         return NULL;
52 }
53
54 u64 __weak hw_perf_save_disable(void)           { return 0; }
55 void __weak hw_perf_restore(u64 ctrl)           { barrier(); }
56 void __weak hw_perf_counter_setup(int cpu)      { barrier(); }
57 int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
58                struct perf_cpu_context *cpuctx,
59                struct perf_counter_context *ctx, int cpu)
60 {
61         return 0;
62 }
63
64 void __weak perf_counter_print_debug(void)      { }
65
66 static void
67 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
68 {
69         struct perf_counter *group_leader = counter->group_leader;
70
71         /*
72          * Depending on whether it is a standalone or sibling counter,
73          * add it straight to the context's counter list, or to the group
74          * leader's sibling list:
75          */
76         if (counter->group_leader == counter)
77                 list_add_tail(&counter->list_entry, &ctx->counter_list);
78         else
79                 list_add_tail(&counter->list_entry, &group_leader->sibling_list);
80
81         list_add_rcu(&counter->event_entry, &ctx->event_list);
82 }
83
84 static void
85 list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
86 {
87         struct perf_counter *sibling, *tmp;
88
89         list_del_init(&counter->list_entry);
90         list_del_rcu(&counter->event_entry);
91
92         /*
93          * If this was a group counter with sibling counters then
94          * upgrade the siblings to singleton counters by adding them
95          * to the context list directly:
96          */
97         list_for_each_entry_safe(sibling, tmp,
98                                  &counter->sibling_list, list_entry) {
99
100                 list_move_tail(&sibling->list_entry, &ctx->counter_list);
101                 sibling->group_leader = sibling;
102         }
103 }
104
105 static void
106 counter_sched_out(struct perf_counter *counter,
107                   struct perf_cpu_context *cpuctx,
108                   struct perf_counter_context *ctx)
109 {
110         if (counter->state != PERF_COUNTER_STATE_ACTIVE)
111                 return;
112
113         counter->state = PERF_COUNTER_STATE_INACTIVE;
114         counter->hw_ops->disable(counter);
115         counter->oncpu = -1;
116
117         if (!is_software_counter(counter))
118                 cpuctx->active_oncpu--;
119         ctx->nr_active--;
120         if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
121                 cpuctx->exclusive = 0;
122 }
123
124 static void
125 group_sched_out(struct perf_counter *group_counter,
126                 struct perf_cpu_context *cpuctx,
127                 struct perf_counter_context *ctx)
128 {
129         struct perf_counter *counter;
130
131         if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
132                 return;
133
134         counter_sched_out(group_counter, cpuctx, ctx);
135
136         /*
137          * Schedule out siblings (if any):
138          */
139         list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
140                 counter_sched_out(counter, cpuctx, ctx);
141
142         if (group_counter->hw_event.exclusive)
143                 cpuctx->exclusive = 0;
144 }
145
146 /*
147  * Cross CPU call to remove a performance counter
148  *
149  * We disable the counter on the hardware level first. After that we
150  * remove it from the context list.
151  */
152 static void __perf_counter_remove_from_context(void *info)
153 {
154         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
155         struct perf_counter *counter = info;
156         struct perf_counter_context *ctx = counter->ctx;
157         unsigned long flags;
158         u64 perf_flags;
159
160         /*
161          * If this is a task context, we need to check whether it is
162          * the current task context of this cpu. If not it has been
163          * scheduled out before the smp call arrived.
164          */
165         if (ctx->task && cpuctx->task_ctx != ctx)
166                 return;
167
168         curr_rq_lock_irq_save(&flags);
169         spin_lock(&ctx->lock);
170
171         counter_sched_out(counter, cpuctx, ctx);
172
173         counter->task = NULL;
174         ctx->nr_counters--;
175
176         /*
177          * Protect the list operation against NMI by disabling the
178          * counters on a global level. NOP for non NMI based counters.
179          */
180         perf_flags = hw_perf_save_disable();
181         list_del_counter(counter, ctx);
182         hw_perf_restore(perf_flags);
183
184         if (!ctx->task) {
185                 /*
186                  * Allow more per task counters with respect to the
187                  * reservation:
188                  */
189                 cpuctx->max_pertask =
190                         min(perf_max_counters - ctx->nr_counters,
191                             perf_max_counters - perf_reserved_percpu);
192         }
193
194         spin_unlock(&ctx->lock);
195         curr_rq_unlock_irq_restore(&flags);
196 }
197
198
199 /*
200  * Remove the counter from a task's (or a CPU's) list of counters.
201  *
202  * Must be called with counter->mutex and ctx->mutex held.
203  *
204  * CPU counters are removed with a smp call. For task counters we only
205  * call when the task is on a CPU.
206  */
207 static void perf_counter_remove_from_context(struct perf_counter *counter)
208 {
209         struct perf_counter_context *ctx = counter->ctx;
210         struct task_struct *task = ctx->task;
211
212         if (!task) {
213                 /*
214                  * Per cpu counters are removed via an smp call and
215                  * the removal is always successful.
216                  */
217                 smp_call_function_single(counter->cpu,
218                                          __perf_counter_remove_from_context,
219                                          counter, 1);
220                 return;
221         }
222
223 retry:
224         task_oncpu_function_call(task, __perf_counter_remove_from_context,
225                                  counter);
226
227         spin_lock_irq(&ctx->lock);
228         /*
229          * If the context is active we need to retry the smp call.
230          */
231         if (ctx->nr_active && !list_empty(&counter->list_entry)) {
232                 spin_unlock_irq(&ctx->lock);
233                 goto retry;
234         }
235
236         /*
237          * The lock prevents this context from being scheduled in, so we
238          * can remove the counter safely if the call above did not
239          * succeed.
240          */
241         if (!list_empty(&counter->list_entry)) {
242                 ctx->nr_counters--;
243                 list_del_counter(counter, ctx);
244                 counter->task = NULL;
245         }
246         spin_unlock_irq(&ctx->lock);
247 }
248
249 /*
250  * Cross CPU call to disable a performance counter
251  */
252 static void __perf_counter_disable(void *info)
253 {
254         struct perf_counter *counter = info;
255         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
256         struct perf_counter_context *ctx = counter->ctx;
257         unsigned long flags;
258
259         /*
260          * If this is a per-task counter, we need to check whether this
261          * counter's task is the current task on this cpu.
262          */
263         if (ctx->task && cpuctx->task_ctx != ctx)
264                 return;
265
266         curr_rq_lock_irq_save(&flags);
267         spin_lock(&ctx->lock);
268
269         /*
270          * If the counter is on, turn it off.
271          * If it is in error state, leave it in error state.
272          */
273         if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
274                 if (counter == counter->group_leader)
275                         group_sched_out(counter, cpuctx, ctx);
276                 else
277                         counter_sched_out(counter, cpuctx, ctx);
278                 counter->state = PERF_COUNTER_STATE_OFF;
279         }
280
281         spin_unlock(&ctx->lock);
282         curr_rq_unlock_irq_restore(&flags);
283 }
284
285 /*
286  * Disable a counter.
287  */
288 static void perf_counter_disable(struct perf_counter *counter)
289 {
290         struct perf_counter_context *ctx = counter->ctx;
291         struct task_struct *task = ctx->task;
292
293         if (!task) {
294                 /*
295                  * Disable the counter on the cpu that it's on
296                  */
297                 smp_call_function_single(counter->cpu, __perf_counter_disable,
298                                          counter, 1);
299                 return;
300         }
301
302  retry:
303         task_oncpu_function_call(task, __perf_counter_disable, counter);
304
305         spin_lock_irq(&ctx->lock);
306         /*
307          * If the counter is still active, we need to retry the cross-call.
308          */
309         if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
310                 spin_unlock_irq(&ctx->lock);
311                 goto retry;
312         }
313
314         /*
315          * Since we have the lock this context can't be scheduled
316          * in, so we can change the state safely.
317          */
318         if (counter->state == PERF_COUNTER_STATE_INACTIVE)
319                 counter->state = PERF_COUNTER_STATE_OFF;
320
321         spin_unlock_irq(&ctx->lock);
322 }
323
324 /*
325  * Disable a counter and all its children.
326  */
327 static void perf_counter_disable_family(struct perf_counter *counter)
328 {
329         struct perf_counter *child;
330
331         perf_counter_disable(counter);
332
333         /*
334          * Lock the mutex to protect the list of children
335          */
336         mutex_lock(&counter->mutex);
337         list_for_each_entry(child, &counter->child_list, child_list)
338                 perf_counter_disable(child);
339         mutex_unlock(&counter->mutex);
340 }
341
342 static int
343 counter_sched_in(struct perf_counter *counter,
344                  struct perf_cpu_context *cpuctx,
345                  struct perf_counter_context *ctx,
346                  int cpu)
347 {
348         if (counter->state <= PERF_COUNTER_STATE_OFF)
349                 return 0;
350
351         counter->state = PERF_COUNTER_STATE_ACTIVE;
352         counter->oncpu = cpu;   /* TODO: put 'cpu' into cpuctx->cpu */
353         /*
354          * The new state must be visible before we turn it on in the hardware:
355          */
356         smp_wmb();
357
358         if (counter->hw_ops->enable(counter)) {
359                 counter->state = PERF_COUNTER_STATE_INACTIVE;
360                 counter->oncpu = -1;
361                 return -EAGAIN;
362         }
363
364         if (!is_software_counter(counter))
365                 cpuctx->active_oncpu++;
366         ctx->nr_active++;
367
368         if (counter->hw_event.exclusive)
369                 cpuctx->exclusive = 1;
370
371         return 0;
372 }
373
374 /*
375  * Return 1 for a group consisting entirely of software counters,
376  * 0 if the group contains any hardware counters.
377  */
378 static int is_software_only_group(struct perf_counter *leader)
379 {
380         struct perf_counter *counter;
381
382         if (!is_software_counter(leader))
383                 return 0;
384         list_for_each_entry(counter, &leader->sibling_list, list_entry)
385                 if (!is_software_counter(counter))
386                         return 0;
387         return 1;
388 }
389
390 /*
391  * Work out whether we can put this counter group on the CPU now.
392  */
393 static int group_can_go_on(struct perf_counter *counter,
394                            struct perf_cpu_context *cpuctx,
395                            int can_add_hw)
396 {
397         /*
398          * Groups consisting entirely of software counters can always go on.
399          */
400         if (is_software_only_group(counter))
401                 return 1;
402         /*
403          * If an exclusive group is already on, no other hardware
404          * counters can go on.
405          */
406         if (cpuctx->exclusive)
407                 return 0;
408         /*
409          * If this group is exclusive and there are already
410          * counters on the CPU, it can't go on.
411          */
412         if (counter->hw_event.exclusive && cpuctx->active_oncpu)
413                 return 0;
414         /*
415          * Otherwise, try to add it if all previous groups were able
416          * to go on.
417          */
418         return can_add_hw;
419 }
420
421 /*
422  * Cross CPU call to install and enable a performance counter
423  */
424 static void __perf_install_in_context(void *info)
425 {
426         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
427         struct perf_counter *counter = info;
428         struct perf_counter_context *ctx = counter->ctx;
429         struct perf_counter *leader = counter->group_leader;
430         int cpu = smp_processor_id();
431         unsigned long flags;
432         u64 perf_flags;
433         int err;
434
435         /*
436          * If this is a task context, we need to check whether it is
437          * the current task context of this cpu. If not it has been
438          * scheduled out before the smp call arrived.
439          */
440         if (ctx->task && cpuctx->task_ctx != ctx)
441                 return;
442
443         curr_rq_lock_irq_save(&flags);
444         spin_lock(&ctx->lock);
445
446         /*
447          * Protect the list operation against NMI by disabling the
448          * counters on a global level. NOP for non NMI based counters.
449          */
450         perf_flags = hw_perf_save_disable();
451
452         list_add_counter(counter, ctx);
453         ctx->nr_counters++;
454         counter->prev_state = PERF_COUNTER_STATE_OFF;
455
456         /*
457          * Don't put the counter on if it is disabled or if
458          * it is in a group and the group isn't on.
459          */
460         if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
461             (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
462                 goto unlock;
463
464         /*
465          * An exclusive counter can't go on if there are already active
466          * hardware counters, and no hardware counter can go on if there
467          * is already an exclusive counter on.
468          */
469         if (!group_can_go_on(counter, cpuctx, 1))
470                 err = -EEXIST;
471         else
472                 err = counter_sched_in(counter, cpuctx, ctx, cpu);
473
474         if (err) {
475                 /*
476                  * This counter couldn't go on.  If it is in a group
477                  * then we have to pull the whole group off.
478                  * If the counter group is pinned then put it in error state.
479                  */
480                 if (leader != counter)
481                         group_sched_out(leader, cpuctx, ctx);
482                 if (leader->hw_event.pinned)
483                         leader->state = PERF_COUNTER_STATE_ERROR;
484         }
485
486         if (!err && !ctx->task && cpuctx->max_pertask)
487                 cpuctx->max_pertask--;
488
489  unlock:
490         hw_perf_restore(perf_flags);
491
492         spin_unlock(&ctx->lock);
493         curr_rq_unlock_irq_restore(&flags);
494 }
495
496 /*
497  * Attach a performance counter to a context
498  *
499  * First we add the counter to the list with the hardware enable bit
500  * in counter->hw_config cleared.
501  *
502  * If the counter is attached to a task which is on a CPU we use a smp
503  * call to enable it in the task context. The task might have been
504  * scheduled away, but we check this in the smp call again.
505  *
506  * Must be called with ctx->mutex held.
507  */
508 static void
509 perf_install_in_context(struct perf_counter_context *ctx,
510                         struct perf_counter *counter,
511                         int cpu)
512 {
513         struct task_struct *task = ctx->task;
514
515         if (!task) {
516                 /*
517                  * Per cpu counters are installed via an smp call and
518                  * the install is always successful.
519                  */
520                 smp_call_function_single(cpu, __perf_install_in_context,
521                                          counter, 1);
522                 return;
523         }
524
525         counter->task = task;
526 retry:
527         task_oncpu_function_call(task, __perf_install_in_context,
528                                  counter);
529
530         spin_lock_irq(&ctx->lock);
531         /*
532          * If the context is active we need to retry the smp call.
533          */
534         if (ctx->is_active && list_empty(&counter->list_entry)) {
535                 spin_unlock_irq(&ctx->lock);
536                 goto retry;
537         }
538
539         /*
540          * The lock prevents this context from being scheduled in, so we
541          * can add the counter safely if the call above did not
542          * succeed.
543          */
544         if (list_empty(&counter->list_entry)) {
545                 list_add_counter(counter, ctx);
546                 ctx->nr_counters++;
547         }
548         spin_unlock_irq(&ctx->lock);
549 }
550
551 /*
552  * Cross CPU call to enable a performance counter
553  */
554 static void __perf_counter_enable(void *info)
555 {
556         struct perf_counter *counter = info;
557         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
558         struct perf_counter_context *ctx = counter->ctx;
559         struct perf_counter *leader = counter->group_leader;
560         unsigned long flags;
561         int err;
562
563         /*
564          * If this is a per-task counter, we need to check whether this
565          * counter's task is the current task on this cpu.
566          */
567         if (ctx->task && cpuctx->task_ctx != ctx)
568                 return;
569
570         curr_rq_lock_irq_save(&flags);
571         spin_lock(&ctx->lock);
572
573         counter->prev_state = counter->state;
574         if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
575                 goto unlock;
576         counter->state = PERF_COUNTER_STATE_INACTIVE;
577
578         /*
579          * If the counter is in a group and isn't the group leader,
580          * then don't put it on unless the group is on.
581          */
582         if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
583                 goto unlock;
584
585         if (!group_can_go_on(counter, cpuctx, 1))
586                 err = -EEXIST;
587         else
588                 err = counter_sched_in(counter, cpuctx, ctx,
589                                        smp_processor_id());
590
591         if (err) {
592                 /*
593                  * If this counter can't go on and it's part of a
594                  * group, then the whole group has to come off.
595                  */
596                 if (leader != counter)
597                         group_sched_out(leader, cpuctx, ctx);
598                 if (leader->hw_event.pinned)
599                         leader->state = PERF_COUNTER_STATE_ERROR;
600         }
601
602  unlock:
603         spin_unlock(&ctx->lock);
604         curr_rq_unlock_irq_restore(&flags);
605 }
606
607 /*
608  * Enable a counter.
609  */
610 static void perf_counter_enable(struct perf_counter *counter)
611 {
612         struct perf_counter_context *ctx = counter->ctx;
613         struct task_struct *task = ctx->task;
614
615         if (!task) {
616                 /*
617                  * Enable the counter on the cpu that it's on
618                  */
619                 smp_call_function_single(counter->cpu, __perf_counter_enable,
620                                          counter, 1);
621                 return;
622         }
623
624         spin_lock_irq(&ctx->lock);
625         if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
626                 goto out;
627
628         /*
629          * If the counter is in error state, clear that first.
630          * That way, if we see the counter in error state below, we
631          * know that it has gone back into error state, as distinct
632          * from the task having been scheduled away before the
633          * cross-call arrived.
634          */
635         if (counter->state == PERF_COUNTER_STATE_ERROR)
636                 counter->state = PERF_COUNTER_STATE_OFF;
637
638  retry:
639         spin_unlock_irq(&ctx->lock);
640         task_oncpu_function_call(task, __perf_counter_enable, counter);
641
642         spin_lock_irq(&ctx->lock);
643
644         /*
645          * If the context is active and the counter is still off,
646          * we need to retry the cross-call.
647          */
648         if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
649                 goto retry;
650
651         /*
652          * Since we have the lock this context can't be scheduled
653          * in, so we can change the state safely.
654          */
655         if (counter->state == PERF_COUNTER_STATE_OFF)
656                 counter->state = PERF_COUNTER_STATE_INACTIVE;
657  out:
658         spin_unlock_irq(&ctx->lock);
659 }
660
661 /*
662  * Enable a counter and all its children.
663  */
664 static void perf_counter_enable_family(struct perf_counter *counter)
665 {
666         struct perf_counter *child;
667
668         perf_counter_enable(counter);
669
670         /*
671          * Lock the mutex to protect the list of children
672          */
673         mutex_lock(&counter->mutex);
674         list_for_each_entry(child, &counter->child_list, child_list)
675                 perf_counter_enable(child);
676         mutex_unlock(&counter->mutex);
677 }
678
679 void __perf_counter_sched_out(struct perf_counter_context *ctx,
680                               struct perf_cpu_context *cpuctx)
681 {
682         struct perf_counter *counter;
683         u64 flags;
684
685         spin_lock(&ctx->lock);
686         ctx->is_active = 0;
687         if (likely(!ctx->nr_counters))
688                 goto out;
689
690         flags = hw_perf_save_disable();
691         if (ctx->nr_active) {
692                 list_for_each_entry(counter, &ctx->counter_list, list_entry)
693                         group_sched_out(counter, cpuctx, ctx);
694         }
695         hw_perf_restore(flags);
696  out:
697         spin_unlock(&ctx->lock);
698 }
699
700 /*
701  * Called from scheduler to remove the counters of the current task,
702  * with interrupts disabled.
703  *
704  * We stop each counter and update the counter value in counter->count.
705  *
706  * This does not protect us against NMI, but disable()
707  * sets the disabled bit in the control field of counter _before_
708  * accessing the counter control register. If an NMI hits, then it will
709  * not restart the counter.
710  */
711 void perf_counter_task_sched_out(struct task_struct *task, int cpu)
712 {
713         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
714         struct perf_counter_context *ctx = &task->perf_counter_ctx;
715         struct pt_regs *regs;
716
717         if (likely(!cpuctx->task_ctx))
718                 return;
719
720         regs = task_pt_regs(task);
721         perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs);
722         __perf_counter_sched_out(ctx, cpuctx);
723
724         cpuctx->task_ctx = NULL;
725 }
726
727 static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
728 {
729         __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
730 }
731
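/*
 * Schedule a counter group onto the PMU.  The architecture code gets the
 * first shot via hw_perf_group_sched_in(); if it declines (returns 0) we
 * fall back to scheduling the leader and then each sibling individually,
 * rolling the whole group back if any member fails to go on.
 */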
732 static int
733 group_sched_in(struct perf_counter *group_counter,
734                struct perf_cpu_context *cpuctx,
735                struct perf_counter_context *ctx,
736                int cpu)
737 {
738         struct perf_counter *counter, *partial_group;
739         int ret;
740
741         if (group_counter->state == PERF_COUNTER_STATE_OFF)
742                 return 0;
743
744         ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
745         if (ret)
746                 return ret < 0 ? ret : 0;
747
748         group_counter->prev_state = group_counter->state;
749         if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
750                 return -EAGAIN;
751
752         /*
753          * Schedule in siblings as one group (if any):
754          */
755         list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
756                 counter->prev_state = counter->state;
757                 if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
758                         partial_group = counter;
759                         goto group_error;
760                 }
761         }
762
763         return 0;
764
765 group_error:
766         /*
767          * Groups can be scheduled in as one unit only, so undo any
768          * partial group before returning:
769          */
770         list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
771                 if (counter == partial_group)
772                         break;
773                 counter_sched_out(counter, cpuctx, ctx);
774         }
775         counter_sched_out(group_counter, cpuctx, ctx);
776
777         return -EAGAIN;
778 }
779
780 static void
781 __perf_counter_sched_in(struct perf_counter_context *ctx,
782                         struct perf_cpu_context *cpuctx, int cpu)
783 {
784         struct perf_counter *counter;
785         u64 flags;
786         int can_add_hw = 1;
787
788         spin_lock(&ctx->lock);
789         ctx->is_active = 1;
790         if (likely(!ctx->nr_counters))
791                 goto out;
792
793         flags = hw_perf_save_disable();
794
795         /*
796          * First go through the list and put on any pinned groups
797          * in order to give them the best chance of going on.
798          */
799         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
800                 if (counter->state <= PERF_COUNTER_STATE_OFF ||
801                     !counter->hw_event.pinned)
802                         continue;
803                 if (counter->cpu != -1 && counter->cpu != cpu)
804                         continue;
805
806                 if (group_can_go_on(counter, cpuctx, 1))
807                         group_sched_in(counter, cpuctx, ctx, cpu);
808
809                 /*
810                  * If this pinned group hasn't been scheduled,
811                  * put it in error state.
812                  */
813                 if (counter->state == PERF_COUNTER_STATE_INACTIVE)
814                         counter->state = PERF_COUNTER_STATE_ERROR;
815         }
816
817         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
818                 /*
819                  * Ignore counters in OFF or ERROR state, and
820                  * ignore pinned counters since we did them already.
821                  */
822                 if (counter->state <= PERF_COUNTER_STATE_OFF ||
823                     counter->hw_event.pinned)
824                         continue;
825
826                 /*
827                  * Listen to the 'cpu' scheduling filter constraint
828                  * of counters:
829                  */
830                 if (counter->cpu != -1 && counter->cpu != cpu)
831                         continue;
832
833                 if (group_can_go_on(counter, cpuctx, can_add_hw)) {
834                         if (group_sched_in(counter, cpuctx, ctx, cpu))
835                                 can_add_hw = 0;
836                 }
837         }
838         hw_perf_restore(flags);
839  out:
840         spin_unlock(&ctx->lock);
841 }
842
843 /*
844  * Called from scheduler to add the counters of the current task
845  * with interrupts disabled.
846  *
847  * We restore the counter value and then enable it.
848  *
849  * This does not protect us against NMI, but enable()
850  * sets the enabled bit in the control field of counter _before_
851  * accessing the counter control register. If an NMI hits, then it will
852  * keep the counter running.
853  */
854 void perf_counter_task_sched_in(struct task_struct *task, int cpu)
855 {
856         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
857         struct perf_counter_context *ctx = &task->perf_counter_ctx;
858
859         __perf_counter_sched_in(ctx, cpuctx, cpu);
860         cpuctx->task_ctx = ctx;
861 }
862
863 static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
864 {
865         struct perf_counter_context *ctx = &cpuctx->ctx;
866
867         __perf_counter_sched_in(ctx, cpuctx, cpu);
868 }
869
870 int perf_counter_task_disable(void)
871 {
872         struct task_struct *curr = current;
873         struct perf_counter_context *ctx = &curr->perf_counter_ctx;
874         struct perf_counter *counter;
875         unsigned long flags;
876         u64 perf_flags;
877         int cpu;
878
879         if (likely(!ctx->nr_counters))
880                 return 0;
881
882         curr_rq_lock_irq_save(&flags);
883         cpu = smp_processor_id();
884
885         /* force the update of the task clock: */
886         __task_delta_exec(curr, 1);
887
888         perf_counter_task_sched_out(curr, cpu);
889
890         spin_lock(&ctx->lock);
891
892         /*
893          * Disable all the counters:
894          */
895         perf_flags = hw_perf_save_disable();
896
897         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
898                 if (counter->state != PERF_COUNTER_STATE_ERROR)
899                         counter->state = PERF_COUNTER_STATE_OFF;
900         }
901
902         hw_perf_restore(perf_flags);
903
904         spin_unlock(&ctx->lock);
905
906         curr_rq_unlock_irq_restore(&flags);
907
908         return 0;
909 }
910
911 int perf_counter_task_enable(void)
912 {
913         struct task_struct *curr = current;
914         struct perf_counter_context *ctx = &curr->perf_counter_ctx;
915         struct perf_counter *counter;
916         unsigned long flags;
917         u64 perf_flags;
918         int cpu;
919
920         if (likely(!ctx->nr_counters))
921                 return 0;
922
923         curr_rq_lock_irq_save(&flags);
924         cpu = smp_processor_id();
925
926         /* force the update of the task clock: */
927         __task_delta_exec(curr, 1);
928
929         perf_counter_task_sched_out(curr, cpu);
930
931         spin_lock(&ctx->lock);
932
933         /*
934          * Enable all the counters:
935          */
936         perf_flags = hw_perf_save_disable();
937
938         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
939                 if (counter->state > PERF_COUNTER_STATE_OFF)
940                         continue;
941                 counter->state = PERF_COUNTER_STATE_INACTIVE;
942                 counter->hw_event.disabled = 0;
943         }
944         hw_perf_restore(perf_flags);
945
946         spin_unlock(&ctx->lock);
947
948         perf_counter_task_sched_in(curr, cpu);
949
950         curr_rq_unlock_irq_restore(&flags);
951
952         return 0;
953 }
954
955 /*
956  * Round-robin a context's counters:
957  */
958 static void rotate_ctx(struct perf_counter_context *ctx)
959 {
960         struct perf_counter *counter;
961         u64 perf_flags;
962
963         if (!ctx->nr_counters)
964                 return;
965
966         spin_lock(&ctx->lock);
967         /*
968          * Rotate the first entry last (works just fine for group counters too):
969          */
970         perf_flags = hw_perf_save_disable();
971         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
972                 list_move_tail(&counter->list_entry, &ctx->counter_list);
973                 break;
974         }
975         hw_perf_restore(perf_flags);
976
977         spin_unlock(&ctx->lock);
978 }
979
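/*
 * Called from the scheduler tick: deschedule the current task's counters,
 * rotate the context's counter list so that groups which could not all fit
 * on the PMU get their turn round-robin, then schedule the counters back
 * in.  Per-cpu context rotation is compiled out here (rotate_percpu == 0).
 */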
980 void perf_counter_task_tick(struct task_struct *curr, int cpu)
981 {
982         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
983         struct perf_counter_context *ctx = &curr->perf_counter_ctx;
984         const int rotate_percpu = 0;
985
986         if (rotate_percpu)
987                 perf_counter_cpu_sched_out(cpuctx);
988         perf_counter_task_sched_out(curr, cpu);
989
990         if (rotate_percpu)
991                 rotate_ctx(&cpuctx->ctx);
992         rotate_ctx(ctx);
993
994         if (rotate_percpu)
995                 perf_counter_cpu_sched_in(cpuctx, cpu);
996         perf_counter_task_sched_in(curr, cpu);
997 }
998
999 /*
1000  * Cross CPU call to read the hardware counter
1001  */
1002 static void __read(void *info)
1003 {
1004         struct perf_counter *counter = info;
1005         unsigned long flags;
1006
1007         curr_rq_lock_irq_save(&flags);
1008         counter->hw_ops->read(counter);
1009         curr_rq_unlock_irq_restore(&flags);
1010 }
1011
1012 static u64 perf_counter_read(struct perf_counter *counter)
1013 {
1014         /*
1015          * If counter is enabled and currently active on a CPU, update the
1016          * value in the counter structure:
1017          */
1018         if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
1019                 smp_call_function_single(counter->oncpu,
1020                                          __read, counter, 1);
1021         }
1022
1023         return atomic64_read(&counter->count);
1024 }
1025
1026 static void put_context(struct perf_counter_context *ctx)
1027 {
1028         if (ctx->task)
1029                 put_task_struct(ctx->task);
1030 }
1031
1032 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1033 {
1034         struct perf_cpu_context *cpuctx;
1035         struct perf_counter_context *ctx;
1036         struct task_struct *task;
1037
1038         /*
1039          * If cpu is not a wildcard then this is a percpu counter:
1040          */
1041         if (cpu != -1) {
1042                 /* Must be root to operate on a CPU counter: */
1043                 if (!capable(CAP_SYS_ADMIN))
1044                         return ERR_PTR(-EACCES);
1045
1046                 if (cpu < 0 || cpu > num_possible_cpus())
1047                         return ERR_PTR(-EINVAL);
1048
1049                 /*
1050                  * We could be clever and allow attaching a counter to an
1051                  * offline CPU and activate it when the CPU comes up, but
1052                  * that's for later.
1053                  */
1054                 if (!cpu_isset(cpu, cpu_online_map))
1055                         return ERR_PTR(-ENODEV);
1056
1057                 cpuctx = &per_cpu(perf_cpu_context, cpu);
1058                 ctx = &cpuctx->ctx;
1059
1060                 return ctx;
1061         }
1062
1063         rcu_read_lock();
1064         if (!pid)
1065                 task = current;
1066         else
1067                 task = find_task_by_vpid(pid);
1068         if (task)
1069                 get_task_struct(task);
1070         rcu_read_unlock();
1071
1072         if (!task)
1073                 return ERR_PTR(-ESRCH);
1074
1075         ctx = &task->perf_counter_ctx;
1076         ctx->task = task;
1077
1078         /* Reuse ptrace permission checks for now. */
1079         if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
1080                 put_context(ctx);
1081                 return ERR_PTR(-EACCES);
1082         }
1083
1084         return ctx;
1085 }
1086
1087 static void free_counter_rcu(struct rcu_head *head)
1088 {
1089         struct perf_counter *counter;
1090
1091         counter = container_of(head, struct perf_counter, rcu_head);
1092         kfree(counter);
1093 }
1094
1095 static void free_counter(struct perf_counter *counter)
1096 {
1097         if (counter->destroy)
1098                 counter->destroy(counter);
1099
1100         call_rcu(&counter->rcu_head, free_counter_rcu);
1101 }
1102
1103 /*
1104  * Called when the last reference to the file is gone.
1105  */
1106 static int perf_release(struct inode *inode, struct file *file)
1107 {
1108         struct perf_counter *counter = file->private_data;
1109         struct perf_counter_context *ctx = counter->ctx;
1110
1111         file->private_data = NULL;
1112
1113         mutex_lock(&ctx->mutex);
1114         mutex_lock(&counter->mutex);
1115
1116         perf_counter_remove_from_context(counter);
1117
1118         mutex_unlock(&counter->mutex);
1119         mutex_unlock(&ctx->mutex);
1120
1121         free_counter(counter);
1122         put_context(ctx);
1123
1124         return 0;
1125 }
1126
1127 /*
1128  * Read the performance counter - simple non-blocking version for now
1129  */
1130 static ssize_t
1131 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1132 {
1133         u64 cntval;
1134
1135         if (count < sizeof(cntval))
1136                 return -EINVAL;
1137
1138         /*
1139          * Return end-of-file for a read on a counter that is in
1140          * error state (i.e. because it was pinned but it couldn't be
1141          * scheduled on to the CPU at some point).
1142          */
1143         if (counter->state == PERF_COUNTER_STATE_ERROR)
1144                 return 0;
1145
1146         mutex_lock(&counter->mutex);
1147         cntval = perf_counter_read(counter);
1148         mutex_unlock(&counter->mutex);
1149
1150         return put_user(cntval, (u64 __user *) buf) ? -EFAULT : sizeof(cntval);
1151 }
1152
1153 static ssize_t
1154 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1155 {
1156         struct perf_counter *counter = file->private_data;
1157
1158         return perf_read_hw(counter, buf, count);
1159 }
1160
1161 static unsigned int perf_poll(struct file *file, poll_table *wait)
1162 {
1163         struct perf_counter *counter = file->private_data;
1164         unsigned int events = POLLIN;
1165
1166         poll_wait(file, &counter->waitq, wait);
1167
1168         return events;
1169 }
1170
1171 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1172 {
1173         struct perf_counter *counter = file->private_data;
1174         int err = 0;
1175
1176         switch (cmd) {
1177         case PERF_COUNTER_IOC_ENABLE:
1178                 perf_counter_enable_family(counter);
1179                 break;
1180         case PERF_COUNTER_IOC_DISABLE:
1181                 perf_counter_disable_family(counter);
1182                 break;
1183         default:
1184                 err = -ENOTTY;
1185         }
1186         return err;
1187 }
1188
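/*
 * Update the counter's mmap()ed control page.  The ->lock field is bumped
 * before and after the update, with write barriers in between, seqlock
 * style, so user-space can detect that it raced with an update and retry
 * its reads.  Preemption is disabled to keep that window short.
 */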
1189 static void __perf_counter_update_userpage(struct perf_counter *counter,
1190                                            struct perf_mmap_data *data)
1191 {
1192         struct perf_counter_mmap_page *userpg = data->user_page;
1193
1194         /*
1195          * Disable preemption so as to not let the corresponding user-space
1196          * spin too long if we get preempted.
1197          */
1198         preempt_disable();
1199         ++userpg->lock;
1200         smp_wmb();
1201         userpg->index = counter->hw.idx;
1202         userpg->offset = atomic64_read(&counter->count);
1203         if (counter->state == PERF_COUNTER_STATE_ACTIVE)
1204                 userpg->offset -= atomic64_read(&counter->hw.prev_count);
1205
1206         userpg->data_head = atomic_read(&data->head);
1207         smp_wmb();
1208         ++userpg->lock;
1209         preempt_enable();
1210 }
1211
1212 void perf_counter_update_userpage(struct perf_counter *counter)
1213 {
1214         struct perf_mmap_data *data;
1215
1216         rcu_read_lock();
1217         data = rcu_dereference(counter->data);
1218         if (data)
1219                 __perf_counter_update_userpage(counter, data);
1220         rcu_read_unlock();
1221 }
1222
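/*
 * Fault handler for the counter mmap(): page 0 of the mapping is the
 * control/user page, pages 1..nr_pages map the data buffer pages.
 */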
1223 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1224 {
1225         struct perf_counter *counter = vma->vm_file->private_data;
1226         struct perf_mmap_data *data;
1227         int ret = VM_FAULT_SIGBUS;
1228
1229         rcu_read_lock();
1230         data = rcu_dereference(counter->data);
1231         if (!data)
1232                 goto unlock;
1233
1234         if (vmf->pgoff == 0) {
1235                 vmf->page = virt_to_page(data->user_page);
1236         } else {
1237                 int nr = vmf->pgoff - 1;
1238
1239                         if ((unsigned)nr >= data->nr_pages)
1240                         goto unlock;
1241
1242                 vmf->page = virt_to_page(data->data_pages[nr]);
1243         }
1244         get_page(vmf->page);
1245         ret = 0;
1246 unlock:
1247         rcu_read_unlock();
1248
1249         return ret;
1250 }
1251
1252 static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
1253 {
1254         struct perf_mmap_data *data;
1255         unsigned long size;
1256         int i;
1257
1258         WARN_ON(atomic_read(&counter->mmap_count));
1259
1260         size = sizeof(struct perf_mmap_data);
1261         size += nr_pages * sizeof(void *);
1262
1263         data = kzalloc(size, GFP_KERNEL);
1264         if (!data)
1265                 goto fail;
1266
1267         data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
1268         if (!data->user_page)
1269                 goto fail_user_page;
1270
1271         for (i = 0; i < nr_pages; i++) {
1272                 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
1273                 if (!data->data_pages[i])
1274                         goto fail_data_pages;
1275         }
1276
1277         data->nr_pages = nr_pages;
1278
1279         rcu_assign_pointer(counter->data, data);
1280
1281         return 0;
1282
1283 fail_data_pages:
1284         for (i--; i >= 0; i--)
1285                 free_page((unsigned long)data->data_pages[i]);
1286
1287         free_page((unsigned long)data->user_page);
1288
1289 fail_user_page:
1290         kfree(data);
1291
1292 fail:
1293         return -ENOMEM;
1294 }
1295
1296 static void __perf_mmap_data_free(struct rcu_head *rcu_head)
1297 {
1298         struct perf_mmap_data *data = container_of(rcu_head,
1299                         struct perf_mmap_data, rcu_head);
1300         int i;
1301
1302         free_page((unsigned long)data->user_page);
1303         for (i = 0; i < data->nr_pages; i++)
1304                 free_page((unsigned long)data->data_pages[i]);
1305         kfree(data);
1306 }
1307
1308 static void perf_mmap_data_free(struct perf_counter *counter)
1309 {
1310         struct perf_mmap_data *data = counter->data;
1311
1312         WARN_ON(atomic_read(&counter->mmap_count));
1313
1314         rcu_assign_pointer(counter->data, NULL);
1315         call_rcu(&data->rcu_head, __perf_mmap_data_free);
1316 }
1317
1318 static void perf_mmap_open(struct vm_area_struct *vma)
1319 {
1320         struct perf_counter *counter = vma->vm_file->private_data;
1321
1322         atomic_inc(&counter->mmap_count);
1323 }
1324
1325 static void perf_mmap_close(struct vm_area_struct *vma)
1326 {
1327         struct perf_counter *counter = vma->vm_file->private_data;
1328
1329         if (atomic_dec_and_mutex_lock(&counter->mmap_count,
1330                                       &counter->mmap_mutex)) {
1331                 perf_mmap_data_free(counter);
1332                 mutex_unlock(&counter->mmap_mutex);
1333         }
1334 }
1335
1336 static struct vm_operations_struct perf_mmap_vmops = {
1337         .open = perf_mmap_open,
1338         .close = perf_mmap_close,
1339         .fault = perf_mmap_fault,
1340 };
1341
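/*
 * mmap() the counter: the mapping must be shared and read-only, and its
 * size must be one control page plus a power-of-two number of data pages.
 * The amount of pinned memory is checked against RLIMIT_MEMLOCK unless
 * the caller has CAP_IPC_LOCK.
 */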
1342 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
1343 {
1344         struct perf_counter *counter = file->private_data;
1345         unsigned long vma_size;
1346         unsigned long nr_pages;
1347         unsigned long locked, lock_limit;
1348         int ret = 0;
1349
1350         if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
1351                 return -EINVAL;
1352
1353         vma_size = vma->vm_end - vma->vm_start;
1354         nr_pages = (vma_size / PAGE_SIZE) - 1;
1355
1356         if (nr_pages == 0 || !is_power_of_2(nr_pages))
1357                 return -EINVAL;
1358
1359         if (vma_size != PAGE_SIZE * (1 + nr_pages))
1360                 return -EINVAL;
1361
1362         if (vma->vm_pgoff != 0)
1363                 return -EINVAL;
1364
1365         locked = vma_size >>  PAGE_SHIFT;
1366         locked += vma->vm_mm->locked_vm;
1367
1368         lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
1369         lock_limit >>= PAGE_SHIFT;
1370
1371         if ((locked > lock_limit) && !capable(CAP_IPC_LOCK))
1372                 return -EPERM;
1373
1374         mutex_lock(&counter->mmap_mutex);
1375         if (atomic_inc_not_zero(&counter->mmap_count))
1376                 goto out;
1377
1378         WARN_ON(counter->data);
1379         ret = perf_mmap_data_alloc(counter, nr_pages);
1380         if (!ret)
1381                 atomic_set(&counter->mmap_count, 1);
1382 out:
1383         mutex_unlock(&counter->mmap_mutex);
1384
1385         vma->vm_flags &= ~VM_MAYWRITE;
1386         vma->vm_flags |= VM_RESERVED;
1387         vma->vm_ops = &perf_mmap_vmops;
1388
1389         return ret;
1390 }
1391
1392 static const struct file_operations perf_fops = {
1393         .release                = perf_release,
1394         .read                   = perf_read,
1395         .poll                   = perf_poll,
1396         .unlocked_ioctl         = perf_ioctl,
1397         .compat_ioctl           = perf_ioctl,
1398         .mmap                   = perf_mmap,
1399 };
1400
1401 /*
1402  * Output
1403  */
1404
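/*
 * Write a record into the counter's mmap()ed data buffer.  Space is
 * claimed by advancing data->head with a cmpxchg loop, the payload is
 * copied, wrapping from one data page to the next, and a poll() wakeup
 * is generated whenever a page boundary is crossed; from NMI context the
 * wakeup is deferred via set_perf_counter_pending().
 */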
1405 static int perf_output_write(struct perf_counter *counter, int nmi,
1406                              void *buf, ssize_t size)
1407 {
1408         struct perf_mmap_data *data;
1409         unsigned int offset, head, nr;
1410         unsigned int len;
1411         int ret, wakeup;
1412
1413         rcu_read_lock();
1414         ret = -ENOSPC;
1415         data = rcu_dereference(counter->data);
1416         if (!data)
1417                 goto out;
1418
1419         if (!data->nr_pages)
1420                 goto out;
1421
1422         ret = -EINVAL;
1423         if (size > PAGE_SIZE)
1424                 goto out;
1425
1426         do {
1427                 offset = head = atomic_read(&data->head);
1428                 head += sizeof(u64);
1429         } while (atomic_cmpxchg(&data->head, offset, head) != offset);
1430
1431         wakeup = (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT);
1432
1433         nr = (offset >> PAGE_SHIFT) & (data->nr_pages - 1);
1434         offset &= PAGE_SIZE - 1;
1435
1436         len = min_t(unsigned int, PAGE_SIZE - offset, size);
1437         memcpy(data->data_pages[nr] + offset, buf, len);
1438         size -= len;
1439
1440         if (size) {
1441                 nr = (nr + 1) & (data->nr_pages - 1);
1442                 memcpy(data->data_pages[nr], buf + len, size);
1443         }
1444
1445         /*
1446          * generate a poll() wakeup for every page boundary crossed
1447          */
1448         if (wakeup) {
1449                 __perf_counter_update_userpage(counter, data);
1450                 if (nmi) {
1451                         counter->wakeup_pending = 1;
1452                         set_perf_counter_pending();
1453                 } else
1454                         wake_up(&counter->waitq);
1455         }
1456         ret = 0;
1457 out:
1458         rcu_read_unlock();
1459
1460         return ret;
1461 }
1462
1463 static void perf_output_simple(struct perf_counter *counter,
1464                                int nmi, struct pt_regs *regs)
1465 {
1466         u64 entry;
1467
1468         entry = instruction_pointer(regs);
1469
1470         perf_output_write(counter, nmi, &entry, sizeof(entry));
1471 }
1472
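/*
 * PERF_RECORD_GROUP output: an { event config, counter value } pair is
 * written for each counter on the group leader's sibling list.
 */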
1473 struct group_entry {
1474         u64 event;
1475         u64 counter;
1476 };
1477
1478 static void perf_output_group(struct perf_counter *counter, int nmi)
1479 {
1480         struct perf_counter *leader, *sub;
1481
1482         leader = counter->group_leader;
1483         list_for_each_entry(sub, &leader->sibling_list, list_entry) {
1484                 struct group_entry entry;
1485
1486                 if (sub != counter)
1487                         sub->hw_ops->read(sub);
1488
1489                 entry.event = sub->hw_event.config;
1490                 entry.counter = atomic64_read(&sub->count);
1491
1492                 perf_output_write(counter, nmi, &entry, sizeof(entry));
1493         }
1494 }
1495
1496 void perf_counter_output(struct perf_counter *counter,
1497                          int nmi, struct pt_regs *regs)
1498 {
1499         switch (counter->hw_event.record_type) {
1500         case PERF_RECORD_SIMPLE:
1501                 return;
1502
1503         case PERF_RECORD_IRQ:
1504                 perf_output_simple(counter, nmi, regs);
1505                 break;
1506
1507         case PERF_RECORD_GROUP:
1508                 perf_output_group(counter, nmi);
1509                 break;
1510         }
1511 }
1512
1513 /*
1514  * Generic software counter infrastructure
1515  */
1516
1517 static void perf_swcounter_update(struct perf_counter *counter)
1518 {
1519         struct hw_perf_counter *hwc = &counter->hw;
1520         u64 prev, now;
1521         s64 delta;
1522
1523 again:
1524         prev = atomic64_read(&hwc->prev_count);
1525         now = atomic64_read(&hwc->count);
1526         if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
1527                 goto again;
1528
1529         delta = now - prev;
1530
1531         atomic64_add(delta, &counter->count);
1532         atomic64_sub(delta, &hwc->period_left);
1533 }
1534
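/*
 * (Re)arm a sampling software counter: hw.count and hw.prev_count are set
 * to -left so that the count turning non-negative, as detected by
 * atomic64_add_negative() in perf_swcounter_add(), signals that the
 * sampling period has elapsed and output should be generated.
 */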
1535 static void perf_swcounter_set_period(struct perf_counter *counter)
1536 {
1537         struct hw_perf_counter *hwc = &counter->hw;
1538         s64 left = atomic64_read(&hwc->period_left);
1539         s64 period = hwc->irq_period;
1540
1541         if (unlikely(left <= -period)) {
1542                 left = period;
1543                 atomic64_set(&hwc->period_left, left);
1544         }
1545
1546         if (unlikely(left <= 0)) {
1547                 left += period;
1548                 atomic64_add(period, &hwc->period_left);
1549         }
1550
1551         atomic64_set(&hwc->prev_count, -left);
1552         atomic64_set(&hwc->count, -left);
1553 }
1554
1555 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
1556 {
1557         struct perf_counter *counter;
1558         struct pt_regs *regs;
1559
1560         counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
1561         counter->hw_ops->read(counter);
1562
1563         regs = get_irq_regs();
1564         /*
1565          * In case we exclude kernel IPs or are somehow not in interrupt
1566          * context, provide the next best thing, the user IP.
1567          */
1568         if ((counter->hw_event.exclude_kernel || !regs) &&
1569                         !counter->hw_event.exclude_user)
1570                 regs = task_pt_regs(current);
1571
1572         if (regs)
1573                 perf_counter_output(counter, 0, regs);
1574
1575         hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period));
1576
1577         return HRTIMER_RESTART;
1578 }
1579
1580 static void perf_swcounter_overflow(struct perf_counter *counter,
1581                                     int nmi, struct pt_regs *regs)
1582 {
1583         perf_swcounter_update(counter);
1584         perf_swcounter_set_period(counter);
1585         perf_counter_output(counter, nmi, regs);
1586 }
1587
1588 static int perf_swcounter_match(struct perf_counter *counter,
1589                                 enum perf_event_types type,
1590                                 u32 event, struct pt_regs *regs)
1591 {
1592         if (counter->state != PERF_COUNTER_STATE_ACTIVE)
1593                 return 0;
1594
1595         if (perf_event_raw(&counter->hw_event))
1596                 return 0;
1597
1598         if (perf_event_type(&counter->hw_event) != type)
1599                 return 0;
1600
1601         if (perf_event_id(&counter->hw_event) != event)
1602                 return 0;
1603
1604         if (counter->hw_event.exclude_user && user_mode(regs))
1605                 return 0;
1606
1607         if (counter->hw_event.exclude_kernel && !user_mode(regs))
1608                 return 0;
1609
1610         return 1;
1611 }
1612
1613 static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
1614                                int nmi, struct pt_regs *regs)
1615 {
1616         int neg = atomic64_add_negative(nr, &counter->hw.count);
1617         if (counter->hw.irq_period && !neg)
1618                 perf_swcounter_overflow(counter, nmi, regs);
1619 }
1620
1621 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
1622                                      enum perf_event_types type, u32 event,
1623                                      u64 nr, int nmi, struct pt_regs *regs)
1624 {
1625         struct perf_counter *counter;
1626
1627         if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
1628                 return;
1629
1630         rcu_read_lock();
1631         list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
1632                 if (perf_swcounter_match(counter, type, event, regs))
1633                         perf_swcounter_add(counter, nr, nmi, regs);
1634         }
1635         rcu_read_unlock();
1636 }
1637
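/*
 * One recursion counter per execution context (task, softirq, hardirq,
 * NMI): a software counter event raised while another event is already
 * being processed in the same context is dropped instead of recursing.
 */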
1638 static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
1639 {
1640         if (in_nmi())
1641                 return &cpuctx->recursion[3];
1642
1643         if (in_irq())
1644                 return &cpuctx->recursion[2];
1645
1646         if (in_softirq())
1647                 return &cpuctx->recursion[1];
1648
1649         return &cpuctx->recursion[0];
1650 }
1651
1652 static void __perf_swcounter_event(enum perf_event_types type, u32 event,
1653                                    u64 nr, int nmi, struct pt_regs *regs)
1654 {
1655         struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
1656         int *recursion = perf_swcounter_recursion_context(cpuctx);
1657
1658         if (*recursion)
1659                 goto out;
1660
1661         (*recursion)++;
1662         barrier();
1663
1664         perf_swcounter_ctx_event(&cpuctx->ctx, type, event, nr, nmi, regs);
1665         if (cpuctx->task_ctx) {
1666                 perf_swcounter_ctx_event(cpuctx->task_ctx, type, event,
1667                                 nr, nmi, regs);
1668         }
1669
1670         barrier();
1671         (*recursion)--;
1672
1673 out:
1674         put_cpu_var(perf_cpu_context);
1675 }
1676
1677 void perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs)
1678 {
1679         __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs);
1680 }
1681
1682 static void perf_swcounter_read(struct perf_counter *counter)
1683 {
1684         perf_swcounter_update(counter);
1685 }
1686
1687 static int perf_swcounter_enable(struct perf_counter *counter)
1688 {
1689         perf_swcounter_set_period(counter);
1690         return 0;
1691 }
1692
1693 static void perf_swcounter_disable(struct perf_counter *counter)
1694 {
1695         perf_swcounter_update(counter);
1696 }
1697
1698 static const struct hw_perf_counter_ops perf_ops_generic = {
1699         .enable         = perf_swcounter_enable,
1700         .disable        = perf_swcounter_disable,
1701         .read           = perf_swcounter_read,
1702 };
1703
1704 /*
1705  * Software counter: cpu wall time clock
1706  */
1707
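/*
 * The cpu clock counter reports per-cpu wall time via cpu_clock();
 * when an irq_period is requested, sampling is driven by an hrtimer
 * rather than a hardware PMU interrupt.
 */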
1708 static void cpu_clock_perf_counter_update(struct perf_counter *counter)
1709 {
1710         int cpu = raw_smp_processor_id();
1711         s64 prev;
1712         u64 now;
1713
1714         now = cpu_clock(cpu);
1715         prev = atomic64_read(&counter->hw.prev_count);
1716         atomic64_set(&counter->hw.prev_count, now);
1717         atomic64_add(now - prev, &counter->count);
1718 }
1719
1720 static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
1721 {
1722         struct hw_perf_counter *hwc = &counter->hw;
1723         int cpu = raw_smp_processor_id();
1724
1725         atomic64_set(&hwc->prev_count, cpu_clock(cpu));
1726         hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1727         hwc->hrtimer.function = perf_swcounter_hrtimer;
1728         if (hwc->irq_period) {
1729                 __hrtimer_start_range_ns(&hwc->hrtimer,
1730                                 ns_to_ktime(hwc->irq_period), 0,
1731                                 HRTIMER_MODE_REL, 0);
1732         }
1733
1734         return 0;
1735 }
1736
1737 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
1738 {
1739         hrtimer_cancel(&counter->hw.hrtimer);
1740         cpu_clock_perf_counter_update(counter);
1741 }
1742
1743 static void cpu_clock_perf_counter_read(struct perf_counter *counter)
1744 {
1745         cpu_clock_perf_counter_update(counter);
1746 }
1747
1748 static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
1749         .enable         = cpu_clock_perf_counter_enable,
1750         .disable        = cpu_clock_perf_counter_disable,
1751         .read           = cpu_clock_perf_counter_read,
1752 };
1753
1754 /*
1755  * Software counter: task time clock
1756  */
1757
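/*
 * The task clock counter reports the task's accumulated execution time:
 * sum_exec_runtime plus the delta that has not been accounted yet.
 * As with the cpu clock counter, sampling uses an hrtimer.
 */
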
1758 /*
1759  * Called from within the scheduler:
1760  */
1761 static u64 task_clock_perf_counter_val(struct perf_counter *counter, int update)
1762 {
1763         struct task_struct *curr = counter->task;
1764         u64 delta;
1765
1766         delta = __task_delta_exec(curr, update);
1767
1768         return curr->se.sum_exec_runtime + delta;
1769 }
1770
1771 static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
1772 {
1773         u64 prev;
1774         s64 delta;
1775
1776         prev = atomic64_read(&counter->hw.prev_count);
1777
1778         atomic64_set(&counter->hw.prev_count, now);
1779
1780         delta = now - prev;
1781
1782         atomic64_add(delta, &counter->count);
1783 }
1784
1785 static int task_clock_perf_counter_enable(struct perf_counter *counter)
1786 {
1787         struct hw_perf_counter *hwc = &counter->hw;
1788
1789         atomic64_set(&hwc->prev_count, task_clock_perf_counter_val(counter, 0));
1790         hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1791         hwc->hrtimer.function = perf_swcounter_hrtimer;
1792         if (hwc->irq_period) {
1793                 __hrtimer_start_range_ns(&hwc->hrtimer,
1794                                 ns_to_ktime(hwc->irq_period), 0,
1795                                 HRTIMER_MODE_REL, 0);
1796         }
1797
1798         return 0;
1799 }
1800
1801 static void task_clock_perf_counter_disable(struct perf_counter *counter)
1802 {
1803         hrtimer_cancel(&counter->hw.hrtimer);
1804         task_clock_perf_counter_update(counter,
1805                         task_clock_perf_counter_val(counter, 0));
1806 }
1807
1808 static void task_clock_perf_counter_read(struct perf_counter *counter)
1809 {
1810         task_clock_perf_counter_update(counter,
1811                         task_clock_perf_counter_val(counter, 1));
1812 }
1813
1814 static const struct hw_perf_counter_ops perf_ops_task_clock = {
1815         .enable         = task_clock_perf_counter_enable,
1816         .disable        = task_clock_perf_counter_disable,
1817         .read           = task_clock_perf_counter_read,
1818 };
1819
1820 /*
1821  * Software counter: cpu migrations
1822  */
1823
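/*
 * For a per-task counter use the task's own migration count; for a
 * per-cpu counter fall back to the aggregate count of this CPU.
 */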
1824 static inline u64 get_cpu_migrations(struct perf_counter *counter)
1825 {
1826         struct task_struct *curr = counter->ctx->task;
1827
1828         if (curr)
1829                 return curr->se.nr_migrations;
1830         return cpu_nr_migrations(smp_processor_id());
1831 }
1832
1833 static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
1834 {
1835         u64 prev, now;
1836         s64 delta;
1837
1838         prev = atomic64_read(&counter->hw.prev_count);
1839         now = get_cpu_migrations(counter);
1840
1841         atomic64_set(&counter->hw.prev_count, now);
1842
1843         delta = now - prev;
1844
1845         atomic64_add(delta, &counter->count);
1846 }
1847
1848 static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
1849 {
1850         cpu_migrations_perf_counter_update(counter);
1851 }
1852
1853 static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
1854 {
1855         if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
1856                 atomic64_set(&counter->hw.prev_count,
1857                              get_cpu_migrations(counter));
1858         return 0;
1859 }
1860
1861 static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
1862 {
1863         cpu_migrations_perf_counter_update(counter);
1864 }
1865
1866 static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
1867         .enable         = cpu_migrations_perf_counter_enable,
1868         .disable        = cpu_migrations_perf_counter_disable,
1869         .read           = cpu_migrations_perf_counter_read,
1870 };
1871
1872 #ifdef CONFIG_EVENT_PROFILE
1873 void perf_tpcounter_event(int event_id)
1874 {
1875         struct pt_regs *regs = get_irq_regs();
1876
1877         if (!regs)
1878                 regs = task_pt_regs(current);
1879
1880         __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs);
1881 }
1882
1883 extern int ftrace_profile_enable(int);
1884 extern void ftrace_profile_disable(int);
1885
1886 static void tp_perf_counter_destroy(struct perf_counter *counter)
1887 {
1888         ftrace_profile_disable(perf_event_id(&counter->hw_event));
1889 }
1890
1891 static const struct hw_perf_counter_ops *
1892 tp_perf_counter_init(struct perf_counter *counter)
1893 {
1894         int event_id = perf_event_id(&counter->hw_event);
1895         int ret;
1896
1897         ret = ftrace_profile_enable(event_id);
1898         if (ret)
1899                 return NULL;
1900
1901         counter->destroy = tp_perf_counter_destroy;
1902         counter->hw.irq_period = counter->hw_event.irq_period;
1903
1904         return &perf_ops_generic;
1905 }
1906 #else
1907 static const struct hw_perf_counter_ops *
1908 tp_perf_counter_init(struct perf_counter *counter)
1909 {
1910         return NULL;
1911 }
1912 #endif
1913
1914 static const struct hw_perf_counter_ops *
1915 sw_perf_counter_init(struct perf_counter *counter)
1916 {
1917         struct perf_counter_hw_event *hw_event = &counter->hw_event;
1918         const struct hw_perf_counter_ops *hw_ops = NULL;
1919         struct hw_perf_counter *hwc = &counter->hw;
1920
1921         /*
1922          * Software counters (currently) can't in general distinguish
1923          * between user, kernel and hypervisor events.
1924          * However, context switches and cpu migrations are considered
1925          * to be kernel events, and page faults are never hypervisor
1926          * events.
1927          */
1928         switch (perf_event_id(&counter->hw_event)) {
1929         case PERF_COUNT_CPU_CLOCK:
1930                 hw_ops = &perf_ops_cpu_clock;
1931
1932                 if (hw_event->irq_period && hw_event->irq_period < 10000)
1933                         hw_event->irq_period = 10000;
1934                 break;
1935         case PERF_COUNT_TASK_CLOCK:
1936                 /*
1937                  * If the user instantiates this as a per-cpu counter,
1938                  * use the cpu_clock counter instead.
1939                  */
1940                 if (counter->ctx->task)
1941                         hw_ops = &perf_ops_task_clock;
1942                 else
1943                         hw_ops = &perf_ops_cpu_clock;
1944
1945                 if (hw_event->irq_period && hw_event->irq_period < 10000)
1946                         hw_event->irq_period = 10000;
1947                 break;
1948         case PERF_COUNT_PAGE_FAULTS:
1949         case PERF_COUNT_PAGE_FAULTS_MIN:
1950         case PERF_COUNT_PAGE_FAULTS_MAJ:
1951         case PERF_COUNT_CONTEXT_SWITCHES:
1952                 hw_ops = &perf_ops_generic;
1953                 break;
1954         case PERF_COUNT_CPU_MIGRATIONS:
1955                 if (!counter->hw_event.exclude_kernel)
1956                         hw_ops = &perf_ops_cpu_migrations;
1957                 break;
1958         }
1959
1960         if (hw_ops)
1961                 hwc->irq_period = hw_event->irq_period;
1962
1963         return hw_ops;
1964 }
1965
1966 /*
1967  * Allocate and initialize a counter structure
1968  */
1969 static struct perf_counter *
1970 perf_counter_alloc(struct perf_counter_hw_event *hw_event,
1971                    int cpu,
1972                    struct perf_counter_context *ctx,
1973                    struct perf_counter *group_leader,
1974                    gfp_t gfpflags)
1975 {
1976         const struct hw_perf_counter_ops *hw_ops;
1977         struct perf_counter *counter;
1978
1979         counter = kzalloc(sizeof(*counter), gfpflags);
1980         if (!counter)
1981                 return NULL;
1982
1983         /*
1984          * Single counters are their own group leaders, with an
1985          * empty sibling list:
1986          */
1987         if (!group_leader)
1988                 group_leader = counter;
1989
1990         mutex_init(&counter->mutex);
1991         INIT_LIST_HEAD(&counter->list_entry);
1992         INIT_LIST_HEAD(&counter->event_entry);
1993         INIT_LIST_HEAD(&counter->sibling_list);
1994         init_waitqueue_head(&counter->waitq);
1995
1996         mutex_init(&counter->mmap_mutex);
1997
1998         INIT_LIST_HEAD(&counter->child_list);
1999
2000         counter->cpu                    = cpu;
2001         counter->hw_event               = *hw_event;
2002         counter->wakeup_pending         = 0;
2003         counter->group_leader           = group_leader;
2004         counter->hw_ops                 = NULL;
2005         counter->ctx                    = ctx;
2006
2007         counter->state = PERF_COUNTER_STATE_INACTIVE;
2008         if (hw_event->disabled)
2009                 counter->state = PERF_COUNTER_STATE_OFF;
2010
2011         hw_ops = NULL;
2012
2013         if (perf_event_raw(hw_event)) {
2014                 hw_ops = hw_perf_counter_init(counter);
2015                 goto done;
2016         }
2017
2018         switch (perf_event_type(hw_event)) {
2019         case PERF_TYPE_HARDWARE:
2020                 hw_ops = hw_perf_counter_init(counter);
2021                 break;
2022
2023         case PERF_TYPE_SOFTWARE:
2024                 hw_ops = sw_perf_counter_init(counter);
2025                 break;
2026
2027         case PERF_TYPE_TRACEPOINT:
2028                 hw_ops = tp_perf_counter_init(counter);
2029                 break;
2030         }
2031
2032         if (!hw_ops) {
2033                 kfree(counter);
2034                 return NULL;
2035         }
2036 done:
2037         counter->hw_ops = hw_ops;
2038
2039         return counter;
2040 }
2041
2042 /**
2043  * sys_perf_counter_open - open a performance counter and associate it with a task/cpu
2044  *
2045  * @hw_event_uptr:      event type attributes for monitoring/sampling
2046  * @pid:                target pid
2047  * @cpu:                target cpu
2048  * @group_fd:           group leader counter fd
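 * @flags:              reserved for future extensions; must be zero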
2049  */
2050 SYSCALL_DEFINE5(perf_counter_open,
2051                 const struct perf_counter_hw_event __user *, hw_event_uptr,
2052                 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
2053 {
2054         struct perf_counter *counter, *group_leader;
2055         struct perf_counter_hw_event hw_event;
2056         struct perf_counter_context *ctx;
2057         struct file *counter_file = NULL;
2058         struct file *group_file = NULL;
2059         int fput_needed = 0;
2060         int fput_needed2 = 0;
2061         int ret;
2062
2063         /* for future expandability... */
2064         if (flags)
2065                 return -EINVAL;
2066
2067         if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
2068                 return -EFAULT;
2069
2070         /*
2071          * Get the target context (task or percpu):
2072          */
2073         ctx = find_get_context(pid, cpu);
2074         if (IS_ERR(ctx))
2075                 return PTR_ERR(ctx);
2076
2077         /*
2078          * Look up the group leader (we will attach this counter to it):
2079          */
2080         group_leader = NULL;
2081         if (group_fd != -1) {
2082                 ret = -EINVAL;
2083                 group_file = fget_light(group_fd, &fput_needed);
2084                 if (!group_file)
2085                         goto err_put_context;
2086                 if (group_file->f_op != &perf_fops)
2087                         goto err_put_context;
2088
2089                 group_leader = group_file->private_data;
2090                 /*
2091                  * Do not allow a recursive hierarchy (the given group
2092                  * leader must itself be a leader, not someone's sibling):
2093                  */
2094                 if (group_leader->group_leader != group_leader)
2095                         goto err_put_context;
2096                 /*
2097                  * Do not allow attaching to a group in a different
2098                  * task or CPU context:
2099                  */
2100                 if (group_leader->ctx != ctx)
2101                         goto err_put_context;
2102                 /*
2103                  * Only a group leader can be exclusive or pinned
2104                  */
2105                 if (hw_event.exclusive || hw_event.pinned)
2106                         goto err_put_context;
2107         }
2108
2109         ret = -EINVAL;
2110         counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
2111                                      GFP_KERNEL);
2112         if (!counter)
2113                 goto err_put_context;
2114
2115         ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
2116         if (ret < 0)
2117                 goto err_free_put_context;
2118
2119         counter_file = fget_light(ret, &fput_needed2);
2120         if (!counter_file)
2121                 goto err_free_put_context;
2122
2123         counter->filp = counter_file;
2124         mutex_lock(&ctx->mutex);
2125         perf_install_in_context(ctx, counter, cpu);
2126         mutex_unlock(&ctx->mutex);
2127
2128         fput_light(counter_file, fput_needed2);
2129
2130 out_fput:
2131         fput_light(group_file, fput_needed);
2132
2133         return ret;
2134
2135 err_free_put_context:
2136         kfree(counter);
2137
2138 err_put_context:
2139         put_context(ctx);
2140
2141         goto out_fput;
2142 }
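
/*
 * Illustrative userspace use of the syscall above (a sketch only; the
 * syscall number is architecture specific and the hw_event layout is
 * defined in <linux/perf_counter.h>):
 *
 *	struct perf_counter_hw_event hw_event;
 *	unsigned long long count;
 *	long fd;
 *
 *	memset(&hw_event, 0, sizeof(hw_event));
 *	// select the desired event type/id, irq_period, exclude_* bits
 *
 *	// pid = getpid(), cpu = -1 (any cpu), group_fd = -1, flags = 0
 *	fd = syscall(__NR_perf_counter_open, &hw_event, getpid(), -1, -1, 0);
 *	read(fd, &count, sizeof(count));
 */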
2143
2144 /*
2145  * Initialize the perf_counter context in a task_struct:
2146  */
2147 static void
2148 __perf_counter_init_context(struct perf_counter_context *ctx,
2149                             struct task_struct *task)
2150 {
2151         memset(ctx, 0, sizeof(*ctx));
2152         spin_lock_init(&ctx->lock);
2153         mutex_init(&ctx->mutex);
2154         INIT_LIST_HEAD(&ctx->counter_list);
2155         INIT_LIST_HEAD(&ctx->event_list);
2156         ctx->task = task;
2157 }
2158
2159 /*
2160  * inherit a counter from parent task to child task:
2161  */
2162 static struct perf_counter *
2163 inherit_counter(struct perf_counter *parent_counter,
2164               struct task_struct *parent,
2165               struct perf_counter_context *parent_ctx,
2166               struct task_struct *child,
2167               struct perf_counter *group_leader,
2168               struct perf_counter_context *child_ctx)
2169 {
2170         struct perf_counter *child_counter;
2171
2172         /*
2173          * Instead of creating recursive hierarchies of counters,
2174          * we link inherited counters back to the original parent,
2175          * which is guaranteed to have a filp that we use as the
2176          * reference count:
2177          */
2178         if (parent_counter->parent)
2179                 parent_counter = parent_counter->parent;
2180
2181         child_counter = perf_counter_alloc(&parent_counter->hw_event,
2182                                            parent_counter->cpu, child_ctx,
2183                                            group_leader, GFP_KERNEL);
2184         if (!child_counter)
2185                 return NULL;
2186
2187         /*
2188          * Link it up in the child's context:
2189          */
2190         child_counter->task = child;
2191         list_add_counter(child_counter, child_ctx);
2192         child_ctx->nr_counters++;
2193
2194         child_counter->parent = parent_counter;
2195         /*
2196          * inherit into child's child as well:
2197          */
2198         child_counter->hw_event.inherit = 1;
2199
2200         /*
2201          * Get a reference to the parent filp - we will fput it
2202          * when the child counter exits. This is safe to do because
2203          * we are in the parent and we know that the filp still
2204          * exists and has a nonzero count:
2205          */
2206         atomic_long_inc(&parent_counter->filp->f_count);
2207
2208         /*
2209          * Link this into the parent counter's child list
2210          */
2211         mutex_lock(&parent_counter->mutex);
2212         list_add_tail(&child_counter->child_list, &parent_counter->child_list);
2213
2214         /*
2215          * Make the child state follow the state of the parent counter,
2216          * not its hw_event.disabled bit.  We hold the parent's mutex,
2217          * so we won't race with perf_counter_{en,dis}able_family.
2218          */
2219         if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
2220                 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
2221         else
2222                 child_counter->state = PERF_COUNTER_STATE_OFF;
2223
2224         mutex_unlock(&parent_counter->mutex);
2225
2226         return child_counter;
2227 }
2228
2229 static int inherit_group(struct perf_counter *parent_counter,
2230               struct task_struct *parent,
2231               struct perf_counter_context *parent_ctx,
2232               struct task_struct *child,
2233               struct perf_counter_context *child_ctx)
2234 {
2235         struct perf_counter *leader;
2236         struct perf_counter *sub;
2237
2238         leader = inherit_counter(parent_counter, parent, parent_ctx,
2239                                  child, NULL, child_ctx);
2240         if (!leader)
2241                 return -ENOMEM;
2242         list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
2243                 if (!inherit_counter(sub, parent, parent_ctx,
2244                                      child, leader, child_ctx))
2245                         return -ENOMEM;
2246         }
2247         return 0;
2248 }
2249
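/*
 * Fold the final count of an exiting inherited counter back into its
 * parent, unlink it from the parent's child list and drop the filp
 * reference that was taken when the counter was inherited.
 */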
2250 static void sync_child_counter(struct perf_counter *child_counter,
2251                                struct perf_counter *parent_counter)
2252 {
2253         u64 parent_val, child_val;
2254
2255         parent_val = atomic64_read(&parent_counter->count);
2256         child_val = atomic64_read(&child_counter->count);
2257
2258         /*
2259          * Add back the child's count to the parent's count:
2260          */
2261         atomic64_add(child_val, &parent_counter->count);
2262
2263         /*
2264          * Remove this counter from the parent's list
2265          */
2266         mutex_lock(&parent_counter->mutex);
2267         list_del_init(&child_counter->child_list);
2268         mutex_unlock(&parent_counter->mutex);
2269
2270         /*
2271          * Release the parent counter, if this was the last
2272          * reference to it.
2273          */
2274         fput(parent_counter->filp);
2275 }
2276
2277 static void
2278 __perf_counter_exit_task(struct task_struct *child,
2279                          struct perf_counter *child_counter,
2280                          struct perf_counter_context *child_ctx)
2281 {
2282         struct perf_counter *parent_counter;
2283         struct perf_counter *sub, *tmp;
2284
2285         /*
2286          * If we do not self-reap then we have to wait for the
2287          * child task to unschedule (which is guaranteed to happen),
2288          * so that its counter is at its final count. (This
2289          * condition triggers rarely - child tasks usually get
2290          * off their CPU before the parent has a chance to
2291          * get this far into the reaping action.)
2292          */
2293         if (child != current) {
2294                 wait_task_inactive(child, 0);
2295                 list_del_init(&child_counter->list_entry);
2296         } else {
2297                 struct perf_cpu_context *cpuctx;
2298                 unsigned long flags;
2299                 u64 perf_flags;
2300
2301                 /*
2302                  * Disable and unlink this counter.
2303                  *
2304                  * Be careful about zapping the list - IRQ/NMI context
2305                  * could still be processing it:
2306                  */
2307                 curr_rq_lock_irq_save(&flags);
2308                 perf_flags = hw_perf_save_disable();
2309
2310                 cpuctx = &__get_cpu_var(perf_cpu_context);
2311
2312                 group_sched_out(child_counter, cpuctx, child_ctx);
2313
2314                 list_del_init(&child_counter->list_entry);
2315
2316                 child_ctx->nr_counters--;
2317
2318                 hw_perf_restore(perf_flags);
2319                 curr_rq_unlock_irq_restore(&flags);
2320         }
2321
2322         parent_counter = child_counter->parent;
2323         /*
2324          * It can happen that the parent exits first, and has counters
2325          * that are still around due to the child reference. These
2326          * counters need to be zapped - otherwise they would linger.
2327          */
2328         if (parent_counter) {
2329                 sync_child_counter(child_counter, parent_counter);
2330                 list_for_each_entry_safe(sub, tmp, &child_counter->sibling_list,
2331                                          list_entry) {
2332                         if (sub->parent) {
2333                                 sync_child_counter(sub, sub->parent);
2334                                 free_counter(sub);
2335                         }
2336                 }
2337                 free_counter(child_counter);
2338         }
2339 }
2340
2341 /*
2342  * When a child task exits, feed back counter values to parent counters.
2343  *
2344  * Note: we may be running in child context, but the PID is not hashed
2345  * anymore so new counters will not be added.
2346  */
2347 void perf_counter_exit_task(struct task_struct *child)
2348 {
2349         struct perf_counter *child_counter, *tmp;
2350         struct perf_counter_context *child_ctx;
2351
2352         child_ctx = &child->perf_counter_ctx;
2353
2354         if (likely(!child_ctx->nr_counters))
2355                 return;
2356
2357         list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
2358                                  list_entry)
2359                 __perf_counter_exit_task(child, child_counter, child_ctx);
2360 }
2361
2362 /*
2363  * Initialize the perf_counter context in task_struct
2364  */
2365 void perf_counter_init_task(struct task_struct *child)
2366 {
2367         struct perf_counter_context *child_ctx, *parent_ctx;
2368         struct perf_counter *counter;
2369         struct task_struct *parent = current;
2370
2371         child_ctx  =  &child->perf_counter_ctx;
2372         parent_ctx = &parent->perf_counter_ctx;
2373
2374         __perf_counter_init_context(child_ctx, child);
2375
2376         /*
2377          * This is executed from the parent task context, so inherit
2378          * counters that have been marked for cloning:
2379          */
2380
2381         if (likely(!parent_ctx->nr_counters))
2382                 return;
2383
2384         /*
2385          * Lock the parent list. No need to lock the child - not PID
2386          * hashed yet and not running, so nobody can access it.
2387          */
2388         mutex_lock(&parent_ctx->mutex);
2389
2390         /*
2391          * We don't have to disable NMIs - we are only looking at
2392          * the list, not manipulating it:
2393          */
2394         list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) {
2395                 if (!counter->hw_event.inherit)
2396                         continue;
2397
2398                 if (inherit_group(counter, parent,
2399                                   parent_ctx, child, child_ctx))
2400                         break;
2401         }
2402
2403         mutex_unlock(&parent_ctx->mutex);
2404 }
2405
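/*
 * CPU hotplug support: set up the per-cpu context (and let the
 * architecture code do its per-cpu setup) when a CPU comes up, and
 * remove any per-cpu counters before it goes down.
 */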
2406 static void __cpuinit perf_counter_init_cpu(int cpu)
2407 {
2408         struct perf_cpu_context *cpuctx;
2409
2410         cpuctx = &per_cpu(perf_cpu_context, cpu);
2411         __perf_counter_init_context(&cpuctx->ctx, NULL);
2412
2413         mutex_lock(&perf_resource_mutex);
2414         cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
2415         mutex_unlock(&perf_resource_mutex);
2416
2417         hw_perf_counter_setup(cpu);
2418 }
2419
2420 #ifdef CONFIG_HOTPLUG_CPU
2421 static void __perf_counter_exit_cpu(void *info)
2422 {
2423         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
2424         struct perf_counter_context *ctx = &cpuctx->ctx;
2425         struct perf_counter *counter, *tmp;
2426
2427         list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
2428                 __perf_counter_remove_from_context(counter);
2429 }
2430 static void perf_counter_exit_cpu(int cpu)
2431 {
2432         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
2433         struct perf_counter_context *ctx = &cpuctx->ctx;
2434
2435         mutex_lock(&ctx->mutex);
2436         smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
2437         mutex_unlock(&ctx->mutex);
2438 }
2439 #else
2440 static inline void perf_counter_exit_cpu(int cpu) { }
2441 #endif
2442
2443 static int __cpuinit
2444 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
2445 {
2446         unsigned int cpu = (long)hcpu;
2447
2448         switch (action) {
2449
2450         case CPU_UP_PREPARE:
2451         case CPU_UP_PREPARE_FROZEN:
2452                 perf_counter_init_cpu(cpu);
2453                 break;
2454
2455         case CPU_DOWN_PREPARE:
2456         case CPU_DOWN_PREPARE_FROZEN:
2457                 perf_counter_exit_cpu(cpu);
2458                 break;
2459
2460         default:
2461                 break;
2462         }
2463
2464         return NOTIFY_OK;
2465 }
2466
2467 static struct notifier_block __cpuinitdata perf_cpu_nb = {
2468         .notifier_call          = perf_cpu_notify,
2469 };
2470
2471 static int __init perf_counter_init(void)
2472 {
2473         perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
2474                         (void *)(long)smp_processor_id());
2475         register_cpu_notifier(&perf_cpu_nb);
2476
2477         return 0;
2478 }
2479 early_initcall(perf_counter_init);
2480
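/*
 * sysfs knobs for counter reservation and overcommit. With the group
 * name used below these typically show up under
 * /sys/devices/system/cpu/perf_counters/ (the exact location depends
 * on the sysdev class layout).
 */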
2481 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
2482 {
2483         return sprintf(buf, "%d\n", perf_reserved_percpu);
2484 }
2485
2486 static ssize_t
2487 perf_set_reserve_percpu(struct sysdev_class *class,
2488                         const char *buf,
2489                         size_t count)
2490 {
2491         struct perf_cpu_context *cpuctx;
2492         unsigned long val;
2493         int err, cpu, mpt;
2494
2495         err = strict_strtoul(buf, 10, &val);
2496         if (err)
2497                 return err;
2498         if (val > perf_max_counters)
2499                 return -EINVAL;
2500
2501         mutex_lock(&perf_resource_mutex);
2502         perf_reserved_percpu = val;
2503         for_each_online_cpu(cpu) {
2504                 cpuctx = &per_cpu(perf_cpu_context, cpu);
2505                 spin_lock_irq(&cpuctx->ctx.lock);
2506                 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
2507                           perf_max_counters - perf_reserved_percpu);
2508                 cpuctx->max_pertask = mpt;
2509                 spin_unlock_irq(&cpuctx->ctx.lock);
2510         }
2511         mutex_unlock(&perf_resource_mutex);
2512
2513         return count;
2514 }
2515
2516 static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
2517 {
2518         return sprintf(buf, "%d\n", perf_overcommit);
2519 }
2520
2521 static ssize_t
2522 perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
2523 {
2524         unsigned long val;
2525         int err;
2526
2527         err = strict_strtoul(buf, 10, &val);
2528         if (err)
2529                 return err;
2530         if (val > 1)
2531                 return -EINVAL;
2532
2533         mutex_lock(&perf_resource_mutex);
2534         perf_overcommit = val;
2535         mutex_unlock(&perf_resource_mutex);
2536
2537         return count;
2538 }
2539
2540 static SYSDEV_CLASS_ATTR(
2541                                 reserve_percpu,
2542                                 0644,
2543                                 perf_show_reserve_percpu,
2544                                 perf_set_reserve_percpu
2545                         );
2546
2547 static SYSDEV_CLASS_ATTR(
2548                                 overcommit,
2549                                 0644,
2550                                 perf_show_overcommit,
2551                                 perf_set_overcommit
2552                         );
2553
2554 static struct attribute *perfclass_attrs[] = {
2555         &attr_reserve_percpu.attr,
2556         &attr_overcommit.attr,
2557         NULL
2558 };
2559
2560 static struct attribute_group perfclass_attr_group = {
2561         .attrs                  = perfclass_attrs,
2562         .name                   = "perf_counters",
2563 };
2564
2565 static int __init perf_counter_sysfs_init(void)
2566 {
2567         return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
2568                                   &perfclass_attr_group);
2569 }
2570 device_initcall(perf_counter_sysfs_init);