perf_counter: fix perf_poll()
kernel/perf_counter.c (linux-2.6)
1 /*
2  * Performance counter core code
3  *
4  *  Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
6  *
7  *
8  *  For licensing details see kernel-base/COPYING
9  */
10
11 #include <linux/fs.h>
12 #include <linux/cpu.h>
13 #include <linux/smp.h>
14 #include <linux/file.h>
15 #include <linux/poll.h>
16 #include <linux/sysfs.h>
17 #include <linux/ptrace.h>
18 #include <linux/percpu.h>
19 #include <linux/uaccess.h>
20 #include <linux/syscalls.h>
21 #include <linux/anon_inodes.h>
22 #include <linux/kernel_stat.h>
23 #include <linux/perf_counter.h>
24 #include <linux/mm.h>
25 #include <linux/vmstat.h>
26 #include <linux/rculist.h>
27 #include <linux/hardirq.h>
28
29 #include <asm/irq_regs.h>
30
31 /*
32  * Each CPU has a list of per CPU counters:
33  */
34 DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
35
36 int perf_max_counters __read_mostly = 1;
37 static int perf_reserved_percpu __read_mostly;
38 static int perf_overcommit __read_mostly = 1;
39
40 /*
41  * Mutex for (sysadmin-configurable) counter reservations:
42  */
43 static DEFINE_MUTEX(perf_resource_mutex);
44
45 /*
46  * Architecture provided APIs - weak aliases:
47  */
48 extern __weak const struct hw_perf_counter_ops *
49 hw_perf_counter_init(struct perf_counter *counter)
50 {
51         return NULL;
52 }
53
54 u64 __weak hw_perf_save_disable(void)           { return 0; }
55 void __weak hw_perf_restore(u64 ctrl)           { barrier(); }
56 void __weak hw_perf_counter_setup(int cpu)      { barrier(); }
57 int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
58                struct perf_cpu_context *cpuctx,
59                struct perf_counter_context *ctx, int cpu)
60 {
61         return 0;
62 }
63
64 void __weak perf_counter_print_debug(void)      { }
65
66 static void
67 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
68 {
69         struct perf_counter *group_leader = counter->group_leader;
70
71         /*
72          * Depending on whether it is a standalone or sibling counter,
73          * add it straight to the context's counter list, or to the group
74          * leader's sibling list:
75          */
76         if (counter->group_leader == counter)
77                 list_add_tail(&counter->list_entry, &ctx->counter_list);
78         else
79                 list_add_tail(&counter->list_entry, &group_leader->sibling_list);
80
81         list_add_rcu(&counter->event_entry, &ctx->event_list);
82 }
83
84 static void
85 list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
86 {
87         struct perf_counter *sibling, *tmp;
88
89         list_del_init(&counter->list_entry);
90         list_del_rcu(&counter->event_entry);
91
92         /*
93          * If this was a group counter with sibling counters then
94          * upgrade the siblings to singleton counters by adding them
95          * to the context list directly:
96          */
97         list_for_each_entry_safe(sibling, tmp,
98                                  &counter->sibling_list, list_entry) {
99
100                 list_move_tail(&sibling->list_entry, &ctx->counter_list);
101                 sibling->group_leader = sibling;
102         }
103 }
104
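/*
 * Take an active counter off the PMU: mark it inactive, call its
 * hw_ops->disable() method and drop the active-counter accounting in
 * the cpu and counter contexts.
 */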
105 static void
106 counter_sched_out(struct perf_counter *counter,
107                   struct perf_cpu_context *cpuctx,
108                   struct perf_counter_context *ctx)
109 {
110         if (counter->state != PERF_COUNTER_STATE_ACTIVE)
111                 return;
112
113         counter->state = PERF_COUNTER_STATE_INACTIVE;
114         counter->hw_ops->disable(counter);
115         counter->oncpu = -1;
116
117         if (!is_software_counter(counter))
118                 cpuctx->active_oncpu--;
119         ctx->nr_active--;
120         if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
121                 cpuctx->exclusive = 0;
122 }
123
124 static void
125 group_sched_out(struct perf_counter *group_counter,
126                 struct perf_cpu_context *cpuctx,
127                 struct perf_counter_context *ctx)
128 {
129         struct perf_counter *counter;
130
131         if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
132                 return;
133
134         counter_sched_out(group_counter, cpuctx, ctx);
135
136         /*
137          * Schedule out siblings (if any):
138          */
139         list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
140                 counter_sched_out(counter, cpuctx, ctx);
141
142         if (group_counter->hw_event.exclusive)
143                 cpuctx->exclusive = 0;
144 }
145
146 /*
147  * Cross CPU call to remove a performance counter
148  *
149  * We disable the counter on the hardware level first. After that we
150  * remove it from the context list.
151  */
152 static void __perf_counter_remove_from_context(void *info)
153 {
154         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
155         struct perf_counter *counter = info;
156         struct perf_counter_context *ctx = counter->ctx;
157         unsigned long flags;
158         u64 perf_flags;
159
160         /*
161          * If this is a task context, we need to check whether it is
162          * the current task context of this cpu. If not, it has been
163          * scheduled out before the smp call arrived.
164          */
165         if (ctx->task && cpuctx->task_ctx != ctx)
166                 return;
167
168         curr_rq_lock_irq_save(&flags);
169         spin_lock(&ctx->lock);
170
171         counter_sched_out(counter, cpuctx, ctx);
172
173         counter->task = NULL;
174         ctx->nr_counters--;
175
176         /*
177          * Protect the list operation against NMI by disabling the
178          * counters on a global level. NOP for non NMI based counters.
179          */
180         perf_flags = hw_perf_save_disable();
181         list_del_counter(counter, ctx);
182         hw_perf_restore(perf_flags);
183
184         if (!ctx->task) {
185                 /*
186                  * Allow more per task counters with respect to the
187                  * reservation:
188                  */
189                 cpuctx->max_pertask =
190                         min(perf_max_counters - ctx->nr_counters,
191                             perf_max_counters - perf_reserved_percpu);
192         }
193
194         spin_unlock(&ctx->lock);
195         curr_rq_unlock_irq_restore(&flags);
196 }
197
198
199 /*
200  * Remove the counter from a task's (or a CPU's) list of counters.
201  *
202  * Must be called with counter->mutex and ctx->mutex held.
203  *
204  * CPU counters are removed with an smp call. For task counters we only
205  * call when the task is on a CPU.
206  */
207 static void perf_counter_remove_from_context(struct perf_counter *counter)
208 {
209         struct perf_counter_context *ctx = counter->ctx;
210         struct task_struct *task = ctx->task;
211
212         if (!task) {
213                 /*
214                  * Per cpu counters are removed via an smp call and
215                  * the removal is always successful.
216                  */
217                 smp_call_function_single(counter->cpu,
218                                          __perf_counter_remove_from_context,
219                                          counter, 1);
220                 return;
221         }
222
223 retry:
224         task_oncpu_function_call(task, __perf_counter_remove_from_context,
225                                  counter);
226
227         spin_lock_irq(&ctx->lock);
228         /*
229          * If the context is still active and the counter is still on its list, retry the smp call.
230          */
231         if (ctx->nr_active && !list_empty(&counter->list_entry)) {
232                 spin_unlock_irq(&ctx->lock);
233                 goto retry;
234         }
235
236         /*
237          * The lock prevents this context from being scheduled in, so we
238          * can remove the counter safely if the call above did not
239          * succeed.
240          */
241         if (!list_empty(&counter->list_entry)) {
242                 ctx->nr_counters--;
243                 list_del_counter(counter, ctx);
244                 counter->task = NULL;
245         }
246         spin_unlock_irq(&ctx->lock);
247 }
248
249 /*
250  * Cross CPU call to disable a performance counter
251  */
252 static void __perf_counter_disable(void *info)
253 {
254         struct perf_counter *counter = info;
255         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
256         struct perf_counter_context *ctx = counter->ctx;
257         unsigned long flags;
258
259         /*
260          * If this is a per-task counter, we need to check whether this
261          * counter's task is the current task on this cpu.
262          */
263         if (ctx->task && cpuctx->task_ctx != ctx)
264                 return;
265
266         curr_rq_lock_irq_save(&flags);
267         spin_lock(&ctx->lock);
268
269         /*
270          * If the counter is on, turn it off.
271          * If it is in error state, leave it in error state.
272          */
273         if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
274                 if (counter == counter->group_leader)
275                         group_sched_out(counter, cpuctx, ctx);
276                 else
277                         counter_sched_out(counter, cpuctx, ctx);
278                 counter->state = PERF_COUNTER_STATE_OFF;
279         }
280
281         spin_unlock(&ctx->lock);
282         curr_rq_unlock_irq_restore(&flags);
283 }
284
285 /*
286  * Disable a counter.
287  */
288 static void perf_counter_disable(struct perf_counter *counter)
289 {
290         struct perf_counter_context *ctx = counter->ctx;
291         struct task_struct *task = ctx->task;
292
293         if (!task) {
294                 /*
295                  * Disable the counter on the cpu that it's on
296                  */
297                 smp_call_function_single(counter->cpu, __perf_counter_disable,
298                                          counter, 1);
299                 return;
300         }
301
302  retry:
303         task_oncpu_function_call(task, __perf_counter_disable, counter);
304
305         spin_lock_irq(&ctx->lock);
306         /*
307          * If the counter is still active, we need to retry the cross-call.
308          */
309         if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
310                 spin_unlock_irq(&ctx->lock);
311                 goto retry;
312         }
313
314         /*
315          * Since we have the lock this context can't be scheduled
316          * in, so we can change the state safely.
317          */
318         if (counter->state == PERF_COUNTER_STATE_INACTIVE)
319                 counter->state = PERF_COUNTER_STATE_OFF;
320
321         spin_unlock_irq(&ctx->lock);
322 }
323
324 /*
325  * Disable a counter and all its children.
326  */
327 static void perf_counter_disable_family(struct perf_counter *counter)
328 {
329         struct perf_counter *child;
330
331         perf_counter_disable(counter);
332
333         /*
334          * Lock the mutex to protect the list of children
335          */
336         mutex_lock(&counter->mutex);
337         list_for_each_entry(child, &counter->child_list, child_list)
338                 perf_counter_disable(child);
339         mutex_unlock(&counter->mutex);
340 }
341
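/*
 * Put a single counter on the PMU: mark it active, call its
 * hw_ops->enable() method and update the active-counter accounting.
 * Returns -EAGAIN if the hardware refuses to take the counter.
 */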
342 static int
343 counter_sched_in(struct perf_counter *counter,
344                  struct perf_cpu_context *cpuctx,
345                  struct perf_counter_context *ctx,
346                  int cpu)
347 {
348         if (counter->state <= PERF_COUNTER_STATE_OFF)
349                 return 0;
350
351         counter->state = PERF_COUNTER_STATE_ACTIVE;
352         counter->oncpu = cpu;   /* TODO: put 'cpu' into cpuctx->cpu */
353         /*
354          * The new state must be visible before we turn it on in the hardware:
355          */
356         smp_wmb();
357
358         if (counter->hw_ops->enable(counter)) {
359                 counter->state = PERF_COUNTER_STATE_INACTIVE;
360                 counter->oncpu = -1;
361                 return -EAGAIN;
362         }
363
364         if (!is_software_counter(counter))
365                 cpuctx->active_oncpu++;
366         ctx->nr_active++;
367
368         if (counter->hw_event.exclusive)
369                 cpuctx->exclusive = 1;
370
371         return 0;
372 }
373
374 /*
375  * Return 1 for a group consisting entirely of software counters,
376  * 0 if the group contains any hardware counters.
377  */
378 static int is_software_only_group(struct perf_counter *leader)
379 {
380         struct perf_counter *counter;
381
382         if (!is_software_counter(leader))
383                 return 0;
384         list_for_each_entry(counter, &leader->sibling_list, list_entry)
385                 if (!is_software_counter(counter))
386                         return 0;
387         return 1;
388 }
389
390 /*
391  * Work out whether we can put this counter group on the CPU now.
392  */
393 static int group_can_go_on(struct perf_counter *counter,
394                            struct perf_cpu_context *cpuctx,
395                            int can_add_hw)
396 {
397         /*
398          * Groups consisting entirely of software counters can always go on.
399          */
400         if (is_software_only_group(counter))
401                 return 1;
402         /*
403          * If an exclusive group is already on, no other hardware
404          * counters can go on.
405          */
406         if (cpuctx->exclusive)
407                 return 0;
408         /*
409          * If this group is exclusive and there are already
410          * counters on the CPU, it can't go on.
411          */
412         if (counter->hw_event.exclusive && cpuctx->active_oncpu)
413                 return 0;
414         /*
415          * Otherwise, try to add it if all previous groups were able
416          * to go on.
417          */
418         return can_add_hw;
419 }
420
421 /*
422  * Cross CPU call to install and enable a performance counter
423  */
424 static void __perf_install_in_context(void *info)
425 {
426         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
427         struct perf_counter *counter = info;
428         struct perf_counter_context *ctx = counter->ctx;
429         struct perf_counter *leader = counter->group_leader;
430         int cpu = smp_processor_id();
431         unsigned long flags;
432         u64 perf_flags;
433         int err;
434
435         /*
436          * If this is a task context, we need to check whether it is
437          * the current task context of this cpu. If not, it has been
438          * scheduled out before the smp call arrived.
439          */
440         if (ctx->task && cpuctx->task_ctx != ctx)
441                 return;
442
443         curr_rq_lock_irq_save(&flags);
444         spin_lock(&ctx->lock);
445
446         /*
447          * Protect the list operation against NMI by disabling the
448          * counters on a global level. NOP for non NMI based counters.
449          */
450         perf_flags = hw_perf_save_disable();
451
452         list_add_counter(counter, ctx);
453         ctx->nr_counters++;
454         counter->prev_state = PERF_COUNTER_STATE_OFF;
455
456         /*
457          * Don't put the counter on if it is disabled or if
458          * it is in a group and the group isn't on.
459          */
460         if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
461             (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
462                 goto unlock;
463
464         /*
465          * An exclusive counter can't go on if there are already active
466          * hardware counters, and no hardware counter can go on if there
467          * is already an exclusive counter on.
468          */
469         if (!group_can_go_on(counter, cpuctx, 1))
470                 err = -EEXIST;
471         else
472                 err = counter_sched_in(counter, cpuctx, ctx, cpu);
473
474         if (err) {
475                 /*
476                  * This counter couldn't go on.  If it is in a group
477                  * then we have to pull the whole group off.
478                  * If the counter group is pinned then put it in error state.
479                  */
480                 if (leader != counter)
481                         group_sched_out(leader, cpuctx, ctx);
482                 if (leader->hw_event.pinned)
483                         leader->state = PERF_COUNTER_STATE_ERROR;
484         }
485
486         if (!err && !ctx->task && cpuctx->max_pertask)
487                 cpuctx->max_pertask--;
488
489  unlock:
490         hw_perf_restore(perf_flags);
491
492         spin_unlock(&ctx->lock);
493         curr_rq_unlock_irq_restore(&flags);
494 }
495
496 /*
497  * Attach a performance counter to a context
498  *
499  * First we add the counter to the list with the hardware enable bit
500  * in counter->hw_config cleared.
501  *
502  * If the counter is attached to a task which is on a CPU we use a smp
503  * call to enable it in the task context. The task might have been
504  * scheduled away, but we check this in the smp call again.
505  *
506  * Must be called with ctx->mutex held.
507  */
508 static void
509 perf_install_in_context(struct perf_counter_context *ctx,
510                         struct perf_counter *counter,
511                         int cpu)
512 {
513         struct task_struct *task = ctx->task;
514
515         if (!task) {
516                 /*
517                  * Per cpu counters are installed via an smp call and
518                  * the install is always successful.
519                  */
520                 smp_call_function_single(cpu, __perf_install_in_context,
521                                          counter, 1);
522                 return;
523         }
524
525         counter->task = task;
526 retry:
527         task_oncpu_function_call(task, __perf_install_in_context,
528                                  counter);
529
530         spin_lock_irq(&ctx->lock);
531         /*
532          * If the context is active and the counter was not added, retry the smp call.
533          */
534         if (ctx->is_active && list_empty(&counter->list_entry)) {
535                 spin_unlock_irq(&ctx->lock);
536                 goto retry;
537         }
538
539         /*
540          * The lock prevents this context from being scheduled in, so we
541          * can add the counter safely if the call above did not
542          * succeed.
543          */
544         if (list_empty(&counter->list_entry)) {
545                 list_add_counter(counter, ctx);
546                 ctx->nr_counters++;
547         }
548         spin_unlock_irq(&ctx->lock);
549 }
550
551 /*
552  * Cross CPU call to enable a performance counter
553  */
554 static void __perf_counter_enable(void *info)
555 {
556         struct perf_counter *counter = info;
557         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
558         struct perf_counter_context *ctx = counter->ctx;
559         struct perf_counter *leader = counter->group_leader;
560         unsigned long flags;
561         int err;
562
563         /*
564          * If this is a per-task counter, we need to check whether this
565          * counter's task is the current task on this cpu.
566          */
567         if (ctx->task && cpuctx->task_ctx != ctx)
568                 return;
569
570         curr_rq_lock_irq_save(&flags);
571         spin_lock(&ctx->lock);
572
573         counter->prev_state = counter->state;
574         if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
575                 goto unlock;
576         counter->state = PERF_COUNTER_STATE_INACTIVE;
577
578         /*
579          * If the counter is in a group and isn't the group leader,
580          * then don't put it on unless the group is on.
581          */
582         if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
583                 goto unlock;
584
585         if (!group_can_go_on(counter, cpuctx, 1))
586                 err = -EEXIST;
587         else
588                 err = counter_sched_in(counter, cpuctx, ctx,
589                                        smp_processor_id());
590
591         if (err) {
592                 /*
593                  * If this counter can't go on and it's part of a
594                  * group, then the whole group has to come off.
595                  */
596                 if (leader != counter)
597                         group_sched_out(leader, cpuctx, ctx);
598                 if (leader->hw_event.pinned)
599                         leader->state = PERF_COUNTER_STATE_ERROR;
600         }
601
602  unlock:
603         spin_unlock(&ctx->lock);
604         curr_rq_unlock_irq_restore(&flags);
605 }
606
607 /*
608  * Enable a counter.
609  */
610 static void perf_counter_enable(struct perf_counter *counter)
611 {
612         struct perf_counter_context *ctx = counter->ctx;
613         struct task_struct *task = ctx->task;
614
615         if (!task) {
616                 /*
617                  * Enable the counter on the cpu that it's on
618                  */
619                 smp_call_function_single(counter->cpu, __perf_counter_enable,
620                                          counter, 1);
621                 return;
622         }
623
624         spin_lock_irq(&ctx->lock);
625         if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
626                 goto out;
627
628         /*
629          * If the counter is in error state, clear that first.
630          * That way, if we see the counter in error state below, we
631          * know that it has gone back into error state, as distinct
632          * from the task having been scheduled away before the
633          * cross-call arrived.
634          */
635         if (counter->state == PERF_COUNTER_STATE_ERROR)
636                 counter->state = PERF_COUNTER_STATE_OFF;
637
638  retry:
639         spin_unlock_irq(&ctx->lock);
640         task_oncpu_function_call(task, __perf_counter_enable, counter);
641
642         spin_lock_irq(&ctx->lock);
643
644         /*
645          * If the context is active and the counter is still off,
646          * we need to retry the cross-call.
647          */
648         if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
649                 goto retry;
650
651         /*
652          * Since we have the lock this context can't be scheduled
653          * in, so we can change the state safely.
654          */
655         if (counter->state == PERF_COUNTER_STATE_OFF)
656                 counter->state = PERF_COUNTER_STATE_INACTIVE;
657  out:
658         spin_unlock_irq(&ctx->lock);
659 }
660
661 /*
662  * Enable a counter and all its children.
663  */
664 static void perf_counter_enable_family(struct perf_counter *counter)
665 {
666         struct perf_counter *child;
667
668         perf_counter_enable(counter);
669
670         /*
671          * Lock the mutex to protect the list of children
672          */
673         mutex_lock(&counter->mutex);
674         list_for_each_entry(child, &counter->child_list, child_list)
675                 perf_counter_enable(child);
676         mutex_unlock(&counter->mutex);
677 }
678
679 void __perf_counter_sched_out(struct perf_counter_context *ctx,
680                               struct perf_cpu_context *cpuctx)
681 {
682         struct perf_counter *counter;
683         u64 flags;
684
685         spin_lock(&ctx->lock);
686         ctx->is_active = 0;
687         if (likely(!ctx->nr_counters))
688                 goto out;
689
690         flags = hw_perf_save_disable();
691         if (ctx->nr_active) {
692                 list_for_each_entry(counter, &ctx->counter_list, list_entry)
693                         group_sched_out(counter, cpuctx, ctx);
694         }
695         hw_perf_restore(flags);
696  out:
697         spin_unlock(&ctx->lock);
698 }
699
700 /*
701  * Called from scheduler to remove the counters of the current task,
702  * with interrupts disabled.
703  *
704  * We stop each counter and update the counter value in counter->count.
705  *
706  * This does not protect us against NMI, but disable()
707  * sets the disabled bit in the control field of counter _before_
708  * accessing the counter control register. If a NMI hits, then it will
709  * not restart the counter.
710  */
711 void perf_counter_task_sched_out(struct task_struct *task, int cpu)
712 {
713         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
714         struct perf_counter_context *ctx = &task->perf_counter_ctx;
715         struct pt_regs *regs;
716
717         if (likely(!cpuctx->task_ctx))
718                 return;
719
720         regs = task_pt_regs(task);
721         perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs);
722         __perf_counter_sched_out(ctx, cpuctx);
723
724         cpuctx->task_ctx = NULL;
725 }
726
727 static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
728 {
729         __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
730 }
731
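/*
 * Schedule in a counter group as a single unit: the group leader first,
 * then all of its siblings. If any member fails to go on, the partially
 * scheduled group is torn down again and -EAGAIN is returned.
 */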
732 static int
733 group_sched_in(struct perf_counter *group_counter,
734                struct perf_cpu_context *cpuctx,
735                struct perf_counter_context *ctx,
736                int cpu)
737 {
738         struct perf_counter *counter, *partial_group;
739         int ret;
740
741         if (group_counter->state == PERF_COUNTER_STATE_OFF)
742                 return 0;
743
744         ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
745         if (ret)
746                 return ret < 0 ? ret : 0;
747
748         group_counter->prev_state = group_counter->state;
749         if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
750                 return -EAGAIN;
751
752         /*
753          * Schedule in siblings as one group (if any):
754          */
755         list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
756                 counter->prev_state = counter->state;
757                 if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
758                         partial_group = counter;
759                         goto group_error;
760                 }
761         }
762
763         return 0;
764
765 group_error:
766         /*
767          * Groups can be scheduled in as one unit only, so undo any
768          * partial group before returning:
769          */
770         list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
771                 if (counter == partial_group)
772                         break;
773                 counter_sched_out(counter, cpuctx, ctx);
774         }
775         counter_sched_out(group_counter, cpuctx, ctx);
776
777         return -EAGAIN;
778 }
779
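/*
 * Schedule in all counters of a context: pinned groups are tried first
 * (and moved into error state if they cannot go on), then the remaining
 * groups are added for as long as the hardware has room for them.
 */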
780 static void
781 __perf_counter_sched_in(struct perf_counter_context *ctx,
782                         struct perf_cpu_context *cpuctx, int cpu)
783 {
784         struct perf_counter *counter;
785         u64 flags;
786         int can_add_hw = 1;
787
788         spin_lock(&ctx->lock);
789         ctx->is_active = 1;
790         if (likely(!ctx->nr_counters))
791                 goto out;
792
793         flags = hw_perf_save_disable();
794
795         /*
796          * First go through the list and put on any pinned groups
797          * in order to give them the best chance of going on.
798          */
799         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
800                 if (counter->state <= PERF_COUNTER_STATE_OFF ||
801                     !counter->hw_event.pinned)
802                         continue;
803                 if (counter->cpu != -1 && counter->cpu != cpu)
804                         continue;
805
806                 if (group_can_go_on(counter, cpuctx, 1))
807                         group_sched_in(counter, cpuctx, ctx, cpu);
808
809                 /*
810                  * If this pinned group hasn't been scheduled,
811                  * put it in error state.
812                  */
813                 if (counter->state == PERF_COUNTER_STATE_INACTIVE)
814                         counter->state = PERF_COUNTER_STATE_ERROR;
815         }
816
817         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
818                 /*
819                  * Ignore counters in OFF or ERROR state, and
820                  * ignore pinned counters since we did them already.
821                  */
822                 if (counter->state <= PERF_COUNTER_STATE_OFF ||
823                     counter->hw_event.pinned)
824                         continue;
825
826                 /*
827                  * Listen to the 'cpu' scheduling filter constraint
828                  * of counters:
829                  */
830                 if (counter->cpu != -1 && counter->cpu != cpu)
831                         continue;
832
833                 if (group_can_go_on(counter, cpuctx, can_add_hw)) {
834                         if (group_sched_in(counter, cpuctx, ctx, cpu))
835                                 can_add_hw = 0;
836                 }
837         }
838         hw_perf_restore(flags);
839  out:
840         spin_unlock(&ctx->lock);
841 }
842
843 /*
844  * Called from scheduler to add the counters of the current task
845  * with interrupts disabled.
846  *
847  * We restore the counter value and then enable it.
848  *
849  * This does not protect us against NMI, but enable()
850  * sets the enabled bit in the control field of counter _before_
851  * accessing the counter control register. If a NMI hits, then it will
852  * keep the counter running.
853  */
854 void perf_counter_task_sched_in(struct task_struct *task, int cpu)
855 {
856         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
857         struct perf_counter_context *ctx = &task->perf_counter_ctx;
858
859         __perf_counter_sched_in(ctx, cpuctx, cpu);
860         cpuctx->task_ctx = ctx;
861 }
862
863 static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
864 {
865         struct perf_counter_context *ctx = &cpuctx->ctx;
866
867         __perf_counter_sched_in(ctx, cpuctx, cpu);
868 }
869
870 int perf_counter_task_disable(void)
871 {
872         struct task_struct *curr = current;
873         struct perf_counter_context *ctx = &curr->perf_counter_ctx;
874         struct perf_counter *counter;
875         unsigned long flags;
876         u64 perf_flags;
877         int cpu;
878
879         if (likely(!ctx->nr_counters))
880                 return 0;
881
882         curr_rq_lock_irq_save(&flags);
883         cpu = smp_processor_id();
884
885         /* force the update of the task clock: */
886         __task_delta_exec(curr, 1);
887
888         perf_counter_task_sched_out(curr, cpu);
889
890         spin_lock(&ctx->lock);
891
892         /*
893          * Disable all the counters:
894          */
895         perf_flags = hw_perf_save_disable();
896
897         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
898                 if (counter->state != PERF_COUNTER_STATE_ERROR)
899                         counter->state = PERF_COUNTER_STATE_OFF;
900         }
901
902         hw_perf_restore(perf_flags);
903
904         spin_unlock(&ctx->lock);
905
906         curr_rq_unlock_irq_restore(&flags);
907
908         return 0;
909 }
910
911 int perf_counter_task_enable(void)
912 {
913         struct task_struct *curr = current;
914         struct perf_counter_context *ctx = &curr->perf_counter_ctx;
915         struct perf_counter *counter;
916         unsigned long flags;
917         u64 perf_flags;
918         int cpu;
919
920         if (likely(!ctx->nr_counters))
921                 return 0;
922
923         curr_rq_lock_irq_save(&flags);
924         cpu = smp_processor_id();
925
926         /* force the update of the task clock: */
927         __task_delta_exec(curr, 1);
928
929         perf_counter_task_sched_out(curr, cpu);
930
931         spin_lock(&ctx->lock);
932
933         /*
934          * Enable all the counters (under a global hw disable):
935          */
936         perf_flags = hw_perf_save_disable();
937
938         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
939                 if (counter->state > PERF_COUNTER_STATE_OFF)
940                         continue;
941                 counter->state = PERF_COUNTER_STATE_INACTIVE;
942                 counter->hw_event.disabled = 0;
943         }
944         hw_perf_restore(perf_flags);
945
946         spin_unlock(&ctx->lock);
947
948         perf_counter_task_sched_in(curr, cpu);
949
950         curr_rq_unlock_irq_restore(&flags);
951
952         return 0;
953 }
954
955 /*
956  * Round-robin a context's counters:
957  */
958 static void rotate_ctx(struct perf_counter_context *ctx)
959 {
960         struct perf_counter *counter;
961         u64 perf_flags;
962
963         if (!ctx->nr_counters)
964                 return;
965
966         spin_lock(&ctx->lock);
967         /*
968          * Rotate the first entry last (works just fine for group counters too):
969          */
970         perf_flags = hw_perf_save_disable();
971         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
972                 list_move_tail(&counter->list_entry, &ctx->counter_list);
973                 break;
974         }
975         hw_perf_restore(perf_flags);
976
977         spin_unlock(&ctx->lock);
978 }
979
980 void perf_counter_task_tick(struct task_struct *curr, int cpu)
981 {
982         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
983         struct perf_counter_context *ctx = &curr->perf_counter_ctx;
984         const int rotate_percpu = 0;
985
986         if (rotate_percpu)
987                 perf_counter_cpu_sched_out(cpuctx);
988         perf_counter_task_sched_out(curr, cpu);
989
990         if (rotate_percpu)
991                 rotate_ctx(&cpuctx->ctx);
992         rotate_ctx(ctx);
993
994         if (rotate_percpu)
995                 perf_counter_cpu_sched_in(cpuctx, cpu);
996         perf_counter_task_sched_in(curr, cpu);
997 }
998
999 /*
1000  * Cross CPU call to read the hardware counter
1001  */
1002 static void __read(void *info)
1003 {
1004         struct perf_counter *counter = info;
1005         unsigned long flags;
1006
1007         curr_rq_lock_irq_save(&flags);
1008         counter->hw_ops->read(counter);
1009         curr_rq_unlock_irq_restore(&flags);
1010 }
1011
1012 static u64 perf_counter_read(struct perf_counter *counter)
1013 {
1014         /*
1015          * If counter is enabled and currently active on a CPU, update the
1016          * value in the counter structure:
1017          */
1018         if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
1019                 smp_call_function_single(counter->oncpu,
1020                                          __read, counter, 1);
1021         }
1022
1023         return atomic64_read(&counter->count);
1024 }
1025
1026 static void put_context(struct perf_counter_context *ctx)
1027 {
1028         if (ctx->task)
1029                 put_task_struct(ctx->task);
1030 }
1031
1032 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1033 {
1034         struct perf_cpu_context *cpuctx;
1035         struct perf_counter_context *ctx;
1036         struct task_struct *task;
1037
1038         /*
1039          * If cpu is not a wildcard then this is a percpu counter:
1040          */
1041         if (cpu != -1) {
1042                 /* Must be root to operate on a CPU counter: */
1043                 if (!capable(CAP_SYS_ADMIN))
1044                         return ERR_PTR(-EACCES);
1045
1046                 if (cpu < 0 || cpu > num_possible_cpus())
1047                         return ERR_PTR(-EINVAL);
1048
1049                 /*
1050                  * We could be clever and allow attaching a counter to an
1051                  * offline CPU and activate it when the CPU comes up, but
1052                  * that's for later.
1053                  */
1054                 if (!cpu_isset(cpu, cpu_online_map))
1055                         return ERR_PTR(-ENODEV);
1056
1057                 cpuctx = &per_cpu(perf_cpu_context, cpu);
1058                 ctx = &cpuctx->ctx;
1059
1060                 return ctx;
1061         }
1062
1063         rcu_read_lock();
1064         if (!pid)
1065                 task = current;
1066         else
1067                 task = find_task_by_vpid(pid);
1068         if (task)
1069                 get_task_struct(task);
1070         rcu_read_unlock();
1071
1072         if (!task)
1073                 return ERR_PTR(-ESRCH);
1074
1075         ctx = &task->perf_counter_ctx;
1076         ctx->task = task;
1077
1078         /* Reuse ptrace permission checks for now. */
1079         if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
1080                 put_context(ctx);
1081                 return ERR_PTR(-EACCES);
1082         }
1083
1084         return ctx;
1085 }
1086
1087 static void free_counter_rcu(struct rcu_head *head)
1088 {
1089         struct perf_counter *counter;
1090
1091         counter = container_of(head, struct perf_counter, rcu_head);
1092         kfree(counter);
1093 }
1094
1095 static void free_counter(struct perf_counter *counter)
1096 {
1097         if (counter->destroy)
1098                 counter->destroy(counter);
1099
1100         call_rcu(&counter->rcu_head, free_counter_rcu);
1101 }
1102
1103 /*
1104  * Called when the last reference to the file is gone.
1105  */
1106 static int perf_release(struct inode *inode, struct file *file)
1107 {
1108         struct perf_counter *counter = file->private_data;
1109         struct perf_counter_context *ctx = counter->ctx;
1110
1111         file->private_data = NULL;
1112
1113         mutex_lock(&ctx->mutex);
1114         mutex_lock(&counter->mutex);
1115
1116         perf_counter_remove_from_context(counter);
1117
1118         mutex_unlock(&counter->mutex);
1119         mutex_unlock(&ctx->mutex);
1120
1121         free_counter(counter);
1122         put_context(ctx);
1123
1124         return 0;
1125 }
1126
1127 /*
1128  * Read the performance counter - simple non-blocking version for now
1129  */
1130 static ssize_t
1131 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1132 {
1133         u64 cntval;
1134
1135         if (count < sizeof(cntval))
1136                 return -EINVAL;
1137
1138         /*
1139          * Return end-of-file for a read on a counter that is in
1140          * error state (i.e. because it was pinned but it couldn't be
1141          * scheduled on to the CPU at some point).
1142          */
1143         if (counter->state == PERF_COUNTER_STATE_ERROR)
1144                 return 0;
1145
1146         mutex_lock(&counter->mutex);
1147         cntval = perf_counter_read(counter);
1148         mutex_unlock(&counter->mutex);
1149
1150         return put_user(cntval, (u64 __user *) buf) ? -EFAULT : sizeof(cntval);
1151 }
1152
1153 static ssize_t
1154 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1155 {
1156         struct perf_counter *counter = file->private_data;
1157
1158         return perf_read_hw(counter, buf, count);
1159 }
1160
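/*
 * Poll support: return (and consume) the wakeup events that the output
 * code accumulated in the mmap buffer, or POLL_HUP if no buffer has
 * been mmap()ed, and register on the counter's waitqueue.
 */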
1161 static unsigned int perf_poll(struct file *file, poll_table *wait)
1162 {
1163         struct perf_counter *counter = file->private_data;
1164         struct perf_mmap_data *data;
1165         unsigned int events;
1166
1167         rcu_read_lock();
1168         data = rcu_dereference(counter->data);
1169         if (data)
1170                 events = atomic_xchg(&data->wakeup, 0);
1171         else
1172                 events = POLL_HUP;
1173         rcu_read_unlock();
1174
1175         poll_wait(file, &counter->waitq, wait);
1176
1177         return events;
1178 }
1179
1180 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1181 {
1182         struct perf_counter *counter = file->private_data;
1183         int err = 0;
1184
1185         switch (cmd) {
1186         case PERF_COUNTER_IOC_ENABLE:
1187                 perf_counter_enable_family(counter);
1188                 break;
1189         case PERF_COUNTER_IOC_DISABLE:
1190                 perf_counter_disable_family(counter);
1191                 break;
1192         default:
1193                 err = -ENOTTY;
1194         }
1195         return err;
1196 }
1197
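/*
 * Update the counter's user-space control page. The ->lock field is
 * incremented before and after the update, seqlock style, so user
 * space can detect that it raced with a writer.
 */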
1198 static void __perf_counter_update_userpage(struct perf_counter *counter,
1199                                            struct perf_mmap_data *data)
1200 {
1201         struct perf_counter_mmap_page *userpg = data->user_page;
1202
1203         /*
1204          * Disable preemption so as to not let the corresponding user-space
1205          * spin too long if we get preempted.
1206          */
1207         preempt_disable();
1208         ++userpg->lock;
1209         smp_wmb();
1210         userpg->index = counter->hw.idx;
1211         userpg->offset = atomic64_read(&counter->count);
1212         if (counter->state == PERF_COUNTER_STATE_ACTIVE)
1213                 userpg->offset -= atomic64_read(&counter->hw.prev_count);
1214
1215         userpg->data_head = atomic_read(&data->head);
1216         smp_wmb();
1217         ++userpg->lock;
1218         preempt_enable();
1219 }
1220
1221 void perf_counter_update_userpage(struct perf_counter *counter)
1222 {
1223         struct perf_mmap_data *data;
1224
1225         rcu_read_lock();
1226         data = rcu_dereference(counter->data);
1227         if (data)
1228                 __perf_counter_update_userpage(counter, data);
1229         rcu_read_unlock();
1230 }
1231
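/*
 * Fault handler for the mmap()ed area: page 0 maps the user control
 * page, the following pages map the data buffer.
 */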
1232 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1233 {
1234         struct perf_counter *counter = vma->vm_file->private_data;
1235         struct perf_mmap_data *data;
1236         int ret = VM_FAULT_SIGBUS;
1237
1238         rcu_read_lock();
1239         data = rcu_dereference(counter->data);
1240         if (!data)
1241                 goto unlock;
1242
1243         if (vmf->pgoff == 0) {
1244                 vmf->page = virt_to_page(data->user_page);
1245         } else {
1246                 int nr = vmf->pgoff - 1;
1247
1248                 if ((unsigned)nr >= data->nr_pages)
1249                         goto unlock;
1250
1251                 vmf->page = virt_to_page(data->data_pages[nr]);
1252         }
1253         get_page(vmf->page);
1254         ret = 0;
1255 unlock:
1256         rcu_read_unlock();
1257
1258         return ret;
1259 }
1260
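/*
 * Allocate the mmap buffer: one zeroed control page plus nr_pages
 * zeroed data pages, published to readers via RCU.
 */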
1261 static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
1262 {
1263         struct perf_mmap_data *data;
1264         unsigned long size;
1265         int i;
1266
1267         WARN_ON(atomic_read(&counter->mmap_count));
1268
1269         size = sizeof(struct perf_mmap_data);
1270         size += nr_pages * sizeof(void *);
1271
1272         data = kzalloc(size, GFP_KERNEL);
1273         if (!data)
1274                 goto fail;
1275
1276         data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
1277         if (!data->user_page)
1278                 goto fail_user_page;
1279
1280         for (i = 0; i < nr_pages; i++) {
1281                 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
1282                 if (!data->data_pages[i])
1283                         goto fail_data_pages;
1284         }
1285
1286         data->nr_pages = nr_pages;
1287
1288         rcu_assign_pointer(counter->data, data);
1289
1290         return 0;
1291
1292 fail_data_pages:
1293         for (i--; i >= 0; i--)
1294                 free_page((unsigned long)data->data_pages[i]);
1295
1296         free_page((unsigned long)data->user_page);
1297
1298 fail_user_page:
1299         kfree(data);
1300
1301 fail:
1302         return -ENOMEM;
1303 }
1304
1305 static void __perf_mmap_data_free(struct rcu_head *rcu_head)
1306 {
1307         struct perf_mmap_data *data = container_of(rcu_head,
1308                         struct perf_mmap_data, rcu_head);
1309         int i;
1310
1311         free_page((unsigned long)data->user_page);
1312         for (i = 0; i < data->nr_pages; i++)
1313                 free_page((unsigned long)data->data_pages[i]);
1314         kfree(data);
1315 }
1316
1317 static void perf_mmap_data_free(struct perf_counter *counter)
1318 {
1319         struct perf_mmap_data *data = counter->data;
1320
1321         WARN_ON(atomic_read(&counter->mmap_count));
1322
1323         rcu_assign_pointer(counter->data, NULL);
1324         call_rcu(&data->rcu_head, __perf_mmap_data_free);
1325 }
1326
1327 static void perf_mmap_open(struct vm_area_struct *vma)
1328 {
1329         struct perf_counter *counter = vma->vm_file->private_data;
1330
1331         atomic_inc(&counter->mmap_count);
1332 }
1333
1334 static void perf_mmap_close(struct vm_area_struct *vma)
1335 {
1336         struct perf_counter *counter = vma->vm_file->private_data;
1337
1338         if (atomic_dec_and_mutex_lock(&counter->mmap_count,
1339                                       &counter->mmap_mutex)) {
1340                 perf_mmap_data_free(counter);
1341                 mutex_unlock(&counter->mmap_mutex);
1342         }
1343 }
1344
1345 static struct vm_operations_struct perf_mmap_vmops = {
1346         .open = perf_mmap_open,
1347         .close = perf_mmap_close,
1348         .fault = perf_mmap_fault,
1349 };
1350
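/*
 * mmap() the counter: the mapping must be shared, read-only, start at
 * offset 0 and consist of the control page plus a power-of-two number
 * of data pages, all charged against RLIMIT_MEMLOCK.
 */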
1351 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
1352 {
1353         struct perf_counter *counter = file->private_data;
1354         unsigned long vma_size;
1355         unsigned long nr_pages;
1356         unsigned long locked, lock_limit;
1357         int ret = 0;
1358
1359         if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
1360                 return -EINVAL;
1361
1362         vma_size = vma->vm_end - vma->vm_start;
1363         nr_pages = (vma_size / PAGE_SIZE) - 1;
1364
1365         if (nr_pages == 0 || !is_power_of_2(nr_pages))
1366                 return -EINVAL;
1367
1368         if (vma_size != PAGE_SIZE * (1 + nr_pages))
1369                 return -EINVAL;
1370
1371         if (vma->vm_pgoff != 0)
1372                 return -EINVAL;
1373
1374         locked = vma_size >>  PAGE_SHIFT;
1375         locked += vma->vm_mm->locked_vm;
1376
1377         lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
1378         lock_limit >>= PAGE_SHIFT;
1379
1380         if ((locked > lock_limit) && !capable(CAP_IPC_LOCK))
1381                 return -EPERM;
1382
1383         mutex_lock(&counter->mmap_mutex);
1384         if (atomic_inc_not_zero(&counter->mmap_count))
1385                 goto out;
1386
1387         WARN_ON(counter->data);
1388         ret = perf_mmap_data_alloc(counter, nr_pages);
1389         if (!ret)
1390                 atomic_set(&counter->mmap_count, 1);
1391 out:
1392         mutex_unlock(&counter->mmap_mutex);
1393
1394         vma->vm_flags &= ~VM_MAYWRITE;
1395         vma->vm_flags |= VM_RESERVED;
1396         vma->vm_ops = &perf_mmap_vmops;
1397
1398         return ret;
1399 }
1400
1401 static const struct file_operations perf_fops = {
1402         .release                = perf_release,
1403         .read                   = perf_read,
1404         .poll                   = perf_poll,
1405         .unlocked_ioctl         = perf_ioctl,
1406         .compat_ioctl           = perf_ioctl,
1407         .mmap                   = perf_mmap,
1408 };
1409
1410 /*
1411  * Output
1412  */
1413
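/*
 * Write a record into the mmap data buffer. Buffer space is claimed
 * with a lock-free cmpxchg loop on data->head; a poll() wakeup is
 * generated whenever the write crosses a page boundary (deferred via
 * set_perf_counter_pending() when called from NMI context).
 */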
1414 static int perf_output_write(struct perf_counter *counter, int nmi,
1415                              void *buf, ssize_t size)
1416 {
1417         struct perf_mmap_data *data;
1418         unsigned int offset, head, nr;
1419         unsigned int len;
1420         int ret, wakeup;
1421
1422         rcu_read_lock();
1423         ret = -ENOSPC;
1424         data = rcu_dereference(counter->data);
1425         if (!data)
1426                 goto out;
1427
1428         if (!data->nr_pages)
1429                 goto out;
1430
1431         ret = -EINVAL;
1432         if (size > PAGE_SIZE)
1433                 goto out;
1434
1435         do {
1436                 offset = head = atomic_read(&data->head);
1437                 head += size;
1438         } while (atomic_cmpxchg(&data->head, offset, head) != offset);
1439
1440         wakeup = (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT);
1441
1442         nr = (offset >> PAGE_SHIFT) & (data->nr_pages - 1);
1443         offset &= PAGE_SIZE - 1;
1444
1445         len = min_t(unsigned int, PAGE_SIZE - offset, size);
1446         memcpy(data->data_pages[nr] + offset, buf, len);
1447         size -= len;
1448
1449         if (size) {
1450                 nr = (nr + 1) & (data->nr_pages - 1);
1451                 memcpy(data->data_pages[nr], buf + len, size);
1452         }
1453
1454         /*
1455          * generate a poll() wakeup for every page boundary crossed
1456          */
1457         if (wakeup) {
1458                 atomic_xchg(&data->wakeup, POLL_IN);
1459                 __perf_counter_update_userpage(counter, data);
1460                 if (nmi) {
1461                         counter->wakeup_pending = 1;
1462                         set_perf_counter_pending();
1463                 } else
1464                         wake_up(&counter->waitq);
1465         }
1466         ret = 0;
1467 out:
1468         rcu_read_unlock();
1469
1470         return ret;
1471 }
1472
1473 static void perf_output_simple(struct perf_counter *counter,
1474                                int nmi, struct pt_regs *regs)
1475 {
1476         u64 entry;
1477
1478         entry = instruction_pointer(regs);
1479
1480         perf_output_write(counter, nmi, &entry, sizeof(entry));
1481 }
1482
1483 struct group_entry {
1484         u64 event;
1485         u64 counter;
1486 };
1487
1488 static void perf_output_group(struct perf_counter *counter, int nmi)
1489 {
1490         struct perf_counter *leader, *sub;
1491
1492         leader = counter->group_leader;
1493         list_for_each_entry(sub, &leader->sibling_list, list_entry) {
1494                 struct group_entry entry;
1495
1496                 if (sub != counter)
1497                         sub->hw_ops->read(sub);
1498
1499                 entry.event = sub->hw_event.config;
1500                 entry.counter = atomic64_read(&sub->count);
1501
1502                 perf_output_write(counter, nmi, &entry, sizeof(entry));
1503         }
1504 }
1505
1506 void perf_counter_output(struct perf_counter *counter,
1507                          int nmi, struct pt_regs *regs)
1508 {
1509         switch (counter->hw_event.record_type) {
1510         case PERF_RECORD_SIMPLE:
1511                 return;
1512
1513         case PERF_RECORD_IRQ:
1514                 perf_output_simple(counter, nmi, regs);
1515                 break;
1516
1517         case PERF_RECORD_GROUP:
1518                 perf_output_group(counter, nmi);
1519                 break;
1520         }
1521 }
1522
1523 /*
1524  * Generic software counter infrastructure
1525  */
1526
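/*
 * Fold the delta between hwc->count and hwc->prev_count into
 * counter->count and hwc->period_left; the cmpxchg loop makes this
 * safe against concurrent updates.
 */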
1527 static void perf_swcounter_update(struct perf_counter *counter)
1528 {
1529         struct hw_perf_counter *hwc = &counter->hw;
1530         u64 prev, now;
1531         s64 delta;
1532
1533 again:
1534         prev = atomic64_read(&hwc->prev_count);
1535         now = atomic64_read(&hwc->count);
1536         if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
1537                 goto again;
1538
1539         delta = now - prev;
1540
1541         atomic64_add(delta, &counter->count);
1542         atomic64_sub(delta, &hwc->period_left);
1543 }
1544
1545 static void perf_swcounter_set_period(struct perf_counter *counter)
1546 {
1547         struct hw_perf_counter *hwc = &counter->hw;
1548         s64 left = atomic64_read(&hwc->period_left);
1549         s64 period = hwc->irq_period;
1550
1551         if (unlikely(left <= -period)) {
1552                 left = period;
1553                 atomic64_set(&hwc->period_left, left);
1554         }
1555
1556         if (unlikely(left <= 0)) {
1557                 left += period;
1558                 atomic64_add(period, &hwc->period_left);
1559         }
1560
1561         atomic64_set(&hwc->prev_count, -left);
1562         atomic64_set(&hwc->count, -left);
1563 }
1564
1565 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
1566 {
1567         struct perf_counter *counter;
1568         struct pt_regs *regs;
1569
1570         counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
1571         counter->hw_ops->read(counter);
1572
1573         regs = get_irq_regs();
1574         /*
1575          * In case we exclude kernel IPs or are somehow not in interrupt
1576          * context, provide the next best thing, the user IP.
1577          */
1578         if ((counter->hw_event.exclude_kernel || !regs) &&
1579                         !counter->hw_event.exclude_user)
1580                 regs = task_pt_regs(current);
1581
1582         if (regs)
1583                 perf_counter_output(counter, 0, regs);
1584
1585         hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period));
1586
1587         return HRTIMER_RESTART;
1588 }
1589
1590 static void perf_swcounter_overflow(struct perf_counter *counter,
1591                                     int nmi, struct pt_regs *regs)
1592 {
1593         perf_swcounter_update(counter);
1594         perf_swcounter_set_period(counter);
1595         perf_counter_output(counter, nmi, regs);
1596 }
1597
1598 static int perf_swcounter_match(struct perf_counter *counter,
1599                                 enum perf_event_types type,
1600                                 u32 event, struct pt_regs *regs)
1601 {
1602         if (counter->state != PERF_COUNTER_STATE_ACTIVE)
1603                 return 0;
1604
1605         if (perf_event_raw(&counter->hw_event))
1606                 return 0;
1607
1608         if (perf_event_type(&counter->hw_event) != type)
1609                 return 0;
1610
1611         if (perf_event_id(&counter->hw_event) != event)
1612                 return 0;
1613
1614         if (counter->hw_event.exclude_user && user_mode(regs))
1615                 return 0;
1616
1617         if (counter->hw_event.exclude_kernel && !user_mode(regs))
1618                 return 0;
1619
1620         return 1;
1621 }
1622
1623 static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
1624                                int nmi, struct pt_regs *regs)
1625 {
1626         int neg = atomic64_add_negative(nr, &counter->hw.count);
1627         if (counter->hw.irq_period && !neg)
1628                 perf_swcounter_overflow(counter, nmi, regs);
1629 }
1630
1631 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
1632                                      enum perf_event_types type, u32 event,
1633                                      u64 nr, int nmi, struct pt_regs *regs)
1634 {
1635         struct perf_counter *counter;
1636
1637         if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
1638                 return;
1639
1640         rcu_read_lock();
1641         list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
1642                 if (perf_swcounter_match(counter, type, event, regs))
1643                         perf_swcounter_add(counter, nr, nmi, regs);
1644         }
1645         rcu_read_unlock();
1646 }
1647
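/*
 * Pick the recursion counter matching the current execution context
 * (NMI, hardirq, softirq or process context); __perf_swcounter_event()
 * uses it to drop events that would otherwise recurse into themselves.
 */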
1648 static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
1649 {
1650         if (in_nmi())
1651                 return &cpuctx->recursion[3];
1652
1653         if (in_irq())
1654                 return &cpuctx->recursion[2];
1655
1656         if (in_softirq())
1657                 return &cpuctx->recursion[1];
1658
1659         return &cpuctx->recursion[0];
1660 }
1661
1662 static void __perf_swcounter_event(enum perf_event_types type, u32 event,
1663                                    u64 nr, int nmi, struct pt_regs *regs)
1664 {
1665         struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
1666         int *recursion = perf_swcounter_recursion_context(cpuctx);
1667
1668         if (*recursion)
1669                 goto out;
1670
1671         (*recursion)++;
1672         barrier();
1673
1674         perf_swcounter_ctx_event(&cpuctx->ctx, type, event, nr, nmi, regs);
1675         if (cpuctx->task_ctx) {
1676                 perf_swcounter_ctx_event(cpuctx->task_ctx, type, event,
1677                                 nr, nmi, regs);
1678         }
1679
1680         barrier();
1681         (*recursion)--;
1682
1683 out:
1684         put_cpu_var(perf_cpu_context);
1685 }
1686
1687 void perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs)
1688 {
1689         __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs);
1690 }
1691
1692 static void perf_swcounter_read(struct perf_counter *counter)
1693 {
1694         perf_swcounter_update(counter);
1695 }
1696
1697 static int perf_swcounter_enable(struct perf_counter *counter)
1698 {
1699         perf_swcounter_set_period(counter);
1700         return 0;
1701 }
1702
1703 static void perf_swcounter_disable(struct perf_counter *counter)
1704 {
1705         perf_swcounter_update(counter);
1706 }
1707
1708 static const struct hw_perf_counter_ops perf_ops_generic = {
1709         .enable         = perf_swcounter_enable,
1710         .disable        = perf_swcounter_disable,
1711         .read           = perf_swcounter_read,
1712 };
1713
1714 /*
1715  * Software counter: cpu wall time clock
1716  */
1717
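     /*
      * The cpu-clock counter accumulates cpu_clock() deltas: hw.prev_count
      * holds the last reading and each update adds the difference to
      * counter->count.  If a sampling period was requested (irq_period
      * != 0), an hrtimer fires every irq_period nanoseconds and
      * perf_swcounter_hrtimer() (defined above) emits the samples.
      */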
1718 static void cpu_clock_perf_counter_update(struct perf_counter *counter)
1719 {
1720         int cpu = raw_smp_processor_id();
1721         s64 prev;
1722         u64 now;
1723
1724         now = cpu_clock(cpu);
1725         prev = atomic64_read(&counter->hw.prev_count);
1726         atomic64_set(&counter->hw.prev_count, now);
1727         atomic64_add(now - prev, &counter->count);
1728 }
1729
1730 static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
1731 {
1732         struct hw_perf_counter *hwc = &counter->hw;
1733         int cpu = raw_smp_processor_id();
1734
1735         atomic64_set(&hwc->prev_count, cpu_clock(cpu));
1736         hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1737         hwc->hrtimer.function = perf_swcounter_hrtimer;
1738         if (hwc->irq_period) {
1739                 __hrtimer_start_range_ns(&hwc->hrtimer,
1740                                 ns_to_ktime(hwc->irq_period), 0,
1741                                 HRTIMER_MODE_REL, 0);
1742         }
1743
1744         return 0;
1745 }
1746
1747 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
1748 {
1749         hrtimer_cancel(&counter->hw.hrtimer);
1750         cpu_clock_perf_counter_update(counter);
1751 }
1752
1753 static void cpu_clock_perf_counter_read(struct perf_counter *counter)
1754 {
1755         cpu_clock_perf_counter_update(counter);
1756 }
1757
1758 static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
1759         .enable         = cpu_clock_perf_counter_enable,
1760         .disable        = cpu_clock_perf_counter_disable,
1761         .read           = cpu_clock_perf_counter_read,
1762 };
1763
1764 /*
1765  * Software counter: task time clock
1766  */
1767
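     /*
      * The task-clock counter measures the time the monitored task has
      * spent executing: the scheduler's se.sum_exec_runtime plus the still
      * unaccounted runtime of the current slice, obtained via
      * __task_delta_exec().
      */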
1768 /*
1769  * Called from within the scheduler:
1770  */
1771 static u64 task_clock_perf_counter_val(struct perf_counter *counter, int update)
1772 {
1773         struct task_struct *curr = counter->task;
1774         u64 delta;
1775
1776         delta = __task_delta_exec(curr, update);
1777
1778         return curr->se.sum_exec_runtime + delta;
1779 }
1780
1781 static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
1782 {
1783         u64 prev;
1784         s64 delta;
1785
1786         prev = atomic64_read(&counter->hw.prev_count);
1787
1788         atomic64_set(&counter->hw.prev_count, now);
1789
1790         delta = now - prev;
1791
1792         atomic64_add(delta, &counter->count);
1793 }
1794
1795 static int task_clock_perf_counter_enable(struct perf_counter *counter)
1796 {
1797         struct hw_perf_counter *hwc = &counter->hw;
1798
1799         atomic64_set(&hwc->prev_count, task_clock_perf_counter_val(counter, 0));
1800         hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1801         hwc->hrtimer.function = perf_swcounter_hrtimer;
1802         if (hwc->irq_period) {
1803                 __hrtimer_start_range_ns(&hwc->hrtimer,
1804                                 ns_to_ktime(hwc->irq_period), 0,
1805                                 HRTIMER_MODE_REL, 0);
1806         }
1807
1808         return 0;
1809 }
1810
1811 static void task_clock_perf_counter_disable(struct perf_counter *counter)
1812 {
1813         hrtimer_cancel(&counter->hw.hrtimer);
1814         task_clock_perf_counter_update(counter,
1815                         task_clock_perf_counter_val(counter, 0));
1816 }
1817
1818 static void task_clock_perf_counter_read(struct perf_counter *counter)
1819 {
1820         task_clock_perf_counter_update(counter,
1821                         task_clock_perf_counter_val(counter, 1));
1822 }
1823
1824 static const struct hw_perf_counter_ops perf_ops_task_clock = {
1825         .enable         = task_clock_perf_counter_enable,
1826         .disable        = task_clock_perf_counter_disable,
1827         .read           = task_clock_perf_counter_read,
1828 };
1829
1830 /*
1831  * Software counter: cpu migrations
1832  */
1833
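     /*
      * For a per-task counter the migration count comes straight from the
      * scheduler (se.nr_migrations); for a per-cpu counter we use the
      * per-CPU aggregate from cpu_nr_migrations().  As with the other
      * software counters, only the delta against hw.prev_count is added to
      * the count; note that the enable path below only (re)initializes
      * prev_count when the counter was previously off.
      */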
1834 static inline u64 get_cpu_migrations(struct perf_counter *counter)
1835 {
1836         struct task_struct *curr = counter->ctx->task;
1837
1838         if (curr)
1839                 return curr->se.nr_migrations;
1840         return cpu_nr_migrations(smp_processor_id());
1841 }
1842
1843 static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
1844 {
1845         u64 prev, now;
1846         s64 delta;
1847
1848         prev = atomic64_read(&counter->hw.prev_count);
1849         now = get_cpu_migrations(counter);
1850
1851         atomic64_set(&counter->hw.prev_count, now);
1852
1853         delta = now - prev;
1854
1855         atomic64_add(delta, &counter->count);
1856 }
1857
1858 static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
1859 {
1860         cpu_migrations_perf_counter_update(counter);
1861 }
1862
1863 static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
1864 {
1865         if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
1866                 atomic64_set(&counter->hw.prev_count,
1867                              get_cpu_migrations(counter));
1868         return 0;
1869 }
1870
1871 static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
1872 {
1873         cpu_migrations_perf_counter_update(counter);
1874 }
1875
1876 static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
1877         .enable         = cpu_migrations_perf_counter_enable,
1878         .disable        = cpu_migrations_perf_counter_disable,
1879         .read           = cpu_migrations_perf_counter_read,
1880 };
1881
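     /*
      * Tracepoint counters (CONFIG_EVENT_PROFILE): ftrace event profiling
      * is enabled for the requested event id, and every hit is fed into the
      * generic software counter path via perf_tpcounter_event().
      */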
1882 #ifdef CONFIG_EVENT_PROFILE
1883 void perf_tpcounter_event(int event_id)
1884 {
1885         struct pt_regs *regs = get_irq_regs();
1886
1887         if (!regs)
1888                 regs = task_pt_regs(current);
1889
1890         __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs);
1891 }
1892
1893 extern int ftrace_profile_enable(int);
1894 extern void ftrace_profile_disable(int);
1895
1896 static void tp_perf_counter_destroy(struct perf_counter *counter)
1897 {
1898         ftrace_profile_disable(perf_event_id(&counter->hw_event));
1899 }
1900
1901 static const struct hw_perf_counter_ops *
1902 tp_perf_counter_init(struct perf_counter *counter)
1903 {
1904         int event_id = perf_event_id(&counter->hw_event);
1905         int ret;
1906
1907         ret = ftrace_profile_enable(event_id);
1908         if (ret)
1909                 return NULL;
1910
1911         counter->destroy = tp_perf_counter_destroy;
1912         counter->hw.irq_period = counter->hw_event.irq_period;
1913
1914         return &perf_ops_generic;
1915 }
1916 #else
1917 static const struct hw_perf_counter_ops *
1918 tp_perf_counter_init(struct perf_counter *counter)
1919 {
1920         return NULL;
1921 }
1922 #endif
1923
1924 static const struct hw_perf_counter_ops *
1925 sw_perf_counter_init(struct perf_counter *counter)
1926 {
1927         struct perf_counter_hw_event *hw_event = &counter->hw_event;
1928         const struct hw_perf_counter_ops *hw_ops = NULL;
1929         struct hw_perf_counter *hwc = &counter->hw;
1930
1931         /*
1932          * Software counters (currently) can't in general distinguish
1933          * between user, kernel and hypervisor events.
1934          * However, context switches and cpu migrations are considered
1935          * to be kernel events, and page faults are never hypervisor
1936          * events.
1937          */
1938         switch (perf_event_id(&counter->hw_event)) {
1939         case PERF_COUNT_CPU_CLOCK:
1940                 hw_ops = &perf_ops_cpu_clock;
1941
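                     /*
                      * Clamp hrtimer based sampling to a minimum period of
                      * 10 usecs, i.e. at most ~100 KHz; the same clamp is
                      * applied to the task clock below.
                      */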
1942                 if (hw_event->irq_period && hw_event->irq_period < 10000)
1943                         hw_event->irq_period = 10000;
1944                 break;
1945         case PERF_COUNT_TASK_CLOCK:
1946                 /*
1947                  * If the user instantiates this as a per-cpu counter,
1948                  * use the cpu_clock counter instead.
1949                  */
1950                 if (counter->ctx->task)
1951                         hw_ops = &perf_ops_task_clock;
1952                 else
1953                         hw_ops = &perf_ops_cpu_clock;
1954
1955                 if (hw_event->irq_period && hw_event->irq_period < 10000)
1956                         hw_event->irq_period = 10000;
1957                 break;
1958         case PERF_COUNT_PAGE_FAULTS:
1959         case PERF_COUNT_PAGE_FAULTS_MIN:
1960         case PERF_COUNT_PAGE_FAULTS_MAJ:
1961         case PERF_COUNT_CONTEXT_SWITCHES:
1962                 hw_ops = &perf_ops_generic;
1963                 break;
1964         case PERF_COUNT_CPU_MIGRATIONS:
1965                 if (!counter->hw_event.exclude_kernel)
1966                         hw_ops = &perf_ops_cpu_migrations;
1967                 break;
1968         }
1969
1970         if (hw_ops)
1971                 hwc->irq_period = hw_event->irq_period;
1972
1973         return hw_ops;
1974 }
1975
1976 /*
1977  * Allocate and initialize a counter structure
1978  */
1979 static struct perf_counter *
1980 perf_counter_alloc(struct perf_counter_hw_event *hw_event,
1981                    int cpu,
1982                    struct perf_counter_context *ctx,
1983                    struct perf_counter *group_leader,
1984                    gfp_t gfpflags)
1985 {
1986         const struct hw_perf_counter_ops *hw_ops;
1987         struct perf_counter *counter;
1988
1989         counter = kzalloc(sizeof(*counter), gfpflags);
1990         if (!counter)
1991                 return NULL;
1992
1993         /*
1994          * Single counters are their own group leaders, with an
1995          * empty sibling list:
1996          */
1997         if (!group_leader)
1998                 group_leader = counter;
1999
2000         mutex_init(&counter->mutex);
2001         INIT_LIST_HEAD(&counter->list_entry);
2002         INIT_LIST_HEAD(&counter->event_entry);
2003         INIT_LIST_HEAD(&counter->sibling_list);
2004         init_waitqueue_head(&counter->waitq);
2005
2006         mutex_init(&counter->mmap_mutex);
2007
2008         INIT_LIST_HEAD(&counter->child_list);
2009
2010         counter->cpu                    = cpu;
2011         counter->hw_event               = *hw_event;
2012         counter->wakeup_pending         = 0;
2013         counter->group_leader           = group_leader;
2014         counter->hw_ops                 = NULL;
2015         counter->ctx                    = ctx;
2016
2017         counter->state = PERF_COUNTER_STATE_INACTIVE;
2018         if (hw_event->disabled)
2019                 counter->state = PERF_COUNTER_STATE_OFF;
2020
2021         hw_ops = NULL;
2022
2023         if (perf_event_raw(hw_event)) {
2024                 hw_ops = hw_perf_counter_init(counter);
2025                 goto done;
2026         }
2027
2028         switch (perf_event_type(hw_event)) {
2029         case PERF_TYPE_HARDWARE:
2030                 hw_ops = hw_perf_counter_init(counter);
2031                 break;
2032
2033         case PERF_TYPE_SOFTWARE:
2034                 hw_ops = sw_perf_counter_init(counter);
2035                 break;
2036
2037         case PERF_TYPE_TRACEPOINT:
2038                 hw_ops = tp_perf_counter_init(counter);
2039                 break;
2040         }
2041
2042         if (!hw_ops) {
2043                 kfree(counter);
2044                 return NULL;
2045         }
2046 done:
2047         counter->hw_ops = hw_ops;
2048
2049         return counter;
2050 }
2051
2052 /**
2053  * sys_perf_counter_open - open a performance counter, associate it with a task/cpu
2054  *
2055  * @hw_event_uptr:      event type attributes for monitoring/sampling
2056  * @pid:                target pid
2057  * @cpu:                target cpu
2058  * @group_fd:           group leader counter fd
2059  */
2060 SYSCALL_DEFINE5(perf_counter_open,
2061                 const struct perf_counter_hw_event __user *, hw_event_uptr,
2062                 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
2063 {
2064         struct perf_counter *counter, *group_leader;
2065         struct perf_counter_hw_event hw_event;
2066         struct perf_counter_context *ctx;
2067         struct file *counter_file = NULL;
2068         struct file *group_file = NULL;
2069         int fput_needed = 0;
2070         int fput_needed2 = 0;
2071         int ret;
2072
2073         /* for future expandability... */
2074         if (flags)
2075                 return -EINVAL;
2076
2077         if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
2078                 return -EFAULT;
2079
2080         /*
2081          * Get the target context (task or percpu):
2082          */
2083         ctx = find_get_context(pid, cpu);
2084         if (IS_ERR(ctx))
2085                 return PTR_ERR(ctx);
2086
2087         /*
2088          * Look up the group leader (we will attach this counter to it):
2089          */
2090         group_leader = NULL;
2091         if (group_fd != -1) {
2092                 ret = -EINVAL;
2093                 group_file = fget_light(group_fd, &fput_needed);
2094                 if (!group_file)
2095                         goto err_put_context;
2096                 if (group_file->f_op != &perf_fops)
2097                         goto err_put_context;
2098
2099                 group_leader = group_file->private_data;
2100                 /*
2101                  * Do not allow a recursive hierarchy (the group leader
2102                  * must not itself be a sibling in another group):
2103                  */
2104                 if (group_leader->group_leader != group_leader)
2105                         goto err_put_context;
2106                 /*
2107                  * Do not allow attaching to a group in a different
2108                  * task or CPU context:
2109                  */
2110                 if (group_leader->ctx != ctx)
2111                         goto err_put_context;
2112                 /*
2113                  * Only a group leader can be exclusive or pinned
2114                  */
2115                 if (hw_event.exclusive || hw_event.pinned)
2116                         goto err_put_context;
2117         }
2118
2119         ret = -EINVAL;
2120         counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
2121                                      GFP_KERNEL);
2122         if (!counter)
2123                 goto err_put_context;
2124
2125         ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
2126         if (ret < 0)
2127                 goto err_free_put_context;
2128
2129         counter_file = fget_light(ret, &fput_needed2);
2130         if (!counter_file)
2131                 goto err_free_put_context;
2132
2133         counter->filp = counter_file;
2134         mutex_lock(&ctx->mutex);
2135         perf_install_in_context(ctx, counter, cpu);
2136         mutex_unlock(&ctx->mutex);
2137
2138         fput_light(counter_file, fput_needed2);
2139
2140 out_fput:
2141         fput_light(group_file, fput_needed);
2142
2143         return ret;
2144
2145 err_free_put_context:
2146         kfree(counter);
2147
2148 err_put_context:
2149         put_context(ctx);
2150
2151         goto out_fput;
2152 }
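     /*
      * Userspace usage sketch for sys_perf_counter_open() above (not kernel
      * code; the syscall number and the event type/id encoding inside
      * struct perf_counter_hw_event are assumed to come from
      * <linux/perf_counter.h> and the architecture's unistd.h):
      *
      *    struct perf_counter_hw_event hw_event;
      *    int leader_fd, sibling_fd;
      *    pid_t pid = ...;                 -- task to monitor (0 == current)
      *
      *    memset(&hw_event, 0, sizeof(hw_event));
      *    hw_event.disabled   = 1;         -- create disabled, enable later
      *    hw_event.irq_period = 0;         -- pure counting, no sampling
      *
      *    -- group leader: counts for 'pid' on any CPU (cpu == -1):
      *    leader_fd = syscall(__NR_perf_counter_open, &hw_event, pid, -1,
      *                        -1, 0);
      *
      *    -- a sibling counter in the same group and context:
      *    sibling_fd = syscall(__NR_perf_counter_open, &hw_event, pid, -1,
      *                         leader_fd, 0);
      */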
2153
2154 /*
2155  * Initialize the perf_counter context in a task_struct:
2156  */
2157 static void
2158 __perf_counter_init_context(struct perf_counter_context *ctx,
2159                             struct task_struct *task)
2160 {
2161         memset(ctx, 0, sizeof(*ctx));
2162         spin_lock_init(&ctx->lock);
2163         mutex_init(&ctx->mutex);
2164         INIT_LIST_HEAD(&ctx->counter_list);
2165         INIT_LIST_HEAD(&ctx->event_list);
2166         ctx->task = task;
2167 }
2168
2169 /*
2170  * inherit a counter from parent task to child task:
2171  */
2172 static struct perf_counter *
2173 inherit_counter(struct perf_counter *parent_counter,
2174               struct task_struct *parent,
2175               struct perf_counter_context *parent_ctx,
2176               struct task_struct *child,
2177               struct perf_counter *group_leader,
2178               struct perf_counter_context *child_ctx)
2179 {
2180         struct perf_counter *child_counter;
2181
2182         /*
2183          * Instead of creating recursive hierarchies of counters,
2184          * we link inherited counters back to the original parent,
2185          * which has a filp for sure, which we use as the reference
2186          * count:
2187          */
2188         if (parent_counter->parent)
2189                 parent_counter = parent_counter->parent;
2190
2191         child_counter = perf_counter_alloc(&parent_counter->hw_event,
2192                                            parent_counter->cpu, child_ctx,
2193                                            group_leader, GFP_KERNEL);
2194         if (!child_counter)
2195                 return NULL;
2196
2197         /*
2198          * Link it up in the child's context:
2199          */
2200         child_counter->task = child;
2201         list_add_counter(child_counter, child_ctx);
2202         child_ctx->nr_counters++;
2203
2204         child_counter->parent = parent_counter;
2205         /*
2206          * inherit into child's child as well:
2207          */
2208         child_counter->hw_event.inherit = 1;
2209
2210         /*
2211          * Get a reference to the parent filp - we will fput it
2212          * when the child counter exits. This is safe to do because
2213          * we are in the parent and we know that the filp still
2214          * exists and has a nonzero count:
2215          */
2216         atomic_long_inc(&parent_counter->filp->f_count);
2217
2218         /*
2219          * Link this into the parent counter's child list
2220          */
2221         mutex_lock(&parent_counter->mutex);
2222         list_add_tail(&child_counter->child_list, &parent_counter->child_list);
2223
2224         /*
2225          * Make the child state follow the state of the parent counter,
2226          * not its hw_event.disabled bit.  We hold the parent's mutex,
2227          * so we won't race with perf_counter_{en,dis}able_family.
2228          */
2229         if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
2230                 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
2231         else
2232                 child_counter->state = PERF_COUNTER_STATE_OFF;
2233
2234         mutex_unlock(&parent_counter->mutex);
2235
2236         return child_counter;
2237 }
2238
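     /*
      * Inherit a whole counter group: clone the group leader first, then
      * clone each sibling into the child's context under the new leader.
      */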
2239 static int inherit_group(struct perf_counter *parent_counter,
2240               struct task_struct *parent,
2241               struct perf_counter_context *parent_ctx,
2242               struct task_struct *child,
2243               struct perf_counter_context *child_ctx)
2244 {
2245         struct perf_counter *leader;
2246         struct perf_counter *sub;
2247
2248         leader = inherit_counter(parent_counter, parent, parent_ctx,
2249                                  child, NULL, child_ctx);
2250         if (!leader)
2251                 return -ENOMEM;
2252         list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
2253                 if (!inherit_counter(sub, parent, parent_ctx,
2254                                      child, leader, child_ctx))
2255                         return -ENOMEM;
2256         }
2257         return 0;
2258 }
2259
2260 static void sync_child_counter(struct perf_counter *child_counter,
2261                                struct perf_counter *parent_counter)
2262 {
2263         u64 parent_val, child_val;
2264
2265         parent_val = atomic64_read(&parent_counter->count);
2266         child_val = atomic64_read(&child_counter->count);
2267
2268         /*
2269          * Add back the child's count to the parent's count:
2270          */
2271         atomic64_add(child_val, &parent_counter->count);
2272
2273         /*
2274          * Remove this counter from the parent's list
2275          */
2276         mutex_lock(&parent_counter->mutex);
2277         list_del_init(&child_counter->child_list);
2278         mutex_unlock(&parent_counter->mutex);
2279
2280         /*
2281          * Release the parent counter, if this was the last
2282          * reference to it.
2283          */
2284         fput(parent_counter->filp);
2285 }
2286
2287 static void
2288 __perf_counter_exit_task(struct task_struct *child,
2289                          struct perf_counter *child_counter,
2290                          struct perf_counter_context *child_ctx)
2291 {
2292         struct perf_counter *parent_counter;
2293         struct perf_counter *sub, *tmp;
2294
2295         /*
2296          * If we do not self-reap then we have to wait for the
2297          * child task to unschedule (which is guaranteed to happen),
2298          * so that its counter is at its final count. (This
2299          * condition triggers rarely - child tasks usually get
2300          * off their CPU before the parent has a chance to
2301          * get this far into the reaping action)
2302          */
2303         if (child != current) {
2304                 wait_task_inactive(child, 0);
2305                 list_del_init(&child_counter->list_entry);
2306         } else {
2307                 struct perf_cpu_context *cpuctx;
2308                 unsigned long flags;
2309                 u64 perf_flags;
2310
2311                 /*
2312                  * Disable and unlink this counter.
2313                  *
2314                  * Be careful about zapping the list - IRQ/NMI context
2315                  * could still be processing it:
2316                  */
2317                 curr_rq_lock_irq_save(&flags);
2318                 perf_flags = hw_perf_save_disable();
2319
2320                 cpuctx = &__get_cpu_var(perf_cpu_context);
2321
2322                 group_sched_out(child_counter, cpuctx, child_ctx);
2323
2324                 list_del_init(&child_counter->list_entry);
2325
2326                 child_ctx->nr_counters--;
2327
2328                 hw_perf_restore(perf_flags);
2329                 curr_rq_unlock_irq_restore(&flags);
2330         }
2331
2332         parent_counter = child_counter->parent;
2333         /*
2334          * It can happen that parent exits first, and has counters
2335          * that are still around due to the child reference. These
2336          * counters need to be zapped - but otherwise linger.
2337          */
2338         if (parent_counter) {
2339                 sync_child_counter(child_counter, parent_counter);
2340                 list_for_each_entry_safe(sub, tmp, &child_counter->sibling_list,
2341                                          list_entry) {
2342                         if (sub->parent) {
2343                                 sync_child_counter(sub, sub->parent);
2344                                 free_counter(sub);
2345                         }
2346                 }
2347                 free_counter(child_counter);
2348         }
2349 }
2350
2351 /*
2352  * When a child task exits, feed back counter values to parent counters.
2353  *
2354  * Note: we may be running in child context, but the PID is not hashed
2355  * anymore so new counters will not be added.
2356  */
2357 void perf_counter_exit_task(struct task_struct *child)
2358 {
2359         struct perf_counter *child_counter, *tmp;
2360         struct perf_counter_context *child_ctx;
2361
2362         child_ctx = &child->perf_counter_ctx;
2363
2364         if (likely(!child_ctx->nr_counters))
2365                 return;
2366
2367         list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
2368                                  list_entry)
2369                 __perf_counter_exit_task(child, child_counter, child_ctx);
2370 }
2371
2372 /*
2373  * Initialize the perf_counter context in task_struct
2374  */
2375 void perf_counter_init_task(struct task_struct *child)
2376 {
2377         struct perf_counter_context *child_ctx, *parent_ctx;
2378         struct perf_counter *counter;
2379         struct task_struct *parent = current;
2380
2381         child_ctx  =  &child->perf_counter_ctx;
2382         parent_ctx = &parent->perf_counter_ctx;
2383
2384         __perf_counter_init_context(child_ctx, child);
2385
2386         /*
2387          * This is executed from the parent task context, so inherit
2388          * counters that have been marked for cloning:
2389          */
2390
2391         if (likely(!parent_ctx->nr_counters))
2392                 return;
2393
2394         /*
2395          * Lock the parent list. No need to lock the child - not PID
2396          * hashed yet and not running, so nobody can access it.
2397          */
2398         mutex_lock(&parent_ctx->mutex);
2399
2400         /*
2401          * We don't have to disable NMIs - we are only looking at
2402          * the list, not manipulating it:
2403          */
2404         list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) {
2405                 if (!counter->hw_event.inherit)
2406                         continue;
2407
2408                 if (inherit_group(counter, parent,
2409                                   parent_ctx, child, child_ctx))
2410                         break;
2411         }
2412
2413         mutex_unlock(&parent_ctx->mutex);
2414 }
2415
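     /*
      * CPU hotplug support: initialize the per-CPU context when a CPU is
      * brought up and, with CONFIG_HOTPLUG_CPU, tear its counters down via
      * an IPI before the CPU goes offline - see the notifier further below.
      */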
2416 static void __cpuinit perf_counter_init_cpu(int cpu)
2417 {
2418         struct perf_cpu_context *cpuctx;
2419
2420         cpuctx = &per_cpu(perf_cpu_context, cpu);
2421         __perf_counter_init_context(&cpuctx->ctx, NULL);
2422
2423         mutex_lock(&perf_resource_mutex);
2424         cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
2425         mutex_unlock(&perf_resource_mutex);
2426
2427         hw_perf_counter_setup(cpu);
2428 }
2429
2430 #ifdef CONFIG_HOTPLUG_CPU
2431 static void __perf_counter_exit_cpu(void *info)
2432 {
2433         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
2434         struct perf_counter_context *ctx = &cpuctx->ctx;
2435         struct perf_counter *counter, *tmp;
2436
2437         list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
2438                 __perf_counter_remove_from_context(counter);
2439 }
2440 static void perf_counter_exit_cpu(int cpu)
2441 {
2442         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
2443         struct perf_counter_context *ctx = &cpuctx->ctx;
2444
2445         mutex_lock(&ctx->mutex);
2446         smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
2447         mutex_unlock(&ctx->mutex);
2448 }
2449 #else
2450 static inline void perf_counter_exit_cpu(int cpu) { }
2451 #endif
2452
2453 static int __cpuinit
2454 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
2455 {
2456         unsigned int cpu = (long)hcpu;
2457
2458         switch (action) {
2459
2460         case CPU_UP_PREPARE:
2461         case CPU_UP_PREPARE_FROZEN:
2462                 perf_counter_init_cpu(cpu);
2463                 break;
2464
2465         case CPU_DOWN_PREPARE:
2466         case CPU_DOWN_PREPARE_FROZEN:
2467                 perf_counter_exit_cpu(cpu);
2468                 break;
2469
2470         default:
2471                 break;
2472         }
2473
2474         return NOTIFY_OK;
2475 }
2476
2477 static struct notifier_block __cpuinitdata perf_cpu_nb = {
2478         .notifier_call          = perf_cpu_notify,
2479 };
2480
2481 static int __init perf_counter_init(void)
2482 {
2483         perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
2484                         (void *)(long)smp_processor_id());
2485         register_cpu_notifier(&perf_cpu_nb);
2486
2487         return 0;
2488 }
2489 early_initcall(perf_counter_init);
2490
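     /*
      * sysfs tunables, registered at the end of this file in the cpu sysdev
      * class (so, assuming the usual sysfs layout, they appear under
      * /sys/devices/system/cpu/perf_counters/):
      *
      *  reserve_percpu - number of counter slots reserved on every CPU for
      *                   per-cpu counters; raising it lowers max_pertask,
      *                   the number of counters a single task may use.
      *  overcommit     - 0/1 flag consumed by the counter scheduling code
      *                   earlier in this file.
      */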
2491 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
2492 {
2493         return sprintf(buf, "%d\n", perf_reserved_percpu);
2494 }
2495
2496 static ssize_t
2497 perf_set_reserve_percpu(struct sysdev_class *class,
2498                         const char *buf,
2499                         size_t count)
2500 {
2501         struct perf_cpu_context *cpuctx;
2502         unsigned long val;
2503         int err, cpu, mpt;
2504
2505         err = strict_strtoul(buf, 10, &val);
2506         if (err)
2507                 return err;
2508         if (val > perf_max_counters)
2509                 return -EINVAL;
2510
2511         mutex_lock(&perf_resource_mutex);
2512         perf_reserved_percpu = val;
2513         for_each_online_cpu(cpu) {
2514                 cpuctx = &per_cpu(perf_cpu_context, cpu);
2515                 spin_lock_irq(&cpuctx->ctx.lock);
2516                 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
2517                           perf_max_counters - perf_reserved_percpu);
2518                 cpuctx->max_pertask = mpt;
2519                 spin_unlock_irq(&cpuctx->ctx.lock);
2520         }
2521         mutex_unlock(&perf_resource_mutex);
2522
2523         return count;
2524 }
2525
2526 static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
2527 {
2528         return sprintf(buf, "%d\n", perf_overcommit);
2529 }
2530
2531 static ssize_t
2532 perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
2533 {
2534         unsigned long val;
2535         int err;
2536
2537         err = strict_strtoul(buf, 10, &val);
2538         if (err)
2539                 return err;
2540         if (val > 1)
2541                 return -EINVAL;
2542
2543         mutex_lock(&perf_resource_mutex);
2544         perf_overcommit = val;
2545         mutex_unlock(&perf_resource_mutex);
2546
2547         return count;
2548 }
2549
2550 static SYSDEV_CLASS_ATTR(
2551                                 reserve_percpu,
2552                                 0644,
2553                                 perf_show_reserve_percpu,
2554                                 perf_set_reserve_percpu
2555                         );
2556
2557 static SYSDEV_CLASS_ATTR(
2558                                 overcommit,
2559                                 0644,
2560                                 perf_show_overcommit,
2561                                 perf_set_overcommit
2562                         );
2563
2564 static struct attribute *perfclass_attrs[] = {
2565         &attr_reserve_percpu.attr,
2566         &attr_overcommit.attr,
2567         NULL
2568 };
2569
2570 static struct attribute_group perfclass_attr_group = {
2571         .attrs                  = perfclass_attrs,
2572         .name                   = "perf_counters",
2573 };
2574
2575 static int __init perf_counter_sysfs_init(void)
2576 {
2577         return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
2578                                   &perfclass_attr_group);
2579 }
2580 device_initcall(perf_counter_sysfs_init);