Merge branch 'x86/apic' into perfcounters/core
[linux-2.6] / kernel / perf_counter.c
1 /*
2  * Performance counter core code
3  *
4  *  Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
6  *
7  *  For licensing details see kernel-base/COPYING
8  */
9
10 #include <linux/fs.h>
11 #include <linux/cpu.h>
12 #include <linux/smp.h>
13 #include <linux/file.h>
14 #include <linux/poll.h>
15 #include <linux/sysfs.h>
16 #include <linux/ptrace.h>
17 #include <linux/percpu.h>
18 #include <linux/uaccess.h>
19 #include <linux/syscalls.h>
20 #include <linux/anon_inodes.h>
21 #include <linux/kernel_stat.h>
22 #include <linux/perf_counter.h>
23 #include <linux/mm.h>
24 #include <linux/vmstat.h>
25
26 /*
27  * Each CPU has a list of per CPU counters:
28  */
29 DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
30
31 int perf_max_counters __read_mostly = 1;
32 static int perf_reserved_percpu __read_mostly;
33 static int perf_overcommit __read_mostly = 1;
34
35 /*
36  * Mutex for (sysadmin-configurable) counter reservations:
37  */
38 static DEFINE_MUTEX(perf_resource_mutex);
39
40 /*
41  * Architecture-provided APIs - weak aliases:
42  */
43 extern __weak const struct hw_perf_counter_ops *
44 hw_perf_counter_init(struct perf_counter *counter)
45 {
46         return NULL;
47 }
48
49 u64 __weak hw_perf_save_disable(void)           { return 0; }
50 void __weak hw_perf_restore(u64 ctrl)           { barrier(); }
51 void __weak hw_perf_counter_setup(int cpu)      { barrier(); }
52 int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
53                struct perf_cpu_context *cpuctx,
54                struct perf_counter_context *ctx, int cpu)
55 {
56         return 0;
57 }
58
59 void __weak perf_counter_print_debug(void)      { }
60
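/*
 * A minimal sketch of how the hooks above are meant to be used
 * (architectures override the weak stubs with real hardware code);
 * the list-manipulation paths below bracket NMI-sensitive updates
 * the same way:
 *
 *	u64 perf_flags;
 *
 *	perf_flags = hw_perf_save_disable();
 *	list_del_counter(counter, ctx);
 *	hw_perf_restore(perf_flags);
 */
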
61 static void
62 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
63 {
64         struct perf_counter *group_leader = counter->group_leader;
65
66         /*
67          * Depending on whether it is a standalone or sibling counter,
68          * add it straight to the context's counter list, or to the group
69          * leader's sibling list:
70          */
71         if (counter->group_leader == counter)
72                 list_add_tail(&counter->list_entry, &ctx->counter_list);
73         else
74                 list_add_tail(&counter->list_entry, &group_leader->sibling_list);
75 }
76
77 static void
78 list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
79 {
80         struct perf_counter *sibling, *tmp;
81
82         list_del_init(&counter->list_entry);
83
84         /*
85          * If this was a group counter with sibling counters then
86          * upgrade the siblings to singleton counters by adding them
87          * to the context list directly:
88          */
89         list_for_each_entry_safe(sibling, tmp,
90                                  &counter->sibling_list, list_entry) {
91
92                 list_del_init(&sibling->list_entry);
93                 list_add_tail(&sibling->list_entry, &ctx->counter_list);
94                 sibling->group_leader = sibling;
95         }
96 }
97
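/*
 * The two helpers above maintain a two-level topology (groups cannot
 * nest; see the group_leader checks in sys_perf_counter_open() below):
 *
 *	ctx->counter_list:       leader0 -> leader1 -> leader2 -> ...
 *	leader0->sibling_list:   sibling0a -> sibling0b -> ...
 *
 * Removing a leader via list_del_counter() promotes its siblings back
 * onto ctx->counter_list as singleton counters.
 */
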
98 static void
99 counter_sched_out(struct perf_counter *counter,
100                   struct perf_cpu_context *cpuctx,
101                   struct perf_counter_context *ctx)
102 {
103         if (counter->state != PERF_COUNTER_STATE_ACTIVE)
104                 return;
105
106         counter->state = PERF_COUNTER_STATE_INACTIVE;
107         counter->hw_ops->disable(counter);
108         counter->oncpu = -1;
109
110         if (!is_software_counter(counter))
111                 cpuctx->active_oncpu--;
112         ctx->nr_active--;
113         if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
114                 cpuctx->exclusive = 0;
115 }
116
117 static void
118 group_sched_out(struct perf_counter *group_counter,
119                 struct perf_cpu_context *cpuctx,
120                 struct perf_counter_context *ctx)
121 {
122         struct perf_counter *counter;
123
124         if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
125                 return;
126
127         counter_sched_out(group_counter, cpuctx, ctx);
128
129         /*
130          * Schedule out siblings (if any):
131          */
132         list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
133                 counter_sched_out(counter, cpuctx, ctx);
134
135         if (group_counter->hw_event.exclusive)
136                 cpuctx->exclusive = 0;
137 }
138
139 /*
140  * Cross CPU call to remove a performance counter
141  *
142  * We disable the counter on the hardware level first. After that we
143  * remove it from the context list.
144  */
145 static void __perf_counter_remove_from_context(void *info)
146 {
147         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
148         struct perf_counter *counter = info;
149         struct perf_counter_context *ctx = counter->ctx;
150         unsigned long flags;
151         u64 perf_flags;
152
153         /*
154          * If this is a task context, we need to check whether it is
155          * the current task context of this cpu. If not, it has been
156          * scheduled out before the smp call arrived.
157          */
158         if (ctx->task && cpuctx->task_ctx != ctx)
159                 return;
160
161         curr_rq_lock_irq_save(&flags);
162         spin_lock(&ctx->lock);
163
164         counter_sched_out(counter, cpuctx, ctx);
165
166         counter->task = NULL;
167         ctx->nr_counters--;
168
169         /*
170          * Protect the list operation against NMI by disabling the
171          * counters on a global level. NOP for non-NMI based counters.
172          */
173         perf_flags = hw_perf_save_disable();
174         list_del_counter(counter, ctx);
175         hw_perf_restore(perf_flags);
176
177         if (!ctx->task) {
178                 /*
179                  * Allow more per-task counters with respect to the
180                  * reservation:
181                  */
182                 cpuctx->max_pertask =
183                         min(perf_max_counters - ctx->nr_counters,
184                             perf_max_counters - perf_reserved_percpu);
185         }
186
187         spin_unlock(&ctx->lock);
188         curr_rq_unlock_irq_restore(&flags);
189 }
190
191
192 /*
193  * Remove the counter from a task's (or a CPU's) list of counters.
194  *
195  * Must be called with counter->mutex and ctx->mutex held.
196  *
197  * CPU counters are removed with an smp call. For task counters we only
198  * make the call when the task is on a CPU.
199  */
200 static void perf_counter_remove_from_context(struct perf_counter *counter)
201 {
202         struct perf_counter_context *ctx = counter->ctx;
203         struct task_struct *task = ctx->task;
204
205         if (!task) {
206                 /*
207                  * Per-cpu counters are removed via an smp call and
208                  * the removal is always successful.
209                  */
210                 smp_call_function_single(counter->cpu,
211                                          __perf_counter_remove_from_context,
212                                          counter, 1);
213                 return;
214         }
215
216 retry:
217         task_oncpu_function_call(task, __perf_counter_remove_from_context,
218                                  counter);
219
220         spin_lock_irq(&ctx->lock);
221         /*
222          * If the context is active we need to retry the smp call.
223          */
224         if (ctx->nr_active && !list_empty(&counter->list_entry)) {
225                 spin_unlock_irq(&ctx->lock);
226                 goto retry;
227         }
228
229         /*
230          * The lock prevents this context from being scheduled in, so we
231          * can remove the counter safely if the call above did not
232          * succeed.
233          */
234         if (!list_empty(&counter->list_entry)) {
235                 ctx->nr_counters--;
236                 list_del_counter(counter, ctx);
237                 counter->task = NULL;
238         }
239         spin_unlock_irq(&ctx->lock);
240 }
241
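/*
 * The disable/install/enable paths below follow the same cross-call
 * shape as the function above; roughly (__perf_counter_xxx standing
 * in for the per-operation helper):
 *
 *   retry:
 *	task_oncpu_function_call(task, __perf_counter_xxx, counter);
 *
 *	spin_lock_irq(&ctx->lock);
 *	if (<the cross call did not take effect>) {
 *		spin_unlock_irq(&ctx->lock);
 *		goto retry;
 *	}
 *	<finish the operation under ctx->lock, in case the task was
 *	 scheduled out before the cross call ran>
 *	spin_unlock_irq(&ctx->lock);
 */
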
242 /*
243  * Cross CPU call to disable a performance counter
244  */
245 static void __perf_counter_disable(void *info)
246 {
247         struct perf_counter *counter = info;
248         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
249         struct perf_counter_context *ctx = counter->ctx;
250         unsigned long flags;
251
252         /*
253          * If this is a per-task counter, we need to check whether this
254          * counter's task is the current task on this cpu.
255          */
256         if (ctx->task && cpuctx->task_ctx != ctx)
257                 return;
258
259         curr_rq_lock_irq_save(&flags);
260         spin_lock(&ctx->lock);
261
262         /*
263          * If the counter is on, turn it off.
264          * If it is in error state, leave it in error state.
265          */
266         if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
267                 if (counter == counter->group_leader)
268                         group_sched_out(counter, cpuctx, ctx);
269                 else
270                         counter_sched_out(counter, cpuctx, ctx);
271                 counter->state = PERF_COUNTER_STATE_OFF;
272         }
273
274         spin_unlock(&ctx->lock);
275         curr_rq_unlock_irq_restore(&flags);
276 }
277
278 /*
279  * Disable a counter.
280  */
281 static void perf_counter_disable(struct perf_counter *counter)
282 {
283         struct perf_counter_context *ctx = counter->ctx;
284         struct task_struct *task = ctx->task;
285
286         if (!task) {
287                 /*
288                  * Disable the counter on the cpu that it's on
289                  */
290                 smp_call_function_single(counter->cpu, __perf_counter_disable,
291                                          counter, 1);
292                 return;
293         }
294
295  retry:
296         task_oncpu_function_call(task, __perf_counter_disable, counter);
297
298         spin_lock_irq(&ctx->lock);
299         /*
300          * If the counter is still active, we need to retry the cross-call.
301          */
302         if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
303                 spin_unlock_irq(&ctx->lock);
304                 goto retry;
305         }
306
307         /*
308          * Since we have the lock this context can't be scheduled
309          * in, so we can change the state safely.
310          */
311         if (counter->state == PERF_COUNTER_STATE_INACTIVE)
312                 counter->state = PERF_COUNTER_STATE_OFF;
313
314         spin_unlock_irq(&ctx->lock);
315 }
316
317 /*
318  * Disable a counter and all its children.
319  */
320 static void perf_counter_disable_family(struct perf_counter *counter)
321 {
322         struct perf_counter *child;
323
324         perf_counter_disable(counter);
325
326         /*
327          * Lock the mutex to protect the list of children
328          */
329         mutex_lock(&counter->mutex);
330         list_for_each_entry(child, &counter->child_list, child_list)
331                 perf_counter_disable(child);
332         mutex_unlock(&counter->mutex);
333 }
334
335 static int
336 counter_sched_in(struct perf_counter *counter,
337                  struct perf_cpu_context *cpuctx,
338                  struct perf_counter_context *ctx,
339                  int cpu)
340 {
341         if (counter->state <= PERF_COUNTER_STATE_OFF)
342                 return 0;
343
344         counter->state = PERF_COUNTER_STATE_ACTIVE;
345         counter->oncpu = cpu;   /* TODO: put 'cpu' into cpuctx->cpu */
346         /*
347          * The new state must be visible before we turn it on in the hardware:
348          */
349         smp_wmb();
350
351         if (counter->hw_ops->enable(counter)) {
352                 counter->state = PERF_COUNTER_STATE_INACTIVE;
353                 counter->oncpu = -1;
354                 return -EAGAIN;
355         }
356
357         if (!is_software_counter(counter))
358                 cpuctx->active_oncpu++;
359         ctx->nr_active++;
360
361         if (counter->hw_event.exclusive)
362                 cpuctx->exclusive = 1;
363
364         return 0;
365 }
366
367 /*
368  * Return 1 for a group consisting entirely of software counters,
369  * 0 if the group contains any hardware counters.
370  */
371 static int is_software_only_group(struct perf_counter *leader)
372 {
373         struct perf_counter *counter;
374
375         if (!is_software_counter(leader))
376                 return 0;
377         list_for_each_entry(counter, &leader->sibling_list, list_entry)
378                 if (!is_software_counter(counter))
379                         return 0;
380         return 1;
381 }
382
383 /*
384  * Work out whether we can put this counter group on the CPU now.
385  */
386 static int group_can_go_on(struct perf_counter *counter,
387                            struct perf_cpu_context *cpuctx,
388                            int can_add_hw)
389 {
390         /*
391          * Groups consisting entirely of software counters can always go on.
392          */
393         if (is_software_only_group(counter))
394                 return 1;
395         /*
396          * If an exclusive group is already on, no other hardware
397          * counters can go on.
398          */
399         if (cpuctx->exclusive)
400                 return 0;
401         /*
402          * If this group is exclusive and there are already
403          * counters on the CPU, it can't go on.
404          */
405         if (counter->hw_event.exclusive && cpuctx->active_oncpu)
406                 return 0;
407         /*
408          * Otherwise, try to add it if all previous groups were able
409          * to go on.
410          */
411         return can_add_hw;
412 }
413
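/*
 * Worked example of the rules above (sw = software-only group,
 * hw = group containing hardware counters, excl = hw_event.exclusive):
 *
 *	cpuctx->exclusive set:            sw -> on, hw -> refused
 *	excl && cpuctx->active_oncpu:     refused
 *	otherwise:                        on, provided can_add_hw is set
 */
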
414 /*
415  * Cross CPU call to install and enable a performance counter
416  */
417 static void __perf_install_in_context(void *info)
418 {
419         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
420         struct perf_counter *counter = info;
421         struct perf_counter_context *ctx = counter->ctx;
422         struct perf_counter *leader = counter->group_leader;
423         int cpu = smp_processor_id();
424         unsigned long flags;
425         u64 perf_flags;
426         int err;
427
428         /*
429          * If this is a task context, we need to check whether it is
430          * the current task context of this cpu. If not, it has been
431          * scheduled out before the smp call arrived.
432          */
433         if (ctx->task && cpuctx->task_ctx != ctx)
434                 return;
435
436         curr_rq_lock_irq_save(&flags);
437         spin_lock(&ctx->lock);
438
439         /*
440          * Protect the list operation against NMI by disabling the
441          * counters on a global level. NOP for non-NMI based counters.
442          */
443         perf_flags = hw_perf_save_disable();
444
445         list_add_counter(counter, ctx);
446         ctx->nr_counters++;
447         counter->prev_state = PERF_COUNTER_STATE_OFF;
448
449         /*
450          * Don't put the counter on if it is disabled or if
451          * it is in a group and the group isn't on.
452          */
453         if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
454             (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
455                 goto unlock;
456
457         /*
458          * An exclusive counter can't go on if there are already active
459          * hardware counters, and no hardware counter can go on if there
460          * is already an exclusive counter on.
461          */
462         if (!group_can_go_on(counter, cpuctx, 1))
463                 err = -EEXIST;
464         else
465                 err = counter_sched_in(counter, cpuctx, ctx, cpu);
466
467         if (err) {
468                 /*
469                  * This counter couldn't go on.  If it is in a group
470                  * then we have to pull the whole group off.
471                  * If the counter group is pinned then put it in error state.
472                  */
473                 if (leader != counter)
474                         group_sched_out(leader, cpuctx, ctx);
475                 if (leader->hw_event.pinned)
476                         leader->state = PERF_COUNTER_STATE_ERROR;
477         }
478
479         if (!err && !ctx->task && cpuctx->max_pertask)
480                 cpuctx->max_pertask--;
481
482  unlock:
483         hw_perf_restore(perf_flags);
484
485         spin_unlock(&ctx->lock);
486         curr_rq_unlock_irq_restore(&flags);
487 }
488
489 /*
490  * Attach a performance counter to a context
491  *
492  * First we add the counter to the list with the hardware enable bit
493  * in counter->hw_config cleared.
494  *
495  * If the counter is attached to a task which is on a CPU we use a smp
496  * call to enable it in the task context. The task might have been
497  * scheduled away, but we check this in the smp call again.
498  *
499  * Must be called with ctx->mutex held.
500  */
501 static void
502 perf_install_in_context(struct perf_counter_context *ctx,
503                         struct perf_counter *counter,
504                         int cpu)
505 {
506         struct task_struct *task = ctx->task;
507
508         if (!task) {
509                 /*
510                  * Per-cpu counters are installed via an smp call and
511                  * the install is always successful.
512                  */
513                 smp_call_function_single(cpu, __perf_install_in_context,
514                                          counter, 1);
515                 return;
516         }
517
518         counter->task = task;
519 retry:
520         task_oncpu_function_call(task, __perf_install_in_context,
521                                  counter);
522
523         spin_lock_irq(&ctx->lock);
524         /*
525          * If the context is active we need to retry the smp call.
526          */
527         if (ctx->is_active && list_empty(&counter->list_entry)) {
528                 spin_unlock_irq(&ctx->lock);
529                 goto retry;
530         }
531
532         /*
533          * The lock prevents this context from being scheduled in, so we
534          * can add the counter safely if the call above did not
535          * succeed.
536          */
537         if (list_empty(&counter->list_entry)) {
538                 list_add_counter(counter, ctx);
539                 ctx->nr_counters++;
540         }
541         spin_unlock_irq(&ctx->lock);
542 }
543
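/*
 * A minimal sketch of the intended call sequence (assuming the usual
 * syscall path, see sys_perf_counter_open() near the end of this file):
 *
 *	ctx = find_get_context(pid, cpu);
 *	counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
 *				     GFP_KERNEL);
 *
 *	mutex_lock(&ctx->mutex);
 *	perf_install_in_context(ctx, counter, cpu);
 *	mutex_unlock(&ctx->mutex);
 */
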
544 /*
545  * Cross CPU call to enable a performance counter
546  */
547 static void __perf_counter_enable(void *info)
548 {
549         struct perf_counter *counter = info;
550         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
551         struct perf_counter_context *ctx = counter->ctx;
552         struct perf_counter *leader = counter->group_leader;
553         unsigned long flags;
554         int err;
555
556         /*
557          * If this is a per-task counter, we need to check whether this
558          * counter's task is the current task on this cpu.
559          */
560         if (ctx->task && cpuctx->task_ctx != ctx)
561                 return;
562
563         curr_rq_lock_irq_save(&flags);
564         spin_lock(&ctx->lock);
565
566         counter->prev_state = counter->state;
567         if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
568                 goto unlock;
569         counter->state = PERF_COUNTER_STATE_INACTIVE;
570
571         /*
572          * If the counter is in a group and isn't the group leader,
573          * then don't put it on unless the group is on.
574          */
575         if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
576                 goto unlock;
577
578         if (!group_can_go_on(counter, cpuctx, 1))
579                 err = -EEXIST;
580         else
581                 err = counter_sched_in(counter, cpuctx, ctx,
582                                        smp_processor_id());
583
584         if (err) {
585                 /*
586                  * If this counter can't go on and it's part of a
587                  * group, then the whole group has to come off.
588                  */
589                 if (leader != counter)
590                         group_sched_out(leader, cpuctx, ctx);
591                 if (leader->hw_event.pinned)
592                         leader->state = PERF_COUNTER_STATE_ERROR;
593         }
594
595  unlock:
596         spin_unlock(&ctx->lock);
597         curr_rq_unlock_irq_restore(&flags);
598 }
599
600 /*
601  * Enable a counter.
602  */
603 static void perf_counter_enable(struct perf_counter *counter)
604 {
605         struct perf_counter_context *ctx = counter->ctx;
606         struct task_struct *task = ctx->task;
607
608         if (!task) {
609                 /*
610                  * Enable the counter on the cpu that it's on
611                  */
612                 smp_call_function_single(counter->cpu, __perf_counter_enable,
613                                          counter, 1);
614                 return;
615         }
616
617         spin_lock_irq(&ctx->lock);
618         if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
619                 goto out;
620
621         /*
622          * If the counter is in error state, clear that first.
623          * That way, if we see the counter in error state below, we
624          * know that it has gone back into error state, as distinct
625          * from the task having been scheduled away before the
626          * cross-call arrived.
627          */
628         if (counter->state == PERF_COUNTER_STATE_ERROR)
629                 counter->state = PERF_COUNTER_STATE_OFF;
630
631  retry:
632         spin_unlock_irq(&ctx->lock);
633         task_oncpu_function_call(task, __perf_counter_enable, counter);
634
635         spin_lock_irq(&ctx->lock);
636
637         /*
638          * If the context is active and the counter is still off,
639          * we need to retry the cross-call.
640          */
641         if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
642                 goto retry;
643
644         /*
645          * Since we have the lock this context can't be scheduled
646          * in, so we can change the state safely.
647          */
648         if (counter->state == PERF_COUNTER_STATE_OFF)
649                 counter->state = PERF_COUNTER_STATE_INACTIVE;
650  out:
651         spin_unlock_irq(&ctx->lock);
652 }
653
654 /*
655  * Enable a counter and all its children.
656  */
657 static void perf_counter_enable_family(struct perf_counter *counter)
658 {
659         struct perf_counter *child;
660
661         perf_counter_enable(counter);
662
663         /*
664          * Lock the mutex to protect the list of children
665          */
666         mutex_lock(&counter->mutex);
667         list_for_each_entry(child, &counter->child_list, child_list)
668                 perf_counter_enable(child);
669         mutex_unlock(&counter->mutex);
670 }
671
672 void __perf_counter_sched_out(struct perf_counter_context *ctx,
673                               struct perf_cpu_context *cpuctx)
674 {
675         struct perf_counter *counter;
676         u64 flags;
677
678         spin_lock(&ctx->lock);
679         ctx->is_active = 0;
680         if (likely(!ctx->nr_counters))
681                 goto out;
682
683         flags = hw_perf_save_disable();
684         if (ctx->nr_active) {
685                 list_for_each_entry(counter, &ctx->counter_list, list_entry)
686                         group_sched_out(counter, cpuctx, ctx);
687         }
688         hw_perf_restore(flags);
689  out:
690         spin_unlock(&ctx->lock);
691 }
692
693 /*
694  * Called from scheduler to remove the counters of the current task,
695  * with interrupts disabled.
696  *
697  * We stop each counter and update the counter value in counter->count.
698  *
699  * This does not protect us against NMI, but disable()
700  * sets the disabled bit in the control field of counter _before_
701  * accessing the counter control register. If an NMI hits, then it will
702  * not restart the counter.
703  */
704 void perf_counter_task_sched_out(struct task_struct *task, int cpu)
705 {
706         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
707         struct perf_counter_context *ctx = &task->perf_counter_ctx;
708
709         if (likely(!cpuctx->task_ctx))
710                 return;
711
712         __perf_counter_sched_out(ctx, cpuctx);
713
714         cpuctx->task_ctx = NULL;
715 }
716
717 static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
718 {
719         __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
720 }
721
722 static int
723 group_sched_in(struct perf_counter *group_counter,
724                struct perf_cpu_context *cpuctx,
725                struct perf_counter_context *ctx,
726                int cpu)
727 {
728         struct perf_counter *counter, *partial_group;
729         int ret;
730
731         if (group_counter->state == PERF_COUNTER_STATE_OFF)
732                 return 0;
733
734         ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
735         if (ret)
736                 return ret < 0 ? ret : 0;
737
738         group_counter->prev_state = group_counter->state;
739         if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
740                 return -EAGAIN;
741
742         /*
743          * Schedule in siblings as one group (if any):
744          */
745         list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
746                 counter->prev_state = counter->state;
747                 if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
748                         partial_group = counter;
749                         goto group_error;
750                 }
751         }
752
753         return 0;
754
755 group_error:
756         /*
757          * Groups can be scheduled in as one unit only, so undo any
758          * partial group before returning:
759          */
760         list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
761                 if (counter == partial_group)
762                         break;
763                 counter_sched_out(counter, cpuctx, ctx);
764         }
765         counter_sched_out(group_counter, cpuctx, ctx);
766
767         return -EAGAIN;
768 }
769
770 static void
771 __perf_counter_sched_in(struct perf_counter_context *ctx,
772                         struct perf_cpu_context *cpuctx, int cpu)
773 {
774         struct perf_counter *counter;
775         u64 flags;
776         int can_add_hw = 1;
777
778         spin_lock(&ctx->lock);
779         ctx->is_active = 1;
780         if (likely(!ctx->nr_counters))
781                 goto out;
782
783         flags = hw_perf_save_disable();
784
785         /*
786          * First go through the list and put on any pinned groups
787          * in order to give them the best chance of going on.
788          */
789         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
790                 if (counter->state <= PERF_COUNTER_STATE_OFF ||
791                     !counter->hw_event.pinned)
792                         continue;
793                 if (counter->cpu != -1 && counter->cpu != cpu)
794                         continue;
795
796                 if (group_can_go_on(counter, cpuctx, 1))
797                         group_sched_in(counter, cpuctx, ctx, cpu);
798
799                 /*
800                  * If this pinned group hasn't been scheduled,
801                  * put it in error state.
802                  */
803                 if (counter->state == PERF_COUNTER_STATE_INACTIVE)
804                         counter->state = PERF_COUNTER_STATE_ERROR;
805         }
806
807         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
808                 /*
809                  * Ignore counters in OFF or ERROR state, and
810                  * ignore pinned counters since we did them already.
811                  */
812                 if (counter->state <= PERF_COUNTER_STATE_OFF ||
813                     counter->hw_event.pinned)
814                         continue;
815
816                 /*
817                  * Listen to the 'cpu' scheduling filter constraint
818                  * of counters:
819                  */
820                 if (counter->cpu != -1 && counter->cpu != cpu)
821                         continue;
822
823                 if (group_can_go_on(counter, cpuctx, can_add_hw)) {
824                         if (group_sched_in(counter, cpuctx, ctx, cpu))
825                                 can_add_hw = 0;
826                 }
827         }
828         hw_perf_restore(flags);
829  out:
830         spin_unlock(&ctx->lock);
831 }
832
833 /*
834  * Called from scheduler to add the counters of the current task
835  * with interrupts disabled.
836  *
837  * We restore the counter value and then enable it.
838  *
839  * This does not protect us against NMI, but enable()
840  * sets the enabled bit in the control field of counter _before_
841  * accessing the counter control register. If an NMI hits, then it will
842  * keep the counter running.
843  */
844 void perf_counter_task_sched_in(struct task_struct *task, int cpu)
845 {
846         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
847         struct perf_counter_context *ctx = &task->perf_counter_ctx;
848
849         __perf_counter_sched_in(ctx, cpuctx, cpu);
850         cpuctx->task_ctx = ctx;
851 }
852
853 static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
854 {
855         struct perf_counter_context *ctx = &cpuctx->ctx;
856
857         __perf_counter_sched_in(ctx, cpuctx, cpu);
858 }
859
860 int perf_counter_task_disable(void)
861 {
862         struct task_struct *curr = current;
863         struct perf_counter_context *ctx = &curr->perf_counter_ctx;
864         struct perf_counter *counter;
865         unsigned long flags;
866         u64 perf_flags;
867         int cpu;
868
869         if (likely(!ctx->nr_counters))
870                 return 0;
871
872         curr_rq_lock_irq_save(&flags);
873         cpu = smp_processor_id();
874
875         /* force the update of the task clock: */
876         __task_delta_exec(curr, 1);
877
878         perf_counter_task_sched_out(curr, cpu);
879
880         spin_lock(&ctx->lock);
881
882         /*
883          * Disable all the counters:
884          */
885         perf_flags = hw_perf_save_disable();
886
887         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
888                 if (counter->state != PERF_COUNTER_STATE_ERROR)
889                         counter->state = PERF_COUNTER_STATE_OFF;
890         }
891
892         hw_perf_restore(perf_flags);
893
894         spin_unlock(&ctx->lock);
895
896         curr_rq_unlock_irq_restore(&flags);
897
898         return 0;
899 }
900
901 int perf_counter_task_enable(void)
902 {
903         struct task_struct *curr = current;
904         struct perf_counter_context *ctx = &curr->perf_counter_ctx;
905         struct perf_counter *counter;
906         unsigned long flags;
907         u64 perf_flags;
908         int cpu;
909
910         if (likely(!ctx->nr_counters))
911                 return 0;
912
913         curr_rq_lock_irq_save(&flags);
914         cpu = smp_processor_id();
915
916         /* force the update of the task clock: */
917         __task_delta_exec(curr, 1);
918
919         perf_counter_task_sched_out(curr, cpu);
920
921         spin_lock(&ctx->lock);
922
923         /*
924          * Enable all the counters:
925          */
926         perf_flags = hw_perf_save_disable();
927
928         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
929                 if (counter->state > PERF_COUNTER_STATE_OFF)
930                         continue;
931                 counter->state = PERF_COUNTER_STATE_INACTIVE;
932                 counter->hw_event.disabled = 0;
933         }
934         hw_perf_restore(perf_flags);
935
936         spin_unlock(&ctx->lock);
937
938         perf_counter_task_sched_in(curr, cpu);
939
940         curr_rq_unlock_irq_restore(&flags);
941
942         return 0;
943 }
944
945 /*
946  * Round-robin a context's counters:
947  */
948 static void rotate_ctx(struct perf_counter_context *ctx)
949 {
950         struct perf_counter *counter;
951         u64 perf_flags;
952
953         if (!ctx->nr_counters)
954                 return;
955
956         spin_lock(&ctx->lock);
957         /*
958          * Rotate the first entry last (works just fine for group counters too):
959          */
960         perf_flags = hw_perf_save_disable();
961         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
962                 list_del(&counter->list_entry);
963                 list_add_tail(&counter->list_entry, &ctx->counter_list);
964                 break;
965         }
966         hw_perf_restore(perf_flags);
967
968         spin_unlock(&ctx->lock);
969 }
970
971 void perf_counter_task_tick(struct task_struct *curr, int cpu)
972 {
973         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
974         struct perf_counter_context *ctx = &curr->perf_counter_ctx;
975         const int rotate_percpu = 0;
976
977         if (rotate_percpu)
978                 perf_counter_cpu_sched_out(cpuctx);
979         perf_counter_task_sched_out(curr, cpu);
980
981         if (rotate_percpu)
982                 rotate_ctx(&cpuctx->ctx);
983         rotate_ctx(ctx);
984
985         if (rotate_percpu)
986                 perf_counter_cpu_sched_in(cpuctx, cpu);
987         perf_counter_task_sched_in(curr, cpu);
988 }
989
990 /*
991  * Cross CPU call to read the hardware counter
992  */
993 static void __read(void *info)
994 {
995         struct perf_counter *counter = info;
996         unsigned long flags;
997
998         curr_rq_lock_irq_save(&flags);
999         counter->hw_ops->read(counter);
1000         curr_rq_unlock_irq_restore(&flags);
1001 }
1002
1003 static u64 perf_counter_read(struct perf_counter *counter)
1004 {
1005         /*
1006          * If counter is enabled and currently active on a CPU, update the
1007          * value in the counter structure:
1008          */
1009         if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
1010                 smp_call_function_single(counter->oncpu,
1011                                          __read, counter, 1);
1012         }
1013
1014         return atomic64_read(&counter->count);
1015 }
1016
1017 /*
1018  * Cross CPU call to switch performance data pointers
1019  */
1020 static void __perf_switch_irq_data(void *info)
1021 {
1022         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1023         struct perf_counter *counter = info;
1024         struct perf_counter_context *ctx = counter->ctx;
1025         struct perf_data *oldirqdata = counter->irqdata;
1026
1027         /*
1028          * If this is a task context, we need to check whether it is
1029          * the current task context of this cpu. If not, it has been
1030          * scheduled out before the smp call arrived.
1031          */
1032         if (ctx->task) {
1033                 if (cpuctx->task_ctx != ctx)
1034                         return;
1035                 spin_lock(&ctx->lock);
1036         }
1037
1038         /* Change the pointer in an NMI-safe way */
1039         atomic_long_set((atomic_long_t *)&counter->irqdata,
1040                         (unsigned long) counter->usrdata);
1041         counter->usrdata = oldirqdata;
1042
1043         if (ctx->task)
1044                 spin_unlock(&ctx->lock);
1045 }
1046
1047 static struct perf_data *perf_switch_irq_data(struct perf_counter *counter)
1048 {
1049         struct perf_counter_context *ctx = counter->ctx;
1050         struct perf_data *oldirqdata = counter->irqdata;
1051         struct task_struct *task = ctx->task;
1052
1053         if (!task) {
1054                 smp_call_function_single(counter->cpu,
1055                                          __perf_switch_irq_data,
1056                                          counter, 1);
1057                 return counter->usrdata;
1058         }
1059
1060 retry:
1061         spin_lock_irq(&ctx->lock);
1062         if (counter->state != PERF_COUNTER_STATE_ACTIVE) {
1063                 counter->irqdata = counter->usrdata;
1064                 counter->usrdata = oldirqdata;
1065                 spin_unlock_irq(&ctx->lock);
1066                 return oldirqdata;
1067         }
1068         spin_unlock_irq(&ctx->lock);
1069         task_oncpu_function_call(task, __perf_switch_irq_data, counter);
1070         /* Might have failed because the task was scheduled out */
1071         if (counter->irqdata == oldirqdata)
1072                 goto retry;
1073
1074         return counter->usrdata;
1075 }
1076
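/*
 * In other words, counter->data[0] and counter->data[1] act as a
 * double buffer: ->irqdata is filled from IRQ/NMI context while
 * ->usrdata is drained by read(); the switch above exchanges the two
 * pointers so the reader can drain what the IRQ side produced (see
 * perf_read_irq_data() below).
 */
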
1077 static void put_context(struct perf_counter_context *ctx)
1078 {
1079         if (ctx->task)
1080                 put_task_struct(ctx->task);
1081 }
1082
1083 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1084 {
1085         struct perf_cpu_context *cpuctx;
1086         struct perf_counter_context *ctx;
1087         struct task_struct *task;
1088
1089         /*
1090          * If cpu is not a wildcard then this is a percpu counter:
1091          */
1092         if (cpu != -1) {
1093                 /* Must be root to operate on a CPU counter: */
1094                 if (!capable(CAP_SYS_ADMIN))
1095                         return ERR_PTR(-EACCES);
1096
1097                 if (cpu < 0 || cpu >= num_possible_cpus())
1098                         return ERR_PTR(-EINVAL);
1099
1100                 /*
1101                  * We could be clever and allow attaching a counter to an
1102                  * offline CPU and activate it when the CPU comes up, but
1103                  * that's for later.
1104                  */
1105                 if (!cpu_isset(cpu, cpu_online_map))
1106                         return ERR_PTR(-ENODEV);
1107
1108                 cpuctx = &per_cpu(perf_cpu_context, cpu);
1109                 ctx = &cpuctx->ctx;
1110
1111                 return ctx;
1112         }
1113
1114         rcu_read_lock();
1115         if (!pid)
1116                 task = current;
1117         else
1118                 task = find_task_by_vpid(pid);
1119         if (task)
1120                 get_task_struct(task);
1121         rcu_read_unlock();
1122
1123         if (!task)
1124                 return ERR_PTR(-ESRCH);
1125
1126         ctx = &task->perf_counter_ctx;
1127         ctx->task = task;
1128
1129         /* Reuse ptrace permission checks for now. */
1130         if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
1131                 put_context(ctx);
1132                 return ERR_PTR(-EACCES);
1133         }
1134
1135         return ctx;
1136 }
1137
1138 /*
1139  * Called when the last reference to the file is gone.
1140  */
1141 static int perf_release(struct inode *inode, struct file *file)
1142 {
1143         struct perf_counter *counter = file->private_data;
1144         struct perf_counter_context *ctx = counter->ctx;
1145
1146         file->private_data = NULL;
1147
1148         mutex_lock(&ctx->mutex);
1149         mutex_lock(&counter->mutex);
1150
1151         perf_counter_remove_from_context(counter);
1152
1153         mutex_unlock(&counter->mutex);
1154         mutex_unlock(&ctx->mutex);
1155
1156         kfree(counter);
1157         put_context(ctx);
1158
1159         return 0;
1160 }
1161
1162 /*
1163  * Read the performance counter - simple non blocking version for now
1164  */
1165 static ssize_t
1166 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1167 {
1168         u64 cntval;
1169
1170         if (count != sizeof(cntval))
1171                 return -EINVAL;
1172
1173         /*
1174          * Return end-of-file for a read on a counter that is in
1175          * error state (i.e. because it was pinned but it couldn't be
1176          * scheduled on to the CPU at some point).
1177          */
1178         if (counter->state == PERF_COUNTER_STATE_ERROR)
1179                 return 0;
1180
1181         mutex_lock(&counter->mutex);
1182         cntval = perf_counter_read(counter);
1183         mutex_unlock(&counter->mutex);
1184
1185         return put_user(cntval, (u64 __user *) buf) ? -EFAULT : sizeof(cntval);
1186 }
1187
1188 static ssize_t
1189 perf_copy_usrdata(struct perf_data *usrdata, char __user *buf, size_t count)
1190 {
1191         if (!usrdata->len)
1192                 return 0;
1193
1194         count = min(count, (size_t)usrdata->len);
1195         if (copy_to_user(buf, usrdata->data + usrdata->rd_idx, count))
1196                 return -EFAULT;
1197
1198         /* Adjust the counters */
1199         usrdata->len -= count;
1200         if (!usrdata->len)
1201                 usrdata->rd_idx = 0;
1202         else
1203                 usrdata->rd_idx += count;
1204
1205         return count;
1206 }
1207
1208 static ssize_t
1209 perf_read_irq_data(struct perf_counter  *counter,
1210                    char __user          *buf,
1211                    size_t               count,
1212                    int                  nonblocking)
1213 {
1214         struct perf_data *irqdata, *usrdata;
1215         DECLARE_WAITQUEUE(wait, current);
1216         ssize_t res, res2;
1217
1218         irqdata = counter->irqdata;
1219         usrdata = counter->usrdata;
1220
1221         if (usrdata->len + irqdata->len >= count)
1222                 goto read_pending;
1223
1224         if (nonblocking)
1225                 return -EAGAIN;
1226
1227         spin_lock_irq(&counter->waitq.lock);
1228         __add_wait_queue(&counter->waitq, &wait);
1229         for (;;) {
1230                 set_current_state(TASK_INTERRUPTIBLE);
1231                 if (usrdata->len + irqdata->len >= count)
1232                         break;
1233
1234                 if (signal_pending(current))
1235                         break;
1236
1237                 if (counter->state == PERF_COUNTER_STATE_ERROR)
1238                         break;
1239
1240                 spin_unlock_irq(&counter->waitq.lock);
1241                 schedule();
1242                 spin_lock_irq(&counter->waitq.lock);
1243         }
1244         __remove_wait_queue(&counter->waitq, &wait);
1245         __set_current_state(TASK_RUNNING);
1246         spin_unlock_irq(&counter->waitq.lock);
1247
1248         if (usrdata->len + irqdata->len < count &&
1249             counter->state != PERF_COUNTER_STATE_ERROR)
1250                 return -ERESTARTSYS;
1251 read_pending:
1252         mutex_lock(&counter->mutex);
1253
1254         /* Drain pending data first: */
1255         res = perf_copy_usrdata(usrdata, buf, count);
1256         if (res < 0 || res == count)
1257                 goto out;
1258
1259         /* Switch irq buffer: */
1260         usrdata = perf_switch_irq_data(counter);
1261         res2 = perf_copy_usrdata(usrdata, buf + res, count - res);
1262         if (res2 < 0) {
1263                 if (!res)
1264                         res = -EFAULT;
1265         } else {
1266                 res += res2;
1267         }
1268 out:
1269         mutex_unlock(&counter->mutex);
1270
1271         return res;
1272 }
1273
1274 static ssize_t
1275 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1276 {
1277         struct perf_counter *counter = file->private_data;
1278
1279         switch (counter->hw_event.record_type) {
1280         case PERF_RECORD_SIMPLE:
1281                 return perf_read_hw(counter, buf, count);
1282
1283         case PERF_RECORD_IRQ:
1284         case PERF_RECORD_GROUP:
1285                 return perf_read_irq_data(counter, buf, count,
1286                                           file->f_flags & O_NONBLOCK);
1287         }
1288         return -EINVAL;
1289 }
1290
1291 static unsigned int perf_poll(struct file *file, poll_table *wait)
1292 {
1293         struct perf_counter *counter = file->private_data;
1294         unsigned int events = 0;
1295         unsigned long flags;
1296
1297         poll_wait(file, &counter->waitq, wait);
1298
1299         spin_lock_irqsave(&counter->waitq.lock, flags);
1300         if (counter->usrdata->len || counter->irqdata->len)
1301                 events |= POLLIN;
1302         spin_unlock_irqrestore(&counter->waitq.lock, flags);
1303
1304         return events;
1305 }
1306
1307 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1308 {
1309         struct perf_counter *counter = file->private_data;
1310         int err = 0;
1311
1312         switch (cmd) {
1313         case PERF_COUNTER_IOC_ENABLE:
1314                 perf_counter_enable_family(counter);
1315                 break;
1316         case PERF_COUNTER_IOC_DISABLE:
1317                 perf_counter_disable_family(counter);
1318                 break;
1319         default:
1320                 err = -ENOTTY;
1321         }
1322         return err;
1323 }
1324
1325 static const struct file_operations perf_fops = {
1326         .release                = perf_release,
1327         .read                   = perf_read,
1328         .poll                   = perf_poll,
1329         .unlocked_ioctl         = perf_ioctl,
1330         .compat_ioctl           = perf_ioctl,
1331 };
1332
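/*
 * A hypothetical user-space sketch of driving this file descriptor
 * (assumes a counter opened with record_type == PERF_RECORD_SIMPLE
 * via sys_perf_counter_open(); error handling omitted):
 *
 *	u64 count;
 *
 *	ioctl(fd, PERF_COUNTER_IOC_ENABLE);
 *	... run the workload ...
 *	ioctl(fd, PERF_COUNTER_IOC_DISABLE);
 *	read(fd, &count, sizeof(count));
 *
 * read() returns sizeof(u64) on success and 0 (end-of-file) if the
 * counter is in error state, matching perf_read_hw() above.
 */
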
1333 static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
1334 {
1335         int cpu = raw_smp_processor_id();
1336
1337         atomic64_set(&counter->hw.prev_count, cpu_clock(cpu));
1338         return 0;
1339 }
1340
1341 static void cpu_clock_perf_counter_update(struct perf_counter *counter)
1342 {
1343         int cpu = raw_smp_processor_id();
1344         s64 prev;
1345         u64 now;
1346
1347         now = cpu_clock(cpu);
1348         prev = atomic64_read(&counter->hw.prev_count);
1349         atomic64_set(&counter->hw.prev_count, now);
1350         atomic64_add(now - prev, &counter->count);
1351 }
1352
1353 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
1354 {
1355         cpu_clock_perf_counter_update(counter);
1356 }
1357
1358 static void cpu_clock_perf_counter_read(struct perf_counter *counter)
1359 {
1360         cpu_clock_perf_counter_update(counter);
1361 }
1362
1363 static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
1364         .enable         = cpu_clock_perf_counter_enable,
1365         .disable        = cpu_clock_perf_counter_disable,
1366         .read           = cpu_clock_perf_counter_read,
1367 };
1368
1369 /*
1370  * Called from within the scheduler:
1371  */
1372 static u64 task_clock_perf_counter_val(struct perf_counter *counter, int update)
1373 {
1374         struct task_struct *curr = counter->task;
1375         u64 delta;
1376
1377         delta = __task_delta_exec(curr, update);
1378
1379         return curr->se.sum_exec_runtime + delta;
1380 }
1381
1382 static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
1383 {
1384         u64 prev;
1385         s64 delta;
1386
1387         prev = atomic64_read(&counter->hw.prev_count);
1388
1389         atomic64_set(&counter->hw.prev_count, now);
1390
1391         delta = now - prev;
1392
1393         atomic64_add(delta, &counter->count);
1394 }
1395
1396 static void task_clock_perf_counter_read(struct perf_counter *counter)
1397 {
1398         u64 now = task_clock_perf_counter_val(counter, 1);
1399
1400         task_clock_perf_counter_update(counter, now);
1401 }
1402
1403 static int task_clock_perf_counter_enable(struct perf_counter *counter)
1404 {
1405         if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
1406                 atomic64_set(&counter->hw.prev_count,
1407                              task_clock_perf_counter_val(counter, 0));
1408
1409         return 0;
1410 }
1411
1412 static void task_clock_perf_counter_disable(struct perf_counter *counter)
1413 {
1414         u64 now = task_clock_perf_counter_val(counter, 0);
1415
1416         task_clock_perf_counter_update(counter, now);
1417 }
1418
1419 static const struct hw_perf_counter_ops perf_ops_task_clock = {
1420         .enable         = task_clock_perf_counter_enable,
1421         .disable        = task_clock_perf_counter_disable,
1422         .read           = task_clock_perf_counter_read,
1423 };
1424
1425 #ifdef CONFIG_VM_EVENT_COUNTERS
1426 #define cpu_page_faults()       __get_cpu_var(vm_event_states).event[PGFAULT]
1427 #else
1428 #define cpu_page_faults()       0
1429 #endif
1430
1431 static u64 get_page_faults(struct perf_counter *counter)
1432 {
1433         struct task_struct *curr = counter->ctx->task;
1434
1435         if (curr)
1436                 return curr->maj_flt + curr->min_flt;
1437         return cpu_page_faults();
1438 }
1439
1440 static void page_faults_perf_counter_update(struct perf_counter *counter)
1441 {
1442         u64 prev, now;
1443         s64 delta;
1444
1445         prev = atomic64_read(&counter->hw.prev_count);
1446         now = get_page_faults(counter);
1447
1448         atomic64_set(&counter->hw.prev_count, now);
1449
1450         delta = now - prev;
1451
1452         atomic64_add(delta, &counter->count);
1453 }
1454
1455 static void page_faults_perf_counter_read(struct perf_counter *counter)
1456 {
1457         page_faults_perf_counter_update(counter);
1458 }
1459
1460 static int page_faults_perf_counter_enable(struct perf_counter *counter)
1461 {
1462         if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
1463                 atomic64_set(&counter->hw.prev_count, get_page_faults(counter));
1464         return 0;
1465 }
1466
1467 static void page_faults_perf_counter_disable(struct perf_counter *counter)
1468 {
1469         page_faults_perf_counter_update(counter);
1470 }
1471
1472 static const struct hw_perf_counter_ops perf_ops_page_faults = {
1473         .enable         = page_faults_perf_counter_enable,
1474         .disable        = page_faults_perf_counter_disable,
1475         .read           = page_faults_perf_counter_read,
1476 };
1477
1478 static u64 get_context_switches(struct perf_counter *counter)
1479 {
1480         struct task_struct *curr = counter->ctx->task;
1481
1482         if (curr)
1483                 return curr->nvcsw + curr->nivcsw;
1484         return cpu_nr_switches(smp_processor_id());
1485 }
1486
1487 static void context_switches_perf_counter_update(struct perf_counter *counter)
1488 {
1489         u64 prev, now;
1490         s64 delta;
1491
1492         prev = atomic64_read(&counter->hw.prev_count);
1493         now = get_context_switches(counter);
1494
1495         atomic64_set(&counter->hw.prev_count, now);
1496
1497         delta = now - prev;
1498
1499         atomic64_add(delta, &counter->count);
1500 }
1501
1502 static void context_switches_perf_counter_read(struct perf_counter *counter)
1503 {
1504         context_switches_perf_counter_update(counter);
1505 }
1506
1507 static int context_switches_perf_counter_enable(struct perf_counter *counter)
1508 {
1509         if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
1510                 atomic64_set(&counter->hw.prev_count,
1511                              get_context_switches(counter));
1512         return 0;
1513 }
1514
1515 static void context_switches_perf_counter_disable(struct perf_counter *counter)
1516 {
1517         context_switches_perf_counter_update(counter);
1518 }
1519
1520 static const struct hw_perf_counter_ops perf_ops_context_switches = {
1521         .enable         = context_switches_perf_counter_enable,
1522         .disable        = context_switches_perf_counter_disable,
1523         .read           = context_switches_perf_counter_read,
1524 };
1525
1526 static inline u64 get_cpu_migrations(struct perf_counter *counter)
1527 {
1528         struct task_struct *curr = counter->ctx->task;
1529
1530         if (curr)
1531                 return curr->se.nr_migrations;
1532         return cpu_nr_migrations(smp_processor_id());
1533 }
1534
1535 static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
1536 {
1537         u64 prev, now;
1538         s64 delta;
1539
1540         prev = atomic64_read(&counter->hw.prev_count);
1541         now = get_cpu_migrations(counter);
1542
1543         atomic64_set(&counter->hw.prev_count, now);
1544
1545         delta = now - prev;
1546
1547         atomic64_add(delta, &counter->count);
1548 }
1549
1550 static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
1551 {
1552         cpu_migrations_perf_counter_update(counter);
1553 }
1554
1555 static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
1556 {
1557         if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
1558                 atomic64_set(&counter->hw.prev_count,
1559                              get_cpu_migrations(counter));
1560         return 0;
1561 }
1562
1563 static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
1564 {
1565         cpu_migrations_perf_counter_update(counter);
1566 }
1567
1568 static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
1569         .enable         = cpu_migrations_perf_counter_enable,
1570         .disable        = cpu_migrations_perf_counter_disable,
1571         .read           = cpu_migrations_perf_counter_read,
1572 };
1573
1574 static const struct hw_perf_counter_ops *
1575 sw_perf_counter_init(struct perf_counter *counter)
1576 {
1577         const struct hw_perf_counter_ops *hw_ops = NULL;
1578
1579         /*
1580          * Software counters (currently) can't in general distinguish
1581          * between user, kernel and hypervisor events.
1582          * However, context switches and cpu migrations are considered
1583          * to be kernel events, and page faults are never hypervisor
1584          * events.
1585          */
1586         switch (counter->hw_event.type) {
1587         case PERF_COUNT_CPU_CLOCK:
1588                 if (!(counter->hw_event.exclude_user ||
1589                       counter->hw_event.exclude_kernel ||
1590                       counter->hw_event.exclude_hv))
1591                         hw_ops = &perf_ops_cpu_clock;
1592                 break;
1593         case PERF_COUNT_TASK_CLOCK:
1594                 if (counter->hw_event.exclude_user ||
1595                     counter->hw_event.exclude_kernel ||
1596                     counter->hw_event.exclude_hv)
1597                         break;
1598                 /*
1599                  * If the user instantiates this as a per-cpu counter,
1600                  * use the cpu_clock counter instead.
1601                  */
1602                 if (counter->ctx->task)
1603                         hw_ops = &perf_ops_task_clock;
1604                 else
1605                         hw_ops = &perf_ops_cpu_clock;
1606                 break;
1607         case PERF_COUNT_PAGE_FAULTS:
1608                 if (!(counter->hw_event.exclude_user ||
1609                       counter->hw_event.exclude_kernel))
1610                         hw_ops = &perf_ops_page_faults;
1611                 break;
1612         case PERF_COUNT_CONTEXT_SWITCHES:
1613                 if (!counter->hw_event.exclude_kernel)
1614                         hw_ops = &perf_ops_context_switches;
1615                 break;
1616         case PERF_COUNT_CPU_MIGRATIONS:
1617                 if (!counter->hw_event.exclude_kernel)
1618                         hw_ops = &perf_ops_cpu_migrations;
1619                 break;
1620         default:
1621                 break;
1622         }
1623         return hw_ops;
1624 }
1625
1626 /*
1627  * Allocate and initialize a counter structure
1628  */
1629 static struct perf_counter *
1630 perf_counter_alloc(struct perf_counter_hw_event *hw_event,
1631                    int cpu,
1632                    struct perf_counter_context *ctx,
1633                    struct perf_counter *group_leader,
1634                    gfp_t gfpflags)
1635 {
1636         const struct hw_perf_counter_ops *hw_ops;
1637         struct perf_counter *counter;
1638
1639         counter = kzalloc(sizeof(*counter), gfpflags);
1640         if (!counter)
1641                 return NULL;
1642
1643         /*
1644          * Single counters are their own group leaders, with an
1645          * empty sibling list:
1646          */
1647         if (!group_leader)
1648                 group_leader = counter;
1649
1650         mutex_init(&counter->mutex);
1651         INIT_LIST_HEAD(&counter->list_entry);
1652         INIT_LIST_HEAD(&counter->sibling_list);
1653         init_waitqueue_head(&counter->waitq);
1654
1655         INIT_LIST_HEAD(&counter->child_list);
1656
1657         counter->irqdata                = &counter->data[0];
1658         counter->usrdata                = &counter->data[1];
1659         counter->cpu                    = cpu;
1660         counter->hw_event               = *hw_event;
1661         counter->wakeup_pending         = 0;
1662         counter->group_leader           = group_leader;
1663         counter->hw_ops                 = NULL;
1664         counter->ctx                    = ctx;
1665
1666         counter->state = PERF_COUNTER_STATE_INACTIVE;
1667         if (hw_event->disabled)
1668                 counter->state = PERF_COUNTER_STATE_OFF;
1669
1670         hw_ops = NULL;
1671         if (!hw_event->raw && hw_event->type < 0)
1672                 hw_ops = sw_perf_counter_init(counter);
1673         else
1674                 hw_ops = hw_perf_counter_init(counter);
1675
1676         if (!hw_ops) {
1677                 kfree(counter);
1678                 return NULL;
1679         }
1680         counter->hw_ops = hw_ops;
1681
1682         return counter;
1683 }
1684
1685 /**
1686  * sys_perf_counter_open - open a performance counter, associate it with a task/cpu
1687  *
1688  * @hw_event_uptr:      event type attributes for monitoring/sampling
1689  * @pid:                target pid
1690  * @cpu:                target cpu
1691  * @group_fd:           group leader counter fd
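 *
 * Returns the new counter file descriptor, or a negative error code.
 *
 * A minimal illustrative call from user space (assuming the syscall is
 * wired up as __NR_perf_counter_open, that pid 0 selects the current
 * task, cpu -1 means "any CPU" and group_fd -1 means "no group"):
 *
 *	struct perf_counter_hw_event hw_event = {
 *		.type	= PERF_COUNT_TASK_CLOCK,
 *	};
 *	u64 count;
 *	int fd;
 *
 *	fd = syscall(__NR_perf_counter_open, &hw_event, 0, -1, -1);
 *	read(fd, &count, sizeof(count));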
1692  */
1693 asmlinkage int
1694 sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr __user,
1695                       pid_t pid, int cpu, int group_fd)
1696 {
1697         struct perf_counter *counter, *group_leader;
1698         struct perf_counter_hw_event hw_event;
1699         struct perf_counter_context *ctx;
1700         struct file *counter_file = NULL;
1701         struct file *group_file = NULL;
1702         int fput_needed = 0;
1703         int fput_needed2 = 0;
1704         int ret;
1705
1706         if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
1707                 return -EFAULT;
1708
1709         /*
1710          * Get the target context (task or percpu):
1711          */
1712         ctx = find_get_context(pid, cpu);
1713         if (IS_ERR(ctx))
1714                 return PTR_ERR(ctx);
1715
1716         /*
1717          * Look up the group leader (we will attach this counter to it):
1718          */
1719         group_leader = NULL;
1720         if (group_fd != -1) {
1721                 ret = -EINVAL;
1722                 group_file = fget_light(group_fd, &fput_needed);
1723                 if (!group_file)
1724                         goto err_put_context;
1725                 if (group_file->f_op != &perf_fops)
1726                         goto err_put_context;
1727
1728                 group_leader = group_file->private_data;
1729                 /*
1730                  * Do not allow a recursive hierarchy (the requested group
1731                  * leader must itself be a group leader, not a sibling):
1732                  */
1733                 if (group_leader->group_leader != group_leader)
1734                         goto err_put_context;
1735                 /*
1736                  * Do not allow attaching to a group in a different
1737                  * task or CPU context:
1738                  */
1739                 if (group_leader->ctx != ctx)
1740                         goto err_put_context;
1741                 /*
1742                  * Only a group leader can be exclusive or pinned
1743                  */
1744                 if (hw_event.exclusive || hw_event.pinned)
1745                         goto err_put_context;
1746         }
1747
1748         ret = -EINVAL;
1749         counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
1750                                      GFP_KERNEL);
1751         if (!counter)
1752                 goto err_put_context;
1753
1754         ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
1755         if (ret < 0)
1756                 goto err_free_put_context;
1757
1758         counter_file = fget_light(ret, &fput_needed2);
1759         if (!counter_file)
1760                 goto err_free_put_context;
1761
1762         counter->filp = counter_file;
1763         mutex_lock(&ctx->mutex);
1764         perf_install_in_context(ctx, counter, cpu);
1765         mutex_unlock(&ctx->mutex);
1766
1767         fput_light(counter_file, fput_needed2);
1768
1769 out_fput:
1770         fput_light(group_file, fput_needed);
1771
1772         return ret;
1773
1774 err_free_put_context:
1775         kfree(counter);
1776
1777 err_put_context:
1778         put_context(ctx);
1779
1780         goto out_fput;
1781 }
1782
1783 /*
1784  * Initialize the perf_counter context in a task_struct:
1785  */
1786 static void
1787 __perf_counter_init_context(struct perf_counter_context *ctx,
1788                             struct task_struct *task)
1789 {
1790         memset(ctx, 0, sizeof(*ctx));
1791         spin_lock_init(&ctx->lock);
1792         mutex_init(&ctx->mutex);
1793         INIT_LIST_HEAD(&ctx->counter_list);
1794         ctx->task = task;
1795 }
1796
1797 /*
1798  * inherit a counter from parent task to child task:
1799  */
1800 static struct perf_counter *
1801 inherit_counter(struct perf_counter *parent_counter,
1802               struct task_struct *parent,
1803               struct perf_counter_context *parent_ctx,
1804               struct task_struct *child,
1805               struct perf_counter *group_leader,
1806               struct perf_counter_context *child_ctx)
1807 {
1808         struct perf_counter *child_counter;
1809
1810         /*
1811          * Instead of creating recursive hierarchies of counters,
1812          * we link inherited counters back to the original parent,
1813          * which is guaranteed to have a filp, which we use for
1814          * reference counting:
1815          */
1816         if (parent_counter->parent)
1817                 parent_counter = parent_counter->parent;
1818
1819         child_counter = perf_counter_alloc(&parent_counter->hw_event,
1820                                            parent_counter->cpu, child_ctx,
1821                                            group_leader, GFP_KERNEL);
1822         if (!child_counter)
1823                 return NULL;
1824
1825         /*
1826          * Link it up in the child's context:
1827          */
1828         child_counter->task = child;
1829         list_add_counter(child_counter, child_ctx);
1830         child_ctx->nr_counters++;
1831
1832         child_counter->parent = parent_counter;
1833         /*
1834          * inherit into child's child as well:
1835          */
1836         child_counter->hw_event.inherit = 1;
1837
1838         /*
1839          * Get a reference to the parent filp - we will fput it
1840          * when the child counter exits. This is safe to do because
1841          * we are in the parent and we know that the filp still
1842          * exists and has a nonzero count:
1843          */
1844         atomic_long_inc(&parent_counter->filp->f_count);
1845
1846         /*
1847          * Link this into the parent counter's child list
1848          */
1849         mutex_lock(&parent_counter->mutex);
1850         list_add_tail(&child_counter->child_list, &parent_counter->child_list);
1851
1852         /*
1853          * Make the child state follow the state of the parent counter,
1854          * not its hw_event.disabled bit.  We hold the parent's mutex,
1855          * so we won't race with perf_counter_{en,dis}able_family.
1856          */
1857         if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
1858                 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
1859         else
1860                 child_counter->state = PERF_COUNTER_STATE_OFF;
1861
1862         mutex_unlock(&parent_counter->mutex);
1863
1864         return child_counter;
1865 }
1866
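/*
 * Inherit a whole counter group: clone the group leader first, then
 * clone each sibling into the new leader's group.
 */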
1867 static int inherit_group(struct perf_counter *parent_counter,
1868               struct task_struct *parent,
1869               struct perf_counter_context *parent_ctx,
1870               struct task_struct *child,
1871               struct perf_counter_context *child_ctx)
1872 {
1873         struct perf_counter *leader;
1874         struct perf_counter *sub;
1875
1876         leader = inherit_counter(parent_counter, parent, parent_ctx,
1877                                  child, NULL, child_ctx);
1878         if (!leader)
1879                 return -ENOMEM;
1880         list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
1881                 if (!inherit_counter(sub, parent, parent_ctx,
1882                                      child, leader, child_ctx))
1883                         return -ENOMEM;
1884         }
1885         return 0;
1886 }
1887
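/*
 * Fold the final count of an exiting child counter back into its
 * parent, unlink it from the parent's child list and drop the filp
 * reference taken in inherit_counter().
 */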
1888 static void sync_child_counter(struct perf_counter *child_counter,
1889                                struct perf_counter *parent_counter)
1890 {
1891         u64 parent_val, child_val;
1892
1893         parent_val = atomic64_read(&parent_counter->count);
1894         child_val = atomic64_read(&child_counter->count);
1895
1896         /*
1897          * Add back the child's count to the parent's count:
1898          */
1899         atomic64_add(child_val, &parent_counter->count);
1900
1901         /*
1902          * Remove this counter from the parent's list
1903          */
1904         mutex_lock(&parent_counter->mutex);
1905         list_del_init(&child_counter->child_list);
1906         mutex_unlock(&parent_counter->mutex);
1907
1908         /*
1909          * Release the parent counter, if this was the last
1910          * reference to it.
1911          */
1912         fput(parent_counter->filp);
1913 }
1914
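/*
 * Tear down one counter of an exiting task: make sure it is scheduled
 * out, unlink it from the child context and feed its count (and the
 * counts of its siblings) back to the parent counters, if any.
 */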
1915 static void
1916 __perf_counter_exit_task(struct task_struct *child,
1917                          struct perf_counter *child_counter,
1918                          struct perf_counter_context *child_ctx)
1919 {
1920         struct perf_counter *parent_counter;
1921         struct perf_counter *sub, *tmp;
1922
1923         /*
1924          * If we do not self-reap then we have to wait for the
1925          * child task to unschedule (which is guaranteed to happen),
1926          * so that its counter is at its final count. (This
1927          * condition triggers rarely - child tasks usually get
1928          * off their CPU before the parent has a chance to
1929          * get this far into the reaping action.)
1930          */
1931         if (child != current) {
1932                 wait_task_inactive(child, 0);
1933                 list_del_init(&child_counter->list_entry);
1934         } else {
1935                 struct perf_cpu_context *cpuctx;
1936                 unsigned long flags;
1937                 u64 perf_flags;
1938
1939                 /*
1940                  * Disable and unlink this counter.
1941                  *
1942                  * Be careful about zapping the list - IRQ/NMI context
1943                  * could still be processing it:
1944                  */
1945                 curr_rq_lock_irq_save(&flags);
1946                 perf_flags = hw_perf_save_disable();
1947
1948                 cpuctx = &__get_cpu_var(perf_cpu_context);
1949
1950                 group_sched_out(child_counter, cpuctx, child_ctx);
1951
1952                 list_del_init(&child_counter->list_entry);
1953
1954                 child_ctx->nr_counters--;
1955
1956                 hw_perf_restore(perf_flags);
1957                 curr_rq_unlock_irq_restore(&flags);
1958         }
1959
1960         parent_counter = child_counter->parent;
1961         /*
1962          * It can happen that parent exits first, and has counters
1963          * that are still around due to the child reference. These
1964          * counters need to be zapped - but otherwise linger.
1965          */
1966         if (parent_counter) {
1967                 sync_child_counter(child_counter, parent_counter);
1968                 list_for_each_entry_safe(sub, tmp, &child_counter->sibling_list,
1969                                          list_entry) {
1970                         if (sub->parent) {
1971                                 sync_child_counter(sub, sub->parent);
1972                                 kfree(sub);
1973                         }
1974                 }
1975                 kfree(child_counter);
1976         }
1977 }
1978
1979 /*
1980  * When a child task exits, feed back counter values to parent counters.
1981  *
1982  * Note: we may be running in child context, but the PID is not hashed
1983  * anymore so new counters will not be added.
1984  */
1985 void perf_counter_exit_task(struct task_struct *child)
1986 {
1987         struct perf_counter *child_counter, *tmp;
1988         struct perf_counter_context *child_ctx;
1989
1990         child_ctx = &child->perf_counter_ctx;
1991
1992         if (likely(!child_ctx->nr_counters))
1993                 return;
1994
1995         list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
1996                                  list_entry)
1997                 __perf_counter_exit_task(child, child_counter, child_ctx);
1998 }
1999
2000 /*
2001  * Initialize the perf_counter context in task_struct
2002  */
2003 void perf_counter_init_task(struct task_struct *child)
2004 {
2005         struct perf_counter_context *child_ctx, *parent_ctx;
2006         struct perf_counter *counter;
2007         struct task_struct *parent = current;
2008
2009         child_ctx  =  &child->perf_counter_ctx;
2010         parent_ctx = &parent->perf_counter_ctx;
2011
2012         __perf_counter_init_context(child_ctx, child);
2013
2014         /*
2015          * This is executed from the parent task context, so inherit
2016          * counters that have been marked for cloning:
2017          */
2018
2019         if (likely(!parent_ctx->nr_counters))
2020                 return;
2021
2022         /*
2023          * Lock the parent list. No need to lock the child - not PID
2024          * hashed yet and not running, so nobody can access it.
2025          */
2026         mutex_lock(&parent_ctx->mutex);
2027
2028         /*
2029          * We don't have to disable NMIs - we are only looking at
2030          * the list, not manipulating it:
2031          */
2032         list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) {
2033                 if (!counter->hw_event.inherit)
2034                         continue;
2035
2036                 if (inherit_group(counter, parent,
2037                                   parent_ctx, child, child_ctx))
2038                         break;
2039         }
2040
2041         mutex_unlock(&parent_ctx->mutex);
2042 }
2043
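/*
 * Set up the per-CPU counter context when a CPU comes up and let the
 * architecture (re)initialize its counter hardware.
 */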
2044 static void __cpuinit perf_counter_init_cpu(int cpu)
2045 {
2046         struct perf_cpu_context *cpuctx;
2047
2048         cpuctx = &per_cpu(perf_cpu_context, cpu);
2049         __perf_counter_init_context(&cpuctx->ctx, NULL);
2050
2051         mutex_lock(&perf_resource_mutex);
2052         cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
2053         mutex_unlock(&perf_resource_mutex);
2054
2055         hw_perf_counter_setup(cpu);
2056 }
2057
2058 #ifdef CONFIG_HOTPLUG_CPU
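/*
 * Runs on the CPU that is going offline: remove all counters from its
 * per-CPU context.
 */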
2059 static void __perf_counter_exit_cpu(void *info)
2060 {
2061         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
2062         struct perf_counter_context *ctx = &cpuctx->ctx;
2063         struct perf_counter *counter, *tmp;
2064
2065         list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
2066                 __perf_counter_remove_from_context(counter);
2067 }
2068 static void perf_counter_exit_cpu(int cpu)
2069 {
2070         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
2071         struct perf_counter_context *ctx = &cpuctx->ctx;
2072
2073         mutex_lock(&ctx->mutex);
2074         smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
2075         mutex_unlock(&ctx->mutex);
2076 }
2077 #else
2078 static inline void perf_counter_exit_cpu(int cpu) { }
2079 #endif
2080
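/*
 * CPU hotplug callback: set up the per-CPU context on CPU_UP_PREPARE,
 * tear it down on CPU_DOWN_PREPARE.
 */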
2081 static int __cpuinit
2082 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
2083 {
2084         unsigned int cpu = (long)hcpu;
2085
2086         switch (action) {
2087
2088         case CPU_UP_PREPARE:
2089         case CPU_UP_PREPARE_FROZEN:
2090                 perf_counter_init_cpu(cpu);
2091                 break;
2092
2093         case CPU_DOWN_PREPARE:
2094         case CPU_DOWN_PREPARE_FROZEN:
2095                 perf_counter_exit_cpu(cpu);
2096                 break;
2097
2098         default:
2099                 break;
2100         }
2101
2102         return NOTIFY_OK;
2103 }
2104
2105 static struct notifier_block __cpuinitdata perf_cpu_nb = {
2106         .notifier_call          = perf_cpu_notify,
2107 };
2108
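/*
 * Initialize the boot CPU's context directly and register the hotplug
 * notifier for the remaining CPUs.
 */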
2109 static int __init perf_counter_init(void)
2110 {
2111         perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
2112                         (void *)(long)smp_processor_id());
2113         register_cpu_notifier(&perf_cpu_nb);
2114
2115         return 0;
2116 }
2117 early_initcall(perf_counter_init);
2118
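/*
 * sysfs interface (the "perf_counters" attribute group of the cpu
 * sysdev class): reserve_percpu sets how many counters are reserved
 * for per-CPU counters on each CPU (shrinking max_pertask
 * accordingly), overcommit is a 0/1 flag stored in perf_overcommit.
 * Writers serialize on perf_resource_mutex.
 */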
2119 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
2120 {
2121         return sprintf(buf, "%d\n", perf_reserved_percpu);
2122 }
2123
2124 static ssize_t
2125 perf_set_reserve_percpu(struct sysdev_class *class,
2126                         const char *buf,
2127                         size_t count)
2128 {
2129         struct perf_cpu_context *cpuctx;
2130         unsigned long val;
2131         int err, cpu, mpt;
2132
2133         err = strict_strtoul(buf, 10, &val);
2134         if (err)
2135                 return err;
2136         if (val > perf_max_counters)
2137                 return -EINVAL;
2138
2139         mutex_lock(&perf_resource_mutex);
2140         perf_reserved_percpu = val;
2141         for_each_online_cpu(cpu) {
2142                 cpuctx = &per_cpu(perf_cpu_context, cpu);
2143                 spin_lock_irq(&cpuctx->ctx.lock);
2144                 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
2145                           perf_max_counters - perf_reserved_percpu);
2146                 cpuctx->max_pertask = mpt;
2147                 spin_unlock_irq(&cpuctx->ctx.lock);
2148         }
2149         mutex_unlock(&perf_resource_mutex);
2150
2151         return count;
2152 }
2153
2154 static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
2155 {
2156         return sprintf(buf, "%d\n", perf_overcommit);
2157 }
2158
2159 static ssize_t
2160 perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
2161 {
2162         unsigned long val;
2163         int err;
2164
2165         err = strict_strtoul(buf, 10, &val);
2166         if (err)
2167                 return err;
2168         if (val > 1)
2169                 return -EINVAL;
2170
2171         mutex_lock(&perf_resource_mutex);
2172         perf_overcommit = val;
2173         mutex_unlock(&perf_resource_mutex);
2174
2175         return count;
2176 }
2177
2178 static SYSDEV_CLASS_ATTR(
2179                                 reserve_percpu,
2180                                 0644,
2181                                 perf_show_reserve_percpu,
2182                                 perf_set_reserve_percpu
2183                         );
2184
2185 static SYSDEV_CLASS_ATTR(
2186                                 overcommit,
2187                                 0644,
2188                                 perf_show_overcommit,
2189                                 perf_set_overcommit
2190                         );
2191
2192 static struct attribute *perfclass_attrs[] = {
2193         &attr_reserve_percpu.attr,
2194         &attr_overcommit.attr,
2195         NULL
2196 };
2197
2198 static struct attribute_group perfclass_attr_group = {
2199         .attrs                  = perfclass_attrs,
2200         .name                   = "perf_counters",
2201 };
2202
2203 static int __init perf_counter_sysfs_init(void)
2204 {
2205         return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
2206                                   &perfclass_attr_group);
2207 }
2208 device_initcall(perf_counter_sysfs_init);