perfcounters: use hw_event.disable flag
kernel/perf_counter.c
1 /*
2  * Performance counter core code
3  *
4  *  Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
6  *
7  *  For licensing details see kernel-base/COPYING
8  */
9
10 #include <linux/fs.h>
11 #include <linux/cpu.h>
12 #include <linux/smp.h>
13 #include <linux/file.h>
14 #include <linux/poll.h>
15 #include <linux/sysfs.h>
16 #include <linux/ptrace.h>
17 #include <linux/percpu.h>
18 #include <linux/uaccess.h>
19 #include <linux/syscalls.h>
20 #include <linux/anon_inodes.h>
21 #include <linux/perf_counter.h>
22
23 /*
24  * Each CPU has a list of per CPU counters:
25  */
26 DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
27
28 int perf_max_counters __read_mostly = 1;
29 static int perf_reserved_percpu __read_mostly;
30 static int perf_overcommit __read_mostly = 1;
31
32 /*
33  * Mutex for (sysadmin-configurable) counter reservations:
34  */
35 static DEFINE_MUTEX(perf_resource_mutex);
36
37 /*
38  * Architecture provided APIs - weak aliases:
39  */
40 extern __weak const struct hw_perf_counter_ops *
41 hw_perf_counter_init(struct perf_counter *counter)
42 {
43         return ERR_PTR(-EINVAL);
44 }
45
46 u64 __weak hw_perf_save_disable(void)           { return 0; }
47 void __weak hw_perf_restore(u64 ctrl)           { }
48 void __weak hw_perf_counter_setup(void)         { }
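
/*
 * Illustrative sketch only (not part of this file): an architecture
 * backend would provide strong definitions of the weak hooks above.
 * The helper and ops names below are hypothetical:
 *
 *	const struct hw_perf_counter_ops *
 *	hw_perf_counter_init(struct perf_counter *counter)
 *	{
 *		if (!arch_pmu_supports(counter))
 *			return NULL;
 *		return &arch_pmu_counter_ops;
 *	}
 *
 * hw_perf_save_disable()/hw_perf_restore() globally stop and restart
 * the PMU so that counter-list manipulation is safe against NMIs, and
 * hw_perf_counter_setup() is invoked from perf_counter_init_cpu() below.
 */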
49
50 static void
51 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
52 {
53         struct perf_counter *group_leader = counter->group_leader;
54
55         /*
56          * Depending on whether it is a standalone or sibling counter,
57          * add it straight to the context's counter list, or to the group
58          * leader's sibling list:
59          */
60         if (counter->group_leader == counter)
61                 list_add_tail(&counter->list_entry, &ctx->counter_list);
62         else
63                 list_add_tail(&counter->list_entry, &group_leader->sibling_list);
64 }
65
66 static void
67 list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
68 {
69         struct perf_counter *sibling, *tmp;
70
71         list_del_init(&counter->list_entry);
72
73         /*
74          * If this was a group counter with sibling counters then
75          * upgrade the siblings to singleton counters by adding them
76          * to the context list directly:
77          */
78         list_for_each_entry_safe(sibling, tmp,
79                                  &counter->sibling_list, list_entry) {
80
81                 list_del_init(&sibling->list_entry);
82                 list_add_tail(&sibling->list_entry, &ctx->counter_list);
83                 sibling->group_leader = sibling;
84         }
85 }
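
/*
 * Resulting topology for a counter group with leader L and siblings
 * S1, S2:
 *
 *	ctx->counter_list: ... -> L -> ...
 *	L->sibling_list:   S1 -> S2
 *
 * Only group leaders sit on the context's counter list; siblings hang
 * off their leader and are scheduled in and out as one unit by
 * group_sched_in()/group_sched_out() below.
 */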
86
87 /*
88  * Cross CPU call to remove a performance counter
89  *
90  * We disable the counter on the hardware level first. After that we
91  * remove it from the context list.
92  */
93 static void __perf_counter_remove_from_context(void *info)
94 {
95         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
96         struct perf_counter *counter = info;
97         struct perf_counter_context *ctx = counter->ctx;
98         unsigned long flags;
99         u64 perf_flags;
100
101         /*
102          * If this is a task context, we need to check whether it is
103          * the current task context of this cpu. If not, it has been
104          * scheduled out before the smp call arrived.
105          */
106         if (ctx->task && cpuctx->task_ctx != ctx)
107                 return;
108
109         spin_lock_irqsave(&ctx->lock, flags);
110
111         if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
112                 counter->hw_ops->hw_perf_counter_disable(counter);
113                 counter->state = PERF_COUNTER_STATE_INACTIVE;
114                 ctx->nr_active--;
115                 cpuctx->active_oncpu--;
116                 counter->task = NULL;
117         }
118         ctx->nr_counters--;
119
120         /*
121          * Protect the list operation against NMI by disabling the
122          * counters on a global level. NOP for non-NMI based counters.
123          */
124         perf_flags = hw_perf_save_disable();
125         list_del_counter(counter, ctx);
126         hw_perf_restore(perf_flags);
127
128         if (!ctx->task) {
129                 /*
130                  * Allow more per task counters with respect to the
131                  * reservation:
132                  */
133                 cpuctx->max_pertask =
134                         min(perf_max_counters - ctx->nr_counters,
135                             perf_max_counters - perf_reserved_percpu);
136         }
137
138         spin_unlock_irqrestore(&ctx->lock, flags);
139 }
140
141
142 /*
143  * Remove the counter from a task's (or a CPU's) list of counters.
144  *
145  * Must be called with counter->mutex held.
146  *
147  * CPU counters are removed with an smp call. For task counters we only
148  * make the smp call when the task is on a CPU.
149  */
150 static void perf_counter_remove_from_context(struct perf_counter *counter)
151 {
152         struct perf_counter_context *ctx = counter->ctx;
153         struct task_struct *task = ctx->task;
154
155         if (!task) {
156                 /*
157                  * Per cpu counters are removed via an smp call and
158                  * the removal is always successful.
159                  */
160                 smp_call_function_single(counter->cpu,
161                                          __perf_counter_remove_from_context,
162                                          counter, 1);
163                 return;
164         }
165
166 retry:
167         task_oncpu_function_call(task, __perf_counter_remove_from_context,
168                                  counter);
169
170         spin_lock_irq(&ctx->lock);
171         /*
172          * If the context is active we need to retry the smp call.
173          */
174         if (ctx->nr_active && !list_empty(&counter->list_entry)) {
175                 spin_unlock_irq(&ctx->lock);
176                 goto retry;
177         }
178
179         /*
180          * The lock prevents this context from being scheduled in, so
181          * we can remove the counter safely if the call above did not
182          * succeed.
183          */
184         if (!list_empty(&counter->list_entry)) {
185                 ctx->nr_counters--;
186                 list_del_counter(counter, ctx);
187                 counter->task = NULL;
188         }
189         spin_unlock_irq(&ctx->lock);
190 }
191
192 /*
193  * Cross CPU call to install and enable a performance counter
194  */
195 static void __perf_install_in_context(void *info)
196 {
197         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
198         struct perf_counter *counter = info;
199         struct perf_counter_context *ctx = counter->ctx;
200         int cpu = smp_processor_id();
201         unsigned long flags;
202         u64 perf_flags;
203
204         /*
205          * If this is a task context, we need to check whether it is
206          * the current task context of this cpu. If not, it has been
207          * scheduled out before the smp call arrived.
208          */
209         if (ctx->task && cpuctx->task_ctx != ctx)
210                 return;
211
212         spin_lock_irqsave(&ctx->lock, flags);
213
214         /*
215          * Protect the list operation against NMI by disabling the
216          * counters on a global level. NOP for non-NMI based counters.
217          */
218         perf_flags = hw_perf_save_disable();
219         list_add_counter(counter, ctx);
220         hw_perf_restore(perf_flags);
221
222         ctx->nr_counters++;
223
224         if (cpuctx->active_oncpu < perf_max_counters) {
225                 counter->state = PERF_COUNTER_STATE_ACTIVE;
226                 counter->oncpu = cpu;
227                 ctx->nr_active++;
228                 cpuctx->active_oncpu++;
229                 counter->hw_ops->hw_perf_counter_enable(counter);
230         }
231
232         if (!ctx->task && cpuctx->max_pertask)
233                 cpuctx->max_pertask--;
234
235         spin_unlock_irqrestore(&ctx->lock, flags);
236 }
237
238 /*
239  * Attach a performance counter to a context
240  *
241  * First we add the counter to the list with the hardware enable bit
242  * in counter->hw_config cleared.
243  *
244  * If the counter is attached to a task which is on a CPU we use an smp
245  * call to enable it in the task context. The task might have been
246  * scheduled away, but we check this in the smp call again.
247  */
248 static void
249 perf_install_in_context(struct perf_counter_context *ctx,
250                         struct perf_counter *counter,
251                         int cpu)
252 {
253         struct task_struct *task = ctx->task;
254
255         counter->ctx = ctx;
256         if (!task) {
257                 /*
258                  * Per cpu counters are installed via an smp call and
259                  * the install is always sucessful.
260                  */
261                 smp_call_function_single(cpu, __perf_install_in_context,
262                                          counter, 1);
263                 return;
264         }
265
266         counter->task = task;
267 retry:
268         task_oncpu_function_call(task, __perf_install_in_context,
269                                  counter);
270
271         spin_lock_irq(&ctx->lock);
272         /*
273          * If the context is active and the counter has not been added,
274          * we need to retry the smp call.
274          */
275         if (ctx->nr_active && list_empty(&counter->list_entry)) {
276                 spin_unlock_irq(&ctx->lock);
277                 goto retry;
278         }
279
280         /*
281          * The lock prevents this context from being scheduled in, so
282          * we can add the counter safely if the call above did not
283          * succeed.
284          */
285         if (list_empty(&counter->list_entry)) {
286                 list_add_counter(counter, ctx);
287                 ctx->nr_counters++;
288         }
289         spin_unlock_irq(&ctx->lock);
290 }
291
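/*
 * Counter state transitions as implemented in this file:
 *
 *	OFF      - set at creation time when hw_event.disabled is set,
 *		   or by perf_counter_task_disable()
 *	INACTIVE - set by perf_counter_task_enable(), or whenever the
 *		   counter is scheduled out or removed
 *	ACTIVE   - set by counter_sched_in() and __perf_install_in_context()
 *
 * counter_sched_in() skips counters in the OFF state, so a disabled
 * counter stays off across context switches until it is re-enabled.
 */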
292 static void
293 counter_sched_out(struct perf_counter *counter,
294                   struct perf_cpu_context *cpuctx,
295                   struct perf_counter_context *ctx)
296 {
297         if (counter->state != PERF_COUNTER_STATE_ACTIVE)
298                 return;
299
300         counter->hw_ops->hw_perf_counter_disable(counter);
301         counter->state = PERF_COUNTER_STATE_INACTIVE;
302         counter->oncpu = -1;
303
304         cpuctx->active_oncpu--;
305         ctx->nr_active--;
306 }
307
308 static void
309 group_sched_out(struct perf_counter *group_counter,
310                 struct perf_cpu_context *cpuctx,
311                 struct perf_counter_context *ctx)
312 {
313         struct perf_counter *counter;
314
315         counter_sched_out(group_counter, cpuctx, ctx);
316
317         /*
318          * Schedule out siblings (if any):
319          */
320         list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
321                 counter_sched_out(counter, cpuctx, ctx);
322 }
323
324 /*
325  * Called from scheduler to remove the counters of the current task,
326  * with interrupts disabled.
327  *
328  * We stop each counter and update the counter value in counter->count.
329  *
330  * This does not protect us against NMI, but hw_perf_counter_disable()
331  * sets the disabled bit in the control field of counter _before_
332  * accessing the counter control register. If an NMI hits, then it will
333  * not restart the counter.
334  */
335 void perf_counter_task_sched_out(struct task_struct *task, int cpu)
336 {
337         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
338         struct perf_counter_context *ctx = &task->perf_counter_ctx;
339         struct perf_counter *counter;
340
341         if (likely(!cpuctx->task_ctx))
342                 return;
343
344         spin_lock(&ctx->lock);
345         if (ctx->nr_active) {
346                 list_for_each_entry(counter, &ctx->counter_list, list_entry)
347                         group_sched_out(counter, cpuctx, ctx);
348         }
349         spin_unlock(&ctx->lock);
350         cpuctx->task_ctx = NULL;
351 }
352
353 static void
354 counter_sched_in(struct perf_counter *counter,
355                  struct perf_cpu_context *cpuctx,
356                  struct perf_counter_context *ctx,
357                  int cpu)
358 {
359         if (counter->state == PERF_COUNTER_STATE_OFF)
360                 return;
361
362         counter->hw_ops->hw_perf_counter_enable(counter);
363         counter->state = PERF_COUNTER_STATE_ACTIVE;
364         counter->oncpu = cpu;   /* TODO: put 'cpu' into cpuctx->cpu */
365
366         cpuctx->active_oncpu++;
367         ctx->nr_active++;
368 }
369
370 static void
371 group_sched_in(struct perf_counter *group_counter,
372                struct perf_cpu_context *cpuctx,
373                struct perf_counter_context *ctx,
374                int cpu)
375 {
376         struct perf_counter *counter;
377
378         counter_sched_in(group_counter, cpuctx, ctx, cpu);
379
380         /*
381          * Schedule in siblings as one group (if any):
382          */
383         list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
384                 counter_sched_in(counter, cpuctx, ctx, cpu);
385 }
386
387 /*
388  * Called from scheduler to add the counters of the current task
389  * with interrupts disabled.
390  *
391  * We restore the counter value and then enable it.
392  *
393  * This does not protect us against NMI, but hw_perf_counter_enable()
394  * sets the enabled bit in the control field of counter _before_
395  * accessing the counter control register. If an NMI hits, then it will
396  * keep the counter running.
397  */
398 void perf_counter_task_sched_in(struct task_struct *task, int cpu)
399 {
400         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
401         struct perf_counter_context *ctx = &task->perf_counter_ctx;
402         struct perf_counter *counter;
403
404         if (likely(!ctx->nr_counters))
405                 return;
406
407         spin_lock(&ctx->lock);
408         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
409                 if (ctx->nr_active == cpuctx->max_pertask)
410                         break;
411
412                 /*
413                  * Listen to the 'cpu' scheduling filter constraint
414                  * of counters:
415                  */
416                 if (counter->cpu != -1 && counter->cpu != cpu)
417                         continue;
418
419                 group_sched_in(counter, cpuctx, ctx, cpu);
420         }
421         spin_unlock(&ctx->lock);
422
423         cpuctx->task_ctx = ctx;
424 }
425
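/*
 * Turn off all counters of the current task: schedule them out and mark
 * each one PERF_COUNTER_STATE_OFF, so that subsequent context switches
 * leave them alone until perf_counter_task_enable() is called.
 */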
426 int perf_counter_task_disable(void)
427 {
428         struct task_struct *curr = current;
429         struct perf_counter_context *ctx = &curr->perf_counter_ctx;
430         struct perf_counter *counter;
431         u64 perf_flags;
432         int cpu;
433
434         if (likely(!ctx->nr_counters))
435                 return 0;
436
437         local_irq_disable();
438         cpu = smp_processor_id();
439
440         perf_counter_task_sched_out(curr, cpu);
441
442         spin_lock(&ctx->lock);
443
444         /*
445          * Disable all the counters:
446          */
447         perf_flags = hw_perf_save_disable();
448
449         list_for_each_entry(counter, &ctx->counter_list, list_entry)
450                 counter->state = PERF_COUNTER_STATE_OFF;
451
452         hw_perf_restore(perf_flags);
453
454         spin_unlock(&ctx->lock);
455
456         local_irq_enable();
457
458         return 0;
459 }
460
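/*
 * Re-enable the counters of the current task: flip counters that were
 * turned OFF back to INACTIVE and schedule them back in.
 */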
461 int perf_counter_task_enable(void)
462 {
463         struct task_struct *curr = current;
464         struct perf_counter_context *ctx = &curr->perf_counter_ctx;
465         struct perf_counter *counter;
466         u64 perf_flags;
467         int cpu;
468
469         if (likely(!ctx->nr_counters))
470                 return 0;
471
472         local_irq_disable();
473         cpu = smp_processor_id();
474
475         spin_lock(&ctx->lock);
476
477         /*
478          * Disable the counters globally while we flip the OFF ones back to INACTIVE:
479          */
480         perf_flags = hw_perf_save_disable();
481
482         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
483                 if (counter->state != PERF_COUNTER_STATE_OFF)
484                         continue;
485                 counter->state = PERF_COUNTER_STATE_INACTIVE;
486         }
487         hw_perf_restore(perf_flags);
488
489         spin_unlock(&ctx->lock);
490
491         perf_counter_task_sched_in(curr, cpu);
492
493         local_irq_enable();
494
495         return 0;
496 }
497
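/*
 * Called on the scheduler tick path: rotate the context's counter list
 * so that, when there are more counters than fit (cpuctx->max_pertask),
 * each of them gets scheduled onto the hardware in turn.
 */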
498 void perf_counter_task_tick(struct task_struct *curr, int cpu)
499 {
500         struct perf_counter_context *ctx = &curr->perf_counter_ctx;
501         struct perf_counter *counter;
502         u64 perf_flags;
503
504         if (likely(!ctx->nr_counters))
505                 return;
506
507         perf_counter_task_sched_out(curr, cpu);
508
509         spin_lock(&ctx->lock);
510
511         /*
512          * Rotate the first entry last (works just fine for group counters too):
513          */
514         perf_flags = hw_perf_save_disable();
515         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
516                 list_del(&counter->list_entry);
517                 list_add_tail(&counter->list_entry, &ctx->counter_list);
518                 break;
519         }
520         hw_perf_restore(perf_flags);
521
522         spin_unlock(&ctx->lock);
523
524         perf_counter_task_sched_in(curr, cpu);
525 }
526
527 /*
528  * Cross CPU call to read the hardware counter
529  */
530 static void __hw_perf_counter_read(void *info)
531 {
532         struct perf_counter *counter = info;
533
534         counter->hw_ops->hw_perf_counter_read(counter);
535 }
536
537 static u64 perf_counter_read(struct perf_counter *counter)
538 {
539         /*
540          * If counter is enabled and currently active on a CPU, update the
541          * value in the counter structure:
542          */
543         if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
544                 smp_call_function_single(counter->oncpu,
545                                          __hw_perf_counter_read, counter, 1);
546         }
547
548         return atomic64_read(&counter->count);
549 }
550
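/*
 * Each counter owns two perf_data buffers (counter->data[0] and [1]):
 * ->irqdata is filled by the sampling IRQ/NMI path (not in this file),
 * ->usrdata is drained by read().  perf_switch_irq_data() swaps the two
 * pointers in an NMI-safe way so the reader can drain whatever the IRQ
 * side has accumulated so far.
 */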
551 /*
552  * Cross CPU call to switch performance data pointers
553  */
554 static void __perf_switch_irq_data(void *info)
555 {
556         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
557         struct perf_counter *counter = info;
558         struct perf_counter_context *ctx = counter->ctx;
559         struct perf_data *oldirqdata = counter->irqdata;
560
561         /*
562          * If this is a task context, we need to check whether it is
563          * the current task context of this cpu. If not, it has been
564          * scheduled out before the smp call arrived.
565          */
566         if (ctx->task) {
567                 if (cpuctx->task_ctx != ctx)
568                         return;
569                 spin_lock(&ctx->lock);
570         }
571
572         /* Change the pointer in an NMI-safe way */
573         atomic_long_set((atomic_long_t *)&counter->irqdata,
574                         (unsigned long) counter->usrdata);
575         counter->usrdata = oldirqdata;
576
577         if (ctx->task)
578                 spin_unlock(&ctx->lock);
579 }
580
581 static struct perf_data *perf_switch_irq_data(struct perf_counter *counter)
582 {
583         struct perf_counter_context *ctx = counter->ctx;
584         struct perf_data *oldirqdata = counter->irqdata;
585         struct task_struct *task = ctx->task;
586
587         if (!task) {
588                 smp_call_function_single(counter->cpu,
589                                          __perf_switch_irq_data,
590                                          counter, 1);
591                 return counter->usrdata;
592         }
593
594 retry:
595         spin_lock_irq(&ctx->lock);
596         if (counter->state != PERF_COUNTER_STATE_ACTIVE) {
597                 counter->irqdata = counter->usrdata;
598                 counter->usrdata = oldirqdata;
599                 spin_unlock_irq(&ctx->lock);
600                 return oldirqdata;
601         }
602         spin_unlock_irq(&ctx->lock);
603         task_oncpu_function_call(task, __perf_switch_irq_data, counter);
604         /* Might have failed because the task was scheduled out */
605         if (counter->irqdata == oldirqdata)
606                 goto retry;
607
608         return counter->usrdata;
609 }
610
611 static void put_context(struct perf_counter_context *ctx)
612 {
613         if (ctx->task)
614                 put_task_struct(ctx->task);
615 }
616
617 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
618 {
619         struct perf_cpu_context *cpuctx;
620         struct perf_counter_context *ctx;
621         struct task_struct *task;
622
623         /*
624          * If cpu is not a wildcard then this is a percpu counter:
625          */
626         if (cpu != -1) {
627                 /* Must be root to operate on a CPU counter: */
628                 if (!capable(CAP_SYS_ADMIN))
629                         return ERR_PTR(-EACCES);
630
631                 if (cpu < 0 || cpu > num_possible_cpus())
632                         return ERR_PTR(-EINVAL);
633
634                 /*
635                  * We could be clever and allow attaching a counter to an
636                  * offline CPU and activate it when the CPU comes up, but
637                  * that's for later.
638                  */
639                 if (!cpu_isset(cpu, cpu_online_map))
640                         return ERR_PTR(-ENODEV);
641
642                 cpuctx = &per_cpu(perf_cpu_context, cpu);
643                 ctx = &cpuctx->ctx;
644
645                 return ctx;
646         }
647
648         rcu_read_lock();
649         if (!pid)
650                 task = current;
651         else
652                 task = find_task_by_vpid(pid);
653         if (task)
654                 get_task_struct(task);
655         rcu_read_unlock();
656
657         if (!task)
658                 return ERR_PTR(-ESRCH);
659
660         ctx = &task->perf_counter_ctx;
661         ctx->task = task;
662
663         /* Reuse ptrace permission checks for now. */
664         if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
665                 put_context(ctx);
666                 return ERR_PTR(-EACCES);
667         }
668
669         return ctx;
670 }
671
672 /*
673  * Called when the last reference to the file is gone.
674  */
675 static int perf_release(struct inode *inode, struct file *file)
676 {
677         struct perf_counter *counter = file->private_data;
678         struct perf_counter_context *ctx = counter->ctx;
679
680         file->private_data = NULL;
681
682         mutex_lock(&counter->mutex);
683
684         perf_counter_remove_from_context(counter);
685         put_context(ctx);
686
687         mutex_unlock(&counter->mutex);
688
689         kfree(counter);
690
691         return 0;
692 }
693
694 /*
695  * Read the performance counter - simple non-blocking version for now
696  */
697 static ssize_t
698 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
699 {
700         u64 cntval;
701
702         if (count != sizeof(cntval))
703                 return -EINVAL;
704
705         mutex_lock(&counter->mutex);
706         cntval = perf_counter_read(counter);
707         mutex_unlock(&counter->mutex);
708
709         return put_user(cntval, (u64 __user *) buf) ? -EFAULT : sizeof(cntval);
710 }
711
712 static ssize_t
713 perf_copy_usrdata(struct perf_data *usrdata, char __user *buf, size_t count)
714 {
715         if (!usrdata->len)
716                 return 0;
717
718         count = min(count, (size_t)usrdata->len);
719         if (copy_to_user(buf, usrdata->data + usrdata->rd_idx, count))
720                 return -EFAULT;
721
722         /* Adjust the counters */
723         usrdata->len -= count;
724         if (!usrdata->len)
725                 usrdata->rd_idx = 0;
726         else
727                 usrdata->rd_idx += count;
728
729         return count;
730 }
731
732 static ssize_t
733 perf_read_irq_data(struct perf_counter  *counter,
734                    char __user          *buf,
735                    size_t               count,
736                    int                  nonblocking)
737 {
738         struct perf_data *irqdata, *usrdata;
739         DECLARE_WAITQUEUE(wait, current);
740         ssize_t res;
741
742         irqdata = counter->irqdata;
743         usrdata = counter->usrdata;
744
745         if (usrdata->len + irqdata->len >= count)
746                 goto read_pending;
747
748         if (nonblocking)
749                 return -EAGAIN;
750
751         spin_lock_irq(&counter->waitq.lock);
752         __add_wait_queue(&counter->waitq, &wait);
753         for (;;) {
754                 set_current_state(TASK_INTERRUPTIBLE);
755                 if (usrdata->len + irqdata->len >= count)
756                         break;
757
758                 if (signal_pending(current))
759                         break;
760
761                 spin_unlock_irq(&counter->waitq.lock);
762                 schedule();
763                 spin_lock_irq(&counter->waitq.lock);
764         }
765         __remove_wait_queue(&counter->waitq, &wait);
766         __set_current_state(TASK_RUNNING);
767         spin_unlock_irq(&counter->waitq.lock);
768
769         if (usrdata->len + irqdata->len < count)
770                 return -ERESTARTSYS;
771 read_pending:
772         mutex_lock(&counter->mutex);
773
774         /* Drain pending data first: */
775         res = perf_copy_usrdata(usrdata, buf, count);
776         if (res < 0 || res == count)
777                 goto out;
778
779         /* Switch irq buffer: */
780         usrdata = perf_switch_irq_data(counter);
781         if (perf_copy_usrdata(usrdata, buf + res, count - res) < 0) {
782                 if (!res)
783                         res = -EFAULT;
784         } else {
785                 res = count;
786         }
787 out:
788         mutex_unlock(&counter->mutex);
789
790         return res;
791 }
792
793 static ssize_t
794 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
795 {
796         struct perf_counter *counter = file->private_data;
797
798         switch (counter->hw_event.record_type) {
799         case PERF_RECORD_SIMPLE:
800                 return perf_read_hw(counter, buf, count);
801
802         case PERF_RECORD_IRQ:
803         case PERF_RECORD_GROUP:
804                 return perf_read_irq_data(counter, buf, count,
805                                           file->f_flags & O_NONBLOCK);
806         }
807         return -EINVAL;
808 }
809
810 static unsigned int perf_poll(struct file *file, poll_table *wait)
811 {
812         struct perf_counter *counter = file->private_data;
813         unsigned int events = 0;
814         unsigned long flags;
815
816         poll_wait(file, &counter->waitq, wait);
817
818         spin_lock_irqsave(&counter->waitq.lock, flags);
819         if (counter->usrdata->len || counter->irqdata->len)
820                 events |= POLLIN;
821         spin_unlock_irqrestore(&counter->waitq.lock, flags);
822
823         return events;
824 }
825
826 static const struct file_operations perf_fops = {
827         .release                = perf_release,
828         .read                   = perf_read,
829         .poll                   = perf_poll,
830 };
831
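/*
 * Software counter implementations.  Each provides the same three
 * hw_perf_counter_ops methods (enable, disable, read) that the
 * architecture PMU code supplies for hardware counters, so the
 * scheduling code above does not need to tell the two apart.
 */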
832 static void cpu_clock_perf_counter_enable(struct perf_counter *counter)
833 {
834 }
835
836 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
837 {
838 }
839
840 static void cpu_clock_perf_counter_read(struct perf_counter *counter)
841 {
842         int cpu = raw_smp_processor_id();
843
844         atomic64_set(&counter->count, cpu_clock(cpu));
845 }
846
847 static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
848         .hw_perf_counter_enable         = cpu_clock_perf_counter_enable,
849         .hw_perf_counter_disable        = cpu_clock_perf_counter_disable,
850         .hw_perf_counter_read           = cpu_clock_perf_counter_read,
851 };
852
853 static void task_clock_perf_counter_update(struct perf_counter *counter)
854 {
855         u64 prev, now;
856         s64 delta;
857
858         prev = atomic64_read(&counter->hw.prev_count);
859         now = current->se.sum_exec_runtime;
860
861         atomic64_set(&counter->hw.prev_count, now);
862
863         delta = now - prev;
864         if (WARN_ON_ONCE(delta < 0))
865                 delta = 0;
866
867         atomic64_add(delta, &counter->count);
868 }
869
870 static void task_clock_perf_counter_read(struct perf_counter *counter)
871 {
872         task_clock_perf_counter_update(counter);
873 }
874
875 static void task_clock_perf_counter_enable(struct perf_counter *counter)
876 {
877         atomic64_set(&counter->hw.prev_count, current->se.sum_exec_runtime);
878 }
879
880 static void task_clock_perf_counter_disable(struct perf_counter *counter)
881 {
882         task_clock_perf_counter_update(counter);
883 }
884
885 static const struct hw_perf_counter_ops perf_ops_task_clock = {
886         .hw_perf_counter_enable         = task_clock_perf_counter_enable,
887         .hw_perf_counter_disable        = task_clock_perf_counter_disable,
888         .hw_perf_counter_read           = task_clock_perf_counter_read,
889 };
890
891 static u64 get_page_faults(void)
892 {
893         struct task_struct *curr = current;
894
895         return curr->maj_flt + curr->min_flt;
896 }
897
898 static void page_faults_perf_counter_update(struct perf_counter *counter)
899 {
900         u64 prev, now;
901         s64 delta;
902
903         prev = atomic64_read(&counter->hw.prev_count);
904         now = get_page_faults();
905
906         atomic64_set(&counter->hw.prev_count, now);
907
908         delta = now - prev;
909         if (WARN_ON_ONCE(delta < 0))
910                 delta = 0;
911
912         atomic64_add(delta, &counter->count);
913 }
914
915 static void page_faults_perf_counter_read(struct perf_counter *counter)
916 {
917         page_faults_perf_counter_update(counter);
918 }
919
920 static void page_faults_perf_counter_enable(struct perf_counter *counter)
921 {
922         /*
923          * page-faults is a per-task value already,
924          * so we don't have to clear it on switch-in.
925          */
926 }
927
928 static void page_faults_perf_counter_disable(struct perf_counter *counter)
929 {
930         page_faults_perf_counter_update(counter);
931 }
932
933 static const struct hw_perf_counter_ops perf_ops_page_faults = {
934         .hw_perf_counter_enable         = page_faults_perf_counter_enable,
935         .hw_perf_counter_disable        = page_faults_perf_counter_disable,
936         .hw_perf_counter_read           = page_faults_perf_counter_read,
937 };
938
939 static u64 get_context_switches(void)
940 {
941         struct task_struct *curr = current;
942
943         return curr->nvcsw + curr->nivcsw;
944 }
945
946 static void context_switches_perf_counter_update(struct perf_counter *counter)
947 {
948         u64 prev, now;
949         s64 delta;
950
951         prev = atomic64_read(&counter->hw.prev_count);
952         now = get_context_switches();
953
954         atomic64_set(&counter->hw.prev_count, now);
955
956         delta = now - prev;
957         if (WARN_ON_ONCE(delta < 0))
958                 delta = 0;
959
960         atomic64_add(delta, &counter->count);
961 }
962
963 static void context_switches_perf_counter_read(struct perf_counter *counter)
964 {
965         context_switches_perf_counter_update(counter);
966 }
967
968 static void context_switches_perf_counter_enable(struct perf_counter *counter)
969 {
970         /*
971          * curr->nvcsw + curr->nivcsw is a per-task value already,
972          * so we don't have to clear it on switch-in.
973          */
974 }
975
976 static void context_switches_perf_counter_disable(struct perf_counter *counter)
977 {
978         context_switches_perf_counter_update(counter);
979 }
980
981 static const struct hw_perf_counter_ops perf_ops_context_switches = {
982         .hw_perf_counter_enable         = context_switches_perf_counter_enable,
983         .hw_perf_counter_disable        = context_switches_perf_counter_disable,
984         .hw_perf_counter_read           = context_switches_perf_counter_read,
985 };
986
987 static inline u64 get_cpu_migrations(void)
988 {
989         return current->se.nr_migrations;
990 }
991
992 static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
993 {
994         u64 prev, now;
995         s64 delta;
996
997         prev = atomic64_read(&counter->hw.prev_count);
998         now = get_cpu_migrations();
999
1000         atomic64_set(&counter->hw.prev_count, now);
1001
1002         delta = now - prev;
1003         if (WARN_ON_ONCE(delta < 0))
1004                 delta = 0;
1005
1006         atomic64_add(delta, &counter->count);
1007 }
1008
1009 static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
1010 {
1011         cpu_migrations_perf_counter_update(counter);
1012 }
1013
1014 static void cpu_migrations_perf_counter_enable(struct perf_counter *counter)
1015 {
1016         /*
1017          * se.nr_migrations is a per-task value already,
1018          * so we don't have to clear it on switch-in.
1019          */
1020 }
1021
1022 static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
1023 {
1024         cpu_migrations_perf_counter_update(counter);
1025 }
1026
1027 static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
1028         .hw_perf_counter_enable         = cpu_migrations_perf_counter_enable,
1029         .hw_perf_counter_disable        = cpu_migrations_perf_counter_disable,
1030         .hw_perf_counter_read           = cpu_migrations_perf_counter_read,
1031 };
1032
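/*
 * Map a generic software event type to its counter ops.  Returns NULL
 * for types not handled here, in which case perf_counter_alloc() falls
 * back to the architecture's hw_perf_counter_init().
 */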
1033 static const struct hw_perf_counter_ops *
1034 sw_perf_counter_init(struct perf_counter *counter)
1035 {
1036         const struct hw_perf_counter_ops *hw_ops = NULL;
1037
1038         switch (counter->hw_event.type) {
1039         case PERF_COUNT_CPU_CLOCK:
1040                 hw_ops = &perf_ops_cpu_clock;
1041                 break;
1042         case PERF_COUNT_TASK_CLOCK:
1043                 hw_ops = &perf_ops_task_clock;
1044                 break;
1045         case PERF_COUNT_PAGE_FAULTS:
1046                 hw_ops = &perf_ops_page_faults;
1047                 break;
1048         case PERF_COUNT_CONTEXT_SWITCHES:
1049                 hw_ops = &perf_ops_context_switches;
1050                 break;
1051         case PERF_COUNT_CPU_MIGRATIONS:
1052                 hw_ops = &perf_ops_cpu_migrations;
1053                 break;
1054         default:
1055                 break;
1056         }
1057         return hw_ops;
1058 }
1059
1060 /*
1061  * Allocate and initialize a counter structure
1062  */
1063 static struct perf_counter *
1064 perf_counter_alloc(struct perf_counter_hw_event *hw_event,
1065                    int cpu,
1066                    struct perf_counter *group_leader,
1067                    gfp_t gfpflags)
1068 {
1069         const struct hw_perf_counter_ops *hw_ops;
1070         struct perf_counter *counter;
1071
1072         counter = kzalloc(sizeof(*counter), gfpflags);
1073         if (!counter)
1074                 return NULL;
1075
1076         /*
1077          * Single counters are their own group leaders, with an
1078          * empty sibling list:
1079          */
1080         if (!group_leader)
1081                 group_leader = counter;
1082
1083         mutex_init(&counter->mutex);
1084         INIT_LIST_HEAD(&counter->list_entry);
1085         INIT_LIST_HEAD(&counter->sibling_list);
1086         init_waitqueue_head(&counter->waitq);
1087
1088         counter->irqdata                = &counter->data[0];
1089         counter->usrdata                = &counter->data[1];
1090         counter->cpu                    = cpu;
1091         counter->hw_event               = *hw_event;
1092         counter->wakeup_pending         = 0;
1093         counter->group_leader           = group_leader;
1094         counter->hw_ops                 = NULL;
1095
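        /*
         * Honour the hw_event.disabled flag: start the counter in the
         * OFF state - counter_sched_in() skips OFF counters until
         * perf_counter_task_enable() flips them back to INACTIVE.
         */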
1096         if (hw_event->disabled)
1097                 counter->state = PERF_COUNTER_STATE_OFF;
1098
1099         hw_ops = NULL;
1100         if (!hw_event->raw && hw_event->type < 0)
1101                 hw_ops = sw_perf_counter_init(counter);
1102         if (!hw_ops)
1103                 hw_ops = hw_perf_counter_init(counter);
1104
1105         if (!hw_ops) {
1106                 kfree(counter);
1107                 return NULL;
1108         }
1109         counter->hw_ops = hw_ops;
1110
1111         return counter;
1112 }
1113
1114 /**
1115  * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
1116  *
1117  * @hw_event_uptr:      event type attributes for monitoring/sampling
1118  * @pid:                target pid
1119  * @cpu:                target cpu
1120  * @group_fd:           group leader counter fd
1121  */
1122 asmlinkage int
1123 sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr __user,
1124                       pid_t pid, int cpu, int group_fd)
1125 {
1126         struct perf_counter *counter, *group_leader;
1127         struct perf_counter_hw_event hw_event;
1128         struct perf_counter_context *ctx;
1129         struct file *counter_file = NULL;
1130         struct file *group_file = NULL;
1131         int fput_needed = 0;
1132         int fput_needed2 = 0;
1133         int ret;
1134
1135         if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
1136                 return -EFAULT;
1137
1138         /*
1139          * Get the target context (task or percpu):
1140          */
1141         ctx = find_get_context(pid, cpu);
1142         if (IS_ERR(ctx))
1143                 return PTR_ERR(ctx);
1144
1145         /*
1146          * Look up the group leader (we will attach this counter to it):
1147          */
1148         group_leader = NULL;
1149         if (group_fd != -1) {
1150                 ret = -EINVAL;
1151                 group_file = fget_light(group_fd, &fput_needed);
1152                 if (!group_file)
1153                         goto err_put_context;
1154                 if (group_file->f_op != &perf_fops)
1155                         goto err_put_context;
1156
1157                 group_leader = group_file->private_data;
1158                 /*
1159                  * Do not allow a recursive hierarchy (this new counter
1160                  * becoming the sibling of a counter that is itself a sibling):
1161                  */
1162                 if (group_leader->group_leader != group_leader)
1163                         goto err_put_context;
1164                 /*
1165                  * Do not allow attaching to a group in a different
1166                  * task or CPU context:
1167                  */
1168                 if (group_leader->ctx != ctx)
1169                         goto err_put_context;
1170         }
1171
1172         ret = -EINVAL;
1173         counter = perf_counter_alloc(&hw_event, cpu, group_leader, GFP_KERNEL);
1174         if (!counter)
1175                 goto err_put_context;
1176
1177         ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
1178         if (ret < 0)
1179                 goto err_free_put_context;
1180
1181         counter_file = fget_light(ret, &fput_needed2);
1182         if (!counter_file)
1183                 goto err_free_put_context;
1184
1185         counter->filp = counter_file;
1186         perf_install_in_context(ctx, counter, cpu);
1187
1188         fput_light(counter_file, fput_needed2);
1189
1190 out_fput:
1191         fput_light(group_file, fput_needed);
1192
1193         return ret;
1194
1195 err_free_put_context:
1196         kfree(counter);
1197
1198 err_put_context:
1199         put_context(ctx);
1200
1201         goto out_fput;
1202 }
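
/*
 * Illustrative user-space usage sketch - not part of this file, and it
 * assumes the syscall number is wired up for the architecture and the
 * hw_event ABI is exposed through a uapi header:
 *
 *	struct perf_counter_hw_event hw_event = {
 *		.type		= PERF_COUNT_TASK_CLOCK,
 *		.record_type	= PERF_RECORD_SIMPLE,
 *		.disabled	= 1,	(start in PERF_COUNTER_STATE_OFF)
 *	};
 *	u64 count;
 *	int fd;
 *
 *	fd = syscall(__NR_perf_counter_open, &hw_event, 0, -1, -1);
 *				(pid 0: current task, cpu -1: any CPU,
 *				 group_fd -1: no group leader)
 *
 *	... enable the counters, run the workload ...
 *
 *	read(fd, &count, sizeof(count));
 */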
1203
1204 /*
1205  * Initialize the perf_counter context in a task_struct:
1206  */
1207 static void
1208 __perf_counter_init_context(struct perf_counter_context *ctx,
1209                             struct task_struct *task)
1210 {
1211         memset(ctx, 0, sizeof(*ctx));
1212         spin_lock_init(&ctx->lock);
1213         INIT_LIST_HEAD(&ctx->counter_list);
1214         ctx->task = task;
1215 }
1216
1217 /*
1218  * inherit a counter from parent task to child task:
1219  */
1220 static int
1221 inherit_counter(struct perf_counter *parent_counter,
1222               struct task_struct *parent,
1223               struct perf_counter_context *parent_ctx,
1224               struct task_struct *child,
1225               struct perf_counter_context *child_ctx)
1226 {
1227         struct perf_counter *child_counter;
1228
1229         child_counter = perf_counter_alloc(&parent_counter->hw_event,
1230                                             parent_counter->cpu, NULL,
1231                                             GFP_ATOMIC);
1232         if (!child_counter)
1233                 return -ENOMEM;
1234
1235         /*
1236          * Link it up in the child's context:
1237          */
1238         child_counter->ctx = child_ctx;
1239         child_counter->task = child;
1240         list_add_counter(child_counter, child_ctx);
1241         child_ctx->nr_counters++;
1242
1243         child_counter->parent = parent_counter;
1244         parent_counter->nr_inherited++;
1245         /*
1246          * Inherit it into the child's children as well:
1247          */
1248         child_counter->hw_event.inherit = 1;
1249
1250         /*
1251          * Get a reference to the parent filp - we will fput it
1252          * when the child counter exits. This is safe to do because
1253          * we are in the parent and we know that the filp still
1254          * exists and has a nonzero count:
1255          */
1256         atomic_long_inc(&parent_counter->filp->f_count);
1257
1258         return 0;
1259 }
1260
1261 static void
1262 __perf_counter_exit_task(struct task_struct *child,
1263                          struct perf_counter *child_counter,
1264                          struct perf_counter_context *child_ctx)
1265 {
1266         struct perf_counter *parent_counter;
1267         u64 parent_val, child_val;
1268         u64 perf_flags;
1269
1270         /*
1271          * Disable and unlink this counter.
1272          *
1273          * Be careful about zapping the list - IRQ/NMI context
1274          * could still be processing it:
1275          */
1276         local_irq_disable();
1277         perf_flags = hw_perf_save_disable();
1278
1279         if (child_counter->state == PERF_COUNTER_STATE_ACTIVE) {
1280                 struct perf_cpu_context *cpuctx;
1281
1282                 cpuctx = &__get_cpu_var(perf_cpu_context);
1283
1284                 child_counter->hw_ops->hw_perf_counter_disable(child_counter);
1285                 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
1286                 child_counter->oncpu = -1;
1287
1288                 cpuctx->active_oncpu--;
1289                 child_ctx->nr_active--;
1290         }
1291
1292         list_del_init(&child_counter->list_entry);
1293
1294         hw_perf_restore(perf_flags);
1295         local_irq_enable();
1296
1297         parent_counter = child_counter->parent;
1298         /*
1299          * It can happen that the parent exits first, and has counters
1300          * that are still around due to the child reference. These
1301          * counters need to be zapped - but otherwise they linger.
1302          */
1303         if (!parent_counter)
1304                 return;
1305
1306         parent_val = atomic64_read(&parent_counter->count);
1307         child_val = atomic64_read(&child_counter->count);
1308
1309         /*
1310          * Add back the child's count to the parent's count:
1311          */
1312         atomic64_add(child_val, &parent_counter->count);
1313
1314         fput(parent_counter->filp);
1315
1316         kfree(child_counter);
1317 }
1318
1319 /*
1320  * When a child task exits, feed back counter values to parent counters.
1321  *
1322  * Note: we are running in child context, but the PID is not hashed
1323  * anymore so new counters will not be added.
1324  */
1325 void perf_counter_exit_task(struct task_struct *child)
1326 {
1327         struct perf_counter *child_counter, *tmp;
1328         struct perf_counter_context *child_ctx;
1329
1330         child_ctx = &child->perf_counter_ctx;
1331
1332         if (likely(!child_ctx->nr_counters))
1333                 return;
1334
1335         list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
1336                                  list_entry)
1337                 __perf_counter_exit_task(child, child_counter, child_ctx);
1338 }
1339
1340 /*
1341  * Initialize the perf_counter context in task_struct
1342  */
1343 void perf_counter_init_task(struct task_struct *child)
1344 {
1345         struct perf_counter_context *child_ctx, *parent_ctx;
1346         struct perf_counter *counter, *parent_counter;
1347         struct task_struct *parent = current;
1348         unsigned long flags;
1349
1350         child_ctx  =  &child->perf_counter_ctx;
1351         parent_ctx = &parent->perf_counter_ctx;
1352
1353         __perf_counter_init_context(child_ctx, child);
1354
1355         /*
1356          * This is executed from the parent task context, so inherit
1357          * counters that have been marked for cloning:
1358          */
1359
1360         if (likely(!parent_ctx->nr_counters))
1361                 return;
1362
1363         /*
1364          * Lock the parent list. No need to lock the child - not PID
1365          * hashed yet and not running, so nobody can access it.
1366          */
1367         spin_lock_irqsave(&parent_ctx->lock, flags);
1368
1369         /*
1370          * We dont have to disable NMIs - we are only looking at
1371          * the list, not manipulating it:
1372          * We don't have to disable NMIs - we are only looking at
1373         list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) {
1374                 if (!counter->hw_event.inherit || counter->group_leader != counter)
1375                         continue;
1376
1377                 /*
1378                  * Instead of creating recursive hierarchies of counters,
1379                  * we link inherited counters back to the original parent,
1380                  * which is guaranteed to have a filp, and we use that filp
1381                  * as the reference count:
1382                  */
1383                 parent_counter = counter;
1384                 if (counter->parent)
1385                         parent_counter = counter->parent;
1386
1387                 if (inherit_counter(parent_counter, parent,
1388                                   parent_ctx, child, child_ctx))
1389                         break;
1390         }
1391
1392         spin_unlock_irqrestore(&parent_ctx->lock, flags);
1393 }
1394
1395 static void __cpuinit perf_counter_init_cpu(int cpu)
1396 {
1397         struct perf_cpu_context *cpuctx;
1398
1399         cpuctx = &per_cpu(perf_cpu_context, cpu);
1400         __perf_counter_init_context(&cpuctx->ctx, NULL);
1401
1402         mutex_lock(&perf_resource_mutex);
1403         cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
1404         mutex_unlock(&perf_resource_mutex);
1405
1406         hw_perf_counter_setup();
1407 }
1408
1409 #ifdef CONFIG_HOTPLUG_CPU
1410 static void __perf_counter_exit_cpu(void *info)
1411 {
1412         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1413         struct perf_counter_context *ctx = &cpuctx->ctx;
1414         struct perf_counter *counter, *tmp;
1415
1416         list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
1417                 __perf_counter_remove_from_context(counter);
1418
1419 }
1420 static void perf_counter_exit_cpu(int cpu)
1421 {
1422         smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
1423 }
1424 #else
1425 static inline void perf_counter_exit_cpu(int cpu) { }
1426 #endif
1427
1428 static int __cpuinit
1429 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
1430 {
1431         unsigned int cpu = (long)hcpu;
1432
1433         switch (action) {
1434
1435         case CPU_UP_PREPARE:
1436         case CPU_UP_PREPARE_FROZEN:
1437                 perf_counter_init_cpu(cpu);
1438                 break;
1439
1440         case CPU_DOWN_PREPARE:
1441         case CPU_DOWN_PREPARE_FROZEN:
1442                 perf_counter_exit_cpu(cpu);
1443                 break;
1444
1445         default:
1446                 break;
1447         }
1448
1449         return NOTIFY_OK;
1450 }
1451
1452 static struct notifier_block __cpuinitdata perf_cpu_nb = {
1453         .notifier_call          = perf_cpu_notify,
1454 };
1455
1456 static int __init perf_counter_init(void)
1457 {
1458         perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
1459                         (void *)(long)smp_processor_id());
1460         register_cpu_notifier(&perf_cpu_nb);
1461
1462         return 0;
1463 }
1464 early_initcall(perf_counter_init);
1465
1466 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
1467 {
1468         return sprintf(buf, "%d\n", perf_reserved_percpu);
1469 }
1470
1471 static ssize_t
1472 perf_set_reserve_percpu(struct sysdev_class *class,
1473                         const char *buf,
1474                         size_t count)
1475 {
1476         struct perf_cpu_context *cpuctx;
1477         unsigned long val;
1478         int err, cpu, mpt;
1479
1480         err = strict_strtoul(buf, 10, &val);
1481         if (err)
1482                 return err;
1483         if (val > perf_max_counters)
1484                 return -EINVAL;
1485
1486         mutex_lock(&perf_resource_mutex);
1487         perf_reserved_percpu = val;
1488         for_each_online_cpu(cpu) {
1489                 cpuctx = &per_cpu(perf_cpu_context, cpu);
1490                 spin_lock_irq(&cpuctx->ctx.lock);
1491                 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
1492                           perf_max_counters - perf_reserved_percpu);
1493                 cpuctx->max_pertask = mpt;
1494                 spin_unlock_irq(&cpuctx->ctx.lock);
1495         }
1496         mutex_unlock(&perf_resource_mutex);
1497
1498         return count;
1499 }
1500
1501 static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
1502 {
1503         return sprintf(buf, "%d\n", perf_overcommit);
1504 }
1505
1506 static ssize_t
1507 perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
1508 {
1509         unsigned long val;
1510         int err;
1511
1512         err = strict_strtoul(buf, 10, &val);
1513         if (err)
1514                 return err;
1515         if (val > 1)
1516                 return -EINVAL;
1517
1518         mutex_lock(&perf_resource_mutex);
1519         perf_overcommit = val;
1520         mutex_unlock(&perf_resource_mutex);
1521
1522         return count;
1523 }
1524
1525 static SYSDEV_CLASS_ATTR(
1526                                 reserve_percpu,
1527                                 0644,
1528                                 perf_show_reserve_percpu,
1529                                 perf_set_reserve_percpu
1530                         );
1531
1532 static SYSDEV_CLASS_ATTR(
1533                                 overcommit,
1534                                 0644,
1535                                 perf_show_overcommit,
1536                                 perf_set_overcommit
1537                         );
1538
1539 static struct attribute *perfclass_attrs[] = {
1540         &attr_reserve_percpu.attr,
1541         &attr_overcommit.attr,
1542         NULL
1543 };
1544
1545 static struct attribute_group perfclass_attr_group = {
1546         .attrs                  = perfclass_attrs,
1547         .name                   = "perf_counters",
1548 };
1549
1550 static int __init perf_counter_sysfs_init(void)
1551 {
1552         return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
1553                                   &perfclass_attr_group);
1554 }
1555 device_initcall(perf_counter_sysfs_init);
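
/*
 * Note: the two attributes above are created in the "cpu" sysdev class,
 * so with the standard sysdev layout they appear as
 * /sys/devices/system/cpu/perf_counters/reserve_percpu and
 * /sys/devices/system/cpu/perf_counters/overcommit (mode 0644).
 */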