/*
 * Debug Store support.
 *
 * This provides a low-level interface to the hardware's Debug Store
 * feature that is used for branch trace store (BTS) and
 * precise-event based sampling (PEBS).
 *
 * It manages:
 * - DS and BTS hardware configuration
 * - buffer overflow handling (to be done)
 *
 * It does not do:
 * - security checking (is the caller allowed to trace the task)
 * - buffer allocation (memory accounting)
 *
 * Copyright (C) 2007-2009 Intel Corporation.
 * Markus Metzger <markus.t.metzger@intel.com>, 2007-2009
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>

#include <asm/ds.h>

#include "ds_selftest.h"

/*
 * The configuration for a particular DS hardware implementation:
 */
struct ds_configuration {
        /* The name of the configuration: */
        const char *name;

        /* The size of pointer-typed fields in DS, BTS, and PEBS: */
        unsigned char sizeof_ptr_field;

        /* The size of a BTS/PEBS record in bytes: */
        unsigned char sizeof_rec[2];

        /* Control bit-masks indexed by enum ds_feature: */
        unsigned long ctl[dsf_ctl_max];
};
static DEFINE_PER_CPU(struct ds_configuration, ds_cfg_array);

#define ds_cfg per_cpu(ds_cfg_array, smp_processor_id())

/* Maximal size of a DS configuration: */
#define MAX_SIZEOF_DS   (12 * 8)

/* Maximal size of a BTS record: */
#define MAX_SIZEOF_BTS  (3 * 8)

/* BTS and PEBS buffer alignment: */
#define DS_ALIGNMENT    (1 << 3)

/* Mask of control bits in the DS MSR register: */
#define BTS_CONTROL                             \
        ( ds_cfg.ctl[dsf_bts]                   | \
          ds_cfg.ctl[dsf_bts_kernel]            | \
          ds_cfg.ctl[dsf_bts_user]              | \
          ds_cfg.ctl[dsf_bts_overflow] )

/*
 * A BTS or PEBS tracer.
 *
 * This holds the configuration of the tracer and serves as a handle
 * to identify tracers.
 */
struct ds_tracer {
        /* The DS context (partially) owned by this tracer. */
        struct ds_context *context;
        /* The buffer provided on ds_request() and its size in bytes. */
        void *buffer;
        size_t size;
};

struct bts_tracer {
        /* The common DS part: */
        struct ds_tracer ds;

        /* The trace including the DS configuration: */
        struct bts_trace trace;

        /* Buffer overflow notification function: */
        bts_ovfl_callback_t ovfl;

        /* Active flags affecting trace collection. */
        unsigned int flags;
};

struct pebs_tracer {
        /* The common DS part: */
        struct ds_tracer ds;

        /* The trace including the DS configuration: */
        struct pebs_trace trace;

        /* Buffer overflow notification function: */
        pebs_ovfl_callback_t ovfl;
};

/*
 * Debug Store (DS) save area configuration (see Intel64 and IA32
 * Architectures Software Developer's Manual, section 18.5)
 *
 * The DS configuration consists of the following fields; different
 * architectures vary in the size of those fields.
 *
 * - double-word aligned base linear address of the BTS buffer
 * - write pointer into the BTS buffer
 * - end linear address of the BTS buffer (one byte beyond the end of
 *   the buffer)
 * - interrupt pointer into BTS buffer
 *   (interrupt occurs when write pointer passes interrupt pointer)
 * - double-word aligned base linear address of the PEBS buffer
 * - write pointer into the PEBS buffer
 * - end linear address of the PEBS buffer (one byte beyond the end of
 *   the buffer)
 * - interrupt pointer into PEBS buffer
 *   (interrupt occurs when write pointer passes interrupt pointer)
 * - value to which counter is reset following counter overflow
 *
 * Later architectures use 64bit pointers throughout, whereas earlier
 * architectures use 32bit pointers in 32bit mode.
 *
 * We compute the base address for the first 8 fields based on:
 * - the field size stored in the DS configuration
 * - the relative field position
 * - an offset giving the start of the respective region
 *
 * This offset is further used to index various arrays holding
 * information for BTS and PEBS at the respective index.
 *
 * On later 32bit processors, we only access the lower 32bit of the
 * 64bit pointer fields. The upper halves will be zeroed out.
 */

enum ds_field {
        ds_buffer_base = 0,
        ds_index,
        ds_absolute_maximum,
        ds_interrupt_threshold,
};

enum ds_qualifier {
        ds_bts = 0,
        ds_pebs
};

static inline unsigned long
ds_get(const unsigned char *base, enum ds_qualifier qual, enum ds_field field)
{
        base += (ds_cfg.sizeof_ptr_field * (field + (4 * qual)));
        return *(unsigned long *)base;
}

static inline void
ds_set(unsigned char *base, enum ds_qualifier qual, enum ds_field field,
       unsigned long value)
{
        base += (ds_cfg.sizeof_ptr_field * (field + (4 * qual)));
        (*(unsigned long *)base) = value;
}
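/*
 * Example (illustrative sketch, not part of the driver): bumping the
 * BTS write pointer through the accessors above; 'ds_area' stands in
 * for a context's DS save area.
 *
 *      unsigned char ds_area[MAX_SIZEOF_DS];
 *      unsigned long index;
 *
 *      index = ds_get(ds_area, ds_bts, ds_index);
 *      index += ds_cfg.sizeof_rec[ds_bts];
 *      ds_set(ds_area, ds_bts, ds_index, index);
 *
 * With 64bit pointer fields, ds_get(ds_area, ds_pebs, ds_index) reads
 * at byte offset 8 * (1 + 4 * 1) = 40 into the save area.
 */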
/*
 * Locking is done only for allocating BTS or PEBS resources.
 */
static DEFINE_SPINLOCK(ds_lock);

/*
 * We either support (system-wide) per-cpu or per-thread allocation.
 * We distinguish the two based on the task_struct pointer, where a
 * NULL pointer indicates per-cpu allocation for the current cpu.
 *
 * Allocations are use-counted. As soon as resources are allocated,
 * further allocations must be of the same type (per-cpu or
 * per-thread). We model this by counting allocations (i.e. the number
 * of tracers of a certain type) for one type negatively:
 *
 *      >0      number of per-thread tracers
 *      <0      number of per-cpu tracers
 *
 * The tracers count essentially gives the number of ds contexts for a
 * certain type of allocation.
 */
static atomic_t tracers = ATOMIC_INIT(0);

static inline int get_tracer(struct task_struct *task)
{
        int error;

        spin_lock_irq(&ds_lock);

        if (task) {
                error = -EPERM;
                if (atomic_read(&tracers) < 0)
                        goto out;
                atomic_inc(&tracers);
        } else {
                error = -EPERM;
                if (atomic_read(&tracers) > 0)
                        goto out;
                atomic_dec(&tracers);
        }

        error = 0;
out:
        spin_unlock_irq(&ds_lock);

        return error;
}

static inline void put_tracer(struct task_struct *task)
{
        if (task)
                atomic_dec(&tracers);
        else
                atomic_inc(&tracers);
}
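/*
 * Example (hypothetical sequence): with one per-thread tracer active,
 * the count is 1 and a per-cpu request is refused, which keeps the
 * two allocation types from mixing:
 *
 *      get_tracer(some_task);  returns 0,      tracers == 1
 *      get_tracer(NULL);       returns -EPERM, tracers == 1
 *      put_tracer(some_task);                  tracers == 0
 *      get_tracer(NULL);       returns 0,      tracers == -1
 *
 * 'some_task' is a stand-in for any traced task.
 */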
/*
 * The DS context is either attached to a thread or to a cpu:
 * - in the former case, the thread_struct contains a pointer to the
 *   attached context.
 * - in the latter case, we use a static array of per-cpu context
 *   pointers.
 *
 * Contexts are use-counted. They are allocated on first access and
 * deallocated when the last user puts the context.
 */
struct ds_context {
        /* The DS configuration; goes into MSR_IA32_DS_AREA: */
        unsigned char ds[MAX_SIZEOF_DS];

        /* The owner of the BTS and PEBS configuration, respectively: */
        struct bts_tracer *bts_master;
        struct pebs_tracer *pebs_master;

        /* Use count: */
        unsigned long count;

        /* Pointer to the context pointer field: */
        struct ds_context **this;

        /* The traced task; NULL for cpu tracing: */
        struct task_struct *task;

        /* The traced cpu; only valid if task is NULL: */
        int cpu;
};

static DEFINE_PER_CPU(struct ds_context *, cpu_context);

static struct ds_context *ds_get_context(struct task_struct *task, int cpu)
{
        struct ds_context **p_context =
                (task ? &task->thread.ds_ctx : &per_cpu(cpu_context, cpu));
        struct ds_context *context = NULL;
        struct ds_context *new_context = NULL;

        /* Chances are small that we already have a context. */
        new_context = kzalloc(sizeof(*new_context), GFP_KERNEL);
        if (!new_context)
                return NULL;

        spin_lock_irq(&ds_lock);

        context = *p_context;
        if (likely(!context)) {
                context = new_context;

                context->this = p_context;
                context->task = task;
                context->cpu = cpu;

                *p_context = context;
        }

        context->count++;

        spin_unlock_irq(&ds_lock);

        if (context != new_context)
                kfree(new_context);

        return context;
}

static void ds_put_context(struct ds_context *context)
{
        struct task_struct *task;
        unsigned long irq;

        if (!context)
                return;

        spin_lock_irqsave(&ds_lock, irq);

        if (--context->count) {
                spin_unlock_irqrestore(&ds_lock, irq);
                return;
        }

        *(context->this) = NULL;

        task = context->task;

        if (task)
                clear_tsk_thread_flag(task, TIF_DS_AREA_MSR);

        /*
         * We leave the (now dangling) pointer to the DS configuration in
         * the DS_AREA msr. This is as good or as bad as replacing it with
         * NULL - the hardware would crash if we enabled tracing.
         *
         * This saves us some problems with having to write an msr on a
         * different cpu while preventing others from doing the same for the
         * next context for that same cpu.
         */

        spin_unlock_irqrestore(&ds_lock, irq);

        /* The context might still be in use for context switching. */
        if (task && (task != current))
                wait_task_context_switch(task);

        kfree(context);
}

static void ds_install_ds_area(struct ds_context *context)
{
        unsigned long ds;

        ds = (unsigned long)context->ds;

        /*
         * There is a race between the bts master and the pebs master.
         *
         * The thread/cpu access is synchronized via get/put_cpu() for
         * task tracing and via wrmsr_on_cpu for cpu tracing.
         *
         * If bts and pebs are collected for the same task or same cpu,
         * the same configuration is written twice.
         */
        if (context->task) {
                get_cpu();
                if (context->task == current)
                        wrmsrl(MSR_IA32_DS_AREA, ds);
                set_tsk_thread_flag(context->task, TIF_DS_AREA_MSR);
                put_cpu();
        } else
                wrmsr_on_cpu(context->cpu, MSR_IA32_DS_AREA,
                             (u32)((u64)ds), (u32)((u64)ds >> 32));
}

/*
 * Call the tracer's callback on a buffer overflow.
 *
 * context: the ds context
 * qual:    the buffer type
 */
static void ds_overflow(struct ds_context *context, enum ds_qualifier qual)
{
        switch (qual) {
        case ds_bts:
                if (context->bts_master &&
                    context->bts_master->ovfl)
                        context->bts_master->ovfl(context->bts_master);
                break;
        case ds_pebs:
                if (context->pebs_master &&
                    context->pebs_master->ovfl)
                        context->pebs_master->ovfl(context->pebs_master);
                break;
        }
}

/*
 * Write raw data into the BTS or PEBS buffer.
 *
 * The remainder of any partially written record is zeroed out.
 *
 * context: the DS context
 * qual:    the buffer type
 * record:  the data to write
 * size:    the size of the data
 */
static int ds_write(struct ds_context *context, enum ds_qualifier qual,
                    const void *record, size_t size)
{
        int bytes_written = 0;

        if (!record)
                return -EINVAL;

        while (size) {
                unsigned long base, index, end, write_end, int_th;
                unsigned long write_size, adj_write_size;

                /*
                 * Write as much as possible without producing an
                 * overflow interrupt.
                 *
                 * Interrupt_threshold must either be
                 * - bigger than absolute_maximum or
                 * - point to a record between buffer_base and absolute_maximum
                 *
                 * Index points to a valid record.
                 */
                base   = ds_get(context->ds, qual, ds_buffer_base);
                index  = ds_get(context->ds, qual, ds_index);
                end    = ds_get(context->ds, qual, ds_absolute_maximum);
                int_th = ds_get(context->ds, qual, ds_interrupt_threshold);

                write_end = min(end, int_th);

                /*
                 * If we are already beyond the interrupt threshold,
                 * we fill the entire buffer.
                 */
                if (write_end <= index)
                        write_end = end;

                if (write_end <= index)
                        break;

                write_size = min((unsigned long) size, write_end - index);
                memcpy((void *)index, record, write_size);

                record = (const char *)record + write_size;
                size -= write_size;
                bytes_written += write_size;

                adj_write_size = write_size / ds_cfg.sizeof_rec[qual];
                adj_write_size *= ds_cfg.sizeof_rec[qual];

                /* Zero out trailing bytes. */
                memset((char *)index + write_size, 0,
                       adj_write_size - write_size);
                index += adj_write_size;

                if (index >= end)
                        index = base;
                ds_set(context->ds, qual, ds_index, index);

                if (index >= int_th)
                        ds_overflow(context, qual);
        }

        return bytes_written;
}
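/*
 * Worked example (assumed numbers): with 24-byte BTS records, a
 * buffer based at 0x1000 holding 170 records ends at 0x1ff0. With an
 * interrupt threshold of 0x1fc0 (two records of headroom), writing
 * the record at index 0x1fa8 advances the index to 0x1fc0, which
 * reaches the threshold and invokes ds_overflow(); once the index
 * reaches 0x1ff0 it wraps back to 0x1000.
 */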
/*
 * Branch Trace Store (BTS) uses the following format. Different
 * architectures vary in the size of those fields.
 * - source linear address
 * - destination linear address
 * - flags
 *
 * Later architectures use 64bit pointers throughout, whereas earlier
 * architectures use 32bit pointers in 32bit mode.
 *
 * We compute the base address for the fields based on:
 * - the field size stored in the DS configuration
 * - the relative field position
 *
 * In order to store additional information in the BTS buffer, we use
 * a special source address to indicate that the record requires
 * special interpretation.
 *
 * Netburst indicated via a bit in the flags field whether the branch
 * was predicted; this is ignored.
 *
 * We use two levels of abstraction:
 * - the raw data level defined here
 * - an arch-independent level defined in ds.h
 */

enum bts_field {
        bts_from,
        bts_to,
        bts_flags,

        bts_qual = bts_from,
        bts_clock = bts_to,
        bts_pid = bts_flags,

        bts_qual_mask = (bts_qual_max - 1),
        bts_escape = ((unsigned long)-1 & ~bts_qual_mask)
};

static inline unsigned long bts_get(const char *base, enum bts_field field)
{
        base += (ds_cfg.sizeof_ptr_field * field);
        return *(unsigned long *)base;
}

static inline void bts_set(char *base, enum bts_field field, unsigned long val)
{
        base += (ds_cfg.sizeof_ptr_field * field);
        (*(unsigned long *)base) = val;
}
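/*
 * Example (illustrative): a task-departs event is stored by writing
 * (bts_escape | bts_task_departs) into the 'from' slot. Readers can
 * tell such records from ordinary branches via
 *
 *      (bts_get(at, bts_qual) & ~bts_qual_mask) == bts_escape
 *
 * which leaves the event qualifier in the low bits; the timestamp and
 * pid then occupy the 'to' and 'flags' slots (bts_clock, bts_pid).
 */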
/*
 * The raw BTS data is architecture dependent.
 *
 * For higher-level users, we give an arch-independent view.
 * - ds.h defines struct bts_struct
 * - bts_read translates one raw bts record into a bts_struct
 * - bts_write translates one bts_struct into the raw format and
 *   writes it into the top of the parameter tracer's buffer.
 *
 * return: bytes read/written on success; -Eerrno, otherwise
 */
static int
bts_read(struct bts_tracer *tracer, const void *at, struct bts_struct *out)
{
        if (!tracer)
                return -EINVAL;

        if (at < tracer->trace.ds.begin)
                return -EINVAL;

        if (tracer->trace.ds.end < (at + tracer->trace.ds.size))
                return -EINVAL;

        memset(out, 0, sizeof(*out));
        if ((bts_get(at, bts_qual) & ~bts_qual_mask) == bts_escape) {
                out->qualifier = (bts_get(at, bts_qual) & bts_qual_mask);
                out->variant.event.clock = bts_get(at, bts_clock);
                out->variant.event.pid = bts_get(at, bts_pid);
        } else {
                out->qualifier = bts_branch;
                out->variant.lbr.from = bts_get(at, bts_from);
                out->variant.lbr.to = bts_get(at, bts_to);

                if (!out->variant.lbr.from && !out->variant.lbr.to)
                        out->qualifier = bts_invalid;
        }

        return ds_cfg.sizeof_rec[ds_bts];
}

static int bts_write(struct bts_tracer *tracer, const struct bts_struct *in)
{
        unsigned char raw[MAX_SIZEOF_BTS];

        if (!tracer)
                return -EINVAL;

        if (MAX_SIZEOF_BTS < ds_cfg.sizeof_rec[ds_bts])
                return -EOVERFLOW;

        switch (in->qualifier) {
        case bts_invalid:
                bts_set(raw, bts_from, 0);
                bts_set(raw, bts_to, 0);
                bts_set(raw, bts_flags, 0);
                break;
        case bts_branch:
                bts_set(raw, bts_from, in->variant.lbr.from);
                bts_set(raw, bts_to, in->variant.lbr.to);
                bts_set(raw, bts_flags, 0);
                break;
        case bts_task_arrives:
        case bts_task_departs:
                bts_set(raw, bts_qual, (bts_escape | in->qualifier));
                bts_set(raw, bts_clock, in->variant.event.clock);
                bts_set(raw, bts_pid, in->variant.event.pid);
                break;
        default:
                return -EINVAL;
        }

        return ds_write(tracer->ds.context, ds_bts, raw,
                        ds_cfg.sizeof_rec[ds_bts]);
}

static void ds_write_config(struct ds_context *context,
                            struct ds_trace *cfg, enum ds_qualifier qual)
{
        unsigned char *ds = context->ds;

        ds_set(ds, qual, ds_buffer_base, (unsigned long)cfg->begin);
        ds_set(ds, qual, ds_index, (unsigned long)cfg->top);
        ds_set(ds, qual, ds_absolute_maximum, (unsigned long)cfg->end);
        ds_set(ds, qual, ds_interrupt_threshold, (unsigned long)cfg->ith);
}

static void ds_read_config(struct ds_context *context,
                           struct ds_trace *cfg, enum ds_qualifier qual)
{
        unsigned char *ds = context->ds;

        cfg->begin = (void *)ds_get(ds, qual, ds_buffer_base);
        cfg->top = (void *)ds_get(ds, qual, ds_index);
        cfg->end = (void *)ds_get(ds, qual, ds_absolute_maximum);
        cfg->ith = (void *)ds_get(ds, qual, ds_interrupt_threshold);
}

static void ds_init_ds_trace(struct ds_trace *trace, enum ds_qualifier qual,
                             void *base, size_t size, size_t ith,
                             unsigned int flags)
{
        unsigned long buffer, adj;

        /*
         * Adjust the buffer address and size to meet alignment
         * constraints:
         * - buffer is double-word aligned
         * - size is multiple of record size
         *
         * We checked the size at the very beginning; we have enough
         * space to do the adjustment.
         */
        buffer = (unsigned long)base;

        adj = ALIGN(buffer, DS_ALIGNMENT) - buffer;
        buffer += adj;
        size   -= adj;

        trace->n = size / ds_cfg.sizeof_rec[qual];
        trace->size = ds_cfg.sizeof_rec[qual];

        size = (trace->n * trace->size);

        trace->begin = (void *)buffer;
        trace->top = trace->begin;
        trace->end = (void *)(buffer + size);
        /*
         * The value for 'no threshold' is -1, which will set the
         * threshold outside of the buffer, just like we want it.
         */
        ith *= ds_cfg.sizeof_rec[qual];
        trace->ith = (void *)(buffer + size - ith);

        trace->flags = flags;
}
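/*
 * Worked example (assumed numbers): for BTS with 24-byte records,
 * base 0x1004 and size 4096, the buffer is aligned up to 0x1008
 * (adj = 4), leaving 4092 bytes for n = 170 records (4080 bytes).
 * With ith = 2, the threshold lands 48 bytes before the end of the
 * buffer; with ith = (size_t)-1, it lands far beyond the end, which
 * effectively disables the overflow interrupt.
 */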
static int ds_request(struct ds_tracer *tracer, struct ds_trace *trace,
                      enum ds_qualifier qual, struct task_struct *task,
                      int cpu, void *base, size_t size, size_t th)
{
        struct ds_context *context;
        int error;

        error = -EOPNOTSUPP;
        if (!ds_cfg.sizeof_rec[qual])
                goto out;

        error = -EINVAL;
        if (!base)
                goto out;

        /* We need space for alignment adjustments in ds_init_ds_trace(). */
        error = -EINVAL;
        if (size < (DS_ALIGNMENT + ds_cfg.sizeof_rec[qual]))
                goto out;

        if (th != (size_t)-1) {
                th *= ds_cfg.sizeof_rec[qual];

                error = -EINVAL;
                if (size <= th)
                        goto out;
        }

        tracer->buffer = base;
        tracer->size = size;

        error = -ENOMEM;
        context = ds_get_context(task, cpu);
        if (!context)
                goto out;
        tracer->context = context;

        /*
         * Defer any tracer-specific initialization work for the context until
         * context ownership has been clarified.
         */

        error = 0;
 out:
        return error;
}

static struct bts_tracer *ds_request_bts(struct task_struct *task, int cpu,
                                         void *base, size_t size,
                                         bts_ovfl_callback_t ovfl, size_t th,
                                         unsigned int flags)
{
        struct bts_tracer *tracer;
        int error;

        /* Buffer overflow notification is not yet implemented. */
        error = -EOPNOTSUPP;
        if (ovfl)
                goto out;

        error = get_tracer(task);
        if (error < 0)
                goto out;

        error = -ENOMEM;
        tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
        if (!tracer)
                goto out_put_tracer;

        /* Do some more error checking and acquire a tracing context. */
        error = ds_request(&tracer->ds, &tracer->trace.ds,
                           ds_bts, task, cpu, base, size, th);
        if (error < 0)
                goto out_tracer;

        /* Claim the bts part of the tracing context we acquired above. */
        spin_lock_irq(&ds_lock);

        error = -EPERM;
        if (tracer->ds.context->bts_master)
                goto out_unlock;
        tracer->ds.context->bts_master = tracer;

        spin_unlock_irq(&ds_lock);

        /*
         * Now that we own the bts part of the context, let's complete the
         * initialization for that part.
         */
        ds_init_ds_trace(&tracer->trace.ds, ds_bts, base, size, th, flags);
        ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_bts);
        ds_install_ds_area(tracer->ds.context);

        tracer->trace.read  = bts_read;
        tracer->trace.write = bts_write;

        /* Start tracing. */
        ds_resume_bts(tracer);

        return tracer;

 out_unlock:
        spin_unlock_irq(&ds_lock);
        ds_put_context(tracer->ds.context);
 out_tracer:
        kfree(tracer);
 out_put_tracer:
        put_tracer(task);
 out:
        return ERR_PTR(error);
}

struct bts_tracer *ds_request_bts_task(struct task_struct *task,
                                       void *base, size_t size,
                                       bts_ovfl_callback_t ovfl,
                                       size_t th, unsigned int flags)
{
        return ds_request_bts(task, 0, base, size, ovfl, th, flags);
}

struct bts_tracer *ds_request_bts_cpu(int cpu, void *base, size_t size,
                                      bts_ovfl_callback_t ovfl,
                                      size_t th, unsigned int flags)
{
        return ds_request_bts(NULL, cpu, base, size, ovfl, th, flags);
}
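/*
 * Usage sketch (hypothetical caller): request BTS tracing of a task
 * with a caller-supplied buffer, no overflow callback (not yet
 * implemented), no interrupt threshold, and both privilege levels
 * collected:
 *
 *      void *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
 *      struct bts_tracer *tracer =
 *              ds_request_bts_task(child, buf, PAGE_SIZE, NULL,
 *                                  (size_t)-1, BTS_KERNEL | BTS_USER);
 *      if (IS_ERR(tracer))
 *              return PTR_ERR(tracer);
 *
 * 'child' and 'buf' are stand-ins owned by the caller; the buffer
 * must outlive the tracer and is not freed by ds_release_bts().
 */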
static struct pebs_tracer *ds_request_pebs(struct task_struct *task, int cpu,
                                           void *base, size_t size,
                                           pebs_ovfl_callback_t ovfl, size_t th,
                                           unsigned int flags)
{
        struct pebs_tracer *tracer;
        int error;

        /* Buffer overflow notification is not yet implemented. */
        error = -EOPNOTSUPP;
        if (ovfl)
                goto out;

        error = get_tracer(task);
        if (error < 0)
                goto out;

        error = -ENOMEM;
        tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
        if (!tracer)
                goto out_put_tracer;

        /* Do some more error checking and acquire a tracing context. */
        error = ds_request(&tracer->ds, &tracer->trace.ds,
                           ds_pebs, task, cpu, base, size, th);
        if (error < 0)
                goto out_tracer;

        /* Claim the pebs part of the tracing context we acquired above. */
        spin_lock_irq(&ds_lock);

        error = -EPERM;
        if (tracer->ds.context->pebs_master)
                goto out_unlock;
        tracer->ds.context->pebs_master = tracer;

        spin_unlock_irq(&ds_lock);

        /*
         * Now that we own the pebs part of the context, let's complete the
         * initialization for that part.
         */
        ds_init_ds_trace(&tracer->trace.ds, ds_pebs, base, size, th, flags);
        ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_pebs);
        ds_install_ds_area(tracer->ds.context);

        /* Start tracing. */
        ds_resume_pebs(tracer);

        return tracer;

 out_unlock:
        spin_unlock_irq(&ds_lock);
        ds_put_context(tracer->ds.context);
 out_tracer:
        kfree(tracer);
 out_put_tracer:
        put_tracer(task);
 out:
        return ERR_PTR(error);
}

struct pebs_tracer *ds_request_pebs_task(struct task_struct *task,
                                         void *base, size_t size,
                                         pebs_ovfl_callback_t ovfl,
                                         size_t th, unsigned int flags)
{
        return ds_request_pebs(task, 0, base, size, ovfl, th, flags);
}

struct pebs_tracer *ds_request_pebs_cpu(int cpu, void *base, size_t size,
                                        pebs_ovfl_callback_t ovfl,
                                        size_t th, unsigned int flags)
{
        return ds_request_pebs(NULL, cpu, base, size, ovfl, th, flags);
}

static void ds_free_bts(struct bts_tracer *tracer)
{
        struct task_struct *task;

        task = tracer->ds.context->task;

        WARN_ON_ONCE(tracer->ds.context->bts_master != tracer);
        tracer->ds.context->bts_master = NULL;

        /* Make sure tracing stopped and the tracer is not in use. */
        if (task && (task != current))
                wait_task_context_switch(task);

        ds_put_context(tracer->ds.context);
        put_tracer(task);

        kfree(tracer);
}

void ds_release_bts(struct bts_tracer *tracer)
{
        might_sleep();

        if (!tracer)
                return;

        ds_suspend_bts(tracer);
        ds_free_bts(tracer);
}

int ds_release_bts_noirq(struct bts_tracer *tracer)
{
        struct task_struct *task;
        unsigned long irq;
        int error;

        if (!tracer)
                return 0;

        task = tracer->ds.context->task;

        local_irq_save(irq);

        error = -EPERM;
        if (!task &&
            (tracer->ds.context->cpu != smp_processor_id()))
                goto out;

        error = -EPERM;
        if (task && (task != current))
                goto out;

        ds_suspend_bts_noirq(tracer);
        ds_free_bts(tracer);

        error = 0;
 out:
        local_irq_restore(irq);

        return error;
}

static void update_task_debugctlmsr(struct task_struct *task,
                                    unsigned long debugctlmsr)
{
        task->thread.debugctlmsr = debugctlmsr;

        get_cpu();
        if (task == current)
                update_debugctlmsr(debugctlmsr);

        if (task->thread.debugctlmsr)
                set_tsk_thread_flag(task, TIF_DEBUGCTLMSR);
        else
                clear_tsk_thread_flag(task, TIF_DEBUGCTLMSR);
        put_cpu();
}

void ds_suspend_bts(struct bts_tracer *tracer)
{
        struct task_struct *task;
        unsigned long debugctlmsr;
        int cpu;

        if (!tracer)
                return;

        tracer->flags = 0;

        task = tracer->ds.context->task;
        cpu  = tracer->ds.context->cpu;

        WARN_ON(!task && irqs_disabled());

        debugctlmsr = (task ?
                       task->thread.debugctlmsr :
                       get_debugctlmsr_on_cpu(cpu));
        debugctlmsr &= ~BTS_CONTROL;

        if (task)
                update_task_debugctlmsr(task, debugctlmsr);
        else
                update_debugctlmsr_on_cpu(cpu, debugctlmsr);
}

int ds_suspend_bts_noirq(struct bts_tracer *tracer)
{
        struct task_struct *task;
        unsigned long debugctlmsr, irq;
        int cpu, error = 0;

        if (!tracer)
                return 0;

        tracer->flags = 0;

        task = tracer->ds.context->task;
        cpu  = tracer->ds.context->cpu;

        local_irq_save(irq);

        error = -EPERM;
        if (!task && (cpu != smp_processor_id()))
                goto out;

        debugctlmsr = (task ?
                       task->thread.debugctlmsr :
                       get_debugctlmsr());
        debugctlmsr &= ~BTS_CONTROL;

        if (task)
                update_task_debugctlmsr(task, debugctlmsr);
        else
                update_debugctlmsr(debugctlmsr);

        error = 0;
 out:
        local_irq_restore(irq);

        return error;
}

static unsigned long ds_bts_control(struct bts_tracer *tracer)
{
        unsigned long control;

        control = ds_cfg.ctl[dsf_bts];
        if (!(tracer->trace.ds.flags & BTS_KERNEL))
                control |= ds_cfg.ctl[dsf_bts_kernel];
        if (!(tracer->trace.ds.flags & BTS_USER))
                control |= ds_cfg.ctl[dsf_bts_user];

        return control;
}
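/*
 * Note the inverted logic: the per-privilege control bits *suppress*
 * branch recording. Worked example (Core 2 masks from
 * ds_cfg_core2_atom below): a tracer with flags == BTS_USER yields
 * (1 << 6) | (1 << 7) | (1 << 9), i.e. TR and BTS enabled plus the
 * kernel-suppress bit set, so only user-mode branches are recorded.
 */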
void ds_resume_bts(struct bts_tracer *tracer)
{
        struct task_struct *task;
        unsigned long debugctlmsr;
        int cpu;

        if (!tracer)
                return;

        tracer->flags = tracer->trace.ds.flags;

        task = tracer->ds.context->task;
        cpu  = tracer->ds.context->cpu;

        WARN_ON(!task && irqs_disabled());

        debugctlmsr = (task ?
                       task->thread.debugctlmsr :
                       get_debugctlmsr_on_cpu(cpu));
        debugctlmsr |= ds_bts_control(tracer);

        if (task)
                update_task_debugctlmsr(task, debugctlmsr);
        else
                update_debugctlmsr_on_cpu(cpu, debugctlmsr);
}

int ds_resume_bts_noirq(struct bts_tracer *tracer)
{
        struct task_struct *task;
        unsigned long debugctlmsr, irq;
        int cpu, error = 0;

        if (!tracer)
                return 0;

        tracer->flags = tracer->trace.ds.flags;

        task = tracer->ds.context->task;
        cpu  = tracer->ds.context->cpu;

        local_irq_save(irq);

        error = -EPERM;
        if (!task && (cpu != smp_processor_id()))
                goto out;

        debugctlmsr = (task ?
                       task->thread.debugctlmsr :
                       get_debugctlmsr());
        debugctlmsr |= ds_bts_control(tracer);

        if (task)
                update_task_debugctlmsr(task, debugctlmsr);
        else
                update_debugctlmsr(debugctlmsr);

        error = 0;
 out:
        local_irq_restore(irq);

        return error;
}

static void ds_free_pebs(struct pebs_tracer *tracer)
{
        struct task_struct *task;

        task = tracer->ds.context->task;

        WARN_ON_ONCE(tracer->ds.context->pebs_master != tracer);
        tracer->ds.context->pebs_master = NULL;

        ds_put_context(tracer->ds.context);
        put_tracer(task);

        kfree(tracer);
}

void ds_release_pebs(struct pebs_tracer *tracer)
{
        might_sleep();

        if (!tracer)
                return;

        ds_suspend_pebs(tracer);
        ds_free_pebs(tracer);
}

int ds_release_pebs_noirq(struct pebs_tracer *tracer)
{
        struct task_struct *task;
        unsigned long irq;
        int error;

        if (!tracer)
                return 0;

        task = tracer->ds.context->task;

        local_irq_save(irq);

        error = -EPERM;
        if (!task &&
            (tracer->ds.context->cpu != smp_processor_id()))
                goto out;

        error = -EPERM;
        if (task && (task != current))
                goto out;

        ds_suspend_pebs_noirq(tracer);
        ds_free_pebs(tracer);

        error = 0;
 out:
        local_irq_restore(irq);

        return error;
}

void ds_suspend_pebs(struct pebs_tracer *tracer)
{

}

int ds_suspend_pebs_noirq(struct pebs_tracer *tracer)
{
        return 0;
}

void ds_resume_pebs(struct pebs_tracer *tracer)
{

}

int ds_resume_pebs_noirq(struct pebs_tracer *tracer)
{
        return 0;
}

const struct bts_trace *ds_read_bts(struct bts_tracer *tracer)
{
        if (!tracer)
                return NULL;

        ds_read_config(tracer->ds.context, &tracer->trace.ds, ds_bts);
        return &tracer->trace;
}

const struct pebs_trace *ds_read_pebs(struct pebs_tracer *tracer)
{
        if (!tracer)
                return NULL;

        ds_read_config(tracer->ds.context, &tracer->trace.ds, ds_pebs);
        tracer->trace.reset_value =
                *(u64 *)(tracer->ds.context->ds +
                         (ds_cfg.sizeof_ptr_field * 8));

        return &tracer->trace;
}

int ds_reset_bts(struct bts_tracer *tracer)
{
        if (!tracer)
                return -EINVAL;

        tracer->trace.ds.top = tracer->trace.ds.begin;

        ds_set(tracer->ds.context->ds, ds_bts, ds_index,
               (unsigned long)tracer->trace.ds.top);

        return 0;
}

int ds_reset_pebs(struct pebs_tracer *tracer)
{
        if (!tracer)
                return -EINVAL;

        tracer->trace.ds.top = tracer->trace.ds.begin;

        ds_set(tracer->ds.context->ds, ds_pebs, ds_index,
               (unsigned long)tracer->trace.ds.top);

        return 0;
}

int ds_set_pebs_reset(struct pebs_tracer *tracer, u64 value)
{
        if (!tracer)
                return -EINVAL;

        *(u64 *)(tracer->ds.context->ds +
                 (ds_cfg.sizeof_ptr_field * 8)) = value;

        return 0;
}
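/*
 * Worked offset (per the expression above): the PEBS counter reset
 * value sits right behind the eight buffer-management pointers, i.e.
 * at byte offset 8 * 8 = 64 into the DS save area with 64bit pointer
 * fields, or 4 * 8 = 32 with 32bit fields.
 */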
static const struct ds_configuration ds_cfg_netburst = {
        .name = "Netburst",
        .ctl[dsf_bts]           = (1 << 2) | (1 << 3),
        .ctl[dsf_bts_kernel]    = (1 << 5),
        .ctl[dsf_bts_user]      = (1 << 6),
};
static const struct ds_configuration ds_cfg_pentium_m = {
        .name = "Pentium M",
        .ctl[dsf_bts]           = (1 << 6) | (1 << 7),
};
static const struct ds_configuration ds_cfg_core2_atom = {
        .name = "Core 2/Atom",
        .ctl[dsf_bts]           = (1 << 6) | (1 << 7),
        .ctl[dsf_bts_kernel]    = (1 << 9),
        .ctl[dsf_bts_user]      = (1 << 10),
};

static void
ds_configure(const struct ds_configuration *cfg,
             struct cpuinfo_x86 *cpu)
{
        unsigned long nr_pebs_fields = 0;

        printk(KERN_INFO "[ds] using %s configuration\n", cfg->name);

#ifdef __i386__
        nr_pebs_fields = 10;
#else
        nr_pebs_fields = 18;
#endif

        memset(&ds_cfg, 0, sizeof(ds_cfg));
        ds_cfg = *cfg;

        ds_cfg.sizeof_ptr_field =
                (cpu_has(cpu, X86_FEATURE_DTES64) ? 8 : 4);

        ds_cfg.sizeof_rec[ds_bts]  = ds_cfg.sizeof_ptr_field * 3;
        ds_cfg.sizeof_rec[ds_pebs] = ds_cfg.sizeof_ptr_field * nr_pebs_fields;

        if (!cpu_has(cpu, X86_FEATURE_BTS)) {
                ds_cfg.sizeof_rec[ds_bts] = 0;
                printk(KERN_INFO "[ds] bts not available\n");
        }
        if (!cpu_has(cpu, X86_FEATURE_PEBS)) {
                ds_cfg.sizeof_rec[ds_pebs] = 0;
                printk(KERN_INFO "[ds] pebs not available\n");
        }

        printk(KERN_INFO "[ds] sizes: address: %u bit, ",
               8 * ds_cfg.sizeof_ptr_field);
        printk("bts/pebs record: %u/%u bytes\n",
               ds_cfg.sizeof_rec[ds_bts], ds_cfg.sizeof_rec[ds_pebs]);

        WARN_ON_ONCE(MAX_SIZEOF_DS < (12 * ds_cfg.sizeof_ptr_field));
}
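/*
 * Worked sizes: on a Core 2 with X86_FEATURE_DTES64, sizeof_ptr_field
 * is 8, giving 24-byte BTS and 144-byte PEBS records (18 fields on
 * 64bit kernels); without DTES64 the same records shrink to 12 and
 * 40 bytes (10 PEBS fields on 32bit kernels).
 */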
void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
{
        switch (c->x86) {
        case 0x6:
                switch (c->x86_model) {
                case 0x9:
                case 0xd: /* Pentium M */
                        ds_configure(&ds_cfg_pentium_m, c);
                        break;
                case 0xf:
                case 0x17: /* Core2 */
                case 0x1c: /* Atom */
                        ds_configure(&ds_cfg_core2_atom, c);
                        break;
                case 0x1a: /* Core i7 */
                default:
                        /* Sorry, don't know about them. */
                        break;
                }
                break;
        case 0xf:
                switch (c->x86_model) {
                case 0x0:
                case 0x1:
                case 0x2: /* Netburst */
                        ds_configure(&ds_cfg_netburst, c);
                        break;
                default:
                        /* Sorry, don't know about them. */
                        break;
                }
                break;
        default:
                /* Sorry, don't know about them. */
                break;
        }
}

static inline void ds_take_timestamp(struct ds_context *context,
                                     enum bts_qualifier qualifier,
                                     struct task_struct *task)
{
        struct bts_tracer *tracer = context->bts_master;
        struct bts_struct ts;

        /* Prevent compilers from reading the tracer pointer twice. */
        barrier();

        if (!tracer || !(tracer->flags & BTS_TIMESTAMPS))
                return;

        memset(&ts, 0, sizeof(ts));
        ts.qualifier            = qualifier;
        ts.variant.event.clock  = trace_clock_global();
        ts.variant.event.pid    = task->pid;

        bts_write(tracer, &ts);
}

/*
 * Change the DS configuration from tracing prev to tracing next.
 */
void ds_switch_to(struct task_struct *prev, struct task_struct *next)
{
        struct ds_context *prev_ctx     = prev->thread.ds_ctx;
        struct ds_context *next_ctx     = next->thread.ds_ctx;
        unsigned long debugctlmsr       = next->thread.debugctlmsr;

        /* Make sure all data is read before we start. */
        barrier();

        if (prev_ctx) {
                update_debugctlmsr(0);

                ds_take_timestamp(prev_ctx, bts_task_departs, prev);
        }

        if (next_ctx) {
                ds_take_timestamp(next_ctx, bts_task_arrives, next);

                wrmsrl(MSR_IA32_DS_AREA, (unsigned long)next_ctx->ds);
        }

        update_debugctlmsr(debugctlmsr);
}
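/*
 * The ordering above is deliberate: tracing is disabled before the
 * departs event is logged into prev's buffer, the arrives event is
 * written through next's DS image before DS_AREA is switched over,
 * and next's debugctl value is only restored at the very end.
 */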
static __init int ds_selftest(void)
{
        if (ds_cfg.sizeof_rec[ds_bts]) {
                int error;

                error = ds_selftest_bts();
                if (error) {
                        WARN(1, "[ds] selftest failed. disabling bts.\n");
                        ds_cfg.sizeof_rec[ds_bts] = 0;
                }
        }

        if (ds_cfg.sizeof_rec[ds_pebs]) {
                int error;

                error = ds_selftest_pebs();
                if (error) {
                        WARN(1, "[ds] selftest failed. disabling pebs.\n");
                        ds_cfg.sizeof_rec[ds_pebs] = 0;
                }
        }

        return 0;
}
device_initcall(ds_selftest);