/**
 * @file cpu_buffer.h
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 */

#ifndef OPROFILE_CPU_BUFFER_H
#define OPROFILE_CPU_BUFFER_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/ring_buffer.h>

struct task_struct;

int alloc_cpu_buffers(void);
void free_cpu_buffers(void);

void start_cpu_work(void);
void end_cpu_work(void);
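
/*
 * Illustrative note (not from the original header): in cpu_buffer.c,
 * start_cpu_work() schedules the per-CPU delayed work member of
 * struct oprofile_cpu_buffer (below), which periodically flushes each
 * CPU buffer into the global event buffer; end_cpu_work() cancels
 * that work again.
 */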

/* The CPU buffer is composed of entries like this; they are also
 * used for context switch notes.
 */
struct op_sample {
        unsigned long eip;
        unsigned long event;
};

struct op_entry {
        struct ring_buffer_event *event;
        struct op_sample *sample;
        unsigned long irq_flags;
};

struct oprofile_cpu_buffer {
        volatile unsigned long head_pos;
        volatile unsigned long tail_pos;
        unsigned long buffer_size;
        struct task_struct *last_task;
        int last_is_kernel;
        int tracing;
        unsigned long sample_received;
        unsigned long sample_lost_overflow;
        unsigned long backtrace_aborted;
        unsigned long sample_invalid_eip;
        int cpu;
        struct delayed_work work;
};

extern struct ring_buffer *op_ring_buffer_read;
extern struct ring_buffer *op_ring_buffer_write;
DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);

/*
 * Resets the cpu buffer to a sane state:
 *
 * last_is_kernel and last_task are set to invalid values; the next
 * sample collected will repopulate them with proper ones.
 */
static inline void cpu_buffer_reset(int cpu)
{
        struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);

        cpu_buf->last_is_kernel = -1;
        cpu_buf->last_task = NULL;
}
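
/*
 * Usage sketch (illustrative only, not part of the original header): a
 * profiling start path could reset every online CPU's buffer like this;
 * for_each_online_cpu() comes from <linux/cpumask.h>:
 *
 *      int cpu;
 *
 *      for_each_online_cpu(cpu)
 *              cpu_buffer_reset(cpu);
 */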

static inline int cpu_buffer_write_entry(struct op_entry *entry)
{
        entry->event = ring_buffer_lock_reserve(op_ring_buffer_write,
                                                sizeof(struct op_sample),
                                                &entry->irq_flags);
        if (entry->event)
                entry->sample = ring_buffer_event_data(entry->event);
        else
                entry->sample = NULL;

        if (!entry->sample)
                return -ENOMEM;

        return 0;
}

static inline int cpu_buffer_write_commit(struct op_entry *entry)
{
        return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
                                         entry->irq_flags);
}
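
/*
 * Usage sketch (illustrative): the reserve/fill/commit pattern a producer
 * follows with the two helpers above; pc and event stand in for the
 * values being logged:
 *
 *      struct op_entry entry;
 *
 *      if (cpu_buffer_write_entry(&entry))
 *              return;                 (reservation failed, drop sample)
 *      entry.sample->eip = pc;
 *      entry.sample->event = event;
 *      cpu_buffer_write_commit(&entry);
 */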

static inline struct op_sample *cpu_buffer_read_entry(int cpu)
{
        struct ring_buffer_event *e;
        e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
        if (e)
                return ring_buffer_event_data(e);
        if (ring_buffer_swap_cpu(op_ring_buffer_read,
                                 op_ring_buffer_write,
                                 cpu))
                return NULL;
        e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
        if (e)
                return ring_buffer_event_data(e);
        return NULL;
}

/* number of entries currently queued for this CPU across the read
 * and write buffers */
static inline unsigned long cpu_buffer_entries(int cpu)
{
        return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
                + ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
}
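
/*
 * Usage sketch (illustrative): draining one CPU's queued samples, e.g.
 * from the buffer sync code; process_sample() is a hypothetical consumer:
 *
 *      unsigned long i, avail = cpu_buffer_entries(cpu);
 *
 *      for (i = 0; i < avail; i++) {
 *              struct op_sample *s = cpu_buffer_read_entry(cpu);
 *              if (!s)
 *                      break;
 *              process_sample(s);
 *      }
 */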

/* transient events for the CPU buffer -> event buffer */
#define CPU_IS_KERNEL 1
#define CPU_TRACE_BEGIN 2
#define IBS_FETCH_BEGIN 3
#define IBS_OP_BEGIN    4
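
/*
 * Illustrative note: in cpu_buffer.c these codes travel through the same
 * ring buffer as ordinary samples, as an op_sample whose eip field holds
 * ESCAPE_CODE (defined in event_buffer.h) and whose event field holds one
 * of the codes above, e.g. eip = ESCAPE_CODE, event = CPU_IS_KERNEL.
 */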

#endif /* OPROFILE_CPU_BUFFER_H */