kernel/trace/trace_power.c
/*
 * ring buffer based C-state tracer
 *
 * Arjan van de Ven <arjan@linux.intel.com>
 * Copyright (C) 2008 Intel Corporation
 *
 * Much is borrowed from trace_boot.c which is
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */

#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>
#include <linux/module.h>

#include "trace.h"

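/*
 * Trace array this tracer writes into (set by power_trace_init()) and the
 * flag that gates the trace_power_* hooks below.
 */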
static struct trace_array *power_trace;
static int __read_mostly trace_power_enabled;

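/*
 * The start/stop callbacks just toggle trace_power_enabled; events are
 * only written while it is set.
 */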
static void start_power_trace(struct trace_array *tr)
{
	trace_power_enabled = 1;
}

static void stop_power_trace(struct trace_array *tr)
{
	trace_power_enabled = 0;
}

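/*
 * Called when the "power" tracer is selected: remember the trace array,
 * enable the hooks and reset the buffer of every possible CPU.
 */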
static int power_trace_init(struct trace_array *tr)
{
	int cpu;
	power_trace = tr;

	trace_power_enabled = 1;

	for_each_cpu(cpu, cpu_possible_mask)
		tracing_reset(tr, cpu);
	return 0;
}

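/*
 * Format a TRACE_POWER entry for output: C-state events include how long
 * the CPU stayed in the state, P-state events only the target state.
 */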
static enum print_line_t power_print_line(struct trace_iterator *iter)
{
	int ret = 0;
	struct trace_entry *entry = iter->ent;
	struct trace_power *field;
	struct power_trace *it;
	struct trace_seq *s = &iter->seq;
	struct timespec stamp;
	struct timespec duration;

	trace_assign_type(field, entry);
	it = &field->state_data;
	stamp = ktime_to_timespec(it->stamp);
	duration = ktime_to_timespec(ktime_sub(it->end, it->stamp));

	if (entry->type == TRACE_POWER) {
		if (it->type == POWER_CSTATE)
			ret = trace_seq_printf(s, "[%5ld.%09ld] CSTATE: Going to C%i on cpu %i for %ld.%09ld\n",
					  stamp.tv_sec,
					  stamp.tv_nsec,
					  it->state, iter->cpu,
					  duration.tv_sec,
					  duration.tv_nsec);
		if (it->type == POWER_PSTATE)
			ret = trace_seq_printf(s, "[%5ld.%09ld] PSTATE: Going to P%i on cpu %i\n",
					  stamp.tv_sec,
					  stamp.tv_nsec,
					  it->state, iter->cpu);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		return TRACE_TYPE_HANDLED;
	}
	return TRACE_TYPE_UNHANDLED;
}

static struct tracer power_tracer __read_mostly =
{
	.name		= "power",
	.init		= power_trace_init,
	.start		= start_power_trace,
	.stop		= stop_power_trace,
	.reset		= stop_power_trace,
	.print_line	= power_print_line,
};

static int init_power_trace(void)
{
	return register_tracer(&power_tracer);
}
device_initcall(init_power_trace);

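/*
 * Record the target state and a start timestamp in the caller-provided
 * power_trace.  Nothing is written to the ring buffer until
 * trace_power_end() closes the interval.
 */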
void trace_power_start(struct power_trace *it, unsigned int type,
			 unsigned int level)
{
	if (!trace_power_enabled)
		return;

	memset(it, 0, sizeof(struct power_trace));
	it->state = level;
	it->type = type;
	it->stamp = ktime_get();
}
EXPORT_SYMBOL_GPL(trace_power_start);

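/*
 * Close the interval opened by trace_power_start() and commit a
 * TRACE_POWER event carrying the whole power_trace to the ring buffer.
 */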
void trace_power_end(struct power_trace *it)
{
	struct ring_buffer_event *event;
	struct trace_power *entry;
	struct trace_array_cpu *data;
	unsigned long irq_flags;
	struct trace_array *tr = power_trace;

	if (!trace_power_enabled)
		return;

	preempt_disable();
	it->end = ktime_get();
	data = tr->data[smp_processor_id()];

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		goto out;
	entry	= ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);
	entry->ent.type = TRACE_POWER;
	entry->state_data = *it;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	trace_wake_up();

 out:
	preempt_enable();
}
EXPORT_SYMBOL_GPL(trace_power_end);

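/*
 * Record an instantaneous event: fills in the power_trace like
 * trace_power_start() would and immediately commits it with a
 * zero-length interval (end == stamp).
 */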
void trace_power_mark(struct power_trace *it, unsigned int type,
			 unsigned int level)
{
	struct ring_buffer_event *event;
	struct trace_power *entry;
	struct trace_array_cpu *data;
	unsigned long irq_flags;
	struct trace_array *tr = power_trace;

	if (!trace_power_enabled)
		return;

	memset(it, 0, sizeof(struct power_trace));
	it->state = level;
	it->type = type;
	it->stamp = ktime_get();
	preempt_disable();
	it->end = it->stamp;
	data = tr->data[smp_processor_id()];

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		goto out;
	entry	= ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);
	entry->ent.type = TRACE_POWER;
	entry->state_data = *it;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

	trace_wake_up();

 out:
	preempt_enable();
}
EXPORT_SYMBOL_GPL(trace_power_mark);