kernel/trace/kmemtrace.c
/*
 * Memory allocator tracing
 *
 * Copyright (C) 2008 Eduard - Gabriel Munteanu
 * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/dcache.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <trace/kmemtrace.h>

#include "trace.h"
#include "trace_output.h"

/* Select an alternative, minimalistic output format instead of the original one */
#define TRACE_KMEM_OPT_MINIMAL  0x1

static struct tracer_opt kmem_opts[] = {
        /* The minimalistic output is disabled by default */
        { TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
        { }
};

static struct tracer_flags kmem_tracer_flags = {
        .val = 0,
        .opts = kmem_opts
};
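
/*
 * Following the usual ftrace convention, the kmem_minimalistic option
 * is toggled at run time through the trace_options debugfs file
 * (paths assume the standard debugfs mount point):
 *
 *      echo kmem_minimalistic   > /sys/kernel/debug/tracing/trace_options
 *      echo nokmem_minimalistic > /sys/kernel/debug/tracing/trace_options
 */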

static bool kmem_tracing_enabled __read_mostly;
static struct trace_array *kmemtrace_array;

static int kmem_trace_init(struct trace_array *tr)
{
        int cpu;

        kmemtrace_array = tr;

        for_each_possible_cpu(cpu)
                tracing_reset(tr, cpu);

        kmem_tracing_enabled = true;

        return 0;
}

static void kmem_trace_reset(struct trace_array *tr)
{
        kmem_tracing_enabled = false;
}

static void kmemtrace_headers(struct seq_file *s)
{
        /* Don't need headers for the original kmemtrace output */
        if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
                return;

        seq_printf(s, "#\n");
        seq_printf(s, "# ALLOC  TYPE  REQ   GIVEN  FLAGS     "
                        "      POINTER         NODE    CALLER\n");
        seq_printf(s, "# FREE   |      |     |       |       "
                        "       |   |            |        |\n");
        seq_printf(s, "# |\n\n");
}

/*
 * The following two functions reproduce the original kmemtrace output,
 * or something close to it; a few fields may still be missing.
 */
static enum print_line_t
kmemtrace_print_alloc_original(struct trace_iterator *iter,
                                struct kmemtrace_alloc_entry *entry)
{
        struct trace_seq *s = &iter->seq;
        int ret;

        /* Taken from the old linux/kmemtrace.h */
        ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu "
          "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n",
           entry->type_id, entry->call_site, (unsigned long) entry->ptr,
           (unsigned long) entry->bytes_req, (unsigned long) entry->bytes_alloc,
           (unsigned long) entry->gfp_flags, entry->node);

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_original(struct trace_iterator *iter,
                                struct kmemtrace_free_entry *entry)
{
        struct trace_seq *s = &iter->seq;
        int ret;

        /* Taken from the old linux/kmemtrace.h */
        ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu\n",
           entry->type_id, entry->call_site, (unsigned long) entry->ptr);

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}
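
/*
 * In the original format each event is emitted as a single line, e.g.
 * (illustrative values only, wrapped here for readability):
 *
 *      type_id 0 call_site 3222781234 ptr 18446612134 bytes_req 32
 *              bytes_alloc 32 gfp_flags 208 node -1
 */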

/* The following two functions provide a more minimalistic output */
static enum print_line_t
kmemtrace_print_alloc_compress(struct trace_iterator *iter,
                                        struct kmemtrace_alloc_entry *entry)
{
        struct trace_seq *s = &iter->seq;
        int ret;

        /* Alloc entry */
        ret = trace_seq_printf(s, "  +      ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Type */
        switch (entry->type_id) {
        case KMEMTRACE_TYPE_KMALLOC:
                ret = trace_seq_printf(s, "K   ");
                break;
        case KMEMTRACE_TYPE_CACHE:
                ret = trace_seq_printf(s, "C   ");
                break;
        case KMEMTRACE_TYPE_PAGES:
                ret = trace_seq_printf(s, "P   ");
                break;
        default:
                ret = trace_seq_printf(s, "?   ");
        }

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Requested */
        ret = trace_seq_printf(s, "%4zu   ", entry->bytes_req);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Allocated */
        ret = trace_seq_printf(s, "%4zu   ", entry->bytes_alloc);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * Flags
         * TODO: it would be better to print the names of the GFP flags
         * (see the sketch after this function)
         */
        ret = trace_seq_printf(s, "%08x   ", entry->gfp_flags);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Pointer to allocated */
        ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Node */
        ret = trace_seq_printf(s, "%4d   ", entry->node);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Call site */
        ret = seq_print_ip_sym(s, entry->call_site, 0);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        if (!trace_seq_printf(s, "\n"))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}
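
/*
 * A minimal sketch for the TODO above, not part of the original file:
 * map a few common GFP masks to symbolic names instead of printing the
 * raw hex value.  The helper name and the (deliberately incomplete)
 * table are hypothetical; a complete version would decode every
 * __GFP_* bit.
 */
static int __maybe_unused
kmemtrace_print_gfp_flags(struct trace_seq *s, gfp_t gfp_flags)
{
        if (gfp_flags == GFP_KERNEL)
                return trace_seq_printf(s, "GFP_KERNEL   ");
        if (gfp_flags == GFP_ATOMIC)
                return trace_seq_printf(s, "GFP_ATOMIC   ");
        if (gfp_flags == GFP_NOFS)
                return trace_seq_printf(s, "GFP_NOFS     ");
        if (gfp_flags == GFP_NOIO)
                return trace_seq_printf(s, "GFP_NOIO     ");

        /* Fall back to the raw mask for anything unrecognized */
        return trace_seq_printf(s, "%08x   ", gfp_flags);
}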

static enum print_line_t
kmemtrace_print_free_compress(struct trace_iterator *iter,
                                struct kmemtrace_free_entry *entry)
{
        struct trace_seq *s = &iter->seq;
        int ret;

        /* Free entry */
        ret = trace_seq_printf(s, "  -      ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Type */
        switch (entry->type_id) {
        case KMEMTRACE_TYPE_KMALLOC:
                ret = trace_seq_printf(s, "K     ");
                break;
        case KMEMTRACE_TYPE_CACHE:
                ret = trace_seq_printf(s, "C     ");
                break;
        case KMEMTRACE_TYPE_PAGES:
                ret = trace_seq_printf(s, "P     ");
                break;
        default:
                ret = trace_seq_printf(s, "?     ");
        }

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Skip requested/allocated/flags */
        ret = trace_seq_printf(s, "                       ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Pointer to allocated */
        ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Skip node */
        ret = trace_seq_printf(s, "       ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Call site */
        ret = seq_print_ip_sym(s, entry->call_site, 0);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        if (!trace_seq_printf(s, "\n"))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}
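
/*
 * Note: the fixed-width columns emitted by the two *_compress printers
 * above are meant to line up with the legend printed by
 * kmemtrace_headers(); a change to one should be mirrored in the other.
 */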

static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;

        switch (entry->type) {
        case TRACE_KMEM_ALLOC: {
                struct kmemtrace_alloc_entry *field;

                trace_assign_type(field, entry);
                if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
                        return kmemtrace_print_alloc_compress(iter, field);
                else
                        return kmemtrace_print_alloc_original(iter, field);
        }

        case TRACE_KMEM_FREE: {
                struct kmemtrace_free_entry *field;

                trace_assign_type(field, entry);
                if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
                        return kmemtrace_print_free_compress(iter, field);
                else
                        return kmemtrace_print_free_original(iter, field);
        }

        default:
                return TRACE_TYPE_UNHANDLED;
        }
}

/* Trace allocations */
void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
                             unsigned long call_site,
                             const void *ptr,
                             size_t bytes_req,
                             size_t bytes_alloc,
                             gfp_t gfp_flags,
                             int node)
{
        struct ring_buffer_event *event;
        struct kmemtrace_alloc_entry *entry;
        struct trace_array *tr = kmemtrace_array;

        if (!kmem_tracing_enabled)
                return;

        event = trace_buffer_lock_reserve(tr, TRACE_KMEM_ALLOC,
                                          sizeof(*entry), 0, 0);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);

        entry->type_id = type_id;
        entry->call_site = call_site;
        entry->ptr = ptr;
        entry->bytes_req = bytes_req;
        entry->bytes_alloc = bytes_alloc;
        entry->gfp_flags = gfp_flags;
        entry->node = node;

        trace_buffer_unlock_commit(tr, event, 0, 0);
}
EXPORT_SYMBOL(kmemtrace_mark_alloc_node);
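
/*
 * For callers without NUMA placement information, a sketch of the
 * forwarding wrapper (the real one lives in trace/kmemtrace.h; the
 * exact form shown here is an approximation):
 *
 *      static inline void kmemtrace_mark_alloc(enum kmemtrace_type_id type_id,
 *                                              unsigned long call_site,
 *                                              const void *ptr,
 *                                              size_t bytes_req,
 *                                              size_t bytes_alloc,
 *                                              gfp_t gfp_flags)
 *      {
 *              kmemtrace_mark_alloc_node(type_id, call_site, ptr,
 *                                        bytes_req, bytes_alloc,
 *                                        gfp_flags, -1);
 *      }
 */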

void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
                       unsigned long call_site,
                       const void *ptr)
{
        struct ring_buffer_event *event;
        struct kmemtrace_free_entry *entry;
        struct trace_array *tr = kmemtrace_array;

        if (!kmem_tracing_enabled)
                return;

        event = trace_buffer_lock_reserve(tr, TRACE_KMEM_FREE,
                                          sizeof(*entry), 0, 0);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);

        entry->type_id = type_id;
        entry->call_site = call_site;
        entry->ptr = ptr;

        trace_buffer_unlock_commit(tr, event, 0, 0);
}
EXPORT_SYMBOL(kmemtrace_mark_free);

static struct tracer kmem_tracer __read_mostly = {
        .name           = "kmemtrace",
        .init           = kmem_trace_init,
        .reset          = kmem_trace_reset,
        .print_line     = kmemtrace_print_line,
        .print_header   = kmemtrace_headers,
        .flags          = &kmem_tracer_flags
};

void kmemtrace_init(void)
{
        /* earliest opportunity to start kmem tracing */
}

static int __init init_kmem_tracer(void)
{
        return register_tracer(&kmem_tracer);
}

device_initcall(init_kmem_tracer);
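
/*
 * Usage sketch: once registered, the tracer is selected through the
 * standard ftrace debugfs interface (paths assume the usual debugfs
 * mount point; older setups may use /debug/tracing instead):
 *
 *      # echo kmemtrace > /sys/kernel/debug/tracing/current_tracer
 *      # cat /sys/kernel/debug/tracing/trace
 */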