/*
 * Memory allocator tracing
 *
 * Copyright (C) 2008 Eduard - Gabriel Munteanu
 * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 */

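/*
 * Usage sketch (an illustration, assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	# echo kmemtrace > /sys/kernel/debug/tracing/current_tracer
 *	# cat /sys/kernel/debug/tracing/trace
 *
 * The default output is the binary format consumed by the userspace
 * kmemtrace tools; see the kmem_minimalistic option below for a
 * human-readable alternative.
 */
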
#include <linux/tracepoint.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/dcache.h>
#include <linux/fs.h>

#include <trace/kmemtrace.h>

#include "trace_output.h"
#include "trace.h"

/* Select an alternative, minimalistic output format instead of the original one */
#define TRACE_KMEM_OPT_MINIMAL	0x1

static struct tracer_opt kmem_opts[] = {
	/* The minimalistic output is disabled by default */
	{ TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
	{ }
};

static struct tracer_flags kmem_tracer_flags = {
	.val			= 0,
	.opts			= kmem_opts
};

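/*
 * The option above shows up as "kmem_minimalistic" under trace_options
 * while this tracer is current. A sketch of toggling it at run time
 * (assuming the mount point used in the usage note above):
 *
 *	# echo kmem_minimalistic > /sys/kernel/debug/tracing/trace_options
 *	# echo nokmem_minimalistic > /sys/kernel/debug/tracing/trace_options
 */
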
static struct trace_array *kmemtrace_array;

/* Trace allocations */
static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
				   unsigned long call_site,
				   const void *ptr,
				   size_t bytes_req,
				   size_t bytes_alloc,
				   gfp_t gfp_flags,
				   int node)
{
	struct trace_array *tr = kmemtrace_array;
	struct kmemtrace_alloc_entry *entry;
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);

	entry->ent.type		= TRACE_KMEM_ALLOC;
	entry->type_id		= type_id;
	entry->call_site	= call_site;
	entry->ptr		= ptr;
	entry->bytes_req	= bytes_req;
	entry->bytes_alloc	= bytes_alloc;
	entry->gfp_flags	= gfp_flags;
	entry->node		= node;

	ring_buffer_unlock_commit(tr->buffer, event);

	trace_wake_up();
}

static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
				  unsigned long call_site,
				  const void *ptr)
{
	struct trace_array *tr = kmemtrace_array;
	struct kmemtrace_free_entry *entry;
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, 0, 0);

	entry->ent.type		= TRACE_KMEM_FREE;
	entry->type_id		= type_id;
	entry->call_site	= call_site;
	entry->ptr		= ptr;

	ring_buffer_unlock_commit(tr->buffer, event);

	trace_wake_up();
}

static void kmemtrace_kmalloc(unsigned long call_site,
			      const void *ptr,
			      size_t bytes_req,
			      size_t bytes_alloc,
			      gfp_t gfp_flags)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmem_cache_alloc(unsigned long call_site,
				       const void *ptr,
				       size_t bytes_req,
				       size_t bytes_alloc,
				       gfp_t gfp_flags)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmalloc_node(unsigned long call_site,
				   const void *ptr,
				   size_t bytes_req,
				   size_t bytes_alloc,
				   gfp_t gfp_flags,
				   int node)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, node);
}

static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site,
					    const void *ptr,
					    size_t bytes_req,
					    size_t bytes_alloc,
					    gfp_t gfp_flags,
					    int node)
{
	kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
			bytes_req, bytes_alloc, gfp_flags, node);
}

static void kmemtrace_kfree(unsigned long call_site, const void *ptr)
{
	kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
}

static void kmemtrace_kmem_cache_free(unsigned long call_site, const void *ptr)
{
	kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
}

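/*
 * The probes below attach to the allocator tracepoints declared in
 * <trace/kmemtrace.h>. For illustration, a slab allocator fires the
 * kmalloc tracepoint roughly like this (a sketch, not the exact call
 * sites in mm/):
 *
 *	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
 *
 * with arguments matching the probe signatures above:
 * (call_site, ptr, bytes_req, bytes_alloc, gfp_flags).
 */
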
static int kmemtrace_start_probes(void)
{
	int err;

	err = register_trace_kmalloc(kmemtrace_kmalloc);
	if (err)
		return err;
	err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
	if (err)
		return err;
	err = register_trace_kmalloc_node(kmemtrace_kmalloc_node);
	if (err)
		return err;
	err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
	if (err)
		return err;
	err = register_trace_kfree(kmemtrace_kfree);
	if (err)
		return err;
	err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free);

	return err;
}

static void kmemtrace_stop_probes(void)
{
	unregister_trace_kmalloc(kmemtrace_kmalloc);
	unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
	unregister_trace_kmalloc_node(kmemtrace_kmalloc_node);
	unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
	unregister_trace_kfree(kmemtrace_kfree);
	unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
}

static int kmem_trace_init(struct trace_array *tr)
{
	int cpu;

	kmemtrace_array = tr;

	for_each_cpu_mask(cpu, cpu_possible_map)
		tracing_reset(tr, cpu);

	kmemtrace_start_probes();

	return 0;
}

static void kmem_trace_reset(struct trace_array *tr)
{
	kmemtrace_stop_probes();
}

static void kmemtrace_headers(struct seq_file *s)
{
	/* Don't need headers for the original kmemtrace output */
	if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
		return;

	seq_printf(s, "#\n");
	seq_printf(s, "# ALLOC  TYPE  REQ   GIVEN  FLAGS     "
			"      POINTER         NODE    CALLER\n");
	seq_printf(s, "# FREE   |      |     |       |       "
			"       |   |            |        |\n");
	seq_printf(s, "# |\n\n");
}

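/*
 * With kmem_minimalistic set, each event then renders as one line under
 * the columns above, e.g. (illustrative values and symbols only):
 *
 *   +      K     32     64   000000d0   0xffff880037a4be40     -1   __alloc_skb+0x4e/0x170
 *   -      K                            0xffff880037a4be40          kfree+0x21/0xd0
 */
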
/*
 * The following functions give the original output from kmemtrace,
 * plus the origin CPU, since reordering occurs in-kernel now.
 */

#define KMEMTRACE_USER_ALLOC	0
#define KMEMTRACE_USER_FREE	1

struct kmemtrace_user_event {
	u8			event_id;
	u8			type_id;
	u16			event_size;
	u32			cpu;
	u64			timestamp;
	unsigned long		call_site;
	unsigned long		ptr;
};

struct kmemtrace_user_event_alloc {
	size_t			bytes_req;
	size_t			bytes_alloc;
	unsigned		gfp_flags;
	int			node;
};

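/*
 * A minimal sketch of a userspace reader for this stream (hypothetical,
 * not part of the kernel; it assumes the two structs above are mirrored
 * with identical layout and that the trace data comes from the same
 * machine, so no endianness handling is needed):
 *
 *	struct kmemtrace_user_event ev;
 *	struct kmemtrace_user_event_alloc ev_alloc;
 *
 *	while (fread(&ev, sizeof(ev), 1, fp) == 1) {
 *		if (ev.event_id == KMEMTRACE_USER_ALLOC &&
 *		    fread(&ev_alloc, sizeof(ev_alloc), 1, fp) != 1)
 *			break;
 *		handle_event(&ev, &ev_alloc);
 *	}
 *
 * ev.event_size carries the total record size, which lets a reader skip
 * records whose event_id it does not recognize.
 */
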
static enum print_line_t
kmemtrace_print_alloc_user(struct trace_iterator *iter,
			   struct kmemtrace_alloc_entry *entry)
{
	struct kmemtrace_user_event_alloc *ev_alloc;
	struct trace_seq *s = &iter->seq;
	struct kmemtrace_user_event *ev;

	ev = trace_seq_reserve(s, sizeof(*ev));
	if (!ev)
		return TRACE_TYPE_PARTIAL_LINE;

	ev->event_id		= KMEMTRACE_USER_ALLOC;
	ev->type_id		= entry->type_id;
	ev->event_size		= sizeof(*ev) + sizeof(*ev_alloc);
	ev->cpu			= iter->cpu;
	ev->timestamp		= iter->ts;
	ev->call_site		= entry->call_site;
	ev->ptr			= (unsigned long)entry->ptr;

	ev_alloc = trace_seq_reserve(s, sizeof(*ev_alloc));
	if (!ev_alloc)
		return TRACE_TYPE_PARTIAL_LINE;

	ev_alloc->bytes_req	= entry->bytes_req;
	ev_alloc->bytes_alloc	= entry->bytes_alloc;
	ev_alloc->gfp_flags	= entry->gfp_flags;
	ev_alloc->node		= entry->node;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_user(struct trace_iterator *iter,
			  struct kmemtrace_free_entry *entry)
{
	struct trace_seq *s = &iter->seq;
	struct kmemtrace_user_event *ev;

	ev = trace_seq_reserve(s, sizeof(*ev));
	if (!ev)
		return TRACE_TYPE_PARTIAL_LINE;

	ev->event_id		= KMEMTRACE_USER_FREE;
	ev->type_id		= entry->type_id;
	ev->event_size		= sizeof(*ev);
	ev->cpu			= iter->cpu;
	ev->timestamp		= iter->ts;
	ev->call_site		= entry->call_site;
	ev->ptr			= (unsigned long)entry->ptr;

	return TRACE_TYPE_HANDLED;
}

/* The two following functions provide a more minimalistic output */
static enum print_line_t
kmemtrace_print_alloc_compress(struct trace_iterator *iter,
			       struct kmemtrace_alloc_entry *entry)
{
	struct trace_seq *s = &iter->seq;
	int ret;

	/* Alloc entry */
	ret = trace_seq_printf(s, "  +      ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Type */
	switch (entry->type_id) {
	case KMEMTRACE_TYPE_KMALLOC:
		ret = trace_seq_printf(s, "K   ");
		break;
	case KMEMTRACE_TYPE_CACHE:
		ret = trace_seq_printf(s, "C   ");
		break;
	case KMEMTRACE_TYPE_PAGES:
		ret = trace_seq_printf(s, "P   ");
		break;
	default:
		ret = trace_seq_printf(s, "?   ");
	}

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Requested */
	ret = trace_seq_printf(s, "%4zu   ", entry->bytes_req);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Allocated */
	ret = trace_seq_printf(s, "%4zu   ", entry->bytes_alloc);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * Flags
	 * TODO: it would be better to print the GFP flag names
	 */
	ret = trace_seq_printf(s, "%08x   ", entry->gfp_flags);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Pointer to allocated */
	ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Node */
	ret = trace_seq_printf(s, "%4d   ", entry->node);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Call site */
	ret = seq_print_ip_sym(s, entry->call_site, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (!trace_seq_printf(s, "\n"))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_compress(struct trace_iterator *iter,
			      struct kmemtrace_free_entry *entry)
{
	struct trace_seq *s = &iter->seq;
	int ret;

	/* Free entry */
	ret = trace_seq_printf(s, "  -      ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Type */
	switch (entry->type_id) {
	case KMEMTRACE_TYPE_KMALLOC:
		ret = trace_seq_printf(s, "K     ");
		break;
	case KMEMTRACE_TYPE_CACHE:
		ret = trace_seq_printf(s, "C     ");
		break;
	case KMEMTRACE_TYPE_PAGES:
		ret = trace_seq_printf(s, "P     ");
		break;
	default:
		ret = trace_seq_printf(s, "?     ");
	}

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Skip requested/allocated/flags */
	ret = trace_seq_printf(s, "                       ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Pointer to allocated */
	ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Skip node */
	ret = trace_seq_printf(s, "       ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Call site */
	ret = seq_print_ip_sym(s, entry->call_site, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (!trace_seq_printf(s, "\n"))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;

	switch (entry->type) {
	case TRACE_KMEM_ALLOC: {
		struct kmemtrace_alloc_entry *field;

		trace_assign_type(field, entry);
		if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
			return kmemtrace_print_alloc_compress(iter, field);
		else
			return kmemtrace_print_alloc_user(iter, field);
	}

	case TRACE_KMEM_FREE: {
		struct kmemtrace_free_entry *field;

		trace_assign_type(field, entry);
		if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
			return kmemtrace_print_free_compress(iter, field);
		else
			return kmemtrace_print_free_user(iter, field);
	}

	default:
		return TRACE_TYPE_UNHANDLED;
	}
}

static struct tracer kmem_tracer __read_mostly = {
	.name			= "kmemtrace",
	.init			= kmem_trace_init,
	.reset			= kmem_trace_reset,
	.print_line		= kmemtrace_print_line,
	.print_header		= kmemtrace_headers,
	.flags			= &kmem_tracer_flags
};

void kmemtrace_init(void)
{
	/* earliest opportunity to start kmem tracing */
}

static int __init init_kmem_tracer(void)
{
	return register_tracer(&kmem_tracer);
}
device_initcall(init_kmem_tracer);